summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--deps/v8/.gitignore2
-rw-r--r--deps/v8/AUTHORS5
-rw-r--r--deps/v8/BUILD.gn214
-rw-r--r--deps/v8/ChangeLog2238
-rw-r--r--deps/v8/DEPS16
-rw-r--r--deps/v8/Makefile9
-rw-r--r--deps/v8/PRESUBMIT.py28
-rw-r--r--deps/v8/README.md4
-rw-r--r--deps/v8/WATCHLISTS14
-rw-r--r--deps/v8/build/all.gyp2
-rw-r--r--deps/v8/build/features.gypi9
-rwxr-xr-xdeps/v8/build/get_landmines.py1
-rw-r--r--deps/v8/build/standalone.gypi56
-rw-r--r--deps/v8/build/toolchain.gypi125
-rw-r--r--deps/v8/docs/README.md2
-rw-r--r--deps/v8/docs/arm_debugging_with_the_simulator.md205
-rw-r--r--deps/v8/docs/becoming_v8_committer.md40
-rw-r--r--deps/v8/docs/building_with_gyp.md260
-rw-r--r--deps/v8/docs/contributing.md32
-rw-r--r--deps/v8/docs/cross_compiling_for_arm.md151
-rw-r--r--deps/v8/docs/d8_on_android.md101
-rw-r--r--deps/v8/docs/debugger_protocol.md934
-rw-r--r--deps/v8/docs/gdb_jit_interface.md63
-rw-r--r--deps/v8/docs/handling_of_ports.md24
-rw-r--r--deps/v8/docs/i18n_support.md44
-rw-r--r--deps/v8/docs/javascript.md6
-rw-r--r--deps/v8/docs/javascript_stack_trace_api.md161
-rw-r--r--deps/v8/docs/merging_and_patching.md67
-rw-r--r--deps/v8/docs/profiling_chromium_with_v8.md34
-rw-r--r--deps/v8/docs/release_process.md57
-rw-r--r--deps/v8/docs/runtime_functions.md7
-rw-r--r--deps/v8/docs/source.md41
-rw-r--r--deps/v8/docs/testing.md58
-rw-r--r--deps/v8/docs/triaging_issues.md22
-rw-r--r--deps/v8/docs/using_git.md147
-rw-r--r--deps/v8/docs/v8_c_plus_plus_styleand_sops.md3
-rw-r--r--deps/v8/docs/v8_committers_responsibility.md39
-rw-r--r--deps/v8/docs/v8_profiler.md141
-rw-r--r--deps/v8/include/v8-debug.h28
-rw-r--r--deps/v8/include/v8-experimental.h53
-rw-r--r--deps/v8/include/v8-platform.h47
-rw-r--r--deps/v8/include/v8-testing.h2
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h417
-rw-r--r--deps/v8/include/v8config.h3
-rw-r--r--deps/v8/infra/config/cq.cfg37
-rw-r--r--deps/v8/samples/samples.gyp4
-rw-r--r--deps/v8/snapshot_toolchain.gni4
-rw-r--r--deps/v8/src/DEPS6
-rw-r--r--deps/v8/src/OWNERS2
-rw-r--r--deps/v8/src/accessors.cc11
-rw-r--r--deps/v8/src/allocation-site-scopes.h2
-rw-r--r--deps/v8/src/api-experimental.cc126
-rw-r--r--deps/v8/src/api-experimental.h28
-rw-r--r--deps/v8/src/api-natives.cc14
-rw-r--r--deps/v8/src/api.cc774
-rw-r--r--deps/v8/src/api.h14
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h23
-rw-r--r--deps/v8/src/arm/assembler-arm.cc96
-rw-r--r--deps/v8/src/arm/assembler-arm.h26
-rw-r--r--deps/v8/src/arm/builtins-arm.cc1724
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc379
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h5
-rw-r--r--deps/v8/src/arm/codegen-arm.cc65
-rw-r--r--deps/v8/src/arm/codegen-arm.h2
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc7
-rw-r--r--deps/v8/src/arm/disasm-arm.cc8
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc59
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc510
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h131
-rw-r--r--deps/v8/src/arm/simulator-arm.cc124
-rw-r--r--deps/v8/src/arm/simulator-arm.h44
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h24
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc34
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h30
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc1817
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc435
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.h1
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc32
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h2
-rw-r--r--deps/v8/src/arm64/constants-arm64.h8
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc3
-rw-r--r--deps/v8/src/arm64/instructions-arm64.cc15
-rw-r--r--deps/v8/src/arm64/instructions-arm64.h7
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc67
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h34
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc605
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h167
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc25
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h46
-rw-r--r--deps/v8/src/assembler.cc68
-rw-r--r--deps/v8/src/assembler.h74
-rw-r--r--deps/v8/src/ast/OWNERS7
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc409
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h54
-rw-r--r--deps/v8/src/ast/ast-expression-visitor.cc (renamed from deps/v8/src/ast-expression-visitor.cc)25
-rw-r--r--deps/v8/src/ast/ast-expression-visitor.h (renamed from deps/v8/src/ast-expression-visitor.h)10
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.cc (renamed from deps/v8/src/ast-literal-reindexer.cc)12
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.h (renamed from deps/v8/src/ast-literal-reindexer.h)11
-rw-r--r--deps/v8/src/ast/ast-numbering.cc (renamed from deps/v8/src/ast-numbering.cc)27
-rw-r--r--deps/v8/src/ast/ast-numbering.h (renamed from deps/v8/src/ast-numbering.h)6
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc (renamed from deps/v8/src/ast-value-factory.cc)2
-rw-r--r--deps/v8/src/ast/ast-value-factory.h (renamed from deps/v8/src/ast-value-factory.h)21
-rw-r--r--deps/v8/src/ast/ast.cc (renamed from deps/v8/src/ast.cc)369
-rw-r--r--deps/v8/src/ast/ast.h (renamed from deps/v8/src/ast.h)762
-rw-r--r--deps/v8/src/ast/modules.cc (renamed from deps/v8/src/modules.cc)4
-rw-r--r--deps/v8/src/ast/modules.h (renamed from deps/v8/src/modules.h)6
-rw-r--r--deps/v8/src/ast/prettyprinter.cc (renamed from deps/v8/src/prettyprinter.cc)104
-rw-r--r--deps/v8/src/ast/prettyprinter.h (renamed from deps/v8/src/prettyprinter.h)13
-rw-r--r--deps/v8/src/ast/scopeinfo.cc (renamed from deps/v8/src/scopeinfo.cc)10
-rw-r--r--deps/v8/src/ast/scopeinfo.h (renamed from deps/v8/src/scopeinfo.h)10
-rw-r--r--deps/v8/src/ast/scopes.cc (renamed from deps/v8/src/scopes.cc)110
-rw-r--r--deps/v8/src/ast/scopes.h (renamed from deps/v8/src/scopes.h)37
-rw-r--r--deps/v8/src/ast/variables.cc (renamed from deps/v8/src/variables.cc)6
-rw-r--r--deps/v8/src/ast/variables.h (renamed from deps/v8/src/variables.h)12
-rw-r--r--deps/v8/src/atomic-utils.h7
-rw-r--r--deps/v8/src/background-parsing-task.h2
-rw-r--r--deps/v8/src/bailout-reason.h5
-rw-r--r--deps/v8/src/base.isolate18
-rw-r--r--deps/v8/src/base/bits.h20
-rw-r--r--deps/v8/src/base/build_config.h28
-rw-r--r--deps/v8/src/base/flags.h23
-rw-r--r--deps/v8/src/base/macros.h90
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc34
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h40
-rw-r--r--deps/v8/src/bit-vector.cc1
-rw-r--r--deps/v8/src/bootstrapper.cc1064
-rw-r--r--deps/v8/src/bootstrapper.h4
-rw-r--r--deps/v8/src/builtins.cc2042
-rw-r--r--deps/v8/src/builtins.h401
-rw-r--r--deps/v8/src/cancelable-task.cc105
-rw-r--r--deps/v8/src/cancelable-task.h110
-rw-r--r--deps/v8/src/code-factory.cc75
-rw-r--r--deps/v8/src/code-factory.h12
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc151
-rw-r--r--deps/v8/src/code-stubs.cc95
-rw-r--r--deps/v8/src/code-stubs.h220
-rw-r--r--deps/v8/src/codegen.cc62
-rw-r--r--deps/v8/src/codegen.h19
-rw-r--r--deps/v8/src/compilation-dependencies.cc15
-rw-r--r--deps/v8/src/compilation-dependencies.h3
-rw-r--r--deps/v8/src/compiler.cc219
-rw-r--r--deps/v8/src/compiler.h33
-rw-r--r--deps/v8/src/compiler/access-builder.cc226
-rw-r--r--deps/v8/src/compiler/access-builder.h42
-rw-r--r--deps/v8/src/compiler/access-info.cc117
-rw-r--r--deps/v8/src/compiler/access-info.h28
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc90
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h5
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc129
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc157
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc220
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h15
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc224
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc291
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc367
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h33
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc12
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.h6
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc9
-rw-r--r--deps/v8/src/compiler/binary-operator-reducer.cc128
-rw-r--r--deps/v8/src/compiler/binary-operator-reducer.h52
-rw-r--r--deps/v8/src/compiler/bytecode-branch-analysis.cc125
-rw-r--r--deps/v8/src/compiler/bytecode-branch-analysis.h79
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc1401
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h194
-rw-r--r--deps/v8/src/compiler/c-linkage.cc2
-rw-r--r--deps/v8/src/compiler/change-lowering.cc208
-rw-r--r--deps/v8/src/compiler/change-lowering.h8
-rw-r--r--deps/v8/src/compiler/coalesced-live-ranges.cc2
-rw-r--r--deps/v8/src/compiler/coalesced-live-ranges.h2
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h3
-rw-r--r--deps/v8/src/compiler/code-generator.cc199
-rw-r--r--deps/v8/src/compiler/code-generator.h50
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.cc176
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.h96
-rw-r--r--deps/v8/src/compiler/common-node-cache.cc6
-rw-r--r--deps/v8/src/compiler/common-node-cache.h8
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc2
-rw-r--r--deps/v8/src/compiler/common-operator.cc170
-rw-r--r--deps/v8/src/compiler/common-operator.h30
-rw-r--r--deps/v8/src/compiler/control-builders.h30
-rw-r--r--deps/v8/src/compiler/control-equivalence.cc4
-rw-r--r--deps/v8/src/compiler/diamond.h4
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc313
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h63
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc1471
-rw-r--r--deps/v8/src/compiler/escape-analysis.h169
-rw-r--r--deps/v8/src/compiler/fast-accessor-assembler.cc220
-rw-r--r--deps/v8/src/compiler/fast-accessor-assembler.h106
-rw-r--r--deps/v8/src/compiler/frame-elider.cc3
-rw-r--r--deps/v8/src/compiler/frame-states.cc9
-rw-r--r--deps/v8/src/compiler/frame-states.h11
-rw-r--r--deps/v8/src/compiler/frame.cc40
-rw-r--r--deps/v8/src/compiler/frame.h190
-rw-r--r--deps/v8/src/compiler/graph-reducer.h5
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc208
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h13
-rw-r--r--deps/v8/src/compiler/graph.h3
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc153
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h3
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc135
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc218
-rw-r--r--deps/v8/src/compiler/instruction-codes.h69
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc280
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.h162
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h65
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc436
-rw-r--r--deps/v8/src/compiler/instruction-selector.h75
-rw-r--r--deps/v8/src/compiler/instruction.cc173
-rw-r--r--deps/v8/src/compiler/instruction.h223
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.cc286
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.h38
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc45
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h2
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc557
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h67
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc23
-rw-r--r--deps/v8/src/compiler/js-frame-specialization.cc39
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc246
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc54
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.h17
-rw-r--r--deps/v8/src/compiler/js-graph.cc50
-rw-r--r--deps/v8/src/compiler/js-graph.h1
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc40
-rw-r--r--deps/v8/src/compiler/js-inlining.cc243
-rw-r--r--deps/v8/src/compiler/js-inlining.h15
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc338
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h22
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc351
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h24
-rw-r--r--deps/v8/src/compiler/js-operator.cc414
-rw-r--r--deps/v8/src/compiler/js-operator.h162
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc1377
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h20
-rw-r--r--deps/v8/src/compiler/linkage.cc235
-rw-r--r--deps/v8/src/compiler/linkage.h75
-rw-r--r--deps/v8/src/compiler/live-range-separator.cc52
-rw-r--r--deps/v8/src/compiler/live-range-separator.h5
-rw-r--r--deps/v8/src/compiler/load-elimination.cc15
-rw-r--r--deps/v8/src/compiler/loop-analysis.h2
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc5
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc45
-rw-r--r--deps/v8/src/compiler/machine-operator.cc138
-rw-r--r--deps/v8/src/compiler/machine-operator.h76
-rw-r--r--deps/v8/src/compiler/machine-type.cc46
-rw-r--r--deps/v8/src/compiler/machine-type.h130
-rw-r--r--deps/v8/src/compiler/mips/OWNERS1
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc332
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h16
-rw-r--r--deps/v8/src/compiler/mips/instruction-scheduler-mips.cc26
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc337
-rw-r--r--deps/v8/src/compiler/mips64/OWNERS1
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc426
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h25
-rw-r--r--deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc26
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc617
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc149
-rw-r--r--deps/v8/src/compiler/move-optimizer.h11
-rw-r--r--deps/v8/src/compiler/node-cache.h3
-rw-r--r--deps/v8/src/compiler/node-matchers.h26
-rw-r--r--deps/v8/src/compiler/node-properties.cc105
-rw-r--r--deps/v8/src/compiler/node-properties.h27
-rw-r--r--deps/v8/src/compiler/node.cc16
-rw-r--r--deps/v8/src/compiler/node.h1
-rw-r--r--deps/v8/src/compiler/opcodes.h57
-rw-r--r--deps/v8/src/compiler/operator-properties.cc10
-rw-r--r--deps/v8/src/compiler/operator.h61
-rw-r--r--deps/v8/src/compiler/osr.cc2
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc6
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h4
-rw-r--r--deps/v8/src/compiler/pipeline.cc204
-rw-r--r--deps/v8/src/compiler/pipeline.h12
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc323
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h4
-rw-r--r--deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc143
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc264
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc170
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h170
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc27
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h4
-rw-r--r--deps/v8/src/compiler/register-allocator.cc986
-rw-r--r--deps/v8/src/compiler/register-allocator.h107
-rw-r--r--deps/v8/src/compiler/representation-change.cc537
-rw-r--r--deps/v8/src/compiler/representation-change.h499
-rw-r--r--deps/v8/src/compiler/schedule.cc14
-rw-r--r--deps/v8/src/compiler/schedule.h6
-rw-r--r--deps/v8/src/compiler/scheduler.cc96
-rw-r--r--deps/v8/src/compiler/select-lowering.cc2
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc1429
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h20
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc19
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h2
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc19
-rw-r--r--deps/v8/src/compiler/simplified-operator.h3
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc2
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.cc98
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.h51
-rw-r--r--deps/v8/src/compiler/type-hints.cc83
-rw-r--r--deps/v8/src/compiler/type-hints.h84
-rw-r--r--deps/v8/src/compiler/typer.cc172
-rw-r--r--deps/v8/src/compiler/typer.h1
-rw-r--r--deps/v8/src/compiler/verifier.cc48
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc2031
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h190
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc282
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc263
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h7
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc182
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc306
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc323
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h1
-rw-r--r--deps/v8/src/compiler/x87/instruction-scheduler-x87.cc26
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc255
-rw-r--r--deps/v8/src/compiler/zone-pool.h9
-rw-r--r--deps/v8/src/contexts-inl.h26
-rw-r--r--deps/v8/src/contexts.cc82
-rw-r--r--deps/v8/src/contexts.h106
-rw-r--r--deps/v8/src/conversions-inl.h3
-rw-r--r--deps/v8/src/conversions.cc5
-rw-r--r--deps/v8/src/counters.h6
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc40
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h68
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc264
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h22
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc43
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h78
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc273
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h18
-rw-r--r--deps/v8/src/crankshaft/hydrogen-escape-analysis.cc3
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc30
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h146
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.h2
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc295
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h13
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc261
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h22
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc4
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc42
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h70
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc68
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.h7
-rw-r--r--deps/v8/src/crankshaft/lithium.cc54
-rw-r--r--deps/v8/src/crankshaft/lithium.h1
-rw-r--r--deps/v8/src/crankshaft/mips/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc342
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h22
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc40
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h68
-rw-r--r--deps/v8/src/crankshaft/mips64/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc280
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h22
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc40
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h68
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc270
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h22
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc40
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h64
-rw-r--r--deps/v8/src/crankshaft/typing.cc47
-rw-r--r--deps/v8/src/crankshaft/typing.h4
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc272
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h22
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc4
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc39
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h67
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc325
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h22
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc42
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h70
-rw-r--r--deps/v8/src/d8.cc99
-rw-r--r--deps/v8/src/d8.gyp14
-rw-r--r--deps/v8/src/d8.js28
-rw-r--r--deps/v8/src/date.cc16
-rw-r--r--deps/v8/src/date.h8
-rw-r--r--deps/v8/src/dateparser-inl.h16
-rw-r--r--deps/v8/src/dateparser.cc11
-rw-r--r--deps/v8/src/dateparser.h2
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc32
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc37
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc208
-rw-r--r--deps/v8/src/debug/debug-evaluate.h28
-rw-r--r--deps/v8/src/debug/debug-frames.cc7
-rw-r--r--deps/v8/src/debug/debug-frames.h1
-rw-r--r--deps/v8/src/debug/debug-scopes.cc110
-rw-r--r--deps/v8/src/debug/debug-scopes.h24
-rw-r--r--deps/v8/src/debug/debug.cc621
-rw-r--r--deps/v8/src/debug/debug.h98
-rw-r--r--deps/v8/src/debug/debug.js39
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc38
-rw-r--r--deps/v8/src/debug/liveedit.cc182
-rw-r--r--deps/v8/src/debug/liveedit.h11
-rw-r--r--deps/v8/src/debug/liveedit.js45
-rw-r--r--deps/v8/src/debug/mips/OWNERS1
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc31
-rw-r--r--deps/v8/src/debug/mips64/OWNERS1
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc31
-rw-r--r--deps/v8/src/debug/mirrors.js122
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc32
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc39
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc38
-rw-r--r--deps/v8/src/deoptimizer.cc344
-rw-r--r--deps/v8/src/deoptimizer.h17
-rw-r--r--deps/v8/src/disassembler.cc6
-rw-r--r--deps/v8/src/elements.cc88
-rw-r--r--deps/v8/src/elements.h8
-rw-r--r--deps/v8/src/execution.cc57
-rw-r--r--deps/v8/src/execution.h19
-rw-r--r--deps/v8/src/factory.cc305
-rw-r--r--deps/v8/src/factory.h47
-rw-r--r--deps/v8/src/flag-definitions.h100
-rw-r--r--deps/v8/src/frames.cc68
-rw-r--r--deps/v8/src/frames.h27
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc668
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc671
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc70
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h22
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc697
-rw-r--r--deps/v8/src/full-codegen/mips/OWNERS1
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc691
-rw-r--r--deps/v8/src/full-codegen/mips64/OWNERS1
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc676
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc679
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc699
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc699
-rw-r--r--deps/v8/src/futex-emulation.cc3
-rw-r--r--deps/v8/src/gdb-jit.cc4
-rw-r--r--deps/v8/src/global-handles.cc13
-rw-r--r--deps/v8/src/globals.h22
-rw-r--r--deps/v8/src/handles.h2
-rw-r--r--deps/v8/src/heap/gc-tracer.cc279
-rw-r--r--deps/v8/src/heap/gc-tracer.h79
-rw-r--r--deps/v8/src/heap/heap-inl.h55
-rw-r--r--deps/v8/src/heap/heap.cc506
-rw-r--r--deps/v8/src/heap/heap.h389
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc6
-rw-r--r--deps/v8/src/heap/incremental-marking.cc200
-rw-r--r--deps/v8/src/heap/incremental-marking.h8
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h49
-rw-r--r--deps/v8/src/heap/mark-compact.cc2273
-rw-r--r--deps/v8/src/heap/mark-compact.h171
-rw-r--r--deps/v8/src/heap/memory-reducer.cc2
-rw-r--r--deps/v8/src/heap/object-stats.cc6
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h306
-rw-r--r--deps/v8/src/heap/objects-visiting.cc182
-rw-r--r--deps/v8/src/heap/objects-visiting.h29
-rw-r--r--deps/v8/src/heap/scavenge-job.cc2
-rw-r--r--deps/v8/src/heap/scavenger-inl.h6
-rw-r--r--deps/v8/src/heap/scavenger.cc50
-rw-r--r--deps/v8/src/heap/scavenger.h2
-rw-r--r--deps/v8/src/heap/spaces-inl.h81
-rw-r--r--deps/v8/src/heap/spaces.cc153
-rw-r--r--deps/v8/src/heap/spaces.h184
-rw-r--r--deps/v8/src/heap/store-buffer.cc100
-rw-r--r--deps/v8/src/heap/store-buffer.h15
-rw-r--r--deps/v8/src/i18n.cc15
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h28
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc48
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h22
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc1752
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc403
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc40
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h2
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc35
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc59
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc347
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h414
-rw-r--r--deps/v8/src/ia32/simulator-ia32.h13
-rw-r--r--deps/v8/src/ic/access-compiler.cc6
-rw-r--r--deps/v8/src/ic/access-compiler.h2
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc1
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc49
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc123
-rw-r--r--deps/v8/src/ic/arm/ic-compiler-arm.cc104
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc1
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc49
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc61
-rw-r--r--deps/v8/src/ic/arm64/ic-compiler-arm64.cc108
-rw-r--r--deps/v8/src/ic/handler-compiler.cc8
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc2
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc59
-rw-r--r--deps/v8/src/ic/ia32/ic-compiler-ia32.cc99
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc104
-rw-r--r--deps/v8/src/ic/ic-compiler.cc135
-rw-r--r--deps/v8/src/ic/ic-compiler.h26
-rw-r--r--deps/v8/src/ic/ic-inl.h7
-rw-r--r--deps/v8/src/ic/ic-state.cc60
-rw-r--r--deps/v8/src/ic/ic-state.h16
-rw-r--r--deps/v8/src/ic/ic.cc474
-rw-r--r--deps/v8/src/ic/ic.h26
-rw-r--r--deps/v8/src/ic/mips/OWNERS1
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc1
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc49
-rw-r--r--deps/v8/src/ic/mips/ic-compiler-mips.cc110
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc124
-rw-r--r--deps/v8/src/ic/mips64/OWNERS1
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc1
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc50
-rw-r--r--deps/v8/src/ic/mips64/ic-compiler-mips64.cc110
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc168
-rw-r--r--deps/v8/src/ic/ppc/access-compiler-ppc.cc1
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc50
-rw-r--r--deps/v8/src/ic/ppc/ic-compiler-ppc.cc107
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc131
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc2
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc66
-rw-r--r--deps/v8/src/ic/x64/ic-compiler-x64.cc106
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc76
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc2
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc59
-rw-r--r--deps/v8/src/ic/x87/ic-compiler-x87.cc99
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc104
-rw-r--r--deps/v8/src/interface-descriptors.cc124
-rw-r--r--deps/v8/src/interface-descriptors.h63
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc675
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h160
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc41
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc705
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h30
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.cc72
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h49
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc124
-rw-r--r--deps/v8/src/interpreter/bytecodes.h112
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc174
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h97
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc47
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h37
-rw-r--r--deps/v8/src/interpreter/interpreter.cc515
-rw-r--r--deps/v8/src/interpreter/interpreter.h13
-rw-r--r--deps/v8/src/isolate-inl.h2
-rw-r--r--deps/v8/src/isolate.cc108
-rw-r--r--deps/v8/src/isolate.h34
-rw-r--r--deps/v8/src/js/OWNERS11
-rw-r--r--deps/v8/src/js/array-iterator.js36
-rw-r--r--deps/v8/src/js/array.js174
-rw-r--r--deps/v8/src/js/arraybuffer.js52
-rw-r--r--deps/v8/src/js/code-stubs.js69
-rw-r--r--deps/v8/src/js/collection.js38
-rw-r--r--deps/v8/src/js/date.js884
-rw-r--r--deps/v8/src/js/generator.js16
-rw-r--r--deps/v8/src/js/harmony-array-includes.js118
-rw-r--r--deps/v8/src/js/harmony-atomics.js2
-rw-r--r--deps/v8/src/js/harmony-reflect.js2
-rw-r--r--deps/v8/src/js/harmony-regexp.js22
-rw-r--r--deps/v8/src/js/harmony-sharedarraybuffer.js33
-rw-r--r--deps/v8/src/js/harmony-simd.js53
-rw-r--r--deps/v8/src/js/harmony-species.js60
-rw-r--r--deps/v8/src/js/harmony-unicode-regexps.js39
-rw-r--r--deps/v8/src/js/i18n.js209
-rw-r--r--deps/v8/src/js/json.js112
-rw-r--r--deps/v8/src/js/macros.py161
-rw-r--r--deps/v8/src/js/math.js47
-rw-r--r--deps/v8/src/js/messages.js261
-rw-r--r--deps/v8/src/js/object-observe.js32
-rw-r--r--deps/v8/src/js/prologue.js31
-rw-r--r--deps/v8/src/js/promise-extra.js26
-rw-r--r--deps/v8/src/js/promise.js305
-rw-r--r--deps/v8/src/js/proxy.js198
-rw-r--r--deps/v8/src/js/regexp.js137
-rw-r--r--deps/v8/src/js/runtime.js178
-rw-r--r--deps/v8/src/js/string-iterator.js2
-rw-r--r--deps/v8/src/js/string.js92
-rw-r--r--deps/v8/src/js/symbol.js6
-rw-r--r--deps/v8/src/js/templates.js4
-rw-r--r--deps/v8/src/js/typedarray.js340
-rw-r--r--deps/v8/src/js/v8natives.js803
-rw-r--r--deps/v8/src/js/weak-collection.js24
-rw-r--r--deps/v8/src/json-stringifier.h12
-rw-r--r--deps/v8/src/key-accumulator.cc54
-rw-r--r--deps/v8/src/key-accumulator.h7
-rw-r--r--deps/v8/src/libplatform/default-platform.cc25
-rw-r--r--deps/v8/src/libplatform/default-platform.h11
-rw-r--r--deps/v8/src/locked-queue-inl.h91
-rw-r--r--deps/v8/src/locked-queue.h43
-rw-r--r--deps/v8/src/log-inl.h14
-rw-r--r--deps/v8/src/lookup.cc64
-rw-r--r--deps/v8/src/lookup.h33
-rw-r--r--deps/v8/src/machine-type.cc75
-rw-r--r--deps/v8/src/machine-type.h204
-rw-r--r--deps/v8/src/messages.cc121
-rw-r--r--deps/v8/src/messages.h175
-rw-r--r--deps/v8/src/mips/OWNERS1
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h46
-rw-r--r--deps/v8/src/mips/assembler-mips.cc130
-rw-r--r--deps/v8/src/mips/assembler-mips.h43
-rw-r--r--deps/v8/src/mips/builtins-mips.cc1815
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc396
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h9
-rw-r--r--deps/v8/src/mips/codegen-mips.cc57
-rw-r--r--deps/v8/src/mips/codegen-mips.h2
-rw-r--r--deps/v8/src/mips/constants-mips.h48
-rw-r--r--deps/v8/src/mips/cpu-mips.cc11
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc7
-rw-r--r--deps/v8/src/mips/disasm-mips.cc93
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc59
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc843
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h209
-rw-r--r--deps/v8/src/mips/simulator-mips.cc245
-rw-r--r--deps/v8/src/mips/simulator-mips.h45
-rw-r--r--deps/v8/src/mips64/OWNERS1
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h62
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc579
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h276
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc1811
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc401
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.h9
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc57
-rw-r--r--deps/v8/src/mips64/codegen-mips64.h2
-rw-r--r--deps/v8/src/mips64/constants-mips64.cc36
-rw-r--r--deps/v8/src/mips64/constants-mips64.h683
-rw-r--r--deps/v8/src/mips64/cpu-mips64.cc13
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc7
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc162
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc59
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc2421
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h254
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc603
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h81
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h565
-rw-r--r--deps/v8/src/objects-body-descriptors.h141
-rw-r--r--deps/v8/src/objects-debug.cc62
-rw-r--r--deps/v8/src/objects-inl.h819
-rw-r--r--deps/v8/src/objects-printer.cc92
-rw-r--r--deps/v8/src/objects.cc4509
-rw-r--r--deps/v8/src/objects.h1201
-rw-r--r--deps/v8/src/ostreams.cc1
-rw-r--r--deps/v8/src/ostreams.h2
-rw-r--r--deps/v8/src/parsing/OWNERS6
-rw-r--r--deps/v8/src/parsing/expression-classifier.h (renamed from deps/v8/src/expression-classifier.h)68
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc (renamed from deps/v8/src/func-name-inferrer.cc)6
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h (renamed from deps/v8/src/func-name-inferrer.h)6
-rw-r--r--deps/v8/src/parsing/json-parser.h (renamed from deps/v8/src/json-parser.h)23
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.cc (renamed from deps/v8/src/parameter-initializer-rewriter.cc)18
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.h (renamed from deps/v8/src/parameter-initializer-rewriter.h)8
-rw-r--r--deps/v8/src/parsing/parser-base.h (renamed from deps/v8/src/preparser.h)1679
-rw-r--r--deps/v8/src/parsing/parser.cc (renamed from deps/v8/src/parser.cc)1783
-rw-r--r--deps/v8/src/parsing/parser.h (renamed from deps/v8/src/parser.h)414
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc (renamed from deps/v8/src/pattern-rewriter.cc)311
-rw-r--r--deps/v8/src/parsing/preparse-data-format.h (renamed from deps/v8/src/preparse-data-format.h)6
-rw-r--r--deps/v8/src/parsing/preparse-data.cc (renamed from deps/v8/src/preparse-data.cc)6
-rw-r--r--deps/v8/src/parsing/preparse-data.h (renamed from deps/v8/src/preparse-data.h)8
-rw-r--r--deps/v8/src/parsing/preparser.cc (renamed from deps/v8/src/preparser.cc)82
-rw-r--r--deps/v8/src/parsing/preparser.h1175
-rw-r--r--deps/v8/src/parsing/rewriter.cc (renamed from deps/v8/src/rewriter.cc)8
-rw-r--r--deps/v8/src/parsing/rewriter.h (renamed from deps/v8/src/rewriter.h)6
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc (renamed from deps/v8/src/scanner-character-streams.cc)5
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h (renamed from deps/v8/src/scanner-character-streams.h)8
-rw-r--r--deps/v8/src/parsing/scanner.cc (renamed from deps/v8/src/scanner.cc)107
-rw-r--r--deps/v8/src/parsing/scanner.h (renamed from deps/v8/src/scanner.h)47
-rw-r--r--deps/v8/src/parsing/token.cc (renamed from deps/v8/src/token.cc)3
-rw-r--r--deps/v8/src/parsing/token.h (renamed from deps/v8/src/token.h)16
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h35
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc57
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h33
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc1772
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc426
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h4
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc44
-rw-r--r--deps/v8/src/ppc/codegen-ppc.h2
-rw-r--r--deps/v8/src/ppc/constants-ppc.h48
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc7
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc28
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc59
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc590
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h147
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc245
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h47
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc13
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h9
-rw-r--r--deps/v8/src/profiler/heap-profiler.h2
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator-inl.h7
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc145
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h8
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h4
-rw-r--r--deps/v8/src/profiler/profile-generator.cc28
-rw-r--r--deps/v8/src/profiler/profile-generator.h10
-rw-r--r--deps/v8/src/property-descriptor.cc232
-rw-r--r--deps/v8/src/property-descriptor.h16
-rw-r--r--deps/v8/src/property-details.h40
-rw-r--r--deps/v8/src/prototype.h48
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc145
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h10
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc129
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h8
-rw-r--r--deps/v8/src/regexp/bytecodes-irregexp.h20
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc135
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h10
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.cc77
-rw-r--r--deps/v8/src/regexp/jsregexp.cc269
-rw-r--r--deps/v8/src/regexp/jsregexp.h192
-rw-r--r--deps/v8/src/regexp/mips/OWNERS1
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc137
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h10
-rw-r--r--deps/v8/src/regexp/mips64/OWNERS1
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc127
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h14
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc130
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h10
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc337
-rw-r--r--deps/v8/src/regexp/regexp-ast.h496
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc18
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.h6
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc42
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.h7
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc14
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h18
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc1180
-rw-r--r--deps/v8/src/regexp/regexp-parser.h277
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc137
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h10
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc135
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h10
-rw-r--r--deps/v8/src/register-configuration.cc104
-rw-r--r--deps/v8/src/runtime-profiler.cc8
-rw-r--r--deps/v8/src/runtime/runtime-array.cc148
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc259
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc85
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc67
-rw-r--r--deps/v8/src/runtime/runtime-date.cc165
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc213
-rw-r--r--deps/v8/src/runtime/runtime-function.cc328
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc10
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc8
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc9
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc134
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc31
-rw-r--r--deps/v8/src/runtime/runtime-json.cc2
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc46
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc34
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc64
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc19
-rw-r--r--deps/v8/src/runtime/runtime-object.cc422
-rw-r--r--deps/v8/src/runtime/runtime-observe.cc25
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc167
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc157
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc208
-rw-r--r--deps/v8/src/runtime/runtime-simd.cc57
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc7
-rw-r--r--deps/v8/src/runtime/runtime-test.cc3
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc55
-rw-r--r--deps/v8/src/runtime/runtime.h338
-rw-r--r--deps/v8/src/snapshot/natives-common.cc7
-rw-r--r--deps/v8/src/snapshot/natives-external.cc31
-rw-r--r--deps/v8/src/snapshot/natives.h2
-rw-r--r--deps/v8/src/snapshot/serialize.cc85
-rw-r--r--deps/v8/src/snapshot/serialize.h20
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc11
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc25
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h5
-rw-r--r--deps/v8/src/snapshot/snapshot.h3
-rw-r--r--deps/v8/src/startup-data-util.cc14
-rw-r--r--deps/v8/src/third_party/vtune/v8vtune.gyp4
-rw-r--r--deps/v8/src/tracing/trace-event.cc19
-rw-r--r--deps/v8/src/tracing/trace-event.h535
-rw-r--r--deps/v8/src/transitions-inl.h25
-rw-r--r--deps/v8/src/transitions.cc131
-rw-r--r--deps/v8/src/transitions.h42
-rw-r--r--deps/v8/src/type-cache.h42
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h57
-rw-r--r--deps/v8/src/type-feedback-vector.cc49
-rw-r--r--deps/v8/src/type-feedback-vector.h27
-rw-r--r--deps/v8/src/type-info.cc102
-rw-r--r--deps/v8/src/type-info.h22
-rw-r--r--deps/v8/src/types.cc6
-rw-r--r--deps/v8/src/typing-asm.cc823
-rw-r--r--deps/v8/src/typing-asm.h87
-rw-r--r--deps/v8/src/typing-reset.cc4
-rw-r--r--deps/v8/src/typing-reset.h2
-rw-r--r--deps/v8/src/utils.cc27
-rw-r--r--deps/v8/src/utils.h22
-rw-r--r--deps/v8/src/v8.cc7
-rw-r--r--deps/v8/src/wasm/OWNERS5
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.cc1045
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.h33
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc1583
-rw-r--r--deps/v8/src/wasm/ast-decoder.h116
-rw-r--r--deps/v8/src/wasm/decoder.h233
-rw-r--r--deps/v8/src/wasm/encoder.cc592
-rw-r--r--deps/v8/src/wasm/encoder.h157
-rw-r--r--deps/v8/src/wasm/module-decoder.cc547
-rw-r--r--deps/v8/src/wasm/module-decoder.h33
-rw-r--r--deps/v8/src/wasm/wasm-js.cc345
-rw-r--r--deps/v8/src/wasm/wasm-js.h27
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h265
-rw-r--r--deps/v8/src/wasm/wasm-module.cc511
-rw-r--r--deps/v8/src/wasm/wasm-module.h192
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc133
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h476
-rw-r--r--deps/v8/src/wasm/wasm-result.cc53
-rw-r--r--deps/v8/src/wasm/wasm-result.h116
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h29
-rw-r--r--deps/v8/src/x64/assembler-x64.cc41
-rw-r--r--deps/v8/src/x64/assembler-x64.h38
-rw-r--r--deps/v8/src/x64/builtins-x64.cc1767
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc401
-rw-r--r--deps/v8/src/x64/codegen-x64.cc121
-rw-r--r--deps/v8/src/x64/codegen-x64.h2
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc7
-rw-r--r--deps/v8/src/x64/disasm-x64.cc5
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc60
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc452
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h161
-rw-r--r--deps/v8/src/x64/simulator-x64.h11
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h30
-rw-r--r--deps/v8/src/x87/assembler-x87.cc35
-rw-r--r--deps/v8/src/x87/assembler-x87.h21
-rw-r--r--deps/v8/src/x87/builtins-x87.cc1759
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc397
-rw-r--r--deps/v8/src/x87/codegen-x87.cc34
-rw-r--r--deps/v8/src/x87/codegen-x87.h2
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc35
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc59
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc349
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h364
-rw-r--r--deps/v8/src/x87/simulator-x87.h11
-rw-r--r--deps/v8/src/zone-containers.h6
-rw-r--r--deps/v8/test/cctest/OWNERS5
-rw-r--r--deps/v8/test/cctest/cctest.gyp34
-rw-r--r--deps/v8/test/cctest/cctest.h157
-rw-r--r--deps/v8/test/cctest/cctest.status141
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h66
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc66
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h167
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h55
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h26
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc59
-rw-r--r--deps/v8/test/cctest/compiler/test-changes-lowering.cc37
-rw-r--r--deps/v8/test/cctest/compiler/test-code-stub-assembler.cc125
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc40
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-visualizer.cc17
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc13
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc95
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc13
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc41
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc9
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc15
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-operator.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-osr.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-pipeline.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc387
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc2124
-rw-r--r--deps/v8/test/cctest/compiler/test-run-deopt.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc27
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsbranches.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsexceptions.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsobjects.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsops.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc1447
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc115
-rw-r--r--deps/v8/test/cctest/compiler/test-run-properties.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stackcheck.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc89
-rw-r--r--deps/v8/test/cctest/compiler/test-run-variables.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc461
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h199
-rw-r--r--deps/v8/test/cctest/expression-type-collector-macros.h21
-rw-r--r--deps/v8/test/cctest/expression-type-collector.cc4
-rw-r--r--deps/v8/test/cctest/expression-type-collector.h2
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h (renamed from deps/v8/test/cctest/heap-tester.h)38
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc (renamed from deps/v8/test/cctest/test-alloc.cc)13
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc340
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc (renamed from deps/v8/test/cctest/test-heap.cc)470
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc (renamed from deps/v8/test/cctest/test-incremental-marking.cc)27
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc284
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc (renamed from deps/v8/test/cctest/test-mark-compact.cc)10
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc (renamed from deps/v8/test/cctest/test-spaces.cc)76
-rw-r--r--deps/v8/test/cctest/heap/utils-inl.h137
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc4038
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc751
-rw-r--r--deps/v8/test/cctest/print-extension.cc5
-rw-r--r--deps/v8/test/cctest/print-extension.h5
-rw-r--r--deps/v8/test/cctest/profiler-extension.cc18
-rw-r--r--deps/v8/test/cctest/test-accessors.cc3
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc62
-rw-r--r--deps/v8/test/cctest/test-api-fast-accessor-builder.cc288
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc1444
-rw-r--r--deps/v8/test/cctest/test-api.cc9138
-rw-r--r--deps/v8/test/cctest/test-asm-validator.cc1499
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc194
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc342
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc44
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc698
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc742
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc62
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc51
-rw-r--r--deps/v8/test/cctest/test-assembler-x87.cc3
-rw-r--r--deps/v8/test/cctest/test-ast-expression-visitor.cc37
-rw-r--r--deps/v8/test/cctest/test-ast.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc5
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc5
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc3
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips64.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc3
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x87.cc3
-rw-r--r--deps/v8/test/cctest/test-compiler.cc222
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc478
-rw-r--r--deps/v8/test/cctest/test-date.cc31
-rw-r--r--deps/v8/test/cctest/test-debug.cc3323
-rw-r--r--deps/v8/test/cctest/test-decls.cc117
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc264
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc15
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc125
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc59
-rw-r--r--deps/v8/test/cctest/test-disasm-mips64.cc122
-rw-r--r--deps/v8/test/cctest/test-disasm-ppc.cc2
-rw-r--r--deps/v8/test/cctest/test-elements-kind.cc6
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc139
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc25
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc362
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc150
-rw-r--r--deps/v8/test/cctest/test-global-object.cc30
-rw-r--r--deps/v8/test/cctest/test-hashing.cc7
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc259
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc1111
-rw-r--r--deps/v8/test/cctest/test-javascript-arm64.cc122
-rw-r--r--deps/v8/test/cctest/test-js-arm64-variables.cc20
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc18
-rw-r--r--deps/v8/test/cctest/test-lockers.cc130
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc17
-rw-r--r--deps/v8/test/cctest/test-log.cc57
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc14
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-ia32.cc3
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc177
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc277
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc89
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x87.cc3
-rw-r--r--deps/v8/test/cctest/test-microtask-delivery.cc55
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc43
-rw-r--r--deps/v8/test/cctest/test-parsing.cc810
-rw-r--r--deps/v8/test/cctest/test-platform-linux.cc3
-rw-r--r--deps/v8/test/cctest/test-platform-win32.cc3
-rw-r--r--deps/v8/test/cctest/test-platform.cc3
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc21
-rw-r--r--deps/v8/test/cctest/test-random-number-generator.cc127
-rw-r--r--deps/v8/test/cctest/test-receiver-check-hidden-prototype.cc73
-rw-r--r--deps/v8/test/cctest/test-regexp.cc229
-rw-r--r--deps/v8/test/cctest/test-reloc-info.cc12
-rw-r--r--deps/v8/test/cctest/test-representation.cc3
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc5
-rw-r--r--deps/v8/test/cctest/test-serialize.cc60
-rw-r--r--deps/v8/test/cctest/test-simd.cc3
-rw-r--r--deps/v8/test/cctest/test-slots-buffer.cc4
-rw-r--r--deps/v8/test/cctest/test-strings.cc102
-rw-r--r--deps/v8/test/cctest/test-strtod.cc3
-rw-r--r--deps/v8/test/cctest/test-symbols.cc3
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc3
-rw-r--r--deps/v8/test/cctest/test-threads.cc3
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc258
-rw-r--r--deps/v8/test/cctest/test-transitions.cc23
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc3
-rw-r--r--deps/v8/test/cctest/test-types.cc3
-rw-r--r--deps/v8/test/cctest/test-typing-reset.cc13
-rw-r--r--deps/v8/test/cctest/test-unbound-queue.cc3
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc52
-rw-r--r--deps/v8/test/cctest/test-unique.cc3
-rw-r--r--deps/v8/test/cctest/test-unscopables-hidden-prototype.cc3
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc27
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h26
-rw-r--r--deps/v8/test/cctest/test-utils.cc3
-rw-r--r--deps/v8/test/cctest/test-version.cc3
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc6
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc6
-rw-r--r--deps/v8/test/cctest/testcfg.py4
-rw-r--r--deps/v8/test/cctest/trace-extension.cc3
-rw-r--r--deps/v8/test/cctest/wasm/OWNERS3
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc141
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc199
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc3254
-rw-r--r--deps/v8/test/cctest/wasm/test-signatures.h111
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h391
-rw-r--r--deps/v8/test/ignition.gyp27
-rw-r--r--deps/v8/test/ignition.isolate9
-rw-r--r--deps/v8/test/intl/date-format/format-test.js4
-rw-r--r--deps/v8/test/intl/date-format/resolved-options.js3
-rw-r--r--deps/v8/test/intl/date-format/timezone.js5
-rw-r--r--deps/v8/test/intl/intl.status5
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json1
-rw-r--r--deps/v8/test/message/arrow-bare-rest-param.js2
-rw-r--r--deps/v8/test/message/arrow-missing.js2
-rw-r--r--deps/v8/test/message/arrow-param-after-rest-2.js2
-rw-r--r--deps/v8/test/message/arrow-param-after-rest.js2
-rw-r--r--deps/v8/test/message/arrow-two-rest-params.js2
-rw-r--r--deps/v8/test/message/default-parameter-tdz-arrow.js7
-rw-r--r--deps/v8/test/message/default-parameter-tdz-arrow.out6
-rw-r--r--deps/v8/test/message/default-parameter-tdz.js7
-rw-r--r--deps/v8/test/message/default-parameter-tdz.out6
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-array.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-array2.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-obj.js2
-rw-r--r--deps/v8/test/message/destructuring-decl-no-init-obj2.js2
-rw-r--r--deps/v8/test/message/destructuring-modify-const.js2
-rw-r--r--deps/v8/test/message/for-in-loop-initializers-destructuring.js9
-rw-r--r--deps/v8/test/message/for-in-loop-initializers-destructuring.out4
-rw-r--r--deps/v8/test/message/formal-parameters-bad-rest.js2
-rw-r--r--deps/v8/test/message/invalid-spread-2.js2
-rw-r--r--deps/v8/test/message/invalid-spread.js2
-rw-r--r--deps/v8/test/message/let-lexical-name-in-array-prohibited.js2
-rw-r--r--deps/v8/test/message/let-lexical-name-in-object-prohibited.js2
-rw-r--r--deps/v8/test/message/nf-yield-in-generator.js7
-rw-r--r--deps/v8/test/message/nf-yield-in-generator.out4
-rw-r--r--deps/v8/test/message/nf-yield-strict-in-generator.js8
-rw-r--r--deps/v8/test/message/nf-yield-strict-in-generator.out4
-rw-r--r--deps/v8/test/message/nf-yield-strict.js5
-rw-r--r--deps/v8/test/message/nf-yield-strict.out4
-rw-r--r--deps/v8/test/message/nfe-yield-generator.js5
-rw-r--r--deps/v8/test/message/nfe-yield-generator.out4
-rw-r--r--deps/v8/test/message/nfe-yield-strict.js5
-rw-r--r--deps/v8/test/message/nfe-yield-strict.out4
-rw-r--r--deps/v8/test/message/no-legacy-const-2.js3
-rw-r--r--deps/v8/test/message/no-legacy-const-2.out2
-rw-r--r--deps/v8/test/message/no-legacy-const-3.js3
-rw-r--r--deps/v8/test/message/no-legacy-const-3.out2
-rw-r--r--deps/v8/test/message/no-legacy-const.js3
-rw-r--r--deps/v8/test/message/no-legacy-const.out2
-rw-r--r--deps/v8/test/message/paren_in_arg_string.out1
-rw-r--r--deps/v8/test/message/rest-param-class-setter-strict.js2
-rw-r--r--deps/v8/test/message/rest-param-object-setter-sloppy.js2
-rw-r--r--deps/v8/test/message/rest-param-object-setter-strict.js2
-rw-r--r--deps/v8/test/message/single-function-literal.out5
-rw-r--r--deps/v8/test/message/testcfg.py4
-rw-r--r--deps/v8/test/message/try-catch-lexical-conflict.js2
-rw-r--r--deps/v8/test/message/try-catch-variable-conflict.js2
-rw-r--r--deps/v8/test/mjsunit/apply.js4
-rw-r--r--deps/v8/test/mjsunit/array-constructor.js21
-rw-r--r--deps/v8/test/mjsunit/array-isarray.js16
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-add.js45
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-and.js45
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-compareexchange.js45
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-exchange.js42
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-load.js45
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-or.js42
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-store.js45
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-sub.js45
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-xor.js45
-rw-r--r--deps/v8/test/mjsunit/asm/infinite-loops-taken.js6
-rw-r--r--deps/v8/test/mjsunit/bugs/bug-4577.js13
-rw-r--r--deps/v8/test/mjsunit/callsite.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js (renamed from deps/v8/test/mjsunit/regress/regress-351315.js)28
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-1.js (renamed from deps/v8/test/message/single-function-literal.js)23
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-10.js (renamed from deps/v8/test/mjsunit/regress/regress-1945.js)19
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-2.js45
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-3.js44
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-4.js48
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-5.js48
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-6.js48
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-7.js (renamed from deps/v8/test/mjsunit/compiler/stubs/floor-stub.js)53
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-8.js49
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-9.js52
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js47
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js51
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js57
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js41
-rw-r--r--deps/v8/test/mjsunit/compiler/mul-div-52bit.js86
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-572409.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-96989.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-const.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js23
-rw-r--r--deps/v8/test/mjsunit/const-declaration.js2
-rw-r--r--deps/v8/test/mjsunit/const-eval-init.js2
-rw-r--r--deps/v8/test/mjsunit/const-redecl.js2
-rw-r--r--deps/v8/test/mjsunit/const.js2
-rw-r--r--deps/v8/test/mjsunit/constant-fold-control-instructions.js4
-rw-r--r--deps/v8/test/mjsunit/constant-folding.js2
-rw-r--r--deps/v8/test/mjsunit/cross-realm-global-prototype.js22
-rw-r--r--deps/v8/test/mjsunit/d8-os.js22
-rw-r--r--deps/v8/test/mjsunit/date.js27
-rw-r--r--deps/v8/test/mjsunit/debug-allscopes-on-debugger.js2
-rw-r--r--deps/v8/test/mjsunit/debug-break-native.js2
-rw-r--r--deps/v8/test/mjsunit/debug-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/debug-continue.js5
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-closure.js18
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-const.js10
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-declaration.js44
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js8
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-locals.js8
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js39
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-shadowed-context.js83
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-stepin.js81
-rw-r--r--deps/v8/test/mjsunit/debug-return-value.js2
-rw-r--r--deps/v8/test/mjsunit/debug-script.js4
-rw-r--r--deps/v8/test/mjsunit/debug-sourceinfo.js12
-rw-r--r--deps/v8/test/mjsunit/debug-step-4.js (renamed from deps/v8/test/mjsunit/debug-step-4-in-frame.js)35
-rw-r--r--deps/v8/test/mjsunit/debug-step-end-of-script.js21
-rw-r--r--deps/v8/test/mjsunit/debug-step-into-json.js36
-rw-r--r--deps/v8/test/mjsunit/debug-step-into-valueof.js35
-rw-r--r--deps/v8/test/mjsunit/debug-step-turbofan.js2
-rw-r--r--deps/v8/test/mjsunit/debug-step.js21
-rw-r--r--deps/v8/test/mjsunit/debug-stepframe-clearing.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepframe.js21
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-accessor-ic.js2
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-accessor.js12
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js37
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-builtin-callback.js157
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-builtin.js12
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-call-function-stub.js8
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-construct-call.js2
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-foreach.js2
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-function-call.js8
-rw-r--r--deps/v8/test/mjsunit/debug-stepin-property-function-call.js12
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-recursive-function.js11
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part1.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part2.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part3.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part4.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part5.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part6.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part7.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-scope-part8.js4
-rw-r--r--deps/v8/test/mjsunit/debug-stepout-to-builtin.js6
-rw-r--r--deps/v8/test/mjsunit/declare-locally.js2
-rw-r--r--deps/v8/test/mjsunit/error-constructors.js43
-rw-r--r--deps/v8/test/mjsunit/error-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-tostring.js2
-rw-r--r--deps/v8/test/mjsunit/es6/arrow-rest-params-lazy-parsing.js (renamed from deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/arrow-rest-params.js (renamed from deps/v8/test/mjsunit/harmony/arrow-rest-params.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-early-errors.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js (renamed from deps/v8/test/mjsunit/harmony/block-eval-var-over-legacy-const.js)75
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js)3
-rw-r--r--deps/v8/test/mjsunit/es6/block-non-strict-errors.js3
-rw-r--r--deps/v8/test/mjsunit/es6/block-scope-class.js (renamed from deps/v8/test/mjsunit/harmony/block-scope-class.js)0
-rw-r--r--deps/v8/test/mjsunit/es6/built-in-accessor-names.js26
-rw-r--r--deps/v8/test/mjsunit/es6/classes-derived-return-type.js (renamed from deps/v8/test/mjsunit/harmony/classes-derived-return-type.js)0
-rw-r--r--deps/v8/test/mjsunit/es6/classes-proxy.js73
-rw-r--r--deps/v8/test/mjsunit/es6/classes-subclass-builtins.js480
-rw-r--r--deps/v8/test/mjsunit/es6/classof-proxy.js27
-rw-r--r--deps/v8/test/mjsunit/es6/debug-blockscopes.js11
-rw-r--r--deps/v8/test/mjsunit/es6/debug-break-default-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-evaluate-arrow-function-receiver.js116
-rw-r--r--deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js75
-rw-r--r--deps/v8/test/mjsunit/es6/debug-liveedit-new-target-2.js63
-rw-r--r--deps/v8/test/mjsunit/es6/debug-liveedit-new-target-3.js73
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reentry.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-step-into-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js36
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js124
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-generators.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-string-template.js60
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepnext-for.js17
-rw-r--r--deps/v8/test/mjsunit/es6/generators-parsing.js4
-rw-r--r--deps/v8/test/mjsunit/es6/instanceof-proxies.js62
-rw-r--r--deps/v8/test/mjsunit/es6/iteration-semantics.js53
-rw-r--r--deps/v8/test/mjsunit/es6/legacy-subclassing.js38
-rw-r--r--deps/v8/test/mjsunit/es6/new-target.js2
-rw-r--r--deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js24
-rw-r--r--deps/v8/test/mjsunit/es6/object-tostring.js28
-rw-r--r--deps/v8/test/mjsunit/es6/promise-internal-setter.js2
-rw-r--r--deps/v8/test/mjsunit/es6/promises.js101
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-constructor.js99
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-flags.js6
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4211.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-4211.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-468661.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-508074.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-508074.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-513474.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-513474.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr372788.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr493566.js66
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr512574.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js13
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-new-target-context.js25
-rw-r--r--deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js (renamed from deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/rest-params.js (renamed from deps/v8/test/mjsunit/harmony/rest-params.js)25
-rw-r--r--deps/v8/test/mjsunit/es6/spread-call-new-class.js2
-rw-r--r--deps/v8/test/mjsunit/es6/spread-call-super-property.js2
-rw-r--r--deps/v8/test/mjsunit/es6/string-repeat.js6
-rw-r--r--deps/v8/test/mjsunit/es6/string-search.js20
-rw-r--r--deps/v8/test/mjsunit/es6/super.js2
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js36
-rw-r--r--deps/v8/test/mjsunit/es6/templates.js19
-rw-r--r--deps/v8/test/mjsunit/es6/typed-array-iterator.js33
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-of.js46
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-proto.js62
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js10
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes-to-object-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/array-includes-to-object-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes-to-object-strict.js (renamed from deps/v8/test/mjsunit/harmony/array-includes-to-object-strict.js)2
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes.js (renamed from deps/v8/test/mjsunit/harmony/array-includes.js)2
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe.js4
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-443982.js2
-rw-r--r--deps/v8/test/mjsunit/es7/typed-array-includes.js (renamed from deps/v8/test/mjsunit/harmony/typed-array-includes.js)2
-rw-r--r--deps/v8/test/mjsunit/fast-prototype.js5
-rw-r--r--deps/v8/test/mjsunit/for-in-opt.js28
-rw-r--r--deps/v8/test/mjsunit/function-bind.js9
-rw-r--r--deps/v8/test/mjsunit/get-caller-js-function-throws.js14
-rw-r--r--deps/v8/test/mjsunit/get-caller-js-function.js21
-rw-r--r--deps/v8/test/mjsunit/get-prototype-of.js18
-rw-r--r--deps/v8/test/mjsunit/global-const-var-conflicts.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat.js168
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species.js156
-rw-r--r--deps/v8/test/mjsunit/harmony/arraybuffer-species.js36
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics.js247
-rw-r--r--deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js136
-rw-r--r--deps/v8/test/mjsunit/harmony/block-sloppy-function.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js86
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js110
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js46
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js78
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameters.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring-assignment.js482
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions.js32
-rw-r--r--deps/v8/test/mjsunit/harmony/function-name.js161
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js76
-rw-r--r--deps/v8/test/mjsunit/harmony/private-symbols.js78
-rw-r--r--deps/v8/test/mjsunit/harmony/promise-species.js42
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-accesschecks.js15
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-apply.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-bind.js137
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-construct.js158
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-define-property.js84
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-delete-property.js190
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-enumerate.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-example-membrane.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-for.js78
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-function.js1478
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js129
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js93
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-get.js127
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-global-reference.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-has-own-property.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-has.js63
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-hash.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-integrity.js213
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-is-extensible.js74
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-json.js390
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-keys.js41
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-object-assign.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-ownkeys.js84
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js87
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js118
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js97
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-revocable.js26
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js122
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-set.js312
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-symbols.js106
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js77
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-with.js243
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies.js1454
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-construct.js104
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-own-keys.js93
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-lookbehind.js165
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2219.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-2225.js37
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-405844.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4395.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4585.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4658.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-517455.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-576662.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-571149.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer.js11
-rw-r--r--deps/v8/test/mjsunit/harmony/simd.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/species.js37
-rw-r--r--deps/v8/test/mjsunit/harmony/string-match.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/typedarray-species.js86
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js45
-rw-r--r--deps/v8/test/mjsunit/json.js40
-rw-r--r--deps/v8/test/mjsunit/messages.js48
-rw-r--r--deps/v8/test/mjsunit/mirror-script.js2
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js77
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status241
-rw-r--r--deps/v8/test/mjsunit/parallel-optimize-disabled.js2
-rw-r--r--deps/v8/test/mjsunit/property-load-across-eval.js2
-rw-r--r--deps/v8/test/mjsunit/random-bit-correlations.js69
-rw-r--r--deps/v8/test/mjsunit/readonly.js5
-rw-r--r--deps/v8/test/mjsunit/regexp-not-sticky-yet.js1
-rw-r--r--deps/v8/test/mjsunit/regexp.js9
-rw-r--r--deps/v8/test/mjsunit/regress-3225.js14
-rw-r--r--deps/v8/test/mjsunit/regress/debug-prepare-step-in.js2
-rw-r--r--deps/v8/test/mjsunit/regress/property-descriptor-to-object.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-109195.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1178598.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1182832.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1199637.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1201933.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1207276.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1213575.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1229.js146
-rw-r--r--deps/v8/test/mjsunit/regress/regress-147497.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-186.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2596.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3138.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-325676.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3641.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-380049.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-417709b.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-436896.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4576.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4640.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4665.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4693.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-544991.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-552302.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-554865.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-556543.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-568765.js93
-rw-r--r--deps/v8/test/mjsunit/regress/regress-572589.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-575364.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-493568.js)12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-578775.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-583260.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-641.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-70066.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-799761.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-88591.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-91120.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-995.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-conditional-position.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-109362.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-119800.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-323936.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-352586.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-364374.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-380671.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-390925.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-401915.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-405517.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-405922.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-409614.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-412319.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-422858.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-435825.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-451770.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js)0
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-467180.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-481896.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-487322.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-505907.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-506956.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-517592.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523308.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-551287.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-554831.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-557807.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-561973.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-563929.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-565917.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js54
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js56
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-568525.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-569534.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-570241.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-570651.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-571064.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-571370.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-571517.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-572590.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-573857.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-573858.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-575080.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-575082.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-575314.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-578039-Proxy_construct_prototype_change.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-debugger-redirect.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-function-constructor-receiver.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-osr-in-literal.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-typedarray-length.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-undefined-nan3.js4
-rw-r--r--deps/v8/test/mjsunit/stack-traces-2.js3
-rw-r--r--deps/v8/test/mjsunit/stack-traces.js25
-rw-r--r--deps/v8/test/mjsunit/strict-mode.js12
-rw-r--r--deps/v8/test/mjsunit/strong/declaration-after-use.js2
-rw-r--r--deps/v8/test/mjsunit/strong/destructuring.js2
-rw-r--r--deps/v8/test/mjsunit/strong/eval-direct.js5
-rw-r--r--deps/v8/test/mjsunit/strong/function-arity.js2
-rw-r--r--deps/v8/test/mjsunit/strong/literals.js3
-rw-r--r--deps/v8/test/mjsunit/strong/load-proxy.js36
-rw-r--r--deps/v8/test/mjsunit/tail-call-intrinsic.js (renamed from deps/v8/test/mjsunit/call-runtime-tail.js)48
-rw-r--r--deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js102
-rw-r--r--deps/v8/test/mjsunit/wasm/OWNERS3
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js785
-rw-r--r--deps/v8/test/mjsunit/wasm/calls.js145
-rw-r--r--deps/v8/test/mjsunit/wasm/compile-run-basic.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/divrem-trap.js97
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js79
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js333
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js73
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js62
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js181
-rw-r--r--deps/v8/test/mjsunit/wasm/params.js139
-rw-r--r--deps/v8/test/mjsunit/wasm/stackwalk.js135
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-simple.js44
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js248
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-object-api.js11
-rw-r--r--deps/v8/test/mozilla/mozilla.status34
-rw-r--r--deps/v8/test/test262/test262.status320
-rw-r--r--deps/v8/test/unittests/cancelable-tasks-unittest.cc218
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc453
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc595
-rw-r--r--deps/v8/test/unittests/compiler/binary-operator-reducer-unittest.cc94
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc39
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc259
-rw-r--r--deps/v8/test/unittests/compiler/change-lowering-unittest.cc224
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc104
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc36
-rw-r--r--deps/v8/test/unittests/compiler/diamond-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/escape-analysis-unittest.cc396
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc111
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc119
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.h18
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc255
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h7
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc116
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc358
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc82
-rw-r--r--deps/v8/test/unittests/compiler/live-range-builder.h2
-rw-r--r--deps/v8/test/unittests/compiler/live-range-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc50
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc46
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc83
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc500
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc743
-rw-r--r--deps/v8/test/unittests/compiler/move-optimizer-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc120
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h18
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc80
-rw-r--r--deps/v8/test/unittests/compiler/select-lowering-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc49
-rw-r--r--deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc30
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc116
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc208
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc261
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc5
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc67
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc225
-rw-r--r--deps/v8/test/unittests/locked-queue-unittest.cc90
-rw-r--r--deps/v8/test/unittests/unittests.gyp20
-rw-r--r--deps/v8/test/unittests/unittests.status8
-rw-r--r--deps/v8/test/unittests/wasm/OWNERS3
-rw-r--r--deps/v8/test/unittests/wasm/ast-decoder-unittest.cc2439
-rw-r--r--deps/v8/test/unittests/wasm/encoder-unittest.cc151
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc957
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc319
-rw-r--r--deps/v8/test/webkit/class-syntax-call-expected.txt10
-rw-r--r--deps/v8/test/webkit/class-syntax-call.js10
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration-expected.txt4
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-default-constructor-expected.txt4
-rw-r--r--deps/v8/test/webkit/class-syntax-default-constructor.js4
-rw-r--r--deps/v8/test/webkit/class-syntax-expression-expected.txt4
-rw-r--r--deps/v8/test/webkit/class-syntax-expression.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-extends-expected.txt2
-rw-r--r--deps/v8/test/webkit/class-syntax-extends.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-super-expected.txt4
-rw-r--r--deps/v8/test/webkit/class-syntax-super.js4
-rw-r--r--deps/v8/test/webkit/const-without-initializer.js2
-rw-r--r--deps/v8/test/webkit/constant-count.js2
-rw-r--r--deps/v8/test/webkit/exception-for-nonobject-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/JSON-parse-reviver-expected.txt21
-rw-r--r--deps/v8/test/webkit/fast/js/JSON-parse-reviver.js5
-rw-r--r--deps/v8/test/webkit/fast/js/arguments-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/function-apply-expected.txt6
-rw-r--r--deps/v8/test/webkit/fast/js/function-apply.js2
-rw-r--r--deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/kde/RegExp-expected.txt2
-rw-r--r--deps/v8/test/webkit/fast/js/kde/func-decl-expected.txt9
-rw-r--r--deps/v8/test/webkit/fast/js/native-error-prototype-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/native-error-prototype.js4
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt20
-rw-r--r--deps/v8/test/webkit/fast/js/toString-overrides-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/regex/constructor-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/regex/constructor.js4
-rw-r--r--deps/v8/test/webkit/fast/regex/toString-expected.txt6
-rw-r--r--deps/v8/test/webkit/regexp-compile-expected.txt2
-rw-r--r--deps/v8/test/webkit/regexp-compile.js2
-rw-r--r--deps/v8/test/webkit/run-json-stringify-expected.txt4
-rw-r--r--deps/v8/test/webkit/toString-recursion-expected.txt1
-rw-r--r--deps/v8/test/webkit/toString-recursion.js3
-rw-r--r--deps/v8/test/webkit/webkit.status10
-rwxr-xr-xdeps/v8/tools/bash-completion.sh8
-rw-r--r--deps/v8/tools/check-static-initializers.gyp26
-rw-r--r--deps/v8/tools/check-static-initializers.isolate16
-rwxr-xr-xdeps/v8/tools/cpu.sh39
-rwxr-xr-xdeps/v8/tools/eval_gc_time.sh106
-rw-r--r--deps/v8/tools/gc_nvp_common.py2
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py8
-rw-r--r--deps/v8/tools/gyp/v8.gyp209
-rwxr-xr-xdeps/v8/tools/ll_prof.py34
-rw-r--r--deps/v8/tools/parser-shell.cc10
-rwxr-xr-xdeps/v8/tools/presubmit.py68
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py222
-rwxr-xr-xdeps/v8/tools/release/chromium_roll.py161
-rw-r--r--deps/v8/tools/release/common_includes.py50
-rwxr-xr-xdeps/v8/tools/release/releases.py9
-rw-r--r--deps/v8/tools/release/test_scripts.py145
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py5
-rwxr-xr-xdeps/v8/tools/run-tests.py17
-rwxr-xr-xdeps/v8/tools/run-valgrind.py35
-rw-r--r--deps/v8/tools/testrunner/local/execution.py179
-rw-r--r--deps/v8/tools/testrunner/local/perfdata.py26
-rw-r--r--deps/v8/tools/testrunner/local/pool.py29
-rw-r--r--deps/v8/tools/testrunner/local/progress.py37
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py36
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py21
-rw-r--r--deps/v8/tools/testrunner/network/endpoint.py1
-rw-r--r--deps/v8/tools/testrunner/objects/context.py8
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py8
-rwxr-xr-xdeps/v8/tools/try_perf.py29
-rw-r--r--deps/v8/tools/v8heapconst.py464
-rw-r--r--deps/v8/tools/whitespace.txt2
1562 files changed, 132681 insertions, 70663 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 69afcfd23e..adf39ff831 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -25,6 +25,7 @@
.cproject
.d8_history
.gclient_entries
+.gdb_history
.landmines
.project
.pydevproject
@@ -39,6 +40,7 @@ gcsuspects
shell
shell_g
/_*
+/base/trace_event/common
/build/Debug
/build/gyp
/build/ipch/
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index fdef3668bb..c9be8bbcda 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -32,6 +32,7 @@ StrongLoop, Inc. <*@strongloop.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
Akinori MUSHA <knu@FreeBSD.org>
+Alex Kodat <akodat@rocketsoftware.com>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
@@ -51,6 +52,7 @@ Daniel James <dnljms@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
Erich Ocean <erich.ocean@me.com>
+Evan Lucas <evan.lucas@help.com>
Fedor Indutny <fedor@indutny.com>
Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
@@ -102,7 +104,8 @@ Stefan Penner <stefan.penner@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
+Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Yu Yin <xwafish@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
-柳荣一 <admin@web-tinker.com>
+柳荣一 <admin@web-tinker.com> \ No newline at end of file
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 443ffe1e48..5279a4a783 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -171,12 +171,22 @@ config("toolchain") {
if (v8_target_arch == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
}
+ if (v8_target_arch == "s390") {
+ defines += [ "V8_TARGET_ARCH_S390" ]
+ }
+ if (v8_target_arch == "s390x") {
+ defines += [
+ "V8_TARGET_ARCH_S390",
+ "V8_TARGET_ARCH_S390X",
+ ]
+ }
if (v8_target_arch == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
}
if (v8_target_arch == "x64") {
defines += [ "V8_TARGET_ARCH_X64" ]
}
+
if (is_win) {
defines += [ "WIN32" ]
# TODO(jochen): Support v8_enable_prof.
@@ -222,7 +232,6 @@ action("js2c") {
"src/js/uri.js",
"src/js/math.js",
"src/third_party/fdlibm/fdlibm.js",
- "src/js/date.js",
"src/js/regexp.js",
"src/js/arraybuffer.js",
"src/js/typedarray.js",
@@ -266,40 +275,6 @@ action("js2c") {
}
}
-action("js2c_code_stubs") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- script = "tools/js2c.py"
-
- # The script depends on this other script, this rule causes a rebuild if it
- # changes.
- inputs = [ "tools/jsmin.py" ]
-
- sources = [
- "src/js/macros.py",
- "src/messages.h",
- "src/js/code-stubs.js"
- ]
-
- outputs = [
- "$target_gen_dir/code-stub-libraries.cc",
- ]
-
- args = [
- rebase_path("$target_gen_dir/code-stub-libraries.cc",
- root_build_dir),
- "CODE_STUB",
- ] + rebase_path(sources, root_build_dir)
-
- if (v8_use_external_startup_data) {
- outputs += [ "$target_gen_dir/libraries_code_stub.bin" ]
- args += [
- "--startup_blob",
- rebase_path("$target_gen_dir/libraries_code_stub.bin", root_build_dir),
- ]
- }
-}
-
action("js2c_experimental") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -315,12 +290,14 @@ action("js2c_experimental") {
"src/js/proxy.js",
"src/js/generator.js",
"src/js/harmony-atomics.js",
- "src/js/harmony-array-includes.js",
"src/js/harmony-regexp.js",
"src/js/harmony-reflect.js",
"src/js/harmony-object-observe.js",
"src/js/harmony-sharedarraybuffer.js",
- "src/js/harmony-simd.js"
+ "src/js/harmony-simd.js",
+ "src/js/harmony-species.js",
+ "src/js/harmony-unicode-regexps.js",
+ "src/js/promise-extra.js"
]
outputs = [
@@ -439,7 +416,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -447,7 +423,6 @@ if (v8_use_external_startup_data) {
sources = [
"$target_gen_dir/libraries.bin",
- "$target_gen_dir/libraries_code_stub.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
"$target_gen_dir/libraries_experimental_extras.bin",
@@ -535,7 +510,6 @@ source_set("v8_nosnapshot") {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -544,7 +518,6 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
- "$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
@@ -570,7 +543,6 @@ source_set("v8_snapshot") {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -584,7 +556,6 @@ source_set("v8_snapshot") {
sources = [
"$target_gen_dir/libraries.cc",
- "$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/experimental-extras-libraries.cc",
@@ -606,7 +577,6 @@ if (v8_use_external_startup_data) {
deps = [
":js2c",
- ":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
":js2c_experimental_extras",
@@ -636,7 +606,10 @@ source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
+ # TODO(fmeawad): This needs to be updated to support standalone V8 builds.
+ "../base/trace_event/common/trace_event_common.h",
"include/v8-debug.h",
+ "include/v8-experimental.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
@@ -654,6 +627,8 @@ source_set("v8_base") {
"src/allocation-site-scopes.h",
"src/api.cc",
"src/api.h",
+ "src/api-experimental.cc",
+ "src/api-experimental.h",
"src/api-natives.cc",
"src/api-natives.h",
"src/arguments.cc",
@@ -662,16 +637,28 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
- "src/ast-expression-visitor.cc",
- "src/ast-expression-visitor.h",
- "src/ast-literal-reindexer.cc",
- "src/ast-literal-reindexer.h",
- "src/ast-numbering.cc",
- "src/ast-numbering.h",
- "src/ast-value-factory.cc",
- "src/ast-value-factory.h",
- "src/ast.cc",
- "src/ast.h",
+ "src/ast/ast-expression-rewriter.cc",
+ "src/ast/ast-expression-rewriter.h",
+ "src/ast/ast-expression-visitor.cc",
+ "src/ast/ast-expression-visitor.h",
+ "src/ast/ast-literal-reindexer.cc",
+ "src/ast/ast-literal-reindexer.h",
+ "src/ast/ast-numbering.cc",
+ "src/ast/ast-numbering.h",
+ "src/ast/ast-value-factory.cc",
+ "src/ast/ast-value-factory.h",
+ "src/ast/ast.cc",
+ "src/ast/ast.h",
+ "src/ast/modules.cc",
+ "src/ast/modules.h",
+ "src/ast/prettyprinter.cc",
+ "src/ast/prettyprinter.h",
+ "src/ast/scopeinfo.cc",
+ "src/ast/scopeinfo.h",
+ "src/ast/scopes.cc",
+ "src/ast/scopes.h",
+ "src/ast/variables.cc",
+ "src/ast/variables.h",
"src/atomic-utils.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
@@ -722,10 +709,10 @@ source_set("v8_base") {
"src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
- "src/compiler/binary-operator-reducer.cc",
- "src/compiler/binary-operator-reducer.h",
"src/compiler/branch-elimination.cc",
"src/compiler/branch-elimination.h",
+ "src/compiler/bytecode-branch-analysis.cc",
+ "src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
"src/compiler/change-lowering.cc",
@@ -736,6 +723,8 @@ source_set("v8_base") {
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
+ "src/compiler/code-stub-assembler.cc",
+ "src/compiler/code-stub-assembler.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",
@@ -751,6 +740,12 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
+ "src/compiler/escape-analysis.cc",
+ "src/compiler/escape-analysis.h",
+ "src/compiler/escape-analysis-reducer.cc",
+ "src/compiler/escape-analysis-reducer.h",
+ "src/compiler/fast-accessor-assembler.cc",
+ "src/compiler/fast-accessor-assembler.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
@@ -772,6 +767,8 @@ source_set("v8_base") {
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
+ "src/compiler/instruction-scheduler.cc",
+ "src/compiler/instruction-scheduler.h",
"src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc",
"src/compiler/instruction-selector.h",
@@ -781,6 +778,8 @@ source_set("v8_base") {
"src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
+ "src/compiler/js-call-reducer.cc",
+ "src/compiler/js-call-reducer.h",
"src/compiler/js-context-relaxation.cc",
"src/compiler/js-context-relaxation.h",
"src/compiler/js-context-specialization.cc",
@@ -822,8 +821,6 @@ source_set("v8_base") {
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
- "src/compiler/machine-type.cc",
- "src/compiler/machine-type.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -855,6 +852,7 @@ source_set("v8_base") {
"src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
+ "src/compiler/representation-change.cc",
"src/compiler/representation-change.h",
"src/compiler/schedule.cc",
"src/compiler/schedule.h",
@@ -874,12 +872,19 @@ source_set("v8_base") {
"src/compiler/state-values-utils.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
+ "src/compiler/type-hint-analyzer.cc",
+ "src/compiler/type-hint-analyzer.h",
+ "src/compiler/type-hints.cc",
+ "src/compiler/type-hints.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
+ "src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-compiler.h",
+ "src/compiler/wasm-linkage.cc",
"src/compiler/zone-pool.cc",
"src/compiler/zone-pool.h",
"src/compiler.cc",
@@ -988,7 +993,6 @@ source_set("v8_base") {
"src/elements.h",
"src/execution.cc",
"src/execution.h",
- "src/expression-classifier.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -1015,8 +1019,6 @@ source_set("v8_base") {
"src/frames.h",
"src/full-codegen/full-codegen.cc",
"src/full-codegen/full-codegen.h",
- "src/func-name-inferrer.cc",
- "src/func-name-inferrer.h",
"src/futex-emulation.cc",
"src/futex-emulation.h",
"src/gdb-jit.cc",
@@ -1095,7 +1097,11 @@ source_set("v8_base") {
"src/interpreter/bytecode-array-iterator.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
+ "src/interpreter/bytecode-register-allocator.cc",
+ "src/interpreter/bytecode-register-allocator.h",
"src/interpreter/bytecode-traits.h",
+ "src/interpreter/constant-array-builder.cc",
+ "src/interpreter/constant-array-builder.h",
"src/interpreter/control-flow-builders.cc",
"src/interpreter/control-flow-builders.h",
"src/interpreter/interpreter.cc",
@@ -1103,7 +1109,6 @@ source_set("v8_base") {
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
- "src/json-parser.h",
"src/json-stringifier.h",
"src/key-accumulator.h",
"src/key-accumulator.cc",
@@ -1120,11 +1125,13 @@ source_set("v8_base") {
"src/lookup.cc",
"src/lookup.h",
"src/macro-assembler.h",
+ "src/machine-type.cc",
+ "src/machine-type.h",
"src/messages.cc",
"src/messages.h",
- "src/modules.cc",
- "src/modules.h",
"src/msan.h",
+ "src/objects-body-descriptors-inl.h",
+ "src/objects-body-descriptors.h",
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
@@ -1134,20 +1141,31 @@ source_set("v8_base") {
"src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
- "src/parameter-initializer-rewriter.cc",
- "src/parameter-initializer-rewriter.h",
- "src/parser.cc",
- "src/parser.h",
- "src/pattern-rewriter.cc",
+ "src/parsing/expression-classifier.h",
+ "src/parsing/func-name-inferrer.cc",
+ "src/parsing/func-name-inferrer.h",
+ "src/parsing/json-parser.h",
+ "src/parsing/parameter-initializer-rewriter.cc",
+ "src/parsing/parameter-initializer-rewriter.h",
+ "src/parsing/parser-base.h",
+ "src/parsing/parser.cc",
+ "src/parsing/parser.h",
+ "src/parsing/pattern-rewriter.cc",
+ "src/parsing/preparse-data-format.h",
+ "src/parsing/preparse-data.cc",
+ "src/parsing/preparse-data.h",
+ "src/parsing/preparser.cc",
+ "src/parsing/preparser.h",
+ "src/parsing/rewriter.cc",
+ "src/parsing/rewriter.h",
+ "src/parsing/scanner-character-streams.cc",
+ "src/parsing/scanner-character-streams.h",
+ "src/parsing/scanner.cc",
+ "src/parsing/scanner.h",
+ "src/parsing/token.cc",
+ "src/parsing/token.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
- "src/preparse-data-format.h",
- "src/preparse-data.cc",
- "src/preparse-data.h",
- "src/preparser.cc",
- "src/preparser.h",
- "src/prettyprinter.cc",
- "src/prettyprinter.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@@ -1175,14 +1193,14 @@ source_set("v8_base") {
"src/property.cc",
"src/property.h",
"src/prototype.h",
- "src/rewriter.cc",
- "src/rewriter.h",
"src/regexp/bytecodes-irregexp.h",
"src/regexp/interpreter-irregexp.cc",
"src/regexp/interpreter-irregexp.h",
"src/regexp/jsregexp-inl.h",
"src/regexp/jsregexp.cc",
"src/regexp/jsregexp.h",
+ "src/regexp/regexp-ast.cc",
+ "src/regexp/regexp-ast.h",
"src/regexp/regexp-macro-assembler-irregexp-inl.h",
"src/regexp/regexp-macro-assembler-irregexp.cc",
"src/regexp/regexp-macro-assembler-irregexp.h",
@@ -1190,6 +1208,8 @@ source_set("v8_base") {
"src/regexp/regexp-macro-assembler-tracer.h",
"src/regexp/regexp-macro-assembler.cc",
"src/regexp/regexp-macro-assembler.h",
+ "src/regexp/regexp-parser.cc",
+ "src/regexp/regexp-parser.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
"src/register-configuration.cc",
@@ -1232,14 +1252,6 @@ source_set("v8_base") {
"src/runtime/runtime.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
- "src/scanner-character-streams.cc",
- "src/scanner-character-streams.h",
- "src/scanner.cc",
- "src/scanner.h",
- "src/scopeinfo.cc",
- "src/scopeinfo.h",
- "src/scopes.cc",
- "src/scopes.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
@@ -1262,8 +1274,8 @@ source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
- "src/token.cc",
- "src/token.h",
+ "src/tracing/trace-event.cc",
+ "src/tracing/trace-event.h",
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
@@ -1295,12 +1307,28 @@ source_set("v8_base") {
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
- "src/variables.cc",
- "src/variables.h",
"src/version.cc",
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
+ "src/wasm/asm-wasm-builder.cc",
+ "src/wasm/asm-wasm-builder.h",
+ "src/wasm/ast-decoder.cc",
+ "src/wasm/ast-decoder.h",
+ "src/wasm/decoder.h",
+ "src/wasm/encoder.cc",
+ "src/wasm/encoder.h",
+ "src/wasm/module-decoder.cc",
+ "src/wasm/module-decoder.h",
+ "src/wasm/wasm-js.cc",
+ "src/wasm/wasm-js.h",
+ "src/wasm/wasm-macro-gen.h",
+ "src/wasm/wasm-module.cc",
+ "src/wasm/wasm-module.h",
+ "src/wasm/wasm-opcodes.cc",
+ "src/wasm/wasm-opcodes.h",
+ "src/wasm/wasm-result.cc",
+ "src/wasm/wasm-result.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
@@ -1319,6 +1347,7 @@ source_set("v8_base") {
"src/crankshaft/ia32/lithium-ia32.h",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
+ "src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
@@ -1350,6 +1379,7 @@ source_set("v8_base") {
sources += [
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
+ "src/compiler/x64/instruction-scheduler-x64.cc",
"src/compiler/x64/instruction-selector-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.h",
@@ -1408,6 +1438,7 @@ source_set("v8_base") {
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
+ "src/compiler/arm/instruction-scheduler-arm.cc",
"src/compiler/arm/instruction-selector-arm.cc",
"src/crankshaft/arm/lithium-arm.cc",
"src/crankshaft/arm/lithium-arm.h",
@@ -1460,6 +1491,7 @@ source_set("v8_base") {
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
+ "src/compiler/arm64/instruction-scheduler-arm64.cc",
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
@@ -1484,6 +1516,7 @@ source_set("v8_base") {
sources += [
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
+ "src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.h",
@@ -1525,6 +1558,7 @@ source_set("v8_base") {
sources += [
"compiler/mips64/code-generator-mips64.cc",
"compiler/mips64/instruction-codes-mips64.h",
+ "compiler/mips64/instruction-scheduler-mips64.cc",
"compiler/mips64/instruction-selector-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.h",
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 4311545a5a..54bcbe4275 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,2241 @@
+2016-01-14: Version 4.9.385
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.384
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.383
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.382
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.381
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.380
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.379
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.378
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.377
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.376
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.375
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.374
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.373
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.372
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.371
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.370
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.369
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.368
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.367
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.366
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.365
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.364
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.363
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.362
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.361
+
+ Disable concurrent osr (issue 4650).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.360
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-13: Version 4.9.359
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.358
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.357
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.356
+
+ [wasm] Rename the WASM object to _WASMEXP_ (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.355
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.354
+
+ Reland of "[Proxies] Ship Proxies + Reflect." (issues 1543, 3931).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.353
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.352
+
+ Gracefully handle proxies in AllCanWrite() (issue 1543, Chromium issue
+ 576662).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.351
+
+ [wasm] Fix double to int conversions (Chromium issue 576560).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.350
+
+ [Proxies] Ship Proxies + Reflect (issues 1543, 3931).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.349
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.348
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.347
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.346
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.345
+
+ Add @@species/better subclassing support to Promises (issue 4633,
+ Chromium issue 575314).
+
+ TypedArray and ArrayBuffer support for @@species (issue 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-12: Version 4.9.344
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.343
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.342
+
+ Ship ES2015 sloppy-mode const semantics (issue 3305).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.341
+
+ Partial rollback of Promise error checking (issue 4633).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.340
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.339
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.338
+
+ [wasm] Fix set_local appearing in unreachable code (Chromium issue
+ 575861).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.337
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.336
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.335
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.334
+
+ [wasm] Avoid crashing if parsing fails in asm -> wasm (Chromium issue
+ 575369).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.333
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.332
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-11: Version 4.9.331
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-09: Version 4.9.330
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.329
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.328
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.327
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.326
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.325
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.324
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.323
+
+ Fix sloppy block-scoped function hoisting with nested zones (Chromium
+ issue 537816).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.322
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.321
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.320
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.319
+
+ [wasm] Fix validation error for missing return statement in asm.js
+ module (Chromium issue 575364).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.318
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.317
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.316
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.315
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-08: Version 4.9.314
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.313
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.312
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.311
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.310
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.309
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.308
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.307
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.306
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.305
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.304
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.303
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.302
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.301
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-07: Version 4.9.300
+
+ Add Array support for @@species and subclassing (issue 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-06: Version 4.9.299
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-06: Version 4.9.298
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-06: Version 4.9.297
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.296
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.295
+
+ Ship ES2015 sloppy-mode function hoisting, let, class (issues 3305,
+ 4285).
+
+ Ship destructuring assignment (issue 811).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.294
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.293
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.292
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.291
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.290
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.289
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.288
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.287
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.286
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-05: Version 4.9.285
+
+ Accept time zones like GMT-8 in the legacy date parser (Chromium issue
+ 422858).
+
+ Timezone name check fix (Chromium issue 364374).
+
+ Add a --harmony-species flag, defining @@species on constructors (issue
+ 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.284
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.283
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.282
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.281
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-04: Version 4.9.280
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-01: Version 4.9.279
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-31: Version 4.9.278
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-31: Version 4.9.277
+
+ Fix 'illegal access' in Date constructor edge case (issue 4640).
+
+ Reland of Use ES2015-style TypedArray prototype chain (patchset #1 id:1
+ of https://codereview.chromium.org/1554523002/ ) (issue 4085).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.276
+
+ Reland "Clean up promises and fix an edge case bug (patchset #4 id:60001
+ of https://codereview.chromium.org/1488783002/ )" (issue 3641).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.275
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.274
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-30: Version 4.9.273
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.272
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.271
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.270
+
+ [crankshaft] Don't inline array resize operations if receiver's proto is
+ not a JSObject (Chromium issue 571064).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.269
+
+ [ic] Fixed receiver_map register trashing in KeyedStoreIC megamorphic
+ (Chromium issue 571370).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-29: Version 4.9.268
+
+ Use ES2015-style TypedArray prototype chain (issue 4085).
+
+ Guard the property RegExp.prototype.unicode behind --harmony-regexp-
+ unicode (issue 4644).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-28: Version 4.9.267
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-28: Version 4.9.266
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-26: Version 4.9.265
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-26: Version 4.9.264
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-25: Version 4.9.263
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-24: Version 4.9.262
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-24: Version 4.9.261
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.260
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.259
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.258
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.257
+
+ [elements] Enable left-trimming again (issue 4606).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.256
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-23: Version 4.9.255
+
+ Reland of Add web compat workarounds for ES2015 RegExp semantics
+ (patchset #3 id:40001 of https://codereview.chromium.org/1543723002/ )
+ (issues 4617, 4637).
+
+ Add web compat workarounds for ES2015 RegExp semantics (issues 4617,
+ 4637).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-22: Version 4.9.254
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-22: Version 4.9.253
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.252
+
+ [ES6] Stage sloppy function block scoping (issue 3305).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.251
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.250
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.249
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.248
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.247
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.246
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-21: Version 4.9.245
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-20: Version 4.9.244
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-20: Version 4.9.243
+
+ Mark all APIs without callers in Blink as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-19: Version 4.9.242
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-19: Version 4.9.241
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.240
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.239
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.238
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.237
+
+ Stage Proxies and Reflect behind --harmony flag (issues 1543, 3931).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.236
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.235
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-18: Version 4.9.234
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.233
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.232
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.231
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.230
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.229
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.228
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.227
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.226
+
+ [IC] Fix "compatible receiver" checks hidden behind interceptors
+ (Chromium issue 497632).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.225
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.224
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.223
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.222
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-17: Version 4.9.221
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.220
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.219
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.218
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.217
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.216
+
+ Stage destructuring assignment (issue 811).
+
+ Update DEPS entry for tracing to point at correct location.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-16: Version 4.9.215
+
+ [harmony] unstage regexp lookbehind assertions (issue 4545).
+
+ Move Object.observe back to shipping temporarily (Chromium issues
+ 552100, 569417, 569647).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.214
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.213
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.212
+
+ [harmony] stage regexp lookbehind assertions (issue 4545).
+
+ [es6] ship regexp sticky flag (issue 4342).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-14: Version 4.9.211
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.210
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.209
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.208
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.207
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.206
+
+ [es6] Support Function name inference in variable declarations (issue
+ 3699).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.205
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.204
+
+ Disable --harmony-object-observe (Chromium issue 552100).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.203
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.202
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.201
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.200
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.199
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.198
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.197
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.196
+
+ Re-re-land FastAccessorBuilder (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.195
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-11: Version 4.9.194
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.193
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.192
+
+ Unstage non-standard Promise functions (issue 3237).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.191
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.190
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.189
+
+ Allow ICU to normalize time zones (Chromium issue 487322).
+
+ Fix FuncNameInferrer usage in ParseAssignmentExpression (issue 4595).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.188
+
+ Fix Function subclassing (issues 3101, 3330, 4597).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.187
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.186
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.185
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.184
+
+ Re-land FastAccessorBuilder (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.183
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.182
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.181
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.180
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.179
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.178
+
+ Implement FastAccessorBuilder (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-10: Version 4.9.177
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-09: Version 4.9.176
+
+        Updated the check for unmodified objects to handle Smi Objects (Chromium
+        issue 553287).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-09: Version 4.9.175
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-09: Version 4.9.174
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-08: Version 4.9.173
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-08: Version 4.9.172
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-08: Version 4.9.171
+
+ Remove usage of deprecated APIs from api interceptor tests (issue 4341).
+
+ Deprecate Promise::Chain from V8 APIs (issue 3237).
+
+ Set the Gregorian changeover date to the beginning of time in Intl
+ (Chromium issue 537382).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.170
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.169
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.168
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.167
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.166
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.165
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.164
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.163
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.162
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.161
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.160
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.159
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-07: Version 4.9.158
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-06: Version 4.9.157
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-06: Version 4.9.156
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-06: Version 4.9.155
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-05: Version 4.9.154
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-05: Version 4.9.153
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-05: Version 4.9.152
+
+ Clean up promises and fix an edge case bug (issue 3641).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.151
+
+ [es6] implement destructuring assignment (issue 811).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.150
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.149
+
+ Mark deprecated debugger APIs as such.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.148
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.147
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.146
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.145
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.144
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.143
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.142
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.141
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.140
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.139
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.138
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.137
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-04: Version 4.9.136
+
+ Mark BooleanObject::New() as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.135
+
+ For non-prototype objects constructed using base==new.target, use the
+ cached constructor to render the name (Chromium issue 563791).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.134
+
+ Deprecate non-standard Array methods and clarify Object::isArray.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.133
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.132
+
+ Fix inobject slack tracking for both subclassing and non-subclassing
+ cases (Chromium issue 563339).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.131
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-03: Version 4.9.130
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.129
+
+ Removed support deprecated (//@|/*@) source(URL|MappingURL)= (Chromium
+ issue 558998).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.128
+
+ Improve rendering of callsite with non-function target (issue 3953).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.127
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.126
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.125
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.124
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.123
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.122
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-02: Version 4.9.121
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.120
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.119
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.118
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.117
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.116
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.115
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.114
+
+ Performance and stability improvements on all platforms.
+
+
+2015-12-01: Version 4.9.113
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.112
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.111
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.110
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.109
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.108
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.107
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.106
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.105
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.104
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.103
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.102
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.101
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.100
+
+ Move RMA::Label out of the class, so it can be forward declared
+ (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.99
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.98
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.97
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.96
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-30: Version 4.9.95
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-29: Version 4.9.94
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-29: Version 4.9.93
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-28: Version 4.9.92
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-28: Version 4.9.91
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.90
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.89
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.88
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.87
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.86
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.85
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.84
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-27: Version 4.9.83
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.82
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.81
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.80
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.79
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.78
+
+ Mark PromiseRejectMessage::GetStackTrace as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.77
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.76
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.75
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.74
+
+ Add explicit Isolate parameter to Exception::CreateMessage() (Chromium
+ issue 495801).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.73
+
+ Allow in-object properties in JSArrayBuffer (issue 4531).
+
+ Allow in-object properties in JSTypedArray and JSDataView (issue 4531).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.72
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.71
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.70
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-26: Version 4.9.69
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-25: Version 4.9.68
+
+ Reland shipping of --harmony-destructuring-bind (issue 811).
+
+ Fix promotion of JSFunctions with in-object properties (issue 4572,
+ Chromium issue 561481).
+
+ Allow in-object properties in JSCollections, JSWeakCollections and
+ JSRegExp (issue 4531).
+
+ Fix JSFunction's in-object properties initialization (issue 4572).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-25: Version 4.9.67
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-25: Version 4.9.66
+
+ Removed support deprecated (//@|/*@) source(URL|MappingURL)= (Chromium
+ issue 558998).
+
+        PPC: Reshuffle registers in JSConstructStub to avoid trashing constructor
+ and new.target on fast path (so we don't need to push/pop them)
+ (Chromium issue 560239).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.65
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.64
+
+ Move --harmony-destructuring-bind to shipping (issue 811).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.63
+
+        Reshuffle registers in JSConstructStub to avoid trashing constructor and
+ new.target on fast path (so we don't need to push/pop them) (Chromium
+ issue 560239).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.62
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.61
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.60
+
+ Allow in-object properties in JSFunctions (issue 4531).
+
+ Disable non-standard Promise functions in staging (issue 3237).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.59
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.58
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-24: Version 4.9.57
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.56
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.55
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.54
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.53
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.52
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.51
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.50
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.49
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.48
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.47
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-23: Version 4.9.46
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-22: Version 4.9.45
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-22: Version 4.9.44
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-22: Version 4.9.43
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-21: Version 4.9.42
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.41
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.40
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.39
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.38
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.37
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.36
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.35
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.34
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.33
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.32
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.31
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.30
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-20: Version 4.9.29
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.28
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.27
+
+ [V8] Unify get function name for debugging purpose (Chromium issue
+ 17356).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.26
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.25
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.24
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.23
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.22
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.21
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-19: Version 4.9.20
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.19
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.18
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.17
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.16
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.15
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-18: Version 4.9.14
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.13
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.12
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.11
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-17: Version 4.9.10
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.9
+
+ Map v8::Object to v8::internal::JSReceiver.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.8
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.7
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.5
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-16: Version 4.9.4
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-15: Version 4.9.3
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-15: Version 4.9.2
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-15: Version 4.9.1
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-13: Version 4.8.294
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-13: Version 4.8.293
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.292
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.291
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.290
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.289
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.288
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.287
+
+ [JSON stringifier] Correctly load array elements (Chromium issue
+ 554946).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.286
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.285
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.284
+
+ Ship --harmony-default-parameters (issue 2160).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.283
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.282
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.281
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.280
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-12: Version 4.8.279
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.278
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.277
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.276
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.275
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.274
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.273
+
+ Performance and stability improvements on all platforms.
+
+
+2015-11-11: Version 4.8.272
+
+ Performance and stability improvements on all platforms.
+
+
2015-11-11: Version 4.8.271
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index fc1ad8585e..07b11a4ae3 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,13 +8,15 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "2c1e6cced23554ce84806e570acea637f6473afc",
+ Var("git_url") + "/external/gyp.git" + "@" + "b85ad3e578da830377dbc1843aa4fbc5af17a192",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "42c58d4e49f2250039f0e98d43e0b76e8f5ca024",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "8d342a405be5ae8aacb1e16f0bc31c3a4fbf26a2",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "4a95614772d9bcbd8bc197e1d9bd034e088fc740",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "0f8e6e4b126ee88137930a0ae4776c4741808740",
+ "v8/base/trace_event/common":
+ Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "d83d44b13d07c2fd0a40101a7deef9b93b841732",
"v8/tools/swarming_client":
- Var('git_url') + '/external/swarming.client.git' + '@' + "8fce79620b04bbe5415ace1103db27505bdc4c06",
+ Var('git_url') + '/external/swarming.client.git' + '@' + "9cdd76171e517a430a72dcd7d66ade67e109aa00",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -25,15 +27,15 @@ deps = {
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
"v8/test/test262/data":
- Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "ea222fb7d09e334c321b987656315ad4056ded96",
+ Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "67ba34b03a46bac4254223ae25f42c7b959540f0",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "66f5328417331216569e8beb244fd887f62e8997",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "24e8c1c92fe54ef8ed7651b5850c056983354a4a",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "54492f99c84cab0826a8e656efeb33a1b1bf5a04",
+ Var("git_url") + "/android_tools.git" + "@" + "f4c36ad89b2696b37d9cd7ca7d984b691888b188",
},
"win": {
"v8/third_party/cygwin":
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 28c1af2e07..6ae9b24576 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -220,12 +220,6 @@ ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
-# Optionally enable wasm prototype.
-# Assume you've placed a link to v8-native-prototype in third_party/wasm.
-ifeq ($(wasm), on)
- GYPFLAGS += -Dv8_wasm=1
-endif
-
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
@@ -244,7 +238,8 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 \
+ s390 s390x
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index ab9bba8845..f8516afc44 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -69,6 +69,7 @@ def _V8PresubmitChecks(input_api, output_api):
from presubmit import SourceProcessor
from presubmit import CheckExternalReferenceRegistration
from presubmit import CheckAuthorizedAuthor
+ from presubmit import CheckStatusFiles
results = []
if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
@@ -80,6 +81,8 @@ def _V8PresubmitChecks(input_api, output_api):
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
+ if not CheckStatusFiles(input_api.PresubmitLocalPath()):
+ results.append(output_api.PresubmitError("Status file check failed"))
results.extend(CheckAuthorizedAuthor(input_api, output_api))
return results
@@ -272,28 +275,3 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
-
-
-def GetPreferredTryMasters(project, change):
- return {
- 'tryserver.v8': {
- 'v8_linux_rel': set(['defaulttests']),
- 'v8_linux_dbg': set(['defaulttests']),
- 'v8_linux_nodcheck_rel': set(['defaulttests']),
- 'v8_linux_gcc_compile_rel': set(['defaulttests']),
- 'v8_linux64_rel': set(['defaulttests']),
- 'v8_linux64_asan_rel': set(['defaulttests']),
- 'v8_linux64_avx2_rel': set(['defaulttests']),
- 'v8_win_rel': set(['defaulttests']),
- 'v8_win_compile_dbg': set(['defaulttests']),
- 'v8_win_nosnap_shared_compile_rel': set(['defaulttests']),
- 'v8_win64_rel': set(['defaulttests']),
- 'v8_mac_rel': set(['defaulttests']),
- 'v8_linux_arm_rel': set(['defaulttests']),
- 'v8_linux_arm64_rel': set(['defaulttests']),
- 'v8_linux_mipsel_compile_rel': set(['defaulttests']),
- 'v8_linux_mips64el_compile_rel': set(['defaulttests']),
- 'v8_android_arm_compile_rel': set(['defaulttests']),
- 'v8_linux_chromium_gn_rel': set(['defaulttests']),
- },
- }
diff --git a/deps/v8/README.md b/deps/v8/README.md
index c649f02ec2..840c4971f9 100644
--- a/deps/v8/README.md
+++ b/deps/v8/README.md
@@ -10,7 +10,7 @@ browser from Google.
V8 can run standalone, or can be embedded into any C++ application.
-V8 Project page: https://code.google.com/p/v8/
+V8 Project page: https://github.com/v8/v8/wiki
Getting the Code
@@ -37,4 +37,4 @@ Contributing
=============
Please follow the instructions mentioned on the
-[V8 wiki](https://code.google.com/p/v8-wiki/wiki/Contributing).
+[V8 wiki](https://github.com/v8/v8/wiki/Contributing).
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index a5699abcf1..81e941f28a 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -50,6 +50,12 @@
'feature_shipping_status': {
'filepath': 'src/flag-definitions.h',
},
+ 'gc_changes': {
+ 'filepath': 'src/heap/',
+ },
+ 'merges': {
+ 'filepath': '.',
+ },
},
'WATCHLISTS': {
@@ -69,5 +75,13 @@
'feature_shipping_status': [
'hablich@chromium.org',
],
+ 'gc_changes': [
+ 'hpayer@chromium.org',
+ 'ulan@chromium.org',
+ ],
+ 'merges': [
+ # Only enabled on branches created with tools/release/create_release.py
+ 'v8-merges@googlegroups.com',
+ ],
},
}
diff --git a/deps/v8/build/all.gyp b/deps/v8/build/all.gyp
index 0195083b01..0a05a2f02f 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/build/all.gyp
@@ -24,6 +24,7 @@
'../test/bot_default.gyp:*',
'../test/benchmarks/benchmarks.gyp:*',
'../test/default.gyp:*',
+ '../test/ignition.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',
'../test/mjsunit/mjsunit.gyp:*',
@@ -33,6 +34,7 @@
'../test/simdjs/simdjs.gyp:*',
'../test/test262/test262.gyp:*',
'../test/webkit/webkit.gyp:*',
+ '../tools/check-static-initializers.gyp:*',
],
}],
]
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 03b3072c9b..5a21a63e32 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -67,9 +67,6 @@
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
- # Set to 1 to enable building with wasm prototype.
- 'v8_wasm%': 0,
-
# Enable/disable JavaScript API accessors.
'v8_js_accessors%': 0,
},
@@ -111,12 +108,6 @@
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
- ['v8_wasm!=0', {
- 'defines': ['V8_WASM',],
- }],
- ['v8_js_accessors!=0', {
- 'defines': ['V8_JS_ACCESSORS'],
- }],
], # conditions
'configurations': {
'DebugBaseCommon': {
diff --git a/deps/v8/build/get_landmines.py b/deps/v8/build/get_landmines.py
index 6e4dbb3468..ea0ae0d415 100755
--- a/deps/v8/build/get_landmines.py
+++ b/deps/v8/build/get_landmines.py
@@ -25,6 +25,7 @@ def main():
print 'Remove build/android.gypi'
print 'Cleanup after windows ninja switch attempt.'
print 'Switching to pinned msvs toolchain.'
+ print 'Clobbering to hopefully resolve problem with mksnapshot'
return 0
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index fa4d45d4eb..273d72b744 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -42,8 +42,7 @@
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
- # TODO(jochen): Turn this on.
- 'v8_imminent_deprecation_warnings%': 0,
+ 'v8_imminent_deprecation_warnings': 1,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'release_extra_cflags%': '',
@@ -68,11 +67,15 @@
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
+
+ # Instrument for code coverage with gcov.
+ 'coverage%': 0,
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
+ 'coverage%': '<(coverage)',
'asan%': 0,
'lsan%': 0,
'msan%': 0,
@@ -106,6 +109,7 @@
# If no gomadir is set, it uses the default gomadir.
'use_goma%': 0,
'gomadir%': '',
+
'conditions': [
# Set default gomadir.
['OS=="win"', {
@@ -113,10 +117,11 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
- ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
- 'host_clang%': '1',
+ ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le" and host_arch!="s390" and host_arch!="s390x" and \
+ coverage==0', {
+ 'host_clang%': 1,
}, {
- 'host_clang%': '0',
+ 'host_clang%': 0,
}],
# linux_use_bundled_gold: whether to use the gold linker binary checked
# into third_party/binutils. Force this off via GYP_DEFINES when you
@@ -160,6 +165,7 @@
'cfi_blacklist%': '<(cfi_blacklist)',
'test_isolation_mode%': '<(test_isolation_mode)',
'fastbuild%': '<(fastbuild)',
+ 'coverage%': '<(coverage)',
# Add a simple extras solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -221,7 +227,7 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
- (v8_target_arch!="x87" and v8_target_arch!="x32")', {
+ (v8_target_arch!="x87" and v8_target_arch!="x32") and coverage==0', {
'clang%': 1,
}, {
'clang%': 0,
@@ -406,13 +412,16 @@
],
},
'conditions':[
- ['(clang==1 or host_clang==1) and OS!="win"', {
+ ['clang==0', {
+ 'cflags+': ['-Wno-sign-compare',],
+ }],
+ ['clang==1 or host_clang==1', {
# This is here so that all files get recompiled after a clang roll and
# when turning clang on or off.
# (defines are passed via the command line, and build systems rebuild
# things when their commandline changes). Nothing should ever read this
# define.
- 'defines': ['CR_CLANG_REVISION=<!(<(DEPTH)/tools/clang/scripts/update.sh --print-revision)'],
+ 'defines': ['CR_CLANG_REVISION=<!(python <(DEPTH)/tools/clang/scripts/update.py --print-revision)'],
'conditions': [
['host_clang==1', {
'target_conditions': [
@@ -575,9 +584,11 @@
'cflags': [
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
+ '-fPIC',
],
'ldflags': [
'-fsanitize=memory',
+ '-pie',
],
'defines': [
'MEMORY_SANITIZER',
@@ -675,6 +686,7 @@
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
+ '-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
'-Wnon-virtual-dtor',
@@ -684,6 +696,16 @@
],
'ldflags': [ '-pthread', ],
'conditions': [
+ # Don't warn about TRACE_EVENT_* macros with zero arguments passed to
+ # ##__VA_ARGS__. C99 strict mode prohibits having zero variadic macro
+ # arguments in gcc.
+ [ 'clang==0', {
+ 'cflags!' : [
+ '-pedantic' ,
+ # Don't warn about unrecognized command line option.
+ '-Wno-gnu-zero-variadic-macro-arguments',
+ ],
+ }],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
'cflags': [ '-Wshorten-64-to-32' ],
@@ -697,6 +719,11 @@
[ 'component=="shared_library"', {
'cflags': [ '-fPIC', ],
}],
+ [ 'coverage==1', {
+ 'cflags!': [ '-O3', '-O2', '-O1', ],
+ 'cflags': [ '-fprofile-arcs', '-ftest-coverage', '-O0'],
+ 'ldflags': [ '-fprofile-arcs'],
+ }],
],
},
}],
@@ -710,6 +737,7 @@
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
+ '-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
'-Wnon-virtual-dtor',
@@ -817,7 +845,6 @@
4309, # Truncation of constant value
4311, # Pointer truncation from 'type' to 'type'
4312, # Conversion from 'type1' to 'type2' of greater size
- 4481, # Nonstandard extension used: override specifier 'keyword'
4505, # Unreferenced local function has been removed
4510, # Default constructor could not be generated
4512, # Assignment operator could not be generated
@@ -934,6 +961,7 @@
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
+ '-Wno-gnu-zero-variadic-macro-arguments',
],
},
'conditions': [
@@ -1215,6 +1243,16 @@
['CC', '<(clang_dir)/bin/clang-cl'],
],
}],
+ ['OS=="linux" and target_arch=="arm" and host_arch!="arm" and clang==0 and "<(GENERATOR)"=="ninja"', {
+ # Set default ARM cross tools on linux. These can be overridden
+ # using CC,CXX,CC.host and CXX.host environment variables.
+ 'make_global_settings': [
+ ['CC', '<!(which arm-linux-gnueabihf-gcc)'],
+ ['CXX', '<!(which arm-linux-gnueabihf-g++)'],
+ ['CC.host', '<(host_cc)'],
+ ['CXX.host', '<(host_cxx)'],
+ ],
+ }],
# TODO(yyanagisawa): supports GENERATOR==make
# make generator doesn't support CC_wrapper without CC
# in make_global_settings yet.
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index 7c96144b38..e1cd791490 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -135,6 +135,7 @@
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or \
host_arch=="ppc" or host_arch=="ppc64" or \
+ host_arch=="s390" or host_arch=="s390x" or \
clang==1', {
'variables': {
'host_cxx_is_biarch%': 1,
@@ -145,8 +146,8 @@
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
- target_arch=="ppc" or target_arch=="ppc64" or \
- clang==1', {
+ target_arch=="ppc" or target_arch=="ppc64" or target_arch=="s390" or \
+ target_arch=="s390x" or clang==1', {
'variables': {
'target_cxx_is_biarch%': 1,
},
@@ -297,6 +298,23 @@
'V8_TARGET_ARCH_ARM64',
],
}],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_S390',
+ ],
+ 'conditions': [
+ ['v8_target_arch=="s390x"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_S390X',
+ ],
+ }],
+ ['v8_host_byteorder=="little"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_S390_LE_SIM',
+ ],
+ }],
+ ],
+ }], # s390
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'defines': [
'V8_TARGET_ARCH_PPC',
@@ -357,6 +375,9 @@
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
+ [ 'clang==1', {
+ 'cflags': ['-integrated-as'],
+ }],
],
}],
],
@@ -406,7 +427,12 @@
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
- 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r6'],
+ }],
+ ],
+ 'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
@@ -432,8 +458,11 @@
],
'cflags': ['-mfp32'],
}],
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r2'],
+ }],
],
- 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ 'cflags': ['-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
@@ -441,7 +470,12 @@
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
- 'cflags': ['-mips32', '-Wa,-mips32'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
@@ -450,7 +484,12 @@
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
- 'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
],
@@ -589,7 +628,12 @@
'FPU_MODE_FP64',
],
'cflags!': ['-mfp32', '-mfpxx'],
- 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r6'],
+ }],
+ ],
+ 'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
@@ -615,13 +659,21 @@
],
'cflags': ['-mfp32'],
}],
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32r2'],
+ }],
],
- 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+ 'cflags': ['-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64', '-mfpxx'],
- 'cflags': ['-mips32', '-Wa,-mips32'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
@@ -630,7 +682,12 @@
'FPU_MODE_FPXX',
],
'cflags!': ['-mfp64', '-mfp32'],
- 'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips32'],
+ }],
+ ],
+ 'cflags': ['-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="loongson"', {
@@ -639,7 +696,12 @@
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
- 'cflags': ['-mips3', '-Wa,-mips3', '-mfp32'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips3'],
+ }],
+ ],
+ 'cflags': ['-mips3', '-mfp32'],
}],
],
}, {
@@ -800,12 +862,22 @@
}],
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
- 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips64r6'],
+ }],
+ ],
+ 'cflags': ['-mips64r6', '-mabi=64'],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
- 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
+ 'conditions': [
+ [ 'clang==0', {
+ 'cflags': ['-Wa,-mips64r2'],
+ }],
+ ],
+ 'cflags': ['-mips64r2', '-mabi=64'],
'ldflags': ['-mips64r2', '-mabi=64'],
}],
],
@@ -925,13 +997,21 @@
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
- v8_target_arch=="mipsel" or v8_target_arch=="ppc")', {
+ v8_target_arch=="mipsel" or v8_target_arch=="ppc" or \
+ v8_target_arch=="s390")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
['host_cxx_is_biarch==1', {
- 'cflags': [ '-m32' ],
- 'ldflags': [ '-m32' ]
+ 'conditions': [
+ ['host_arch=="s390" or host_arch=="s390x"', {
+ 'cflags': [ '-m31' ],
+ 'ldflags': [ '-m31' ]
+ },{
+ 'cflags': [ '-m32' ],
+ 'ldflags': [ '-m32' ]
+ }],
+ ],
}],
],
'xcode_settings': {
@@ -941,8 +1021,15 @@
['_toolset=="target"', {
'conditions': [
['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
- 'cflags': [ '-m32' ],
- 'ldflags': [ '-m32' ],
+ 'conditions': [
+ ['host_arch=="s390" or host_arch=="s390x"', {
+ 'cflags': [ '-m31' ],
+ 'ldflags': [ '-m31' ]
+ },{
+ 'cflags': [ '-m32' ],
+ 'ldflags': [ '-m32' ],
+ }],
+ ],
}],
],
'xcode_settings': {
@@ -953,7 +1040,7 @@
}],
['(OS=="linux" or OS=="android") and \
(v8_target_arch=="x64" or v8_target_arch=="arm64" or \
- v8_target_arch=="ppc64")', {
+ v8_target_arch=="ppc64" or v8_target_arch=="s390x")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
diff --git a/deps/v8/docs/README.md b/deps/v8/docs/README.md
new file mode 100644
index 0000000000..0eded673b8
--- /dev/null
+++ b/deps/v8/docs/README.md
@@ -0,0 +1,2 @@
+The documentation for V8 can be found at the
+[V8 Wiki](https://github.com/v8/v8/wiki).
diff --git a/deps/v8/docs/arm_debugging_with_the_simulator.md b/deps/v8/docs/arm_debugging_with_the_simulator.md
deleted file mode 100644
index eceb7a58ab..0000000000
--- a/deps/v8/docs/arm_debugging_with_the_simulator.md
+++ /dev/null
@@ -1,205 +0,0 @@
-# ARM debugging with the simulator
-
-The simulator and debugger can be very helpful when working with v8 code generation.
-
- * It is convenient as it allows you to test code generation without access to actual hardware.
- * No cross or native compilation is needed.
- * The simulator fully supports the debugging of generated code.
-
-Please note that this simulator is designed for v8 purposes. Only the features used by v8 are implemented, and you might encounter unimplemented features or instructions. In this case, feel free to implement them and submit the code!
-
-
-## Details on the ARM Debugger
-
-Compile the ARM simulator shell with:
-```
-make arm.debug
-```
-on an x86 host using your regular compiler.
-
-### Starting the Debugger
-There are different ways of starting the debugger:
-
-```
-$ out/arm.debug/d8 --stop_sim_at <n>
-```
-The simulator will start the debugger after executing n instructions.
-
-```
-$ out/arm.debug/d8 --stop_at <function name>
-```
-
-The simulator will stop at the given JavaScript function.
-
-Also you can directly generate 'stop' instructions in the ARM code. Stops are generated with
-
-```
-Assembler::stop(const char* msg, Condition cond, int32_t code)
-```
-
-When the Simulator hits a stop, it will print msg and start the debugger.
-
-
-### Debugging commands.
-
-**Usual commands:**
-
-Enter `help` in the debugger prompt to get details on available commands. These include usual gdb-like commands, such as stepi, cont, disasm, etc. If the Simulator is run under gdb, the “gdb” debugger command will give control to gdb. You can then use cont from gdb to go back to the debugger.
-
-
-**Debugger specific commands:**
-
-Here's a list of the ARM debugger specific commands, along with examples.
-The JavaScript file “func.js” used below contains:
-
-```
-function test() {
- print(“In function test.”);
-}
-test();
-```
-
- * **printobject** `<`register`>` (alias po), will describe an object held in a register.
-
-```
-$ out/arm.debug/d8 func.js --stop_at test
-
-Simulator hit stop-at
- 0xb544d6a8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
-sim> print r0
-r0: 0xb547ec15 -1253577707
-sim> printobject r0
-r0:
-0xb547ec15: [Function]
- - map = 0x0xb540ff01
- - initial_map =
- - shared_info = 0xb547eb2d <SharedFunctionInfo>
- - name = #test
- - context = 0xb60083f1 <FixedArray[52]>
- - code = 0xb544d681 <Code>
- #arguments: 0xb545a15d <Proxy> (callback)
- #length: 0xb545a14d <Proxy> (callback)
- #name: 0xb545a155 <Proxy> (callback)
- #prototype: 0xb545a145 <Proxy> (callback)
- #caller: 0xb545a165 <Proxy> (callback)
-```
-
- * **break** `<`address`>`, will insert a breakpoint at the specified address.
-
- * **del**, will delete the current breakpoint.
-
-You can have only one such breakpoint. This is useful if you want to insert a breakpoint at runtime.
-```
-$ out/arm.debug/d8 func.js --stop_at test
-
-Simulator hit stop-at
- 0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
-sim> disasm 5
- 0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
- 0xb53a1eec e28db008 add fp, sp, #8
- 0xb53a1ef0 e59a200c ldr r2, [r10, #+12]
- 0xb53a1ef4 e28fe004 add lr, pc, #4
- 0xb53a1ef8 e15d0002 cmp sp, r2
-sim> break 0xb53a1ef8
-sim> cont
- 0xb53a1ef8 e15d0002 cmp sp, r2
-sim> disasm 5
- 0xb53a1ef8 e15d0002 cmp sp, r2
- 0xb53a1efc 359ff034 ldrcc pc, [pc, #+52]
- 0xb53a1f00 e5980017 ldr r0, [r8, #+23]
- 0xb53a1f04 e59f1030 ldr r1, [pc, #+48]
- 0xb53a1f08 e52d0004 str r0, [sp, #-4]!
-sim> break 0xb53a1f08
-setting breakpoint failed
-sim> del
-sim> break 0xb53a1f08
-sim> cont
- 0xb53a1f08 e52d0004 str r0, [sp, #-4]!
-sim> del
-sim> cont
-In function test.
-```
-
- * Generated `stop` instuctions, will work as breakpoints with a few additional features.
-
-The first argument is a help message, the second is the condition, and the third is the stop code. If a code is specified, and is less than 256, the stop is said to be “watched”, and can be disabled/enabled; a counter also keeps track of how many times the Simulator hits this code.
-
-If we are working on this v8 C++ code, which is reached when running our JavaScript file.
-
-```
-__ stop("My stop.", al, 123);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ mov(r0, r0);
-__ stop("My second stop.", al, 0x1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-__ mov(r1, r1);
-```
-
-Here's a sample debugging session:
-
-We hit the first stop.
-
-```
-Simulator hit My stop.
- 0xb53559e8 e1a00000 mov r0, r0
-```
-
-We can see the following stop using disasm. The address of the message string is inlined in the code after the svc stop instruction.
-
-```
-sim> disasm
- 0xb53559e8 e1a00000 mov r0, r0
- 0xb53559ec e1a00000 mov r0, r0
- 0xb53559f0 e1a00000 mov r0, r0
- 0xb53559f4 e1a00000 mov r0, r0
- 0xb53559f8 e1a00000 mov r0, r0
- 0xb53559fc ef800001 stop 1 - 0x1
- 0xb5355a00 08338a97 stop message: My second stop
- 0xb5355a04 e1a00000 mov r1, r1
- 0xb5355a08 e1a00000 mov r1, r1
- 0xb5355a0c e1a00000 mov r1, r1
-```
-
-Information can be printed for all (watched) stops which were hit at least once.
-
-```
-sim> stop info all
-Stop information:
-stop 123 - 0x7b: Enabled, counter = 1, My stop.
-sim> cont
-Simulator hit My second stop
- 0xb5355a04 e1a00000 mov r1, r1
-sim> stop info all
-Stop information:
-stop 1 - 0x1: Enabled, counter = 1, My second stop
-stop 123 - 0x7b: Enabled, counter = 1, My stop.
-```
-
-Stops can be disabled or enabled. (Only available for watched stops.)
-
-```
-sim> stop disable 1
-sim> cont
-Simulator hit My stop.
- 0xb5356808 e1a00000 mov r0, r0
-sim> cont
-Simulator hit My stop.
- 0xb5356c28 e1a00000 mov r0, r0
-sim> stop info all
-Stop information:
-stop 1 - 0x1: Disabled, counter = 2, My second stop
-stop 123 - 0x7b: Enabled, counter = 3, My stop.
-sim> stop enable 1
-sim> cont
-Simulator hit My second stop
- 0xb5356c44 e1a00000 mov r1, r1
-sim> stop disable all
-sim> con
-In function test.
-``` \ No newline at end of file
diff --git a/deps/v8/docs/becoming_v8_committer.md b/deps/v8/docs/becoming_v8_committer.md
deleted file mode 100644
index 0a927b3ca9..0000000000
--- a/deps/v8/docs/becoming_v8_committer.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Becoming a V8 committer
-
-## What is a committer?
-
-Technically, a committer is someone who has write access to the V8 Git repository. A committer can submit his or her own patches or patches from others.
-
-This privilege is granted with some expectation of responsibility: committers are people who care about the V8 project and want to help meet its goals. A committer is not just someone who can make changes, but someone who has demonstrated his or her ability to collaborate with the team, get the most knowledgeable people to review code, contribute high-quality code, and follow through to fix issues (in code or tests).
-
-A committer is a contributor to the V8 projects' success and a citizen helping the projects succeed. See V8CommittersResponsibility.
-
-## How do I become a committer?
-
-In a nutshell, contribute 20 non-trivial patches and get at least three different people to review them (you'll need three people to support you). Then ask someone to nominate you. You're demonstrating your:
-
- * commitment to the project (20 good patches requires a lot of your valuable time),
- * ability to collaborate with the team,
- * understanding of how the team works (policies, processes for testing and code review, etc),
- * understanding of the projects' code base and coding style, and
- * ability to write good code (last but certainly not least)
-
-A current committer nominates you by sending email to v8-committers@googlegroups.com containing:
-
- * your first and last name
- * your Google Code email address
- * an explanation of why you should be a committer,
- * embedded list of links to revisions (about top 10) containing your patches
-
-Two other committers need to second your nomination. If no one objects in 5 working days (U.S.), you're a committer. If anyone objects or wants more information, the committers discuss and usually come to a consensus (within the 5 working days). If issues cannot be resolved, there's a vote among current committers.
-
-Once you get approval from the existing committers, we'll send you instructions for write access to SVN or Git. You'll also be added to v8-committers@googlegroups.com.
-
-In the worst case, this can drag out for two weeks. Keep writing patches! Even in the rare cases where a nomination fails, the objection is usually something easy to address like "more patches" or "not enough people are familiar with this person's work."
-
-## Maintaining committer status
-
-You don't really need to do much to maintain committer status: just keep being awesome and helping the V8 project!
-
-In the unhappy event that a committer continues to disregard good citizenship (or actively disrupts the project), we may need to revoke that person's status. The process is the same as for nominating a new committer: someone suggests the revocation with a good reason, two people second the motion, and a vote may be called if consensus cannot be reached. I hope that's simple enough, and that we never have to test it in practice.
-
-(Source: inspired by http://dev.chromium.org/getting-involved/become-a-committer )
diff --git a/deps/v8/docs/building_with_gyp.md b/deps/v8/docs/building_with_gyp.md
deleted file mode 100644
index 0183fd2de5..0000000000
--- a/deps/v8/docs/building_with_gyp.md
+++ /dev/null
@@ -1,260 +0,0 @@
-**Build issues? File a bug at code.google.com/p/v8/issues or ask for help on v8-users@googlegroups.com.**
-
-# Building V8
-
-V8 is built with the help of [GYP](http://code.google.com/p/gyp/). GYP is a meta build system of sorts, as it generates build files for a number of other build systems. How you build therefore depends on what "back-end" build system and compiler you're using.
-The instructions below assume that you already have a [checkout of V8](using_git.md) but haven't yet installed the build dependencies.
-
-If you intend to develop on V8, i.e., send patches and work with changelists, you will need to install the dependencies as described [here](using_git.md).
-
-
-## Prerequisite: Installing GYP
-
-First, you need GYP itself. GYP is fetched together with the other dependencies by running:
-
-```
-gclient sync
-```
-
-## Building
-
-### GCC + make
-
-Requires GNU make 3.81 or later. Should work with any GCC >= 4.8 or any recent clang (3.5 highly recommended).
-
-#### Build instructions
-
-
-The top-level Makefile defines a number of targets for each target architecture (`ia32`, `x64`, `arm`, `arm64`) and mode (`debug`, `optdebug`, or `release`). So your basic command for building is:
-```
-make ia32.release
-```
-
-or analogously for the other architectures and modes. You can build both debug and release binaries with just one command:
-```
-make ia32
-```
-
-To automatically build in release mode for the host architecture:
-```
-make native
-```
-
-You can also can build all architectures in a given mode at once:
-```
-make release
-```
-
-Or everything:
-```
-make
-```
-
-#### Optional parameters
-
- * `-j` specifies the number of parallel build processes. Set it (roughly) to the number of CPU cores your machine has. The GYP/make based V8 build also supports distcc, so you can compile with `-j100` or so, provided you have enough machines around.
-
- * `OUTDIR=foo` specifies where the compiled binaries go. It defaults to `./out/`. In this directory, a subdirectory will be created for each architecture and mode. You will find the d8 shell's binary in `foo/ia32.release/d8`, for example.
-
- * `library=shared` or `component=shared_library` (the two are completely equivalent) builds V8 as a shared library (`libv8.so`).
-
- * `soname_version=1.2.3` is only relevant for shared library builds and configures the SONAME of the library. Both the SONAME and the filename of the library will be `libv8.so.1.2.3` if you specify this. Due to a peculiarity in GYP, if you specify a custom SONAME, the library's path will no longer be encoded in the binaries, so you'll have to run d8 as follows:
-```
-LD_LIBRARY_PATH=out/ia32.release/lib.target out/ia32.release/d8
-```
-
- * `console=readline` enables readline support for the d8 shell. You need readline development headers for this (`libreadline-dev` on Ubuntu).
-
- * `disassembler=on` enables the disassembler for release mode binaries (it's always enabled for debug binaries). This is useful if you want to inspect generated machine code.
-
- * `snapshot=off` disables building with a heap snapshot. Compiling will be a little faster, but V8’s start up will be slightly slower.
-
- * `gdbjit=on` enables GDB JIT support.
-
- * `liveobjectlist=on` enables the Live Object List feature.
-
- * `vfp3=off` is only relevant for ARM builds with snapshot and disables the use of VFP3 instructions in the snapshot.
-
- * `debuggersupport=off` disables the javascript debugger.
-
- * `werror=no` omits the -Werror flag. This is especially useful for not officially supported C++ compilers (e.g. newer versions of the GCC) so that compile warnings are ignored.
-
- * `strictaliasing=off` passes the -fno-strict-aliasing flag to GCC. This may help to work around build failures on officially unsupported platforms and/or GCC versions.
-
- * `regexp=interpreted` chooses the interpreted mode of the irregexp regular expression engine instead of the native code mode.
-
- * `hardfp=on` creates "hardfp" binaries on ARM.
-
-### Ninja
-
-To build d8:
-```
-export GYP_GENERATORS=ninja
-build/gyp_v8
-ninja -C out/Debug d8
-```
-
-Specify `out/Release` for a release build. I recommend setting up an alias so that you don't need to type out that build directory path.
-
-If you want to build all targets, use `ninja -C out/Debug all`. It's faster to build only the target you're working on, like `d8` or `unittests`.
-
-Note: You need to set `v8_target_arch` if you want a non-native build, i.e. either
-```
-export GYP_DEFINES="v8_target_arch=arm"
-build/gyp_v8 ...
-```
-or
-```
-build/gyp_v8 -Dv8_target_arch=arm ...
-```
-
-
-#### Using goma (Googlers only)
-
-To use goma you need to set the `use_goma` gyp define, either by passing it to `gyp_v8`, i.e.
-```
-build/gyp_v8 -Duse_goma=1
-```
-or by setting the environment variable `$GYP_DEFINES` appropriately:
-```
-export GYP_DEFINES="use_goma=1"
-```
-Note: You may need to also set `gomadir` to point to the directory where you installed goma, if it's not in the default location.
-
-If you are using goma, you'll also want to bump the job limit, i.e.
-```
-ninja -j 100 -C out/Debug d8
-```
-
-
-### Cross-compiling
-
-Similar to building with Clang, you can also use a cross-compiler. Just export your toolchain (`CXX`/`LINK` environment variables should be enough) and compile. For example:
-```
-export CXX=/path/to/cross-compile-g++
-export LINK=/path/to/cross-compile-g++
-make arm.release
-```
-
-
-### Xcode
-
-From the root of your V8 checkout, run either of:
-```
-build/gyp_v8 -Dtarget_arch=ia32
-build/gyp_v8 -Dtarget_arch=x64
-```
-
-This will generate Xcode project files in `build/` that you can then either open with Xcode or compile directly from the command line:
-```
-xcodebuild -project build/all.xcodeproj -configuration Release
-xcodebuild -project build/all.xcodeproj
-```
-
-Note: If you have configured your `GYP_GENERATORS` environment variable, either unset it, or set it to `xcode` for this to work.
-
-
-#### Custom build settings
-
-You can export the `GYP_DEFINES` environment variable in your shell to configure custom build options. The syntax is `GYP_DEFINES="-Dvariable1=value1 -Dvariable2=value2"` and so on for as many variables as you wish. Possibly interesting options include:
- * `-Dcomponent=shared_library` (see `library=shared` in the [GCC + make](#Optional_parameters.md) section above)
- * `-Dconsole=readline` (see `console=readline`)
- * `-Dv8_enable_disassembler=1` (see `disassembler=on`)
- * `-Dv8_use_snapshot='false'` (see `snapshot=off`)
- * `-Dv8_enable_gdbjit=1` (see `gdbjit=on`)
- * `-Dv8_use_liveobjectlist=true` (see `liveobjectlist=on`)
-
-
-### Visual Studio
-
-You need Visual Studio 2013, older versions might still work at the moment, but this will probably change soon because we intend to use C++11 features.
-
-#### Prerequisites
-
-After you created [checkout of V8](using_git.md), all dependencies will be already installed.
-
-If you are getting errors during build mentioning that 'python' could not be found, add the 'python.exe' to PATH.
-
-If you have Visual Studio 2013 and 2015 installed side-by-side and set the environment variable GYP\_MSVS\_VERSION to '2013'. In that case the right project files are going to be created.
-
-#### Building
- * If you use the command prompt:
- 1. Generate project files:
-```
-python build\gyp_v8
-```
-> > > Specify the path to `python.exe` if you don't have it in your PATH.
-> > > Append `-Dtarget_arch=x64` if you want to build 64bit binaries. If you switch between ia32 and x64 targets, you may have to manually delete the generated .vcproj/.sln files before regenerating them.
-> > > Example:
-```
-third_party/python_26/python.exe build\gyp_v8 -Dtarget_arch=x64
-```
- 1. Build:
-> > > Either open `build\All.sln` in Visual Studio, or compile on the command line as follows (adapt the path as necessary, or simply put `devenv.com` in your PATH):
-```
-"c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\IDE\devenv.com" /build Release build\All.sln
-```
-> > > Replace `Release` with `Debug` to build in Debug mode.
-> > > The built binaries will be in build\Release\ or build\Debug\.
-
- * If you use cygwin, the workflow is the same, but the syntax is slightly different:
- 1. Generate project files:
-```
-build/gyp_v8
-```
-> > > This will spit out a bunch of warnings about missing input files, but it seems to be OK to ignore them. (If you have time to figure this out, we'd happily accept a patch that makes the warnings go away!)
- 1. Build:
-```
-/cygdrive/c/Program\ Files\ (x86)/Microsoft\ Visual\ Studio\ 9.0/Common7/IDE/devenv.com /build Release build/all.sln
-```
-
-
-#### Custom build settings
-
-See the "custom build settings" section for [Xcode](#Xcode) above.
-
-
-#### Running tests
-
-You can abuse the test driver's --buildbot flag to make it find the executables where MSVC puts them:
-```
-python tools/run-tests.py --buildbot --outdir build --arch ia32 --mode Release
-```
-
-
-### MinGW
-
-Building on MinGW is not officially supported, but it is possible. You even have two options:
-
-#### Option 1: With Cygwin Installed
-
-Requirements:
- * MinGW
- * Cygwin, including Python
- * Python from www.python.org _(yes, you need two Python installations!)_
-
-Building:
- 1. Open a MinGW shell
- 1. `export PATH=$PATH:/c/cygwin/bin` _(or wherever you installed Cygwin)_
- 1. `make ia32.release -j8`
-
-Running tests:
- 1. Open a MinGW shell
- 1. `export PATH=/c/Python27:$PATH` _(or wherever you installed Python)_
- 1. `make ia32.release.check -j8`
-
-#### Option 2: Without Cygwin, just MinGW
-
-Requirements:
- * MinGW
- * Python from www.python.org
-
-Building and testing:
- 1. Open a MinGW shell
- 1. `tools/mingw-generate-makefiles.sh` _(re-run this any time a `*`.gyp`*` file changed, such as after updating your checkout)_
- 1. `make ia32.release` _(unfortunately -jX doesn't seem to work here)_
- 1. `make ia32.release.check -j8`
-
-
-# Final Note
-<font color='darkred'><b>If you have problems or questions, please file bugs at code.google.com/p/v8/issues or send mail to v8-users@googlegroups.com. Comments on this page are likely to go unnoticed and unanswered.</b></font> \ No newline at end of file
diff --git a/deps/v8/docs/contributing.md b/deps/v8/docs/contributing.md
deleted file mode 100644
index aa8e665976..0000000000
--- a/deps/v8/docs/contributing.md
+++ /dev/null
@@ -1,32 +0,0 @@
-Here you will find information that you'll need to be able to contribute to V8. Be sure to read the whole thing before sending us a contribution, including the small print at the end.
-
-## Before you contribute
-
-Before you start working on a larger contribution V8 you should get in touch with us first through the V8 [contributor mailing list](http://groups.google.com/group/v8-dev) so we can help out and possibly guide you; coordinating up front makes it much easier to avoid frustration later on.
-
-## Getting the code
-
-See [UsingGit](using_git.md).
-
-## Submitting code
-
-The source code of V8 follows the [Google C++ Style Guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml) so you should familiarize yourself with those guidelines. Before submitting code you must pass all our [tests](http://code.google.com/p/v8-wiki/wiki/Testing), and have to successfully run the presubmit checks:
-
-> `tools/presubmit.py`
-
-The presubmit script uses a linter from Google, `cpplint.py`. External contributors can get this from [here](http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py) and place it in their path.
-
-All submissions, including submissions by project members, require review. We use the same code-review tools and process as the chromium project. In order to submit a patch, you need to get the [depot\_tools](http://dev.chromium.org/developers/how-tos/install-depot-tools) and follow these instructions on [requesting a review](http://dev.chromium.org/developers/contributing-code) (using your V8 workspace instead of a chromium workspace).
-
-### Look out for breakage or regressions
-
-Before submitting your code please check the [buildbot console](http://build.chromium.org/p/client.v8/console) to see that the columns are mostly green before checking in your changes. Otherwise you will not know if your changes break the build or not. When your change is committed watch the [buildbot console](http://build.chromium.org/p/client.v8/console) until the bots turn green after your change.
-
-
-## The small print
-
-Before we can use your code you have to sign the [Google Individual Contributor License Agreement](http://code.google.com/legal/individual-cla-v1.0.html), which you can do online. This is mainly because you own the copyright to your changes, even after your contribution becomes part of our codebase, so we need your permission to use and distribute your code. We also need to be sure of various other things, for instance that you'll tell us if you know that your code infringes on other people's patents. You don't have to do this until after you've submitted your code for review and a member has approved it, but you will have to do it before we can put your code into our codebase.
-
-Contributions made by corporations are covered by a different agreement than the one above, the [Software Grant and Corporate Contributor License Agreement](http://code.google.com/legal/corporate-cla-v1.0.html).
-
-Sign them online [here](https://cla.developers.google.com/) \ No newline at end of file
diff --git a/deps/v8/docs/cross_compiling_for_arm.md b/deps/v8/docs/cross_compiling_for_arm.md
deleted file mode 100644
index 68464eff1f..0000000000
--- a/deps/v8/docs/cross_compiling_for_arm.md
+++ /dev/null
@@ -1,151 +0,0 @@
-<font color='darkred'><b><h2>Building V8 with SCons is no longer supported. See <a href='https://code.google.com/p/v8-wiki/wiki/BuildingWithGYP'>BuildingWithGYP</a>.</h2></b></font>
-
----
-
-
-# Using Sourcery G++ Lite
-
-The Sourcery G++ Lite cross compiler suite is a free version of Sourcery G++ from [CodeSourcery](http://www.codesourcery.com). There is a page for the [GNU Toolchain for ARM Processors](http://www.codesourcery.com/sgpp/lite/arm). Determine the version you need for your host/target combination.
-
-The following instructions uses [2009q1-203 for ARM GNU/Linux](http://www.codesourcery.com/sgpp/lite/arm/portal/release858), and if using a different version please change the URLs and `TOOL_PREFIX` below accordingly.
-
-## Installing on host and target
-
-The simplest way of setting this up is to install the full Sourcery G++ Lite package on both the host and target at the same location. This will ensure that all the libraries required are available on both sides. If you want to use the default libraries on the host there is no need to install anything on the target.
-
-The following script will install in `/opt/codesourcery`:
-
-```
-#!/bin/sh
-
-sudo mkdir /opt/codesourcery
-cd /opt/codesourcery
-sudo chown $USERNAME .
-chmod g+ws .
-umask 2
-wget http://www.codesourcery.com/sgpp/lite/arm/portal/package4571/public/arm-none-linux-gnueabi/arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
-tar -xvf arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
-```
-
-
-## Building using scons without snapshot
-
-The simplest way to build is without snapshot, as that does not involve using the simulator to generate the snapshot. The following script will build the sample shell without snapshot for ARM v7.
-
-```
-#!/bin/sh
-
-export TOOL_PREFIX=/opt/codesourcery/arm-2009q1/bin/arm-none-linux-gnueabi
-export CXX=$TOOL_PREFIX-g++
-export AR=$TOOL_PREFIX-ar
-export RANLIB=$TOOL_PREFIX-ranlib
-export CC=$TOOL_PREFIX-gcc
-export LD=$TOOL_PREFIX-ld
-
-export CCFLAGS="-march=armv7-a -mtune=cortex-a8 -mfpu=vfp"
-export ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-
-scons wordsize=32 snapshot=off arch=arm sample=shell
-```
-
-If the processor is not Cortex A8 or does not have VFP enabled the `-mtune=cortex-a8` and `-mfpu=vfp` part of `CCFLAGS` needs to be changed accordingly. By default the V8 SCons build adds `-mfloat-abi=softfp`.
-
-If using the default libraries on the target just leave out the setting of `ARM_TARGET_LIB` and if the target libraries are in a different location `ARM_TARGET_LIB` needs to be adjusted accordingly.
-
-The default for Sourcery G++ Lite is ARM v5te with software floating point emulation, so if testing building for ARM v5te the setting of `CCFLAGS` and `ARM_TARGET_LIB` should be changed to:
-
-```
-CCFLAGS=""
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-
-scons armeabi=soft ...
-```
-
-Relying on defaults in the tool chain might lead to surprises, so for ARM v5te with software floating point emulation the following is more explicit:
-
-```
-CCFLAGS="-march=armv5te"
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-
-scons armeabi=soft ...
-```
-
-If the target has a VFP unit use the following:
-
-```
-CCFLAGS="-mfpu=vfpv3"
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
-```
-
-To allow G++ to use Thumb2 instructions and the VFP unit when compiling the C/C++ code use:
-
-```
-CCFLAGS="-mthumb -mfpu=vfpv3"
-ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc/thumb2
-```
-
-_Note:_ V8 will not use Thumb2 instructions in its generated code; it always uses the full ARM instruction set.
-
-For other ARM versions please check the Sourcery G++ Lite documentation.
-
-As mentioned above the default for Sourcery G++ Lite used here is ARM v5te with software floating point emulation. However beware that this default might change between versions and that there are no unique defaults for ARM tool chains in general, so always passing `-march` and possibly `-mfpu` is recommended. Passing `-mfloat-abi` is not required as this is controlled by the SCons option `armeabi`.
-
-## Building using scons with snapshot
-
-When building with snapshot the simulator is used to build the snapshot on the host and then building for the target with that snapshot. The following script will accomplish that (using both Thumb2 and VFP instructions):
-
-```
-#!/bin/sh
-
-V8DIR=..
-
-cd host
-
-scons -Y$V8DIR simulator=arm snapshot=on
-mv obj/release/snapshot.cc $V8DIR/src/snapshot.cc
-
-cd ..
-
-export TOOL_PREFIX=/opt/codesourcery/arm-2010.09-103/bin/arm-none-linux-gnueabi
-export CXX=$TOOL_PREFIX-g++
-export AR=$TOOL_PREFIX-ar
-export RANLIB=$TOOL_PREFIX-ranlib
-export CC=$TOOL_PREFIX-gcc
-export LD=$TOOL_PREFIX-ld
-
-export CCFLAGS="-mthumb -march=armv7-a -mfpu=vfpv3"
-export ARM_TARGET_LIB=/opt/codesourcery/arm-2010.09-103/arm-none-linux-gnueabi/libc/thumb2
-
-cd target
-
-scons -Y$V8DIR wordsize=32 snapshot=nobuild arch=arm sample=shell
-rm $V8DIR/src/snapshot.cc
-
-cd ..
-```
-
-This script requires the two subdirectories `host` and `target`. V8 is first built for the host with the ARM simulator which supports running ARM code on the host. This is used to build a snapshot file which is then used for the actual cross compilation of V8.
-
-## Building for target which supports unaligned access
-
-The default when building V8 for an ARM target (either cross compiling or compiling on an ARM machine) is to disable unaligned memory access. However in some situations (most noticeably handling of regular expressions) performance will be better if unaligned memory access is used on processors which support it. To enable unaligned memory access set `unalignedaccesses` to `on` when building:
-
-```
-scons unalignedaccesses=on ...
-```
-
-When running in the simulator the default is to enable unaligned memory access, so to test in the simulator with unaligned memory access disabled set `unalignedaccesses` to `off` when building:
-
-```
-scons unalignedaccesses=off simulator=arm ...
-```
-
-## Using V8 with hardfp calling convention
-
-By default V8 uses the softfp calling convention when calling C functions from generated code. However it is possible to use hardfp as well. To enable this set `armeabi` to `hardfp` when building:
-
-```
-scons armeabi=hardfp ...
-```
-
-Passing `armeabi=hardfp` to SCons will automatically set the compiler flag `-mfloat-abi=hardfp`. If using snapshots remember to pass `armeabi=hardfp` when building V8 on the host for generating the snapshot as well. \ No newline at end of file
diff --git a/deps/v8/docs/d8_on_android.md b/deps/v8/docs/d8_on_android.md
deleted file mode 100644
index eda6419345..0000000000
--- a/deps/v8/docs/d8_on_android.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Prerequisites
- * a Linux/Mac workstation
- * v8 r12178 (on Google Code) or later
- * an Android emulator or device with matching USB cable
- * make sure [building with GYP](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP) works
-
-
-# Get the code
-
- * Use the instructions from https://code.google.com/p/v8-wiki/wiki/UsingGit to get the code
- * Then you need to add the Android dependencies:
-```
-v8$ echo "target_os = ['android']" >> ../.gclient && gclient sync --nohooks
-```
- * The sync will take a while the first time as it downloads the Android NDK to v8/third\_party
- * If you want to use a different NDK, you need to set the gyp variable android\_ndk\_root
-
-
-# Get the Android SDK
- * tested version: `r15`
- * download the SDK from http://developer.android.com/sdk/index.html
- * extract it
- * install the "Platform tools" using the SDK manager that you can start by running `tools/android`
- * now you have a `platform_tools/adb` binary which will be used later; put it in your `PATH` or remember where it is
-
-
-# Set up your device
- * Enable USB debugging (Gingerbread: Settings > Applications > Development > USB debugging; Ice Cream Sandwich: Settings > Developer Options > USB debugging)
- * connect your device to your workstation
- * make sure `adb devices` shows it; you may have to edit `udev` rules to give yourself proper permissions
- * run `adb shell` to get an ssh-like shell on the device. In that shell, do:
-```
-cd /data/local/tmp
-mkdir v8
-cd v8
-```
-
-
-# Push stuff onto the device
- * make sure your device is connected
- * from your workstation's shell:
-```
-adb push /file/you/want/to/push /data/local/tmp/v8/
-```
-
-
-# Compile V8 for Android
-Currently two architectures (`android_arm` and `android_ia32`) are supported, each in `debug` or `release` mode. The following steps work equally well for both ARM and ia32, on either the emulator or real devices.
- * compile:
-```
-make android_arm.release -j16
-```
- * push the resulting binary to the device:
-```
-adb push out/android_arm.release/d8 /data/local/tmp/v8/d8
-```
- * the most comfortable way to run it is from your workstation's shell as a one-off command (rather than starting an interactive shell session on the device), that way you can use pipes or whatever to process the output as necessary:
-```
-adb shell /data/local/tmp/v8/d8 <parameters>
-```
- * warning: when you cancel such an "adb shell whatever" command using Ctrl+C, the process on the phone will sometimes keep running.
- * Alternatively, use the `.check` suffix to automatically push test binaries and test cases onto the device and run them.
-```
-make android_arm.release.check
-```
-
-
-# Profile
- * compile a binary, push it to the device, keep a copy of it on the host
-```
-make android_arm.release -j16
-adb push out/android_arm.release/d8 /data/local/tmp/v8/d8-version.under.test
-cp out/android_arm.release/d8 ./d8-version.under.test
-```
- * get a profiling log and copy it to the host:
-```
-adb shell /data/local/tmp/v8/d8-version.under.test benchmark.js --prof
-adb pull /data/local/tmp/v8/v8.log ./
-```
- * open `v8.log` in your favorite editor and edit the first line to match the full path of the `d8-version.under.test` binary on your workstation (instead of the `/data/local/tmp/v8/` path it had on the device)
- * run the tick processor with the host's `d8` and an appropriate `nm` binary:
-```
-cp out/ia32.release/d8 ./d8 # only required once
-tools/linux-tick-processor --nm=$ANDROID_NDK_ROOT/toolchain/bin/arm-linux-androideabi-nm
-```
-
-# Compile SpiderMonkey for Lollipop
-```
-cd firefox/js/src
-autoconf2.13
-./configure \
- --target=arm-linux-androideabi \
- --with-android-ndk=$ANDROID_NDK_ROOT \
- --with-android-version=21 \
- --without-intl-api \
- --disable-tests \
- --enable-android-libstdcxx \
- --enable-pie
-make
-adb push -p js/src/shell/js /data/local/tmp/js
-``` \ No newline at end of file
diff --git a/deps/v8/docs/debugger_protocol.md b/deps/v8/docs/debugger_protocol.md
deleted file mode 100644
index 6b6b448a0f..0000000000
--- a/deps/v8/docs/debugger_protocol.md
+++ /dev/null
@@ -1,934 +0,0 @@
-# Introduction
-
-V8 has support for debugging the JavaScript code running in it. There are two APIs for this: a function based API using JavaScript objects and a message based API using a JSON based protocol. The function based API can be used by an in-process debugger agent, whereas the message based API can be used out of process as well.
-**> The message based API is no longer maintained. Please ask in v8-users@googlegroups.com if you want to attach a debugger to the run-time.**
-
-The debugger protocol is based on [JSON](http://www.json.org/). Each protocol packet is defined in terms of JSON and is transmitted as a string value. All packets have two basic elements `seq` and `type`.
-
-```
-{ "seq" : <number>,
- "type" : <type>,
- ...
-}
-```
-
-The element `seq` holds the sequence number of the packet. And element type is the type of the packet. The type is a string value with one of the following values `"request"`, `"response"` or `"event"`.
-
-A `"request"` packet has the following structure:
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : <command>
- "arguments" : ...
-}
-```
-
-A `"response"` packet has the following structure. If `success` is true `body` will contain the response data. If `success` is false `message` will contain an error message.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : <command>
- "body" : ...
- "running" : <is the VM running after sending this response>
- "success" : <boolean indicating success>
- "message" : <if command failed this property contains an error message>
-}
-```
-
-An `"event"` packet has the following structure:
-
-```
-{ "seq" : <number>,
- "type" : "event",
- "event" : <event name>
- body : ...
-}
-```
-
-# Request/response pairs
-
-## Request `continue`
-
-The request `continue` is a request from the debugger to start the VM running again. As part of the `continue` request the debugger can specify if it wants the VM to perform a single step action.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "continue",
- "arguments" : { "stepaction" : <"in", "next" or "out">,
- "stepcount" : <number of steps (default 1)>
- }
-}
-```
-
-In the response the property `running` will always be true as the VM will be running after executing the `continue` command. If a single step action is requested the VM will respond with a `break` event after running the step.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "continue",
- "running" : true
- "success" : true
-}
-```
-
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"continue"}
-{"seq":118,"type":"request","command":"continue","arguments":{"stepaction":"out"}}
-{"seq":119,"type":"request","command":"continue","arguments":{"stepaction":"next","stepcount":5}}
-```
-
-## Request `evaluate`
-
-The request `evaluate` is used to evaluate an expression. The body of the result is as described in response object serialization below.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "evaluate",
- "arguments" : { "expression" : <expression to evaluate>,
- "frame" : <number>,
- "global" : <boolean>,
- "disable_break" : <boolean>,
- "additional_context" : [
- { "name" : <name1>, "handle" : <handle1> },
- { "name" : <name2>, "handle" : <handle2> },
- ...
- ]
- }
-}
-```
-
-Optional argument `additional_context` specifies handles that will be visible from the expression under corresponding names (see example below).
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "evaluate",
- "body" : ...
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"evaluate","arguments":{"expression":"1+2"}}
-{"seq":118,"type":"request","command":"evaluate","arguments":{"expression":"a()","frame":3,"disable_break":false}}
-{"seq":119,"type":"request","command":"evaluate","arguments":{"expression":"[o.a,o.b,o.c]","global":true,"disable_break":true}}
-{"seq":120,"type":"request","command":"evaluate","arguments":{"expression":"obj.toString()", "additional_context": [{ "name":"obj","handle":25 }] }}
-```
-
-## Request `lookup`
-
-The request `lookup` is used to lookup objects based on their handle. The individual array elements of the body of the result is as described in response object serialization below.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "lookup",
- "arguments" : { "handles" : <array of handles>,
- "includeSource" : <boolean indicating whether the source will be included when script objects are returned>,
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "lookup",
- "body" : <array of serialized objects indexed using their handle>
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"lookup","arguments":{"handles":"[1]"}}
-{"seq":118,"type":"request","command":"lookup","arguments":{"handles":"[7,12]"}}
-```
-
-## Request `backtrace`
-
-The request `backtrace` returns a backtrace (or stacktrace) from the current execution state. When issuing a request a range of frames can be supplied. The top frame is frame number 0. If no frame range is supplied data for 10 frames will be returned.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "backtrace",
- "arguments" : { "fromFrame" : <number>
- "toFrame" : <number>
- "bottom" : <boolean, set to true if the bottom of the stack is requested>
- }
-}
-```
-
-The response contains the frame data together with the actual frames returned and the total frame count.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "backtrace",
- "body" : { "fromFrame" : <number>
- "toFrame" : <number>
- "totalFrames" : <number>
- "frames" : <array of frames - see frame request for details>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-If there are no stack frames the result body only contains `totalFrames` with a value of `0`. When an exception event is generated due to compilation failures it is possible that there are no stack frames.
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"backtrace"}
-{"seq":118,"type":"request","command":"backtrace","arguments":{"toFrame":2}}
-{"seq":119,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":9}}
-```
-
-## Request `frame`
-
-The request frame selects a new selected frame and returns information for that. If no frame number is specified the selected frame is returned.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "frame",
- "arguments" : { "number" : <frame number>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "frame",
- "body" : { "index" : <frame number>,
- "receiver" : <frame receiver>,
- "func" : <function invoked>,
- "script" : <script for the function>,
- "constructCall" : <boolean indicating whether the function was called as constructor>,
- "debuggerFrame" : <boolean indicating whether this is an internal debugger frame>,
-              "arguments" : [ { name: <name of the argument - missing for anonymous arguments>,
- value: <value of the argument>
- },
- ... <the array contains all the arguments>
- ],
- "locals" : [ { name: <name of the local variable>,
- value: <value of the local variable>
- },
- ... <the array contains all the locals>
- ],
- "position" : <source position>,
- "line" : <source line>,
- "column" : <source column within the line>,
- "sourceLineText" : <text for current source line>,
- "scopes" : [ <array of scopes, see scope request below for format> ],
-
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"frame"}
-{"seq":118,"type":"request","command":"frame","arguments":{"number":1}}
-```
-
-## Request `scope`
-
-The request scope returns information on a given scope for a given frame. If no frame number is specified the selected frame is used.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "scope",
- "arguments" : { "number" : <scope number>
- "frameNumber" : <frame number, optional uses selected frame if missing>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "scope",
- "body" : { "index" : <index of this scope in the scope chain. Index 0 is the top scope
- and the global scope will always have the highest index for a
- frame>,
- "frameIndex" : <index of the frame>,
- "type" : <type of the scope:
- 0: Global
- 1: Local
- 2: With
- 3: Closure
- 4: Catch >,
- "object" : <the scope object defining the content of the scope.
- For local and closure scopes this is transient objects,
- which has a negative handle value>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"scope"}
-{"seq":118,"type":"request","command":"scope","arguments":{"frameNumber":1,"number":1}}
-```
-
-## Request `scopes`
-
-The request scopes returns all the scopes for a given frame. If no frame number is specified the selected frame is returned.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "scopes",
- "arguments" : { "frameNumber" : <frame number, optional uses selected frame if missing>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "scopes",
- "body" : { "fromScope" : <number of first scope in response>,
- "toScope" : <number of last scope in response>,
- "totalScopes" : <total number of scopes for this frame>,
- "scopes" : [ <array of scopes, see scope request above for format> ],
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"scopes"}
-{"seq":118,"type":"request","command":"scopes","arguments":{"frameNumber":1}}
-```
-
-## Request `scripts`
-
-The request `scripts` retrieves active scripts from the VM. An active script is source code from which there are still live objects in the VM. This request will always force a full garbage collection in the VM.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "scripts",
- "arguments" : { "types" : <types of scripts to retrieve
- set bit 0 for native scripts
- set bit 1 for extension scripts
- set bit 2 for normal scripts
- (default is 4 for normal scripts)>
-                  "ids"           : <array of id's of scripts to return. If this is not specified all scripts are returned>
- "includeSource" : <boolean indicating whether the source code should be included for the scripts returned>
- "filter" : <string or number: filter string or script id.
- If a number is specified, then only the script with the same number as its script id will be retrieved.
- If a string is specified, then only scripts whose names contain the filter string will be retrieved.>
- }
-}
-```
-
-The request contains an array of the scripts in the VM. This information includes the relative location of the script within the containing resource.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "scripts",
- "body" : [ { "name" : <name of the script>,
- "id" : <id of the script>
- "lineOffset" : <line offset within the containing resource>
- "columnOffset" : <column offset within the containing resource>
- "lineCount" : <number of lines in the script>
- "data" : <optional data object added through the API>
- "source" : <source of the script if includeSource was specified in the request>
- "sourceStart" : <first 80 characters of the script if includeSource was not specified in the request>
- "sourceLength" : <total length of the script in characters>
- "scriptType" : <script type (see request for values)>
- "compilationType" : < How was this script compiled:
- 0 if script was compiled through the API
- 1 if script was compiled through eval
- >
- "evalFromScript" : <if "compilationType" is 1 this is the script from where eval was called>
- "evalFromLocation" : { line : < if "compilationType" is 1 this is the line in the script from where eval was called>
- column : < if "compilationType" is 1 this is the column in the script from where eval was called>
- ]
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"scripts"}
-{"seq":118,"type":"request","command":"scripts","arguments":{"types":7}}
-```
-
-## Request `source`
-
-The request `source` retrieves source code for a frame. It returns a number of source lines running from the `fromLine` to but not including the `toLine`, that is the interval is open on the "to" end. For example, requesting source from line 2 to 4 returns two lines (2 and 3). Also note that the line numbers are 0 based: the first line is line 0.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "source",
- "arguments" : { "frame" : <frame number (default selected frame)>
- "fromLine" : <from line within the source default is line 0>
- "toLine" : <to line within the source this line is not included in
- the result default is the number of lines in the script>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "source",
- "body" : { "source" : <the source code>
- "fromLine" : <actual from line within the script>
- "toLine" : <actual to line within the script this line is not included in the source>
- "fromPosition" : <actual start position within the script>
- "toPosition" : <actual end position within the script>
- "totalLines" : <total lines in the script>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"source","arguments":{"fromLine":10,"toLine":20}}
-{"seq":118,"type":"request","command":"source","arguments":{"frame":2,"fromLine":10,"toLine":20}}
-```
-
-## Request `setbreakpoint`
-
-The request `setbreakpoint` creates a new break point. This request can be used to set both function and script break points. A function break point sets a break point in an existing function whereas a script break point sets a break point in a named script. A script break point can be set even if the named script is not found.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "setbreakpoint",
- "arguments" : { "type" : <"function" or "script" or "scriptId" or "scriptRegExp">
- "target" : <function expression or script identification>
- "line" : <line in script or function>
- "column" : <character position within the line>
- "enabled" : <initial enabled state. True or false, default is true>
- "condition" : <string with break point condition>
- "ignoreCount" : <number specifying the number of break point hits to ignore, default value is 0>
- }
-}
-```
-
-The result of the `setbreakpoint` request is a response with the number of the newly created break point. This break point number is used in the `changebreakpoint` and `clearbreakpoint` requests.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "setbreakpoint",
- "body" : { "type" : <"function" or "script">
- "breakpoint" : <break point number of the new break point>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"setbreakpoint","arguments":{"type":"function","target":"f"}}
-{"seq":118,"type":"request","command":"setbreakpoint","arguments":{"type":"script","target":"test.js","line":100}}
-{"seq":119,"type":"request","command":"setbreakpoint","arguments":{"type":"function","target":"f","condition":"i > 7"}}
-```
-
-
-## Request `changebreakpoint`
-
-The request `changebreakpoint` changes the status of a break point.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "changebreakpoint",
- "arguments" : { "breakpoint" : <number of the break point to clear>
- "enabled" : <initial enabled state. True or false, default is true>
- "condition" : <string with break point condition>
- "ignoreCount" : <number specifying the number of break point hits }
-}
-```
-
-## Request `clearbreakpoint`
-
-The request `clearbreakpoint` clears a break point.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "clearbreakpoint",
- "arguments" : { "breakpoint" : <number of the break point to clear>
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "clearbreakpoint",
- "body" : { "type" : <"function" or "script">
- "breakpoint" : <number of the break point cleared>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"request","command":"clearbreakpoint","arguments":{"type":"function","breakpoint":1}}
-{"seq":118,"type":"request","command":"clearbreakpoint","arguments":{"type":"script","breakpoint":2}}
-```
-
-## Request `setexceptionbreak`
-
-The request `setexceptionbreak` is a request to enable/disable breaks on all / uncaught exceptions. If the "enabled" argument is not specified, the debuggee will toggle the state of the specified break type.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "setexceptionbreak",
- "arguments" : { "type" : <string: "all", or "uncaught">,
- "enabled" : <optional bool: enables the break type if true>
- }
-}
-```
-
-In response, the break on exception property of the debuggee will be set accordingly, and the following response message will be dispatched to the debugger.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "setexceptionbreak",
- "body" : { "type" : <string: "all" or "uncaught" corresponding to the request.>,
- "enabled" : <bool: true if the break type is currently enabled as a result of the request>
- }
- "running" : true
- "success" : true
-}
-```
-
-Here are a few examples.
-
-```
-{"seq":117,"type":"request","command":"setexceptionbreak","arguments":{"type":"all"}}
-{"seq":118,"type":"request","command":"setexceptionbreak","arguments":{"type":"all","enabled":false}}
-{"seq":119,"type":"request","command":"setexceptionbreak","arguments":{"type":"uncaught","enabled":true}}
-```
-
-## Request `v8flags`
-The request v8flags is a request to apply the specified v8 flags (analogous to how they are specified on the command line).
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "v8flags",
- "arguments" : { "flags" : <string: a sequence of v8 flags just like those used on the command line>
- }
-}
-```
-
-In response, the specified flags will be applied in the debuggee if they are legal flags. Their effects vary depending on the implementation of the flag.
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "v8flags",
- "running" : true
- "success" : true
-}
-```
-
-Here are a few examples.
-
-```
-{"seq":117,"type":"request","command":"v8flags","arguments":{"flags":"--trace_gc --always_compact"}}
-{"seq":118,"type":"request","command":"v8flags","arguments":{"flags":"--notrace_gc"}}
-```
-
-## Request `version`
-
-The request `version` reports version of the running V8.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "version",
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "version",
- "body" : { "V8Version": <string, version of V8>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here is an example.
-
-```
-{"seq":1,"type":"request","command":"version"}
-{"seq":134,"request_seq":1,"type":"response","command":"version","success":true,"body":{"V8Version":"1.3.19 (candidate)"},"refs":[],"running":false}
-```
-
-## Request `disconnect`
-
-The request `disconnect` is used to detach the remote debugger from the debuggee. This will trigger the debuggee to disable all active breakpoints and resumes execution if the debuggee was previously stopped at a break.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "disconnect",
-}
-```
-
-The only response for the `disconnect` request is the response to a connect request if the debugger is still able to get a response before the debuggee successfully disconnects.
-
-Here is an example:
-
-```
-{"seq":117,"type":"request","command":"disconnect"}
-```
-
-## Request `gc`
-The request `gc` is a request to run the garbage collector in the debuggee.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "gc",
- "arguments" : { "type" : <string: "all">,
- }
-}
-```
-
-In response, the debuggee will run the specified GC type and send the following response message:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "gc",
- "body" : { "before" : <int: total heap usage in bytes before the GC>,
- "after" : <int: total heap usage in bytes after the GC>
- }
- "running" : true
- "success" : true
-}
-```
-
-Here is an example.
-
-```
-{"seq":117,"type":"request","command":"gc","arguments":{"type":"all"}}
-```
-
-## Request `listbreakpoints`
-
-The request `listbreakpoints` is used to get information on breakpoints that may have been set by the debugger.
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "listbreakpoints",
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "listbreakpoints",
- "body" : { "breakpoints": [ { "type" : <string: "scriptId" or "scriptName".>,
- "script_id" : <int: script id. Only defined if type is scriptId.>,
- "script_name" : <string: script name. Only defined if type is scriptName.>,
- "number" : <int: breakpoint number. Starts from 1.>,
- "line" : <int: line number of this breakpoint. Starts from 0.>,
- "column" : <int: column number of this breakpoint. Starts from 0.>,
- "groupId" : <int: group id of this breakpoint.>,
- "hit_count" : <int: number of times this breakpoint has been hit. Starts from 0.>,
- "active" : <bool: true if this breakpoint is enabled.>,
- "ignoreCount" : <int: remaining number of times to ignore breakpoint. Starts from 0.>,
- "actual_locations" : <actual locations of the breakpoint.>,
- }
- ],
- "breakOnExceptions" : <true if break on all exceptions is enabled>,
- "breakOnUncaughtExceptions" : <true if break on uncaught exceptions is enabled>
- }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-Here is an example:
-
-```
-{"seq":117,"type":"request","command":"listbreakpoints"}
-```
-
-
-## Request `setvariablevalue`
-This request sets the value of a variable in the specified scope.
-
-Request:
-
-```
-{ "seq" : <number>,
- "type" : "request",
- "command" : "setvariablevalue",
- "arguments" : { "name" : <string: variable name>,
- "scope" : { "number" : <scope number>
- "frameNumber" : <frame number, optional uses selected frame if missing>
- }
- }
-}
-```
-
-Response:
-
-```
-{ "seq" : <number>,
- "type" : "response",
- "request_seq" : <number>,
- "command" : "setvariablevalue",
- "body" : { "newValue": <object: mirror object of the new value> }
- "running" : <is the VM running after sending this response>
- "success" : true
-}
-```
-
-# Events
-
-## Event `break`
-
-The event `break` indicates that the execution in the VM has stopped due to a break condition. This can be caused by an unconditional break request, by a break point previously set, by a stepping action having completed, or by executing the `debugger` statement in JavaScript.
-
-```
-{ "seq" : <number>,
- "type" : "event",
-
- "event" : "break",
- "body" : { "invocationText" : <text representation of the stack frame>,
- "sourceLine" : <source line where execution is stopped>,
- "sourceColumn" : <column within the source line where execution is stopped>,
- "sourceLineText" : <text for the source line where execution is stopped>,
- "script" : { "name" : <resource name of the origin of the script>
- "lineOffset" : <line offset within the origin of the script>
- "columnOffset" : <column offset within the origin of the script>
- "lineCount" : <number of lines in the script>
- },
- "breakpoints" : <array of break point numbers hit if any>
- }
-}
-```
-
-Here are a couple of examples.
-
-```
-{"seq":117,"type":"event","event":"break","body":{"functionName":"f","sourceLine":1,"sourceColumn":14}}
-{"seq":117,"type":"event","event":"break","body":{"functionName":"g","scriptData":"test.js","sourceLine":12,"sourceColumn":22,"breakpoints":[1]}}
-{"seq":117,"type":"event","event":"break","body":{"functionName":"h","sourceLine":100,"sourceColumn":12,"breakpoints":[3,5,7]}}
-```
-
-## Event `exception`
-
-The event `exception` indicates that the execution in the VM has stopped due to an exception.
-
-```
-{ "seq" : <number>,
- "type" : "event",
- "event" : "exception",
- "body" : { "uncaught" : <boolean>,
- "exception" : ...
- "sourceLine" : <source line where the exception was thrown>,
- "sourceColumn" : <column within the source line from where the exception was thrown>,
- "sourceLineText" : <text for the source line from where the exception was thrown>,
- "script" : { "name" : <name of script>
- "lineOffset" : <number>
- "columnOffset" : <number>
- "lineCount" : <number>
- }
-
- }
-}
-```
-
-# Response object serialization
-
-Some responses contain objects as part of the body, e.g. the response to the evaluate request contains the result of the expression evaluated.
-
-All objects exposed through the debugger are assigned an ID called a handle. This handle is serialized and can be used to identify objects. A handle has a certain lifetime after which it will no longer refer to the same object. Currently the lifetime of handles matches the processing of a debug event. For each debug event, handles are recycled.
-
-An object can be serialized either as a reference to a given handle or as a value representation containing the object content.
-
-An object serialized as a reference follows this format, where `<handle>` is an integer.
-
-```
-{"ref":<handle>}
-```
-
-Objects serialized as values all contain the handle and the type of the object.
-
-```
-{ "handle" : <handle>,
- "type" : <"undefined", "null", "boolean", "number", "string", "object", "function" or "frame">
-}
-```
-
-In some situations special transient objects are created by the debugger. These objects are not really visible from JavaScript, but are created to materialize something inside the VM as an object visible to the debugger. One example of this is the local scope object returned from the `scope` and `scopes` request. Transient objects are identified by having a negative handle. A transient object can never be retrieved using the `lookup` request, so all transient objects referenced will be in the `refs` part of the response. The lifetime of transient objects is basically the request they are involved in.
-
-For the primitive JavaScript types `undefined` and `null` the type describes the value fully.
-
-```
-{"handle":<handle>,"type":"undefined"}
-```
-
-```
-{"handle":<handle>,"type":"null"}
-```
-
-For the rest of the primitive types `boolean`, `number` and `string` the value is part of the result.
-
-```
-{ "handle":<handle>,
- "type" : <"boolean", "number" or "string">
- "value" : <JSON encoded value>
-}
-```
-
-Boolean value.
-
-```
-{"handle":7,"type":"boolean","value":true}
-```
-
-Number value.
-
-```
-{"handle":8,"type":"number","value":42}
-```
-
-String value.
-
-```
-{"handle":9,"type":"string","value":"a string"}
-```
-
-An object is encoded with additional information.
-
-```
-{ "handle" : <handle>,
- "type" : "object",
- "className" : <Class name, ECMA-262 property [[Class]]>,
- "constructorFunction" : {"ref":<handle>},
- "protoObject" : {"ref":<handle>},
- "prototypeObject" : {"ref":<handle>},
- "properties" : [ {"name" : <name>,
- "ref" : <handle>
- },
- ...
- ]
-}
-```
-
-The difference between the `protoObject` and the `prototypeObject` is that the `protoObject` contains a reference to the actual prototype object (for which accessibility is not defined in ECMA-262, but in V8 it is accessible using the `__proto__` property) whereas the `prototypeObject` is the value of the `prototype` property.
-
-Here is an example.
-
-```
-{"handle":3,"type":"object","className":"Object","constructorFunction":{"ref":4},"protoObject":{"ref":5},"prototypeObject":{"ref":6},"properties":[{"name":"a","ref":7},{"name":"b","ref":8}]}
-```
-
-A function is encoded as an object but with additional information in the properties `name`, `inferredName`, `source` and `script`.
-
-```
-{ "handle" : <handle>,
- "type" : "function",
- "className" : "Function",
- "constructorFunction" : {"ref":<handle>},
- "protoObject" : {"ref":<handle>},
- "prototypeObject" : {"ref":<handle>},
- "name" : <function name>,
- "inferredName" : <inferred function name for anonymous functions>
- "source" : <function source>,
- "script" : <reference to function script>,
- "scriptId" : <id of function script>,
- "position" : <function begin position in script>,
- "line" : <function begin source line in script>,
- "column" : <function begin source column in script>,
- "properties" : [ {"name" : <name>,
- "ref" : <handle>
- },
- ...
- ]
-}
-``` \ No newline at end of file
diff --git a/deps/v8/docs/gdb_jit_interface.md b/deps/v8/docs/gdb_jit_interface.md
deleted file mode 100644
index 753626cf6f..0000000000
--- a/deps/v8/docs/gdb_jit_interface.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Prerequisites
-
- * V8 3.0.9 or newer
- * GDB 7.0 or newer
- * Linux OS
- * CPU with Intel-compatible architecture (ia32 or x64)
-
-# Introduction
-
-GDB JIT interface integration allows V8 to provide GDB with the symbol and debugging information for a native code emitted in runtime.
-
-When the GDB JIT interface is disabled, a typical backtrace in GDB will contain frames marked with ??. These frames correspond to dynamically generated code:
-
-```
-#8 0x08281674 in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
-#9 0xf5cae28e in ?? ()
-#10 0xf5cc3a0a in ?? ()
-#11 0xf5cc38f4 in ?? ()
-#12 0xf5cbef19 in ?? ()
-#13 0xf5cb09a2 in ?? ()
-#14 0x0809e0a5 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
- has_pending_exception=0xffffd46f) at src/execution.cc:97
-```
-
-However enabling GDB JIT integration allows GDB to produce more informative stack trace:
-
-```
-#6 0x082857fc in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
-#7 0xf5cae28e in ?? ()
-#8 0xf5cc3a0a in loop () at test.js:6
-#9 0xf5cc38f4 in test.js () at test.js:13
-#10 0xf5cbef19 in ?? ()
-#11 0xf5cb09a2 in ?? ()
-#12 0x0809e1f9 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
- has_pending_exception=0xffffd44f) at src/execution.cc:97
-```
-
-Frames still unknown to GDB correspond to native code without source information. See [GDBJITInterface#KnownLimitations](GDBJITInterface#KnownLimitations.md) for more details.
-
-GDB JIT interface is specified in the GDB documentation: http://sourceware.org/gdb/current/onlinedocs/gdb/JIT-Interface.html
-
-# Enabling GDB JIT integration
-
-GDBJIT currently is by default excluded from the compilation and disabled in runtime. To enable it:
-
- 1. Build V8 library with `ENABLE_GDB_JIT_INTERFACE` defined. If you are using scons to build V8 run it with `gdbjit=on`.
- 1. Pass `--gdbjit` flag when starting V8.
-
-To check that you have enabled GDB JIT integration correctly try setting breakpoint on `__jit_debug_register_code`. This function will be invoked to notify GDB about new code objects.
-
-# Known Limitations
-
- * GDB side of JIT Interface currently (as of GDB 7.2) does not handle registration of code objects very effectively. Each next registration takes more time: with 500 registered objects each next registration takes more than 50ms, with 1000 registered code objects - more than 300 ms. This problem was reported to GDB developers (http://sourceware.org/ml/gdb/2011-01/msg00002.html) but currently there is no solution available. To reduce pressure on GDB current implementation of GDB JIT integration operates in two modes: _default_ and _full_ (enabled by `--gdbjit-full` flag). In _default_ mode V8 notifies GDB only about code objects that have source information attached (this usually includes all user scripts). In _full_ - about all generated code objects (stubs, ICs, trampolines).
-
- * On x64 GDB is unable to properly unwind stack without `.eh_frame` section (Issue 1053 (on Google Code))
-
- * GDB is not notified about code deserialized from the snapshot (Issue 1054 (on Google Code))
-
- * Only Linux OS on Intel-compatible CPUs is supported. For different OSes either a different ELF-header should be generated or a completely different object format should be used.
-
- * Enabling GDB JIT interface disables compacting GC. This is done to reduce pressure on GDB as unregistering and registering each moved code object will incur considerable overhead.
-
- * GDB JIT integration provides only _approximate_ source information. It does not provide any information about local variables, function's arguments, stack layout etc. It does not enable stepping through JavaScript code or setting breakpoint on the given line. However one can set a breakpoint on a function by it's name. \ No newline at end of file
diff --git a/deps/v8/docs/handling_of_ports.md b/deps/v8/docs/handling_of_ports.md
deleted file mode 100644
index 9706546e23..0000000000
--- a/deps/v8/docs/handling_of_ports.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# General
-This article describes how ports should be handled.
-
-# MIPS
-## Straight-forward MIPS ports
- 1. Do them yourself.
-
-## More complicated MIPS ports
- 1. CC the MIPS team in the CL. Use the mailing list v8-mips-ports.at.googlegroups.com for that purpose.
- 1. The MIPS team will provide you with a patch which you need to merge into your CL.
- 1. Then land the CL.
-
-# PPC (not officially supported)
- 1. Contact/CC the PPC team in the CL if needed. Use the mailing list v8-ppc-ports.at.googlegroups.com for that purpose.
-
-# x87 (not officially supported)
- 1. Contact/CC the x87 team in the CL if needed. Use the mailing list v8-x87-ports.at.googlegroups.com for that purpose.
-
-# ARM
-## Straight-forward ARM ports
- 1. Do them yourself.
-
-## When you are lost
- 1. CC the ARM team in the CL. Use the mailing list v8-arm-ports.at.googlegroups.com for that purpose. \ No newline at end of file
diff --git a/deps/v8/docs/i18n_support.md b/deps/v8/docs/i18n_support.md
deleted file mode 100644
index a1eb1c8f0a..0000000000
--- a/deps/v8/docs/i18n_support.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# ECMAScript 402
-
-V8 optionally implements the [ECMAScript 402](http://www.ecma-international.org/ecma-402/1.0/) API. The API is enabled by default, but can be turned off at compile time.
-
-
-## Prerequisites
-
-The i18n implementation adds a dependency on ICU. If you run
-
-```
-make dependencies
-```
-
-a suitable version of ICU is checked out into `third_party/icu`.
-
-
-### Alternative ICU checkout
-
-You can check out the ICU sources at a different location and define the gyp variable `icu_gyp_path` to point at the `icu.gyp` file.
-
-
-### System ICU
-
-Last but not least, you can compile V8 against a version of ICU installed in your system. To do so, specify the gyp variable `use_system_icu=1`. If you also have `want_separate_host_toolset` enabled, the bundled ICU will still be compiled to generate the V8 snapshot. The system ICU will only be used for the target architecture.
-
-
-## Embedding V8
-
-If you embed V8 in your application, but your application itself doesn't use ICU, you will need to initialize ICU before calling into V8 by executing:
-
-```
-v8::V8::InitializeICU();
-```
-
-It is safe to invoke this method if ICU was not compiled in, then it does nothing.
-
-
-## Compiling without i18n support
-
-To build V8 without i18n support use
-
-```
-make i18nsupport=off native
-``` \ No newline at end of file
diff --git a/deps/v8/docs/javascript.md b/deps/v8/docs/javascript.md
deleted file mode 100644
index f3a501b985..0000000000
--- a/deps/v8/docs/javascript.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Introduction
-
-JavaScript is a dynamically typed scripting language universally used to
-script web content in browsers.
-
-Its specification by ECMA can be found [here](http://www.ecma-international.org/publications/standards/Ecma-262.htm). \ No newline at end of file
diff --git a/deps/v8/docs/javascript_stack_trace_api.md b/deps/v8/docs/javascript_stack_trace_api.md
deleted file mode 100644
index 4a0d104c05..0000000000
--- a/deps/v8/docs/javascript_stack_trace_api.md
+++ /dev/null
@@ -1,161 +0,0 @@
-All internal errors thrown in V8 capture a stack trace when they are created that can be accessed from JavaScript through the error.stack property. V8 also has various hooks for controlling how stack traces are collected and formatted, and for allowing custom errors to also collect stack traces. This document outlines V8's JavaScript stack trace API.
-
-### Basic stack traces
-
-By default, almost all errors thrown by V8 have a `stack` property that holds the topmost 10 stack frames, formatted as a string. Here's an example of a fully formatted stack trace:
-
-```
-ReferenceError: FAIL is not defined
- at Constraint.execute (deltablue.js:525:2)
- at Constraint.recalculate (deltablue.js:424:21)
- at Planner.addPropagate (deltablue.js:701:6)
- at Constraint.satisfy (deltablue.js:184:15)
- at Planner.incrementalAdd (deltablue.js:591:21)
- at Constraint.addConstraint (deltablue.js:162:10)
- at Constraint.BinaryConstraint (deltablue.js:346:7)
- at Constraint.EqualityConstraint (deltablue.js:515:38)
- at chainTest (deltablue.js:807:6)
- at deltaBlue (deltablue.js:879:2)
-```
-
-The stack trace is collected when the error is created and is the same regardless of where or how many times the error is thrown. We collect 10 frames because it is usually enough to be useful but not so many that it has a noticeable performance impact. You can control how many stack frames are collected by setting the variable
-
-```
-Error.stackTraceLimit
-```
-
-Setting it to 0 will disable stack trace collection. Any finite integer value will be used as the maximum number of frames to collect. Setting it to `Infinity` means that all frames will be collected. This variable only affects the current context, it has to be set explicitly for each context that needs a different value. (Note that what is known as a "context" in V8 terminology corresponds to a page or iframe in Google Chrome). To set a different default value that affects all contexts use the
-
-```
---stack-trace-limit <value>
-```
-
-command-line flag to V8. To pass this flag to V8 when running Google Chrome use
-
-```
---js-flags="--stack-trace-limit <value>"
-```
-
-### Stack trace collection for custom exceptions
-The stack trace mechanism used for built-in errors is implemented using a general stack trace collection API that is also available to user scripts. The function
-
-```
-Error.captureStackTrace(error, constructorOpt)
-```
-
-adds a stack property to the given `error` object that will yield the stack trace at the time captureStackTrace was called. The reason for not just returning the formatted stack trace directly is that this way we can postpone the formatting of the stack trace until the stack property is accessed and avoid formatting completely if it never is.
-
-The optional `constructorOpt` parameter allows you to pass in a function value. When collecting the stack trace all frames above the topmost call to this function, including that call, will be left out of the stack trace. This can be useful to hide implementation details that won't be useful to the user. The usual way of defining a custom error that captures a stack trace would be:
-
-```
-function MyError() {
- Error.captureStackTrace(this, MyError);
- // any other initialization
-}
-```
-
-Passing in MyError as a second argument means that the constructor call to MyError won't show up in the stack trace.
-
-### Customizing stack traces
-Unlike Java where the stack trace of an exception is a structured value that allows inspection of the stack state, the stack property in V8 just holds a flat string containing the formatted stack trace. This is for no other reason than compatibility with other browsers. However, this is not hardcoded but only the default behavior and can be overridden by user scripts.
-
-For efficiency stack traces are not formatted when they are captured but on demand, the first time the stack property is accessed. A stack trace is formatted by calling
-
-```
-Error.prepareStackTrace(error, structuredStackTrace)
-```
-
-and using whatever this call returns as the value of the `stack` property. If you assign a different function value to `Error.prepareStackTrace` that function will be used to format stack traces. It will be passed the error object that it is preparing a stack trace for and a structured representation of the stack. User stack trace formatters are free to format the stack trace however they want and even return non-string values. It is safe to retain references to the structured stack trace object after a call to prepareStackTrace completes so that it is also a valid return value. Note that the custom prepareStackTrace function is immediately called at the point when the error object is created (e.g. with `new Error()`).
-
-The structured stack trace is an Array of CallSite objects, each of which represents a stack frame. A CallSite object defines the following methods
-
- * **getThis**: returns the value of this
- * **getTypeName**: returns the type of this as a string. This is the name of the function stored in the constructor field of this, if available, otherwise the object's `[[Class]]` internal property.
- * **getFunction**: returns the current function
- * **getFunctionName**: returns the name of the current function, typically its name property. If a name property is not available an attempt will be made to try to infer a name from the function's context.
- * **getMethodName**: returns the name of the property of this or one of its prototypes that holds the current function
- * **getFileName**: if this function was defined in a script returns the name of the script
- * **getLineNumber**: if this function was defined in a script returns the current line number
- * **getColumnNumber**: if this function was defined in a script returns the current column number
- * **getEvalOrigin**: if this function was created using a call to eval returns a CallSite object representing the location where eval was called
- * **isToplevel**: is this a toplevel invocation, that is, is this the global object?
- * **isEval**: does this call take place in code defined by a call to eval?
- * **isNative**: is this call in native V8 code?
- * **isConstructor**: is this a constructor call?
-
-The default stack trace is created using the CallSite API so any information that is available there is also available through this API.
-
-To maintain restrictions imposed on strict mode functions, frames that have a strict mode function and all frames below (its caller etc.) are not allowed to access their receiver and function objects. For those frames, `getFunction()` and `getThis()` will return `undefined`.
-
-### Compatibility
-The API described here is specific to V8 and is not supported by any other JavaScript implementations. Most implementations do provide an `error.stack` property but the format of the stack trace is likely to be different from the format described here. The recommended use of this API is
-
- * Only rely on the layout of the formatted stack trace if you know your code is running in v8.
- * It is safe to set `Error.stackTraceLimit` and `Error.prepareStackTrace` regardless of which implementation is running your code but be aware that it will only have an effect if your code is running in V8.
-
-### Appendix: Stack trace format
-The default stack trace format used by V8 can for each stack frame give the following information:
-
- * Whether the call is a construct call.
- * The type of the this value (Type).
- * The name of the function called (functionName).
- * The name of the property of this or one of its prototypes that holds the function (methodName).
- * The current location within the source (location)
-
-Any of these may be unavailable and different formats for stack frames are used depending on how much of this information is available. If all the above information is available a formatted stack frame will look like this:
-
-```
-at Type.functionName [as methodName] (location)
-```
-
-or, in the case of a construct call
-
-```
-at new functionName (location)
-```
-
-If only one of functionName and methodName is available, or if they are both available but the same, the format will be:
-
-```
-at Type.name (location)
-```
-
-If neither is available `<anonymous>` will be used as the name.
-
-The Type value is the name of the function stored in the constructor field of this. In v8 all constructor calls set this property to the constructor function so unless this field has been actively changed after the object was created, it will hold the name of the function it was created by. If it is unavailable the `[[Class]]` property of the object will be used.
-
-One special case is the global object where the Type is not shown. In that case the stack frame will be formatted as
-
-```
-at functionName [as methodName] (location)
-```
-
-The location itself has several possible formats. Most common is the file name, line and column number within the script that defined the current function
-
-```
-fileName:lineNumber:columnNumber
-```
-
-If the current function was created using eval the format will be
-
-```
-eval at position
-```
-
-where position is the full position where the call to eval occurred. Note that this means that positions can be nested if there are nested calls to eval, for instance:
-
-```
-eval at Foo.a (eval at Bar.z (myscript.js:10:3))
-```
-
-If a stack frame is within V8's libraries the location will be
-
-```
-native
-```
-
-and if is unavailable it will be
-
-```
-unknown location
-``` \ No newline at end of file
diff --git a/deps/v8/docs/merging_and_patching.md b/deps/v8/docs/merging_and_patching.md
deleted file mode 100644
index b173b475c3..0000000000
--- a/deps/v8/docs/merging_and_patching.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Introduction
-
-If you have a patch to the master branch (e.g. an important bug fix) that needs to be merged into one of the production V8 branches, read on.
-
-For the examples, a branched 2.4 version of V8 will be used. Substitute "2.4" with your version number.
-
-**An associated issue on Chromium's or V8's issue tracker is mandatory if a patch is merged. This helps with keeping track of merges.
-You can use [a template](https://code.google.com/p/v8/issues/entry?template=Merge%20request) to create an issue.**
-
-# Merge process outlined
-
-The merge process in the Chromium and V8 tracker is driven by labels in the form of
-```
-Merge-[Status]-[Branch]
-```
-The currently important labels for V8 are:
-
- 1. Merge-Request-## initiates the process => This fix should be merged into M-##
- 1. Merge-Review-## The merge is not approved yet for M-## e.g. because Canary coverage is missing
- 1. Merge-Approved-## => Simply means that the Chrome TPM are signing the merge off
- 1. Merge-Merged-$BRANCHNUMBER$ => When the merge is done the Merge-Approved label is swapped with this one. $BRANCHNUMBER$ is the name/number of the V8 branch e.g. 4.3 for M-43.
-
-# Instructions for git using the automated script
-
-## How to check if a commit was already merged/reverted
-
-Use mergeinfo.py to get all the commits which are connected to the HASH according to Git.
-
-```
-tools/release/mergeinfo.py HASH
-```
-
-## Step 1: Run the script
-
-Let's assume you're merging revision af3cf11 to branch 2.4 (please specify full git hashes - abbreviations are used here for simplicity).
-
-```
-tools/release/merge_to_branch.py --branch 2.4 af3cf11
-```
-
-Run the script with '-h' to display its help message, which includes more options (e.g. you can specify a file containing your patch, or you can reverse a patch, specify a custom commit message, or resume a merging process you've canceled before). Note that the script will use a temporary checkout of v8 - it won't touch your work space.
-You can also merge more than one revision at once, just list them all.
-
-```
-tools/release/merge_to_branch.py --branch 2.4 af3cf11 cf33f1b sf3cf09
-```
-
-## Step 2: Send a notification letter to hablich@chromium.org
-
-Saying something like this:
-```
-_Subject:_ Regression fix merged into V8 2.4 branch (Chrome 8)
-
-_Body:_ We have merged a fix to the V8 version 2.4 branch (the version used in Chrome 8)
-
-Version 2.4.9.10: Issue xxx: The parser doesn't parse.
-```
-
-# FAQ
-
-## I get an error during merge that is related to tagging. What should I do?
-When two people are merging at the same time a race-condition can happen in the merge scripts. If this is the case, contact machenbach@chromium.org and hablich@chromium.org.
-## Is there a TL;DR;?
- 1. [Create issue on issue tracker](https://code.google.com/p/v8/issues/entry?template=Merge%20request)
- 1. Add Merge-Request-{Branch} to the issue
- 1. Wait until somebody will add Merge-Approved-{Branch}
- 1. Merge
diff --git a/deps/v8/docs/profiling_chromium_with_v8.md b/deps/v8/docs/profiling_chromium_with_v8.md
deleted file mode 100644
index 46cdac44ad..0000000000
--- a/deps/v8/docs/profiling_chromium_with_v8.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Introduction
-
-V8's CPU & Heap profilers are trivial to use from V8's shells (see V8Profiler), but it may appear confusing how to use them with Chromium. This page should help you with it.
-
-# Instructions
-
-## Why using V8's profilers with Chromium is different from using them with V8 shells?
-
-Chromium is a complex application, unlike V8 shells. Below is the list of Chromium features that affect profiler usage:
-
- * each renderer is a separate process (OK, not actually each, but let's omit this detail), so they can't share the same log file;
- * sandbox built around renderer process prevents it from writing to a disk;
- * Developer Tools configure profilers for their own purposes;
- * V8's logging code contains some optimizations to simplify logging state checks.
-
-## So, how to run Chromium to get a CPU profile?
-
-Here is how to run Chromium in order to get a CPU profile from the start of the process:
-```
-./Chromium --no-sandbox --js-flags="--logfile=%t.log --prof"
-```
-
-Please note that you wouldn't see profiles in Developer Tools, because all the data is being logged to a file, not to Developer Tools.
-
-### Flags description
-
- * **--no-sandbox** - turns off the renderer sandbox, obviously must have;
- * **--js-flags** - this is the containers for flags passed to V8:
- * **--logfile=%t.log** - specifies a name pattern for log files; **%t** gets expanded into current time in milliseconds, so each process gets its own log file; you can use prefixes and suffixes if you want, like this: **prefix-%t-suffix.log**;
- * **--prof** - tells V8 to write statistical profiling information into the log file.
-
-## Notes
-
-Under Windows, be sure to turn on .MAP file creation for **chrome.dll**, but not for **chrome.exe**. \ No newline at end of file
diff --git a/deps/v8/docs/release_process.md b/deps/v8/docs/release_process.md
deleted file mode 100644
index c6b36ad68e..0000000000
--- a/deps/v8/docs/release_process.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# Introduction
-
-The V8 release process is tightly connected to [Chrome's](https://www.chromium.org/getting-involved/dev-channel). The V8 team is using all four Chrome release channels to push new versions to the users.
-
-If you want to look up what V8 version is in a Chrome release you can check [OmahaProxy](https://omahaproxy.appspot.com/). For each Chrome release a separate branch is created in the V8 repository to make the trace-back easier e.g. for [Chrome 45.0.2413.0](https://chromium.googlesource.com/v8/v8.git/+/chromium/2413).
-
-# Canary releases
-Every day a new Canary build is pushed to the users via [Chrome's Canary channel](https://www.google.com/chrome/browser/canary.html?platform=win64). Normally the deliverable is the latest, stable enough version from [master](https://chromium.googlesource.com/v8/v8.git/+/roll).
-
-Branches for a Canary normally look like this
-
-```
-remotes/origin/4.5.35
-```
-
-# Dev releases
-Every week a new Dev build is pushed to the users via [Chrome's Dev channel](https://www.google.com/chrome/browser/desktop/index.html?extra=devchannel&platform=win64). Normally the deliverable includes the latest stable enough V8 version on the Canary channel.
-
-Branches for a Dev normally look like this
-
-```
-remotes/origin/4.5.35
-```
-
-# Beta releases
-Roughly every 6 weeks a new major branch is created e.g. [for Chrome 44](https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.4). This is happening in sync with the creation of [Chrome's Beta channel](https://www.google.com/chrome/browser/beta.html?platform=win64). The Chrome Beta is pinned to the head of V8's branch. After approx. 6 weeks the branch is promoted to Stable.
-
-Changes are only cherry-picked onto the branch in order to stabilize the version.
-
-Branches for a Beta normally look like this
-
-```
-remotes/branch-heads/4.5
-```
-
-They are based on a Canary branch.
-
-# Stable releases
-Roughly every 6 weeks a new major Stable release is done. No special branch is created as the latest Beta branch is simply promoted to Stable. This version is pushed to the users via [Chrome's Stable channel](https://www.google.com/chrome/browser/desktop/index.html?platform=win64).
-
-Branches for a Stable normally look like this
-
-```
-remotes/branch-heads/4.5
-```
-
-They are promoted (reused) Beta branches.
-
-# Which version should I embed in my application?
-
-The tip of the same branch that Chrome's Stable channel uses.
-
-We often backmerge important bug fixes to a stable branch, so if you care about stability and security and correctness, you should include those updates too -- that's why we recommend "the tip of the branch", as opposed to an exact version.
-
-As soon as a new branch is promoted to Stable, we stop maintaining the previous stable branch. This happens every six weeks, so you should be prepared to update at least this often.
-
-Example: The current stable Chrome release is [44.0.2403.125](https://omahaproxy.appspot.com), with V8 4.4.63.25. So you should embed [branch-heads/4.4](https://chromium.googlesource.com/v8/v8.git/+/branch-heads/4.4). And you should update to branch-heads/4.5 when Chrome 45 is released on the Stable channel. \ No newline at end of file
diff --git a/deps/v8/docs/runtime_functions.md b/deps/v8/docs/runtime_functions.md
deleted file mode 100644
index 3ebd587868..0000000000
--- a/deps/v8/docs/runtime_functions.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Runtime functions
-
-Much of the JavaScript library is implemented in JavaScript code itself,
-using a minimal set of C++ runtime functions callable from JavaScript.
-Some of these are called using names that start with %, and using the flag
-"--allow-natives-syntax". Others are only called by code generated by the
-code generators, and are not visible in JS, even using the % syntax.
diff --git a/deps/v8/docs/source.md b/deps/v8/docs/source.md
deleted file mode 100644
index 22d280fd30..0000000000
--- a/deps/v8/docs/source.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Source
-
-**Quick links:** [browse](https://chromium.googlesource.com/v8/v8/) | [browse bleeding edge](https://chromium.googlesource.com/v8/v8/+/master) | [changes](https://chromium.googlesource.com/v8/v8/+log/master).
-
-## Command-Line Access
-
-### Git
-See [UsingGit](using_git.md).
-
-### Subversion (deprecated)
-
-Use this command to anonymously check out the up-to-date stable version of the project source code:
-
-> `svn checkout http://v8.googlecode.com/svn/trunk/ v8`
-
-If you plan to contribute to V8 but are not a member, use this command to anonymously check out a read-only version of the development branch:
-
-> `svn checkout http://v8.googlecode.com/svn/branches/bleeding_edge/ v8`
-
-If you're a member of the project, use this command to check out a writable development branch as yourself using HTTPS:
-
-> `svn checkout https://v8.googlecode.com/svn/branches/bleeding_edge/ v8 --username <your username>`
-
-When prompted, enter your generated [googlecode.com](http://code.google.com/hosting/settings) password.
-
-## Source Code Branches
-
-There are several different branches of V8; if you're unsure of which version to get, you most likely want the up-to-date stable version in `trunk/`. Here's an overview of the different branches:
-
- * The bleeding edge, `branches/bleeding_edge/`, is where active development takes place. If you're considering contributing to V8 this is the branch to get.
- * Under `trunk/` is the "stable edge", which is updated a few times per week. It is a copy of the bleeding edge that has been successfully tested. Use this if you want to be almost up to date and don't want your code to break whenever we accidentally forget to add a file on the bleeding edge. Some of the trunk revisions are tagged with X.Y.Z.T version labels. When we decide which of X.Y.**.** is the "most stable", it becomes the X.Y branch in subversion.
- * If you want a well-tested version that doesn't change except for bugfixes, use one of the versioned branches (e.g. `branches/3.16/` at the time of this writing). Note that usually only the last two branches are actively maintained; any older branches could have unfixed security holes. You may want to follow the V8 version that Chrome is shipping on its stable (or beta) channels, see http://omahaproxy.appspot.com.
-
-## V8 public API compatibility
-
-V8 public API (basically the files under include/ directory) may change over time. New types/methods may be added without breaking existing functionality. When we decide that want to drop some existing class/methods, we first mark it with [V8\_DEPRECATED](https://code.google.com/p/chromium/codesearch#search/&q=V8_DEPRECATED&sq=package:chromium&type=cs) macro which will cause compile time warnings when the deprecated methods are called by the embedder. We keep deprecated method for one branch and then remove it. E.g. if `v8::CpuProfiler::FindCpuProfile` was plain non deprecated in _3.17_ branch, marked as `V8_DEPRECATED` in _3.18_, it may well be removed in _3.19_ branch.
-
-
-## GUI and IDE Access
-
-This project's Subversion repository may be accessed using many different client programs and plug-ins. See your client's documentation for more information.
diff --git a/deps/v8/docs/testing.md b/deps/v8/docs/testing.md
deleted file mode 100644
index a777c0c5a0..0000000000
--- a/deps/v8/docs/testing.md
+++ /dev/null
@@ -1,58 +0,0 @@
-V8 includes a test framework that allows you to test the engine. The framework lets you run both our own test suites that are included with the source code and others, currently only the Mozilla tests.
-
-## Running the V8 tests
-
-Before you run the tests, you will have to build V8 with GYP using the instructions [here](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP)
-
-You can append `.check` to any build target to have tests run for it, e.g.
-```
-make ia32.release.check
-make ia32.check
-make release.check
-make check # builds and tests everything (no dot before "check"!)
-```
-
-Before submitting patches, you should always run the quickcheck target, which builds a fast debug build and runs only the most relevant tests:
-```
-make quickcheck
-```
-
-You can also run tests manually:
-```
-tools/run-tests.py --arch-and-mode=ia32.release [--outdir=foo]
-```
-
-Or you can run individual tests:
-```
-tools/run-tests.py --arch=ia32 cctest/test-heap/SymbolTable mjsunit/delete-in-eval
-```
-
-Run the script with `--help` to find out about its other options, `--outdir` defaults to `out`. Also note that using the `cctest` binary to run multiple tests in one process is not supported.
-
-## Running the Mozilla and Test262 tests
-
-The V8 test framework comes with support for running the Mozilla as well as the Test262 test suite. To download the test suites and then run them for the first time, do the following:
-
-```
-tools/run-tests.py --download-data mozilla
-tools/run-tests.py --download-data test262
-```
-
-To run the tests subsequently, you may omit the flag that downloads the test suite:
-
-```
-tools/run-tests.py mozilla
-tools/run-tests.py test262
-```
-
-Note that V8 fails a number of Mozilla tests because they require Firefox-specific extensions.
-
-## Running the WebKit tests
-
-Sometimes all of the above tests pass but WebKit build bots fail. To make sure WebKit tests pass run:
-
-```
-tools/run-tests.py --progress=verbose --outdir=out --arch=ia32 --mode=release webkit --timeout=200
-```
-
-Replace --arch and other parameters with values that match your build options. \ No newline at end of file
diff --git a/deps/v8/docs/triaging_issues.md b/deps/v8/docs/triaging_issues.md
deleted file mode 100644
index 075186f697..0000000000
--- a/deps/v8/docs/triaging_issues.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# How to get an issue triaged
-* *V8 tracker*: Set the state to `Untriaged`
-* *Chromium tracker*: Set the state to `Untriaged` and add the label `Cr-Blink-JavaScript`
-
-# How to assign V8 issues in the Chromium tracker
-Please assign issues to the V8 specialty sheriffs of one of the
-following categories:
-
- * Stability: jkummerow@c....org, adamk@c....org
- * Performance: bmeurer@c....org, mvstanton@c....org
- * Clusterfuzz: Set the bug to the following state:
- * `label:ClusterFuzz label:Cr-Blink-JavaScript status:Available -has:owner`
- * Will show up in [this](https://code.google.com/p/chromium/issues/list?can=2&q=label%3AClusterFuzz+label%3ACr-Blink-JavaScript+status%3AAvailable+-has%3Aowner&colspec=ID+Pri+M+Week+ReleaseBlock+Cr+Status+Owner+Summary+OS+Modified&x=m&y=releaseblock&cells=tiles) query.
- * CC mstarzinger@ and ishell@
-
-Please CC hablich@c....org on all issues.
-
-Assign remaining issues to hablich@c....org.
-
-Use the label Cr-Blink-JavaScript on all issues.
-
-**Please note that this only applies to issues tracked in the Chromium issue tracker.** \ No newline at end of file
diff --git a/deps/v8/docs/using_git.md b/deps/v8/docs/using_git.md
deleted file mode 100644
index b5e392aedd..0000000000
--- a/deps/v8/docs/using_git.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Git repository
-
-V8's git repository is located at https://chromium.googlesource.com/v8/v8.git
-
-V8's master branch has also an official git mirror on github: http://github.com/v8/v8-git-mirror.
-
-**Don't just `git-clone` either of these URLs** if you want to build V8 from your checkout, instead follow the instructions below to get everything set up correctly.
-
-## Prerequisites
-
- 1. **Git**. To install using `apt-get`:
-```
-apt-get install git
-```
- 1. **depot\_tools**. See [instructions](http://dev.chromium.org/developers/how-tos/install-depot-tools).
- 1. For **push access**, you need to setup a .netrc file with your git password:
- 1. Go to https://chromium.googlesource.com/new-password - login with your committer account (e.g. @chromium.org account, non-chromium.org ones work too). Note: creating a new password doesn't automatically revoke any previously created passwords.
- 1. Follow the instructions in the "Staying Authenticated" section. It would ask you to copy-paste two lines into your ~/.netrc file.
- 1. In the end, ~/.netrc should have two lines that look like:
-```
-machine chromium.googlesource.com login git-yourusername.chromium.org password <generated pwd>
-machine chromium-review.googlesource.com login git-yourusername.chromium.org password <generated pwd>
-```
- 1. Make sure that ~/.netrc file's permissions are 0600 as many programs refuse to read .netrc files which are readable by anyone other than you.
-
-
-## How to start
-
-Make sure depot\_tools are up-to-date by typing once:
-
-```
-gclient
-```
-
-
-Then get V8, including all branches and dependencies:
-
-```
-fetch v8
-cd v8
-```
-
-After that you're intentionally in a detached head state.
-
-Optionally you can specify how new branches should be tracked:
-
-```
-git config branch.autosetupmerge always
-git config branch.autosetuprebase always
-```
-
-Alternatively, you can create new local branches like this (recommended):
-
-```
-git new-branch mywork
-```
-
-## Staying up-to-date
-
-Update your current branch with git pull. Note that if you're not on a branch, git pull won't work, and you'll need to use git fetch instead.
-
-```
-git pull
-```
-
-Sometimes dependencies of v8 are updated. You can synchronize those by running
-
-```
-gclient sync
-```
-
-## Sending code for reviewing
-
-```
-git cl upload
-```
-
-## Committing
-
-You can use the CQ checkbox on codereview for committing (preferred). See also the [chromium instructions](http://www.chromium.org/developers/testing/commit-queue) for CQ flags and troubleshooting.
-
-If you need more trybots than the default, add the following to your commit message on rietveld (e.g. for adding a nosnap bot):
-
-```
-CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_nosnap_rel
-```
-
-To land manually, update your branch:
-
-```
-git pull --rebase origin
-```
-
-Then commit using
-
-```
-git cl land
-```
-
-# For project members
-
-
-## Try jobs
-
-### Creating a try job from codereview
-
- 1. Upload a CL to rietveld.
-```
-git cl upload
-```
- 1. Try the CL by sending a try job to the try bots like this:
-```
-git cl try
-```
- 1. Wait for the try bots to build and you will get an e-mail with the result. You can also check the try state at your patch on codereview.
- 1. If applying the patch fails you either need to rebase your patch or specify the v8 revision to sync to:
-```
-git cl try --revision=1234
-```
-
-### Creating a try job from a local branch
-
- 1. Commit some changes to a git branch in the local repo.
- 1. Try the change by sending a try job to the try bots like this:
-```
-git try
-```
- 1. Wait for the try bots to build and you will get an e-mail with the result. Note: There are issues with some of the slaves at the moment. Sending try jobs from codereview is recommended.
-
-### Useful arguments
-
-The revision argument tells the try bot what revision of the code base will be used for applying your local changes to. Without the revision, our LKGR revision is used as the base (http://v8-status.appspot.com/lkgr).
-```
-git try --revision=1234
-```
-To avoid running your try job on all bots, use the --bot flag with a comma-separated list of builder names. Example:
-```
-git try --bot=v8_mac_rel
-```
-
-### Viewing the try server
-
-http://build.chromium.org/p/tryserver.v8/waterfall
-
-### Access credentials
-
-If asked for access credentials, use your @chromium.org email address and your generated password from [googlecode.com](http://code.google.com/hosting/settings). \ No newline at end of file
diff --git a/deps/v8/docs/v8_c_plus_plus_styleand_sops.md b/deps/v8/docs/v8_c_plus_plus_styleand_sops.md
deleted file mode 100644
index 983ed59a2f..0000000000
--- a/deps/v8/docs/v8_c_plus_plus_styleand_sops.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# V8 C++ Style Guide
-
-In general, V8 should conform to Google's/Chrome's C++ Style Guide for new code that is written. Your V8 code should conform to them as much as possible. There will always be cases where Google/Chrome Style Guide conformity or Google/Chrome best practices are extremely cumbersome or underspecified for our use cases. We document these exceptions here.
diff --git a/deps/v8/docs/v8_committers_responsibility.md b/deps/v8/docs/v8_committers_responsibility.md
deleted file mode 100644
index c2ff6766f8..0000000000
--- a/deps/v8/docs/v8_committers_responsibility.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# V8 committer's responsibility
-
-## Basic commit guidelines
-
-When you're committing to the V8 repositories, ensure that you follow those guidelines:
-
- 1. Find the right reviewer for your changes and for patches you're asked to review.
- 1. Be available on IM and/or email before and after you land the change.
- 1. Watch the [waterfall](http://build.chromium.org/p/client.v8/console) until all bots turn green after your change.
- 1. When landing a TBR change (To Be Reviewed), make sure to notify the people whose code you're changing. Usually just send the review e-mail.
-
-In short, do the right thing for the project, not the easiest thing to get code committed, and above all: use your best judgement.
-
-**Don't be afraid to ask questions. There is always someone who will immediately read messages sent to the v8-committers mailing list who can help you.**
-
-## Changes with multiple reviewers
-
-There are occasionally changes with a lot of reviewers on them, since sometimes several people might need to be in the loop for a change because of multiple areas of responsibility and expertise.
-
-The problem is that without some guidelines, there's no clear responsibility given in these reviews.
-
-If you're the sole reviewer on a change, you know you have to do a good job. When there are three other people, you sometimes assume that somebody else must have looked carefully at some part of the review. Sometimes all the reviewers think this and the change isn't reviewed properly.
-
-In other cases, some reviewers say "LGTM" for a patch, while others are still expecting changes. The author can get confused as to the status of the review, and some patches have been checked in where at least one reviewer expected further changes before committing.
-
-At the same time, we want to encourage many people to participate in the review process and keep tabs on what's going on.
-
-So, here are some guidelines to help clarify the process:
- 1. When a patch author requests more than one reviewer, they should make clear in the review request email what they expect the responsibility of each reviewer to be. For example, you could write this in the email:
- * larry: bitmap changes
- * sergey: process hacks
- * everybody else: FYI
- 1. In this case, you might be on the review list because you've asked to be in the loop for multiprocess changes, but you wouldn't be the primary reviewer and the author and other reviewers wouldn't be expecting you to review all the diffs in detail.
- 1. If you get a review that includes many other people, and the author didn't do (1), please ask them what part you're responsible for if you don't want to review the whole thing in detail.
- 1. The author should wait for approval from everybody on the reviewer list before checking in.
- 1. People who are on a review without clear review responsibility (i.e. drive-by reviews) should be super responsive and not hold up the review. The patch author should feel free to ping them mercilessly if they are.
- 1. If you're an "FYI" person on a review and you didn't actually review in detail (or at all), but don't have a problem with the patch, note this. You could say something like "rubber stamp" or "ACK" instead of "LGTM." This way the real reviewers know not to trust that you did their work for them, but the author of the patch knows they don't have to wait for further feedback from you. Hopefully we can still keep everybody in the loop but have clear ownership and detailed reviews. It might even speed up some changes since you can quickly "ACK" changes you don't care about, and the author knows they don't have to wait for feedback from you.
-
-(Adapted from: http://dev.chromium.org/developers/committers-responsibility )
diff --git a/deps/v8/docs/v8_profiler.md b/deps/v8/docs/v8_profiler.md
deleted file mode 100644
index 670fe11dd7..0000000000
--- a/deps/v8/docs/v8_profiler.md
+++ /dev/null
@@ -1,141 +0,0 @@
-# Introduction
-
-V8 has built-in sample based profiling. Profiling is turned off by default, but can be enabled via the --prof command line option. The sampler records stacks of both JavaScript and C/C++ code.
-
-# Build
-Build the d8 shell following the instructions at [BuildingWithGYP](BuildingWithGYP.md).
-
-
-# Command Line
-To start profiling, use the `--prof` option. When profiling, V8 generates a `v8.log` file which contains profiling data.
-
-Windows:
-```
-build\Release\d8 --prof script.js
-```
-
-Other platforms (replace "ia32" with "x64" if you want to profile the x64 build):
-```
-out/ia32.release/d8 --prof script.js
-```
-
-# Process the Generated Output
-
-Log file processing is done using JS scripts running by the d8 shell. For this to work, a `d8` binary (or symlink, or `d8.exe` on Windows) must be in the root of your V8 checkout, or in the path specified by the environment variable `D8_PATH`. Note: this binary is just used to process the log, but not for the actual profiling, so it doesn't matter which version etc. it is.
-
-Windows:
-```
-tools\windows-tick-processor.bat v8.log
-```
-
-Linux:
-```
-tools/linux-tick-processor v8.log
-```
-
-Mac OS X:
-```
-tools/mac-tick-processor v8.log
-```
-
-## Snapshot-based VM build and builtins reporting
-
-When a snapshot-based VM build is being used, code objects from a snapshot that don't correspond to functions are reported with generic names like _"A builtin from the snapshot"_, because their real names are not stored in the snapshot. To see the names the following steps must be taken:
-
- * `--log-snapshot-positions` flag must be passed to VM (along with `--prof`); this way, for deserialized objects the `(memory address, snapshot offset)` pairs are being emitted into profiler log;
-
- * `--snapshot-log=<log file from mksnapshot>` flag must be passed to the tick processor script; a log file from the `mksnapshot` program (a snapshot log) contains address-offset pairs for serialized objects, and their names; using the snapshot log, names can be mapped onto deserialized objects during profiler log processing; the snapshot log file is called `snapshot.log` and resides alongside with V8's compiled files.
-
-An example of usage:
-```
-out/ia32.release/d8 --prof --log-snapshot-positions script.js
-tools/linux-tick-processor --snapshot-log=out/ia32.release/obj.target/v8_snapshot/geni/snapshot.log v8.log
-```
-
-# Programmatic Control of Profiling
-If you would like to control in your application when profile samples are collected, you can do so.
-
-First you'll probably want to use the `--noprof-auto` command line switch which prevents the profiler from automatically starting to record profile ticks.
-
-Profile ticks will not be recorded until your application specifically invokes these APIs:
- * `V8::ResumeProfiler()` - start/resume collection of data
- * `V8::PauseProfiler()` - pause collection of data
-
-# Example Output
-
-```
-Statistical profiling result from benchmarks\v8.log, (4192 ticks, 0 unaccounted, 0 excluded).
-
- [Shared libraries]:
- ticks total nonlib name
- 9 0.2% 0.0% C:\WINDOWS\system32\ntdll.dll
- 2 0.0% 0.0% C:\WINDOWS\system32\kernel32.dll
-
- [JavaScript]:
- ticks total nonlib name
- 741 17.7% 17.7% LazyCompile: am3 crypto.js:108
- 113 2.7% 2.7% LazyCompile: Scheduler.schedule richards.js:188
- 103 2.5% 2.5% LazyCompile: rewrite_nboyer earley-boyer.js:3604
- 103 2.5% 2.5% LazyCompile: TaskControlBlock.run richards.js:324
- 96 2.3% 2.3% Builtin: JSConstructCall
- ...
-
- [C++]:
- ticks total nonlib name
- 94 2.2% 2.2% v8::internal::ScavengeVisitor::VisitPointers
- 33 0.8% 0.8% v8::internal::SweepSpace
- 32 0.8% 0.8% v8::internal::Heap::MigrateObject
- 30 0.7% 0.7% v8::internal::Heap::AllocateArgumentsObject
- ...
-
-
- [GC]:
- ticks total nonlib name
- 458 10.9%
-
- [Bottom up (heavy) profile]:
- Note: percentage shows a share of a particular caller in the total
- amount of its parent calls.
- Callers occupying less than 2.0% are not shown.
-
- ticks parent name
- 741 17.7% LazyCompile: am3 crypto.js:108
- 449 60.6% LazyCompile: montReduce crypto.js:583
- 393 87.5% LazyCompile: montSqrTo crypto.js:603
- 212 53.9% LazyCompile: bnpExp crypto.js:621
- 212 100.0% LazyCompile: bnModPowInt crypto.js:634
- 212 100.0% LazyCompile: RSADoPublic crypto.js:1521
- 181 46.1% LazyCompile: bnModPow crypto.js:1098
- 181 100.0% LazyCompile: RSADoPrivate crypto.js:1628
- ...
-```
-
-# Timeline plot
-The timeline plot visualizes where V8 is spending time. This can be used to find bottlenecks and spot things that are unexpected (for example, too much time spent in the garbage collector). Data for the plot are gathered by both sampling and instrumentation. Linux with gnuplot 4.6 is required.
-
-To create a timeline plot, run V8 as described above, with the option `--log-timer-events` additional to `--prof`:
-```
-out/ia32.release/d8 --prof --log-timer-events script.js
-```
-
-The output is then passed to a plot script, similar to the tick-processor:
-```
-tools/plot-timer-events v8.log
-```
-
-This creates `timer-events.png` in the working directory, which can be opened with most image viewers.
-
-# Options
-Since recording log output comes with a certain performance overhead, the script attempts to correct this using a distortion factor. If not specified, it tries to find out automatically. You can however also specify the distortion factor manually.
-```
-tools/plot-timer-events --distortion=4500 v8.log
-```
-
-You can also manually specify a certain range for which to create the plot or statistical profile, expressed in milliseconds:
-```
-tools/plot-timer-events --distortion=4500 --range=1000,2000 v8.log
-tools/linux-tick-processor --distortion=4500 --range=1000,2000 v8.log
-```
-
-# HTML 5 version
-Both statistical profile and timeline plot are available [in the browser](http://v8.googlecode.com/svn/branches/bleeding_edge/tools/profviz/profviz.html). However, the statistical profile lacks C++ symbol resolution and the Javascript port of gnuplot performs an order of magnitude slower than the native one. \ No newline at end of file
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 0b64fb3882..0d0ee739c0 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -155,8 +155,11 @@ class V8_EXPORT Debug {
*/
typedef void (*DebugMessageDispatchHandler)();
- static bool SetDebugEventListener(EventCallback that,
+ static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>());
+ V8_DEPRECATED("Use version with an Isolate",
+ static bool SetDebugEventListener(
+ EventCallback that, Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
@@ -170,7 +173,9 @@ class V8_EXPORT Debug {
static bool CheckDebugBreak(Isolate* isolate);
// Message based interface. The message protocol is JSON.
- static void SetMessageHandler(MessageHandler handler);
+ static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
+ V8_DEPRECATED("Use version with an Isolate",
+ static void SetMessageHandler(MessageHandler handler));
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
@@ -194,10 +199,9 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Value> Call(v8::Local<v8::Function> fun,
- Local<Value> data = Local<Value>()));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Value> Call(v8::Local<v8::Function> fun,
+ Local<Value> data = Local<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
@@ -206,8 +210,8 @@ class V8_EXPORT Debug {
/**
* Returns a mirror object for the given object.
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> GetMirror(v8::Local<v8::Value> obj));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Value> GetMirror(v8::Local<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj);
@@ -242,7 +246,9 @@ class V8_EXPORT Debug {
* "Evaluate" debug command behavior currently is not specified in scope
* of this method.
*/
- static void ProcessDebugMessages();
+ static void ProcessDebugMessages(Isolate* isolate);
+ V8_DEPRECATED("Use version with an Isolate",
+ static void ProcessDebugMessages());
/**
* Debugger is running in its own context which is entered while debugger
@@ -251,7 +257,9 @@ class V8_EXPORT Debug {
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
- static Local<Context> GetDebugContext();
+ static Local<Context> GetDebugContext(Isolate* isolate);
+ V8_DEPRECATED("Use version with an Isolate",
+ static Local<Context> GetDebugContext());
/**
diff --git a/deps/v8/include/v8-experimental.h b/deps/v8/include/v8-experimental.h
new file mode 100644
index 0000000000..f988e14054
--- /dev/null
+++ b/deps/v8/include/v8-experimental.h
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * This header contains a set of experimental V8 APIs. We hope these will
+ * become a part of standard V8, but they may also be removed if we deem the
+ * experiment to not be successful.
+ */
+#ifndef V8_INCLUDE_V8_EXPERIMENTAL_H_
+#define V8_INCLUDE_V8_EXPERIMENTAL_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace experimental {
+
+// Allow the embedder to construct accessors that V8 can compile and use
+// directly, without jumping into the runtime.
+class V8_EXPORT FastAccessorBuilder {
+ public:
+ struct ValueId {
+ size_t value_id;
+ };
+ struct LabelId {
+ size_t label_id;
+ };
+
+ static FastAccessorBuilder* New(Isolate* isolate);
+
+ ValueId IntegerConstant(int int_constant);
+ ValueId GetReceiver();
+ ValueId LoadInternalField(ValueId value_id, int field_no);
+ ValueId LoadValue(ValueId value_id, int offset);
+ ValueId LoadObject(ValueId value_id, int offset);
+ void ReturnValue(ValueId value_id);
+ void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
+ void CheckNotZeroOrReturnNull(ValueId value_id);
+ LabelId MakeLabel();
+ void SetLabel(LabelId label_id);
+ void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+
+ private:
+ FastAccessorBuilder() = delete;
+ FastAccessorBuilder(const FastAccessorBuilder&) = delete;
+ ~FastAccessorBuilder() = delete;
+ void operator=(const FastAccessorBuilder&) = delete;
+};
+
+} // namespace experimental
+} // namespace v8
+
+#endif // V8_INCLUDE_V8_EXPERIMENTAL_H_
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index c6cba0f982..4fbef0f5d9 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -5,6 +5,8 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
+#include <stdint.h>
+
namespace v8 {
class Isolate;
@@ -107,6 +109,51 @@ class Platform {
* the epoch.
**/
virtual double MonotonicallyIncreasingTime() = 0;
+
+ /**
+ * Called by TRACE_EVENT* macros, don't call this directly.
+ * The name parameter is a category group for example:
+ * TRACE_EVENT0("v8,parse", "V8.Parse")
+ * The pointer returned points to a value with zero or more of the bits
+ * defined in CategoryGroupEnabledFlags.
+ **/
+ virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ /**
+ * Gets the category group name of the given category_enabled_flag pointer.
+ * Usually used while serializing TRACE_EVENTs.
+ **/
+ virtual const char* GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) {
+ static const char dummy[] = "dummy";
+ return dummy;
+ }
+
+ /**
+ * Adds a trace event to the platform tracing system. This function call is
+ * usually the result of a TRACE_* macro from trace_event_common.h when
+ * tracing and the category of the particular trace are enabled. It is not
+ * advisable to call this function on its own; it is really only meant to be
+ * used by the trace macros. The returned handle can be used by
+ * UpdateTraceEventDuration to update the duration of COMPLETE events.
+ */
+ virtual uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values,
+ unsigned int flags) {
+ return 0;
+ }
+
+ /**
+ * Sets the duration field of a COMPLETE trace event. It must be called with
+ * the handle returned from AddTraceEvent().
+ **/
+ virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) {}
};
} // namespace v8
diff --git a/deps/v8/include/v8-testing.h b/deps/v8/include/v8-testing.h
index d18fc72583..f67bf2530d 100644
--- a/deps/v8/include/v8-testing.h
+++ b/deps/v8/include/v8-testing.h
@@ -39,7 +39,7 @@ class V8_EXPORT Testing {
/**
* Force deoptimization of all functions.
*/
- static void DeoptimizeAll();
+ static void DeoptimizeAll(Isolate* isolate);
};
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 0fe7d9fdc0..5fc15a3061 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 8
-#define V8_BUILD_NUMBER 271
-#define V8_PATCH_LEVEL 17
+#define V8_MINOR_VERSION 9
+#define V8_BUILD_NUMBER 385
+#define V8_PATCH_LEVEL 18
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 66c3043b4a..36df60a5f5 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -12,8 +12,8 @@
* For other documentation see http://code.google.com/apis/v8/
*/
-#ifndef V8_H_
-#define V8_H_
+#ifndef INCLUDE_V8_H_
+#define INCLUDE_V8_H_
#include <stddef.h>
#include <stdint.h>
@@ -92,6 +92,7 @@ class ObjectTemplate;
class Platform;
class Primitive;
class Promise;
+class Proxy;
class RawOperationDescriptor;
class Script;
class SharedArrayBuffer;
@@ -136,6 +137,10 @@ class CallHandlerHelper;
class EscapableHandleScope;
template<typename T> class ReturnValue;
+namespace experimental {
+class FastAccessorBuilder;
+} // namespace experimental
+
namespace internal {
class Arguments;
class Heap;
@@ -420,12 +425,12 @@ class WeakCallbackInfo {
V8_INLINE T* GetParameter() const { return parameter_; }
V8_INLINE void* GetInternalField(int index) const;
- V8_INLINE V8_DEPRECATE_SOON("use indexed version",
- void* GetInternalField1() const) {
+ V8_INLINE V8_DEPRECATED("use indexed version",
+ void* GetInternalField1() const) {
return internal_fields_[0];
}
- V8_INLINE V8_DEPRECATE_SOON("use indexed version",
- void* GetInternalField2() const) {
+ V8_INLINE V8_DEPRECATED("use indexed version",
+ void* GetInternalField2() const) {
return internal_fields_[1];
}
@@ -551,13 +556,13 @@ template <class T> class PersistentBase {
* critical form of resource management!
*/
template <typename P>
- V8_INLINE V8_DEPRECATE_SOON(
+ V8_INLINE V8_DEPRECATED(
"use WeakCallbackInfo version",
void SetWeak(P* parameter,
typename WeakCallbackData<T, P>::Callback callback));
template <typename S, typename P>
- V8_INLINE V8_DEPRECATE_SOON(
+ V8_INLINE V8_DEPRECATED(
"use WeakCallbackInfo version",
void SetWeak(P* parameter,
typename WeakCallbackData<S, P>::Callback callback));
@@ -569,7 +574,7 @@ template <class T> class PersistentBase {
// specify a parameter for the callback or the location of two internal
// fields in the dying object.
template <typename P>
- V8_INLINE V8_DEPRECATE_SOON(
+ V8_INLINE V8_DEPRECATED(
"use SetWeak",
void SetPhantom(P* parameter,
typename WeakCallbackInfo<P>::Callback callback,
@@ -1313,10 +1318,10 @@ class V8_EXPORT ScriptCompiler {
* \return Compiled script object (context independent; for running it must be
* bound to a context).
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<UnboundScript> CompileUnbound(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions));
+ static V8_DEPRECATED("Use maybe version",
+ Local<UnboundScript> CompileUnbound(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions));
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
@@ -1332,7 +1337,7 @@ class V8_EXPORT ScriptCompiler {
* when this function was called. When run it will always use this
* context.
*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use maybe version",
Local<Script> Compile(Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
@@ -1362,11 +1367,11 @@ class V8_EXPORT ScriptCompiler {
* (ScriptStreamingTask has been run). V8 doesn't construct the source string
* during streaming, so the embedder needs to pass the full source here.
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<Script> Compile(Isolate* isolate, StreamedSource* source,
- Local<String> full_source_string,
- const ScriptOrigin& origin));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Script> Compile(Isolate* isolate,
+ StreamedSource* source,
+ Local<String> full_source_string,
+ const ScriptOrigin& origin));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, StreamedSource* source,
Local<String> full_source_string, const ScriptOrigin& origin);
@@ -1492,7 +1497,7 @@ class V8_EXPORT Message {
* Returns the index within the line of the last character where
* the error occurred.
*/
- V8_DEPRECATE_SOON("Use maybe version", int GetEndColumn() const);
+ V8_DEPRECATED("Use maybe version", int GetEndColumn() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
/**
@@ -1604,8 +1609,7 @@ class V8_EXPORT StackFrame {
/**
* Returns the name of the resource that contains the script for the
* function for this StackFrame or sourceURL value if the script name
- * is undefined and its source ends with //# sourceURL=... string or
- * deprecated //@ sourceURL=... string.
+ * is undefined and its source ends with //# sourceURL=... string.
*/
Local<String> GetScriptNameOrSourceURL() const;
@@ -1661,8 +1665,8 @@ class V8_EXPORT JSON {
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
- static V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> Parse(Local<String> json_string));
+ static V8_DEPRECATED("Use maybe version",
+ Local<Value> Parse(Local<String> json_string));
static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
Isolate* isolate, Local<String> json_string);
};
@@ -1737,7 +1741,8 @@ class V8_EXPORT Value : public Data {
bool IsFunction() const;
/**
- * Returns true if this value is an array.
+ * Returns true if this value is an array. Note that it will return false for
+ * a Proxy for an array.
*/
bool IsArray() const;
@@ -1950,6 +1955,11 @@ class V8_EXPORT Value : public Data {
*/
bool IsSharedArrayBuffer() const;
+ /**
+ * Returns true if this value is a JavaScript Proxy.
+ */
+ bool IsProxy() const;
+
V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
Local<Context> context) const;
@@ -1973,34 +1983,34 @@ class V8_EXPORT Value : public Data {
Local<Number> ToNumber(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<String> ToString(Isolate* isolate) const);
- V8_DEPRECATE_SOON("Use maybe version",
- Local<String> ToDetailString(Isolate* isolate) const);
+ V8_DEPRECATED("Use maybe version",
+ Local<String> ToDetailString(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Object> ToObject(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger(Isolate* isolate) const);
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Uint32> ToUint32(Isolate* isolate) const);
+ V8_DEPRECATED("Use maybe version",
+ Local<Uint32> ToUint32(Isolate* isolate) const);
V8_DEPRECATE_SOON("Use maybe version",
Local<Int32> ToInt32(Isolate* isolate) const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Boolean> ToBoolean() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Number> ToNumber() const);
+ inline V8_DEPRECATED("Use maybe version", Local<Number> ToNumber() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<String> ToString() const);
- inline V8_DEPRECATE_SOON("Use maybe version",
- Local<String> ToDetailString() const);
+ inline V8_DEPRECATED("Use maybe version",
+ Local<String> ToDetailString() const);
inline V8_DEPRECATE_SOON("Use maybe version", Local<Object> ToObject() const);
inline V8_DEPRECATE_SOON("Use maybe version",
Local<Integer> ToInteger() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToUint32() const);
- inline V8_DEPRECATE_SOON("Use maybe version", Local<Int32> ToInt32() const);
+ inline V8_DEPRECATED("Use maybe version", Local<Uint32> ToUint32() const);
+ inline V8_DEPRECATED("Use maybe version", Local<Int32> ToInt32() const);
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToArrayIndex() const);
+ V8_DEPRECATED("Use maybe version", Local<Uint32> ToArrayIndex() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
Local<Context> context) const;
@@ -2188,6 +2198,8 @@ class V8_EXPORT String : public Name {
public:
virtual ~ExternalStringResourceBase() {}
+ virtual bool IsCompressible() const { return false; }
+
protected:
ExternalStringResourceBase() {}
@@ -2304,7 +2316,7 @@ class V8_EXPORT String : public Name {
int length = -1);
/** Allocates a new string from Latin-1 data.*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use maybe version",
Local<String> NewFromOneByte(Isolate* isolate, const uint8_t* data,
NewStringType type = kNormalString,
@@ -2343,10 +2355,9 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static V8_DEPRECATE_SOON(
- "Use maybe version",
- Local<String> NewExternal(Isolate* isolate,
- ExternalStringResource* resource));
+ static V8_DEPRECATED("Use maybe version",
+ Local<String> NewExternal(
+ Isolate* isolate, ExternalStringResource* resource));
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
Isolate* isolate, ExternalStringResource* resource);
@@ -2456,8 +2467,8 @@ class V8_EXPORT Symbol : public Name {
Local<Value> Name() const;
// Create a symbol. If name is not empty, it will be used as the description.
- static Local<Symbol> New(
- Isolate *isolate, Local<String> name = Local<String>());
+ static Local<Symbol> New(Isolate* isolate,
+ Local<String> name = Local<String>());
// Access global symbol registry.
// Note that symbols created this way are never collected, so
@@ -2663,13 +2674,13 @@ class V8_EXPORT Object : public Value {
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
- V8_DEPRECATE_SOON("Use CreateDataProperty",
- bool ForceSet(Local<Value> key, Local<Value> value,
- PropertyAttribute attribs = None));
- V8_DEPRECATE_SOON("Use CreateDataProperty",
- Maybe<bool> ForceSet(Local<Context> context,
- Local<Value> key, Local<Value> value,
- PropertyAttribute attribs = None));
+ V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
+ bool ForceSet(Local<Value> key, Local<Value> value,
+ PropertyAttribute attribs = None));
+ V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
+ Maybe<bool> ForceSet(Local<Context> context, Local<Value> key,
+ Local<Value> value,
+ PropertyAttribute attribs = None));
V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
@@ -2684,16 +2695,16 @@ class V8_EXPORT Object : public Value {
* any combination of ReadOnly, DontEnum and DontDelete. Returns
* None when the property doesn't exist.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- PropertyAttribute GetPropertyAttributes(Local<Value> key));
+ V8_DEPRECATED("Use maybe version",
+ PropertyAttribute GetPropertyAttributes(Local<Value> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
Local<Context> context, Local<Value> key);
/**
* Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> GetOwnPropertyDescriptor(Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> GetOwnPropertyDescriptor(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
Local<Context> context, Local<String> key);
@@ -2705,27 +2716,27 @@ class V8_EXPORT Object : public Value {
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> Delete(Local<Context> context, Local<Value> key);
- V8_DEPRECATE_SOON("Use maybe version", bool Has(uint32_t index));
+ V8_DEPRECATED("Use maybe version", bool Has(uint32_t index));
V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
- V8_DEPRECATE_SOON("Use maybe version", bool Delete(uint32_t index));
+ V8_DEPRECATED("Use maybe version", bool Delete(uint32_t index));
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> Delete(Local<Context> context, uint32_t index);
- V8_DEPRECATE_SOON("Use maybe version",
- bool SetAccessor(Local<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter = 0,
- Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
- V8_DEPRECATE_SOON("Use maybe version",
- bool SetAccessor(Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = 0,
- Local<Value> data = Local<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None));
+ V8_DEPRECATED("Use maybe version",
+ bool SetAccessor(Local<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter = 0,
+ Local<Value> data = Local<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None));
+ V8_DEPRECATED("Use maybe version",
+ bool SetAccessor(Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ Local<Value> data = Local<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None));
// TODO(dcarney): mark V8_WARN_UNUSED_RESULT
Maybe<bool> SetAccessor(Local<Context> context, Local<Name> name,
AccessorNameGetterCallback getter,
@@ -2782,8 +2793,7 @@ class V8_EXPORT Object : public Value {
* be skipped by __proto__ and it does not consult the security
* handler.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- bool SetPrototype(Local<Value> prototype));
+ V8_DEPRECATED("Use maybe version", bool SetPrototype(Local<Value> prototype));
V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
Local<Value> prototype);
@@ -2798,7 +2808,7 @@ class V8_EXPORT Object : public Value {
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
- V8_DEPRECATE_SOON("Use maybe version", Local<String> ObjectProtoToString());
+ V8_DEPRECATED("Use maybe version", Local<String> ObjectProtoToString());
V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
Local<Context> context);
@@ -2843,8 +2853,7 @@ class V8_EXPORT Object : public Value {
void SetAlignedPointerInInternalField(int index, void* value);
// Testers for local properties.
- V8_DEPRECATE_SOON("Use maybe version",
- bool HasOwnProperty(Local<String> key));
+ V8_DEPRECATED("Use maybe version", bool HasOwnProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
Local<Name> key);
V8_DEPRECATE_SOON("Use maybe version",
@@ -2864,7 +2873,7 @@ class V8_EXPORT Object : public Value {
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use maybe version",
Local<Value> GetRealNamedPropertyInPrototypeChain(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
@@ -2875,7 +2884,7 @@ class V8_EXPORT Object : public Value {
* which can be None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use maybe version",
Maybe<PropertyAttribute> GetRealNamedPropertyAttributesInPrototypeChain(
Local<String> key));
@@ -2888,8 +2897,8 @@ class V8_EXPORT Object : public Value {
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> GetRealNamedProperty(Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> GetRealNamedProperty(Local<String> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
Local<Context> context, Local<Name> key);
@@ -2898,9 +2907,9 @@ class V8_EXPORT Object : public Value {
* None or any combination of ReadOnly, DontEnum and DontDelete.
* Interceptors in the prototype chain are not called.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
- Local<String> key));
+ V8_DEPRECATED("Use maybe version",
+ Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
+ Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key);
@@ -2919,12 +2928,12 @@ class V8_EXPORT Object : public Value {
*/
int GetIdentityHash();
- V8_DEPRECATE_SOON("Use v8::Object::SetPrivate instead.",
- bool SetHiddenValue(Local<String> key, Local<Value> value));
- V8_DEPRECATE_SOON("Use v8::Object::GetHidden instead.",
- Local<Value> GetHiddenValue(Local<String> key));
- V8_DEPRECATE_SOON("Use v8::Object::DeletePrivate instead.",
- bool DeleteHiddenValue(Local<String> key));
+ V8_DEPRECATED("Use v8::Object::SetPrivate instead.",
+ bool SetHiddenValue(Local<String> key, Local<Value> value));
+ V8_DEPRECATED("Use v8::Object::GetPrivate instead.",
+ Local<Value> GetHiddenValue(Local<String> key));
+ V8_DEPRECATED("Use v8::Object::DeletePrivate instead.",
+ bool DeleteHiddenValue(Local<String> key));
/**
* Clone this object with a fast but shallow copy. Values will point
@@ -2949,9 +2958,9 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> CallAsFunction(Local<Value> recv, int argc,
- Local<Value> argv[]));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> CallAsFunction(Local<Value> recv, int argc,
+ Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
Local<Value> recv,
int argc,
@@ -2962,9 +2971,8 @@ class V8_EXPORT Object : public Value {
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Value> CallAsConstructor(int argc,
- Local<Value> argv[]));
+ V8_DEPRECATED("Use maybe version",
+ Local<Value> CallAsConstructor(int argc, Local<Value> argv[]));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
Local<Context> context, int argc, Local<Value> argv[]);
@@ -2996,10 +3004,11 @@ class V8_EXPORT Array : public Object {
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Object> CloneElementAt(uint32_t index));
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> CloneElementAt(
- Local<Context> context, uint32_t index);
+ V8_DEPRECATED("Cloning is not supported.",
+ Local<Object> CloneElementAt(uint32_t index));
+ V8_DEPRECATED("Cloning is not supported.",
+ MaybeLocal<Object> CloneElementAt(Local<Context> context,
+ uint32_t index));
/**
* Creates a JavaScript array with the given length. If the length
@@ -3042,15 +3051,6 @@ class V8_EXPORT Map : public Object {
*/
static Local<Map> New(Isolate* isolate);
- /**
- * Creates a new Map containing the elements of array, which must be formatted
- * in the same manner as the array returned from AsArray().
- * Guaranteed to be side-effect free if the array contains no holes.
- */
- static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
- "Use mutation methods instead",
- MaybeLocal<Map> FromArray(Local<Context> context, Local<Array> array));
-
V8_INLINE static Map* Cast(Value* obj);
private:
@@ -3083,14 +3083,6 @@ class V8_EXPORT Set : public Object {
*/
static Local<Set> New(Isolate* isolate);
- /**
- * Creates a new Set containing the items in array.
- * Guaranteed to be side-effect free if the array contains no holes.
- */
- static V8_WARN_UNUSED_RESULT V8_DEPRECATED(
- "Use mutation methods instead",
- MaybeLocal<Set> FromArray(Local<Context> context, Local<Array> array));
-
V8_INLINE static Set* Cast(Value* obj);
private:
@@ -3238,13 +3230,12 @@ class V8_EXPORT Function : public Object {
Local<Function> New(Isolate* isolate, FunctionCallback callback,
Local<Value> data = Local<Value>(), int length = 0));
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Object> NewInstance(int argc, Local<Value> argv[])
- const);
+ V8_DEPRECATED("Use maybe version",
+ Local<Object> NewInstance(int argc, Local<Value> argv[]) const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context, int argc, Local<Value> argv[]) const;
- V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance() const);
+ V8_DEPRECATED("Use maybe version", Local<Object> NewInstance() const);
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
Local<Context> context) const {
return NewInstance(context, 0, nullptr);
@@ -3269,6 +3260,12 @@ class V8_EXPORT Function : public Object {
Local<Value> GetInferredName() const;
/**
+ * displayName if it is set, otherwise name if it is configured, otherwise
+ * function name, otherwise inferred name.
+ */
+ Local<Value> GetDebugName() const;
+
+ /**
* User-defined name assigned to the "displayName" property of this function.
* Used to facilitate debugging and profiling of JavaScript code.
*/
@@ -3357,18 +3354,19 @@ class V8_EXPORT Promise : public Object {
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of turn.
*/
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Promise> Chain(Local<Function> handler));
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(Local<Context> context,
- Local<Function> handler);
+ V8_DEPRECATED("Use maybe version of Then",
+ Local<Promise> Chain(Local<Function> handler));
+ V8_DEPRECATED("Use Then",
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(
+ Local<Context> context, Local<Function> handler));
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Promise> Catch(Local<Function> handler));
+ V8_DEPRECATED("Use maybe version",
+ Local<Promise> Catch(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
Local<Function> handler);
- V8_DEPRECATE_SOON("Use maybe version",
- Local<Promise> Then(Local<Function> handler));
+ V8_DEPRECATED("Use maybe version",
+ Local<Promise> Then(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
Local<Function> handler);
@@ -3386,6 +3384,32 @@ class V8_EXPORT Promise : public Object {
};
+/**
+ * An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
+ * 26.2.1).
+ */
+class V8_EXPORT Proxy : public Object {
+ public:
+ Local<Object> GetTarget();
+ Local<Value> GetHandler();
+ bool IsRevoked();
+ void Revoke();
+
+ /**
+ * Creates a new Proxy for the given target object and handler object.
+ */
+ static MaybeLocal<Proxy> New(Local<Context> context,
+ Local<Object> local_target,
+ Local<Object> local_handler);
+
+ V8_INLINE static Proxy* Cast(Value* obj);
+
+ private:
+ Proxy();
+ static void CheckCast(Value* obj);
+};
+
+
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
@@ -3943,7 +3967,8 @@ class V8_EXPORT NumberObject : public Object {
*/
class V8_EXPORT BooleanObject : public Object {
public:
- static Local<Value> New(bool value);
+ static Local<Value> New(Isolate* isolate, bool value);
+ V8_DEPRECATED("Pass an isolate", static Local<Value> New(bool value));
bool ValueOf() const;
@@ -4422,6 +4447,16 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0);
+ /**
+ * Creates a function template with a fast handler. If a fast handler is set,
+ * the callback cannot be null.
+ */
+ static Local<FunctionTemplate> NewWithFastHandler(
+ Isolate* isolate, FunctionCallback callback,
+ experimental::FastAccessorBuilder* fast_handler = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0);
+
/** Returns the unique function instance in the current execution context.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
@@ -4432,8 +4467,9 @@ class V8_EXPORT FunctionTemplate : public Template {
* callback is called whenever the function created from this
* FunctionTemplate is called.
*/
- void SetCallHandler(FunctionCallback callback,
- Local<Value> data = Local<Value>());
+ void SetCallHandler(
+ FunctionCallback callback, Local<Value> data = Local<Value>(),
+ experimental::FastAccessorBuilder* fast_handler = nullptr);
/** Set the predefined length property for the FunctionTemplate. */
void SetLength(int length);
@@ -4584,7 +4620,7 @@ class V8_EXPORT ObjectTemplate : public Template {
static Local<ObjectTemplate> New(
Isolate* isolate,
Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
- static V8_DEPRECATE_SOON("Use isolate version", Local<ObjectTemplate> New());
+ static V8_DEPRECATED("Use isolate version", Local<ObjectTemplate> New());
/** Creates a new instance of this template.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance());
@@ -4717,7 +4753,7 @@ class V8_EXPORT ObjectTemplate : public Template {
void SetAccessCheckCallback(AccessCheckCallback callback,
Local<Value> data = Local<Value>());
- V8_DEPRECATE_SOON(
+ V8_DEPRECATED(
"Use SetAccessCheckCallback instead",
void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
IndexedSecurityCallback indexed_handler,
@@ -4772,21 +4808,6 @@ class V8_EXPORT AccessorSignature : public Data {
};
-/**
- * A utility for determining the type of objects based on the template
- * they were constructed from.
- */
-class V8_EXPORT TypeSwitch : public Data {
- public:
- static Local<TypeSwitch> New(Local<FunctionTemplate> type);
- static Local<TypeSwitch> New(int argc, Local<FunctionTemplate> types[]);
- int match(Local<Value> value);
-
- private:
- TypeSwitch();
-};
-
-
// --- Extensions ---
class V8_EXPORT ExternalOneByteStringResourceImpl
@@ -4932,7 +4953,9 @@ class V8_EXPORT Exception {
* Will try to reconstruct the original stack trace from the exception value,
* or capture the current stack trace if not available.
*/
- static Local<Message> CreateMessage(Local<Value> exception);
+ static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
+ V8_DEPRECATED("Use version with an Isolate*",
+ static Local<Message> CreateMessage(Local<Value> exception));
/**
* Returns the original stack trace that was captured at the creation time
@@ -4997,8 +5020,10 @@ class PromiseRejectMessage {
V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
V8_INLINE Local<Value> GetValue() const { return value_; }
- // DEPRECATED. Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()
- V8_INLINE Local<StackTrace> GetStackTrace() const { return stack_trace_; }
+ V8_DEPRECATED("Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()",
+ V8_INLINE Local<StackTrace> GetStackTrace() const) {
+ return stack_trace_;
+ }
private:
Local<Promise> promise_;
@@ -5050,12 +5075,6 @@ enum GCCallbackFlags {
kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3
};
-V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCPrologueCallback)(GCType type,
- GCCallbackFlags flags));
-V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCEpilogueCallback)(GCType type,
- GCCallbackFlags flags));
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
typedef void (*InterruptCallback)(Isolate* isolate, void* data);
@@ -5426,6 +5445,15 @@ class V8_EXPORT Isolate {
kSloppyMode = 8,
kStrictMode = 9,
kStrongMode = 10,
+ kRegExpPrototypeStickyGetter = 11,
+ kRegExpPrototypeToString = 12,
+ kRegExpPrototypeUnicodeGetter = 13,
+ kIntlV8Parse = 14,
+ kIntlPattern = 15,
+ kIntlResolved = 16,
+ kPromiseChain = 17,
+ kPromiseAccept = 18,
+ kPromiseDefer = 19,
kUseCounterFeatureCount // This enum value must be last.
};
@@ -5493,6 +5521,15 @@ class V8_EXPORT Isolate {
void Dispose();
/**
+ * Discards all V8 thread-specific data for the Isolate. Should be used
+ * if a thread is terminating and it has used an Isolate that will outlive
+ * the thread -- all thread-specific data for an Isolate is discarded when
+ * an Isolate is disposed so this call is pointless if an Isolate is about
+ * to be Disposed.
+ */
+ void DiscardThreadSpecificMetadata();
+
+ /**
* Associate embedder-specific data with the isolate. |slot| has to be
* between 0 and GetNumberOfDataSlots() - 1.
*/
@@ -5656,14 +5693,6 @@ class V8_EXPORT Isolate {
template<typename T, typename S>
void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
- V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCPrologueCallback)(Isolate* isolate,
- GCType type,
- GCCallbackFlags flags));
- V8_DEPRECATE_SOON("Use GCCallBack instead",
- typedef void (*GCEpilogueCallback)(Isolate* isolate,
- GCType type,
- GCCallbackFlags flags));
typedef void (*GCCallback)(Isolate* isolate, GCType type,
GCCallbackFlags flags);
@@ -5850,8 +5879,8 @@ class V8_EXPORT Isolate {
*/
bool IdleNotificationDeadline(double deadline_in_seconds);
- V8_DEPRECATE_SOON("use IdleNotificationDeadline()",
- bool IdleNotification(int idle_time_in_ms));
+ V8_DEPRECATED("use IdleNotificationDeadline()",
+ bool IdleNotification(int idle_time_in_ms));
/**
* Optional notification that the system is running low on memory.
@@ -6072,7 +6101,7 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetFatalErrorHandler(FatalErrorCallback that));
@@ -6080,7 +6109,7 @@ class V8_EXPORT V8 {
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version", void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that));
@@ -6088,7 +6117,7 @@ class V8_EXPORT V8 {
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
- V8_INLINE static V8_DEPRECATE_SOON("no alternative", bool IsDead());
+ V8_INLINE static V8_DEPRECATED("Use isolate version", bool IsDead());
/**
* Hand startup data to V8, in case the embedder has chosen to build
@@ -6124,7 +6153,7 @@ class V8_EXPORT V8 {
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
bool AddMessageListener(MessageCallback that,
Local<Value> data = Local<Value>()));
@@ -6132,14 +6161,14 @@ class V8_EXPORT V8 {
/**
* Remove all message listeners from the specified callback function.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version", void RemoveMessageListeners(MessageCallback that));
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit = 10,
@@ -6161,7 +6190,7 @@ class V8_EXPORT V8 {
static const char* GetVersion();
/** Callback function for reporting failed access checks.*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
@@ -6175,7 +6204,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use isolate version",
void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
@@ -6184,7 +6213,7 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveGCPrologueCallback(GCCallback callback));
@@ -6198,7 +6227,7 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
- static V8_DEPRECATE_SOON(
+ static V8_DEPRECATED(
"Use isolate version",
void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
@@ -6207,7 +6236,7 @@ class V8_EXPORT V8 {
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveGCEpilogueCallback(GCCallback callback));
@@ -6215,7 +6244,7 @@ class V8_EXPORT V8 {
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
ObjectSpace space,
@@ -6224,7 +6253,7 @@ class V8_EXPORT V8 {
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback));
@@ -6256,8 +6285,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to terminate the current JS execution.
*/
- V8_INLINE static V8_DEPRECATE_SOON("Use isolate version",
- void TerminateExecution(Isolate* isolate));
+ V8_INLINE static V8_DEPRECATED("Use isolate version",
+ void TerminateExecution(Isolate* isolate));
/**
* Is V8 terminating JavaScript execution.
@@ -6269,7 +6298,7 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to check.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
bool IsExecutionTerminating(Isolate* isolate = NULL));
@@ -6289,7 +6318,7 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to resume execution capability.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version", void CancelTerminateExecution(Isolate* isolate));
/**
@@ -6308,15 +6337,15 @@ class V8_EXPORT V8 {
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
- "Use isoalte version",
+ V8_INLINE static V8_DEPRECATED(
+ "Use isolate version",
void VisitExternalResources(ExternalResourceVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor));
@@ -6324,7 +6353,7 @@ class V8_EXPORT V8 {
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(Isolate* isolate,
PersistentHandleVisitor* visitor));
@@ -6336,7 +6365,7 @@ class V8_EXPORT V8 {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
- V8_INLINE static V8_DEPRECATE_SOON(
+ V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesForPartialDependence(Isolate* isolate,
PersistentHandleVisitor* visitor));
@@ -6494,7 +6523,7 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
- V8_DEPRECATE_SOON("Use isolate version", TryCatch());
+ V8_DEPRECATED("Use isolate version", TryCatch());
/**
* Creates a new try/catch block and registers it with v8. Note that
@@ -7180,7 +7209,7 @@ class Internals {
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate,
uint32_t slot,
void* data) {
- uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
+ uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
@@ -8059,6 +8088,14 @@ Promise* Promise::Cast(v8::Value* value) {
}
+Proxy* Proxy::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Proxy*>(value);
+}
+
+
Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -8483,4 +8520,4 @@ void V8::VisitHandlesForPartialDependence(Isolate* isolate,
#undef TYPE_CHECK
-#endif // V8_H_
+#endif // INCLUDE_V8_H_
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index c88e1da15e..d2be68561c 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -163,7 +163,6 @@
//
// V8_HAS_CXX11_ALIGNAS - alignas specifier supported
// V8_HAS_CXX11_ALIGNOF - alignof(type) operator supported
-// V8_HAS_CXX11_STATIC_ASSERT - static_assert() supported
//
// Compiler-specific feature detection
//
@@ -230,7 +229,6 @@
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
-# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
#elif defined(__GNUC__)
@@ -277,7 +275,6 @@
# if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
# define V8_HAS_CXX11_ALIGNAS (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
-# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# endif
#endif
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index c560374353..6d3624992c 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -28,22 +28,49 @@ verifiers {
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
- builders { name: "v8_linux64_rel" }
+ builders { name: "v8_linux64_rel_ng" }
+ builders {
+ name: "v8_linux64_rel_ng_triggered"
+ triggered_by: "v8_linux64_rel_ng"
+ }
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_chromium_gn_rel" }
- builders { name: "v8_linux_dbg" }
+ builders { name: "v8_linux_dbg_ng" }
+ builders {
+ name: "v8_linux_dbg_ng_triggered"
+ triggered_by: "v8_linux_dbg_ng"
+ }
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
- builders { name: "v8_linux_rel" }
+ builders { name: "v8_linux_rel_ng" }
+ builders {
+ name: "v8_linux_rel_ng_triggered"
+ triggered_by: "v8_linux_rel_ng"
+ }
builders { name: "v8_mac_rel" }
builders { name: "v8_presubmit" }
- builders { name: "v8_win64_rel" }
+ builders { name: "v8_win64_rel_ng" }
+ builders {
+ name: "v8_win64_rel_ng_triggered"
+ triggered_by: "v8_win64_rel_ng"
+ }
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
- builders { name: "v8_win_rel" }
+ builders { name: "v8_win_rel_ng" }
+ builders {
+ name: "v8_win_rel_ng_triggered"
+ triggered_by: "v8_win_rel_ng"
+ }
+ }
+ buckets {
+ name: "tryserver.blink"
+ builders {
+ name: "linux_blink_rel"
+ experiment_percentage: 20
+ }
}
}
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
index 0c8e5cc764..7e0608b213 100644
--- a/deps/v8/samples/samples.gyp
+++ b/deps/v8/samples/samples.gyp
@@ -40,10 +40,6 @@
'include_dirs': [
'..',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni
index f41a5ee0e8..11b73c5804 100644
--- a/deps/v8/snapshot_toolchain.gni
+++ b/deps/v8/snapshot_toolchain.gni
@@ -34,10 +34,10 @@
if (host_cpu == "x64" && host_os == "linux") {
if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
- } else if (target_cpu == "x64") {
+ } else if (target_cpu == "x64" || target_cpu == "arm64" || target_cpu == "mips64el") {
snapshot_toolchain = "//build/toolchain/linux:clang_x64"
} else {
- assert(false, "Need environment for this arch")
+ assert(false, "Need environment for this arch: $target_cpu")
}
} else {
snapshot_toolchain = default_toolchain
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 26b0808740..b54cd04563 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -1,7 +1,10 @@
include_rules = [
+ "+base/trace_event/common/trace_event_common.h",
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
+ "+src/compiler/code-stub-assembler.h",
+ "+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
@@ -24,4 +27,7 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
+ "api-experimental\.cc": [
+ "+src/compiler/fast-accessor-assembler.h",
+ ],
}
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index f38fecad4e..94b7fbb32e 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,2 +1,4 @@
per-file i18n.*=cira@chromium.org
per-file i18n.*=mnita@google.com
+per-file typing-asm.*=aseemgarg@chromium.org
+per-file typing-asm.*=bradnelson@chromium.org
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 73270d187c..2094cdb20d 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -161,7 +161,8 @@ void Accessors::ArgumentsIteratorSetter(
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> object_handle = Utils::OpenHandle(*info.This());
+ Handle<JSObject> object_handle =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<Object> value_handle = Utils::OpenHandle(*val);
Handle<Name> name_handle = Utils::OpenHandle(*name);
@@ -205,7 +206,7 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> object = Utils::OpenHandle(*info.This());
+ Handle<JSReceiver> object = Utils::OpenHandle(*info.This());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
@@ -1328,12 +1329,6 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
if (!caller->shared()->native() && potential_caller != NULL) {
caller = potential_caller;
}
- // If caller is bound, return null. This is compatible with JSC, and
- // allows us to make bound functions use the strict function map
- // and its associated throwing caller and arguments.
- if (caller->shared()->bound()) {
- return MaybeHandle<JSFunction>();
- }
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 96e74c5ddb..70dd63e1dd 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -5,7 +5,7 @@
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/zone.h"
diff --git a/deps/v8/src/api-experimental.cc b/deps/v8/src/api-experimental.cc
new file mode 100644
index 0000000000..2b49e9723a
--- /dev/null
+++ b/deps/v8/src/api-experimental.cc
@@ -0,0 +1,126 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * Implementation for v8-experimental.h.
+ */
+
+#include "src/api-experimental.h"
+
+#include "include/v8.h"
+#include "include/v8-experimental.h"
+#include "src/api.h"
+#include "src/compiler/fast-accessor-assembler.h"
+
+namespace {
+
+
+v8::internal::compiler::FastAccessorAssembler* FromApi(
+ v8::experimental::FastAccessorBuilder* builder) {
+ return reinterpret_cast<v8::internal::compiler::FastAccessorAssembler*>(
+ builder);
+}
+
+
+v8::experimental::FastAccessorBuilder* FromInternal(
+ v8::internal::compiler::FastAccessorAssembler* fast_accessor_assembler) {
+ return reinterpret_cast<v8::experimental::FastAccessorBuilder*>(
+ fast_accessor_assembler);
+}
+
+} // namespace
+
+namespace v8 {
+namespace internal {
+namespace experimental {
+
+
+MaybeHandle<Code> BuildCodeFromFastAccessorBuilder(
+ v8::experimental::FastAccessorBuilder* fast_handler) {
+ i::MaybeHandle<i::Code> code;
+ if (fast_handler != nullptr) {
+ auto faa = FromApi(fast_handler);
+ code = faa->Build();
+ CHECK(!code.is_null());
+ delete faa;
+ }
+ return code;
+}
+
+} // namespace experimental
+} // namespace internal
+
+
+namespace experimental {
+
+
+FastAccessorBuilder* FastAccessorBuilder::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal::compiler::FastAccessorAssembler* faa =
+ new internal::compiler::FastAccessorAssembler(i_isolate);
+ return FromInternal(faa);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::IntegerConstant(
+ int const_value) {
+ return FromApi(this)->IntegerConstant(const_value);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::GetReceiver() {
+ return FromApi(this)->GetReceiver();
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
+ ValueId value, int field_no) {
+ return FromApi(this)->LoadInternalField(value, field_no);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
+ int offset) {
+ return FromApi(this)->LoadValue(value_id, offset);
+}
+
+
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
+ int offset) {
+ return FromApi(this)->LoadObject(value_id, offset);
+}
+
+
+void FastAccessorBuilder::ReturnValue(ValueId value) {
+ FromApi(this)->ReturnValue(value);
+}
+
+
+void FastAccessorBuilder::CheckFlagSetOrReturnNull(ValueId value_id, int mask) {
+ FromApi(this)->CheckFlagSetOrReturnNull(value_id, mask);
+}
+
+
+void FastAccessorBuilder::CheckNotZeroOrReturnNull(ValueId value_id) {
+ FromApi(this)->CheckNotZeroOrReturnNull(value_id);
+}
+
+
+FastAccessorBuilder::LabelId FastAccessorBuilder::MakeLabel() {
+ return FromApi(this)->MakeLabel();
+}
+
+
+void FastAccessorBuilder::SetLabel(LabelId label_id) {
+ FromApi(this)->SetLabel(label_id);
+}
+
+
+void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
+ LabelId label_id) {
+ FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
+}
+
+} // namespace experimental
+} // namespace v8
diff --git a/deps/v8/src/api-experimental.h b/deps/v8/src/api-experimental.h
new file mode 100644
index 0000000000..bc0bc55739
--- /dev/null
+++ b/deps/v8/src/api-experimental.h
@@ -0,0 +1,28 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_API_EXPERIMENTAL_H_
+#define V8_API_EXPERIMENTAL_H_
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+class Code;
+} // internal;
+namespace experimental {
+class FastAccessorBuilder;
+} // experimental
+
+namespace internal {
+namespace experimental {
+
+v8::internal::MaybeHandle<v8::internal::Code> BuildCodeFromFastAccessorBuilder(
+ v8::experimental::FastAccessorBuilder* fast_handler);
+
+} // namespace experimental
+} // namespace internal
+} // namespace v8
+
+#endif // V8_API_EXPERIMENTAL_H_
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index d8dd151041..bc71e3ef90 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -438,8 +438,16 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type) {
- Handle<Code> code = isolate->builtins()->HandleApiCall();
- Handle<Code> construct_stub = isolate->builtins()->JSConstructStubApi();
+ Handle<Code> code;
+ if (obj->call_code()->IsCallHandlerInfo() &&
+ CallHandlerInfo::cast(obj->call_code())->fast_handler()->IsCode()) {
+ code = isolate->builtins()->HandleFastApiCall();
+ } else {
+ code = isolate->builtins()->HandleApiCall();
+ }
+ Handle<Code> construct_stub =
+ prototype.is_null() ? isolate->builtins()->ConstructedNonConstructable()
+ : isolate->builtins()->JSConstructStubApi();
obj->set_instantiated(true);
Handle<JSFunction> result;
@@ -540,7 +548,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined()) {
map->set_is_callable();
- map->set_is_constructor(true);
+ map->set_is_constructor();
}
// Recursively copy parent instance templates' accessors,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 5d4c9c0c41..8274a73568 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -9,9 +9,13 @@
#include <sanitizer/asan_interface.h>
#endif // V8_USE_ADDRESS_SANITIZER
#include <cmath> // For isnan.
+#include <limits>
+#include <vector>
#include "include/v8-debug.h"
+#include "include/v8-experimental.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
+#include "src/api-experimental.h"
#include "src/api-natives.h"
#include "src/assert-scope.h"
#include "src/background-parsing-task.h"
@@ -33,9 +37,10 @@
#include "src/global-handles.h"
#include "src/icu_util.h"
#include "src/isolate-inl.h"
-#include "src/json-parser.h"
#include "src/messages.h"
-#include "src/parser.h"
+#include "src/parsing/json-parser.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/heap-profiler.h"
@@ -48,7 +53,6 @@
#include "src/prototype.h"
#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
-#include "src/scanner-character-streams.h"
#include "src/simulator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -882,8 +886,8 @@ int NeanderArray::length() {
i::Object* NeanderArray::get(int offset) {
- DCHECK(0 <= offset);
- DCHECK(offset < length());
+ DCHECK_LE(0, offset);
+ DCHECK_LT(offset, length());
return obj_.get(offset + 1);
}
@@ -932,7 +936,7 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
// TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
Utils::OpenHandle(*value),
- static_cast<PropertyAttributes>(attribute));
+ static_cast<i::PropertyAttributes>(attribute));
}
@@ -953,7 +957,7 @@ void Template::SetAccessorProperty(
i::ApiNatives::AddAccessorProperty(
isolate, templ, Utils::OpenHandle(*name),
Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true),
- static_cast<PropertyAttributes>(attribute));
+ static_cast<i::PropertyAttributes>(attribute));
}
@@ -996,7 +1000,8 @@ void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
static Local<FunctionTemplate> FunctionTemplateNew(
- i::Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
+ i::Isolate* isolate, FunctionCallback callback,
+ experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
v8::Local<Signature> signature, int length, bool do_not_cache) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
@@ -1014,7 +1019,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
- Utils::ToLocal(obj)->SetCallHandler(callback, data);
+ Utils::ToLocal(obj)->SetCallHandler(callback, data, fast_handler);
}
obj->set_length(length);
obj->set_undetectable(false);
@@ -1025,6 +1030,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
return Utils::ToLocal(obj);
}
+
Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
FunctionCallback callback,
v8::Local<Value> data,
@@ -1036,8 +1042,21 @@ Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
DCHECK(!i_isolate->serializer_enabled());
LOG_API(i_isolate, "FunctionTemplate::New");
ENTER_V8(i_isolate);
- return FunctionTemplateNew(
- i_isolate, callback, data, signature, length, false);
+ return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
+ length, false);
+}
+
+
+Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
+ Isolate* isolate, FunctionCallback callback,
+ experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
+ v8::Local<Signature> signature, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ DCHECK(!i_isolate->serializer_enabled());
+ LOG_API(i_isolate, "FunctionTemplate::NewWithFastHandler");
+ ENTER_V8(i_isolate);
+ return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
+ length, false);
}
@@ -1053,49 +1072,15 @@ Local<AccessorSignature> AccessorSignature::New(
}
-Local<TypeSwitch> TypeSwitch::New(Local<FunctionTemplate> type) {
- Local<FunctionTemplate> types[1] = {type};
- return TypeSwitch::New(1, types);
-}
-
-
-Local<TypeSwitch> TypeSwitch::New(int argc, Local<FunctionTemplate> types[]) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "TypeSwitch::New");
- ENTER_V8(isolate);
- i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
- for (int i = 0; i < argc; i++)
- vector->set(i, *Utils::OpenHandle(*types[i]));
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TYPE_SWITCH_INFO_TYPE);
- i::Handle<i::TypeSwitchInfo> obj =
- i::Handle<i::TypeSwitchInfo>::cast(struct_obj);
- obj->set_types(*vector);
- return Utils::ToLocal(obj);
-}
-
-
-int TypeSwitch::match(v8::Local<Value> value) {
- i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
- LOG_API(info->GetIsolate(), "TypeSwitch::match");
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
- i::FixedArray* types = i::FixedArray::cast(info->types());
- for (int i = 0; i < types->length(); i++) {
- if (i::FunctionTemplateInfo::cast(types->get(i))->IsTemplateFor(*obj))
- return i + 1;
- }
- return 0;
-}
-
-
#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
i::Handle<i::Object> foreign = FromCData(obj->GetIsolate(), cdata); \
(obj)->setter(*foreign); \
} while (false)
-void FunctionTemplate::SetCallHandler(FunctionCallback callback,
- v8::Local<Value> data) {
+void FunctionTemplate::SetCallHandler(
+ FunctionCallback callback, v8::Local<Value> data,
+ experimental::FastAccessorBuilder* fast_handler) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
@@ -1106,6 +1091,11 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
+ i::MaybeHandle<i::Code> code =
+ i::experimental::BuildCodeFromFastAccessorBuilder(fast_handler);
+ if (!code.is_null()) {
+ obj->set_fast_handler(*code.ToHandleChecked());
+ }
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -1121,7 +1111,7 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
obj->set_name(*Utils::OpenHandle(*name));
if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
- obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+ obj->set_property_attributes(static_cast<i::PropertyAttributes>(attributes));
if (!signature.IsEmpty()) {
obj->set_expected_receiver_type(*Utils::OpenHandle(*signature));
}
@@ -1343,7 +1333,7 @@ void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
i::HandleScope scope(isolate);
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
intrinsic,
- static_cast<PropertyAttributes>(attribute));
+ static_cast<i::PropertyAttributes>(attribute));
}
@@ -1956,8 +1946,9 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
isolate);
for (size_t i = 0; i < context_extension_count; ++i) {
- i::Handle<i::JSObject> extension =
+ i::Handle<i::JSReceiver> extension =
Utils::OpenHandle(*context_extensions[i]);
+ if (!extension->IsJSObject()) return Local<Function>();
i::Handle<i::JSFunction> closure(context->closure(), isolate);
context = factory->NewWithContext(closure, context, extension);
}
@@ -2129,7 +2120,7 @@ v8::TryCatch::TryCatch()
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- v8::internal::GetCurrentStackPosition()));
+ isolate_, v8::internal::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2146,7 +2137,7 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
// Special handling for simulators which have a separate JS stack.
js_stack_comparable_address_ =
reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
- v8::internal::GetCurrentStackPosition()));
+ isolate_, v8::internal::GetCurrentStackPosition()));
isolate_->RegisterTryCatchHandler(this);
}
@@ -2165,7 +2156,7 @@ v8::TryCatch::~TryCatch() {
isolate_->RestorePendingMessageFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::internal::SimulatorStack::UnregisterCTryCatch();
+ v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
} else {
@@ -2176,7 +2167,7 @@ v8::TryCatch::~TryCatch() {
isolate_->CancelScheduledExceptionFromTryCatch(this);
}
isolate_->UnregisterTryCatchHandler(this);
- v8::internal::SimulatorStack::UnregisterCTryCatch();
+ v8::internal::SimulatorStack::UnregisterCTryCatch(isolate_);
}
}
@@ -2775,9 +2766,7 @@ bool Value::IsSharedArrayBuffer() const {
}
-bool Value::IsObject() const {
- return Utils::OpenHandle(this)->IsJSObject();
-}
+bool Value::IsObject() const { return Utils::OpenHandle(this)->IsJSReceiver(); }
bool Value::IsNumber() const {
@@ -2785,6 +2774,9 @@ bool Value::IsNumber() const {
}
+bool Value::IsProxy() const { return Utils::OpenHandle(this)->IsJSProxy(); }
+
+
#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
bool Value::Is##Type() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
@@ -2890,6 +2882,12 @@ bool Value::IsSetIterator() const {
}
+bool Value::IsPromise() const {
+ auto self = Utils::OpenHandle(this);
+ return i::Object::IsPromise(self);
+}
+
+
MaybeLocal<String> Value::ToString(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
@@ -2908,12 +2906,16 @@ Local<String> Value::ToString(Isolate* isolate) const {
MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
- auto obj = Utils::OpenHandle(this);
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
PREPARE_FOR_EXECUTION(context, "ToDetailString", String);
Local<String> result;
- has_pending_exception =
- !ToLocal<String>(i::Execution::ToDetailString(isolate, obj), &result);
+ i::Handle<i::Object> args[] = {obj};
+ has_pending_exception = !ToLocal<String>(
+ i::Execution::TryCall(isolate, isolate->no_side_effects_to_string_fun(),
+ isolate->factory()->undefined_value(),
+ arraysize(args), args),
+ &result);
RETURN_ON_FAILED_EXECUTION(String);
RETURN_ESCAPED(result);
}
@@ -3041,8 +3043,7 @@ void External::CheckCast(v8::Value* that) {
void v8::Object::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSObject(),
- "v8::Object::Cast()",
+ Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast()",
"Could not convert to object");
}
@@ -3150,6 +3151,12 @@ void v8::Promise::Resolver::CheckCast(Value* that) {
}
+void v8::Proxy::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast()",
+ "Could not convert to proxy");
+}
+
+
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(
@@ -3480,13 +3487,14 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
bool);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
isolate, self, key_obj, i::LookupIterator::OWN);
- Maybe<bool> result = i::JSObject::CreateDataProperty(&it, value_obj);
+ Maybe<bool> result =
+ i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -3498,11 +3506,12 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
bool);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it(isolate, self, index, i::LookupIterator::OWN);
- Maybe<bool> result = i::JSObject::CreateDataProperty(&it, value_obj);
+ Maybe<bool> result =
+ i::JSReceiver::CreateDataProperty(&it, value_obj, i::Object::DONT_THROW);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -3515,13 +3524,14 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::PropertyAttribute attributes) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DefineOwnProperty()",
bool);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
if (self->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), self)) {
- isolate->ReportFailedAccessCheck(self);
+ !isolate->MayAccess(handle(isolate->context()),
+ i::Handle<i::JSObject>::cast(self))) {
+ isolate->ReportFailedAccessCheck(i::Handle<i::JSObject>::cast(self));
return Nothing<bool>();
}
@@ -3530,18 +3540,18 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
desc.set_enumerable(!(attributes & v8::DontEnum));
desc.set_configurable(!(attributes & v8::DontDelete));
desc.set_value(value_obj);
- bool success = i::JSReceiver::DefineOwnProperty(isolate, self, key_obj, &desc,
- i::Object::DONT_THROW);
+ Maybe<bool> success = i::JSReceiver::DefineOwnProperty(
+ isolate, self, key_obj, &desc, i::Object::DONT_THROW);
// Even though we said DONT_THROW, there might be accessors that do throw.
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(success);
+ return success;
}
MUST_USE_RESULT
static i::MaybeHandle<i::Object> DefineObjectProperty(
i::Handle<i::JSObject> js_object, i::Handle<i::Object> key,
- i::Handle<i::Object> value, PropertyAttributes attrs) {
+ i::Handle<i::Object> value, i::PropertyAttributes attrs) {
i::Isolate* isolate = js_object->GetIsolate();
bool success = false;
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
@@ -3555,13 +3565,14 @@ static i::MaybeHandle<i::Object> DefineObjectProperty(
Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value,
v8::PropertyAttribute attribs) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
- auto self = Utils::OpenHandle(this);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::ForceSet()", bool);
+ auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto key_obj = Utils::OpenHandle(*key);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
DefineObjectProperty(self, key_obj, value_obj,
- static_cast<PropertyAttributes>(attribs)).is_null();
+ static_cast<i::PropertyAttributes>(attribs))
+ .is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -3573,12 +3584,14 @@ bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(),
"v8::Object::ForceSet", false, i::HandleScope,
false);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSObject> self =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
has_pending_exception =
DefineObjectProperty(self, key_obj, value_obj,
- static_cast<PropertyAttributes>(attribs)).is_null();
+ static_cast<i::PropertyAttributes>(attribs))
+ .is_null();
EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, false);
return true;
}
@@ -3648,8 +3661,8 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
auto result = i::JSReceiver::GetPropertyAttributes(self, key_name);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
- if (result.FromJust() == ABSENT) {
- return Just(static_cast<PropertyAttribute>(NONE));
+ if (result.FromJust() == i::ABSENT) {
+ return Just(static_cast<PropertyAttribute>(i::NONE));
}
return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
@@ -3658,7 +3671,7 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return GetPropertyAttributes(context, key)
- .FromMaybe(static_cast<PropertyAttribute>(NONE));
+ .FromMaybe(static_cast<PropertyAttribute>(i::NONE));
}
@@ -3666,17 +3679,18 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
Local<String> key) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyDescriptor()",
Value);
- auto obj = Utils::OpenHandle(this);
- auto key_name = Utils::OpenHandle(*key);
- i::Handle<i::Object> args[] = { obj, key_name };
- i::Handle<i::JSFunction> fun = isolate->object_get_own_property_descriptor();
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
+ i::Handle<i::String> key_name = Utils::OpenHandle(*key);
+
+ i::PropertyDescriptor desc;
+ Maybe<bool> found =
+ i::JSReceiver::GetOwnPropertyDescriptor(isolate, obj, key_name, &desc);
+ has_pending_exception = found.IsNothing();
RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(Utils::ToLocal(result));
+ if (!found.FromJust()) {
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+ }
+ RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate)));
}
@@ -3702,8 +3716,8 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
// We do not allow exceptions thrown while setting the prototype
// to propagate outside.
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- auto result = i::JSObject::SetPrototype(self, value_obj, false,
- i::Object::THROW_ON_ERROR);
+ auto result = i::JSReceiver::SetPrototype(self, value_obj, false,
+ i::Object::THROW_ON_ERROR);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
@@ -3737,8 +3751,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetPropertyNames()", Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
- has_pending_exception = !i::JSReceiver::GetKeys(
- self, i::JSReceiver::INCLUDE_PROTOS).ToHandle(&value);
+ has_pending_exception =
+ !i::JSReceiver::GetKeys(self, i::JSReceiver::INCLUDE_PROTOS,
+ i::ENUMERABLE_STRINGS)
+ .ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
@@ -3759,8 +3775,9 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
- has_pending_exception = !i::JSReceiver::GetKeys(
- self, i::JSReceiver::OWN_ONLY).ToHandle(&value);
+ has_pending_exception = !i::JSReceiver::GetKeys(self, i::JSReceiver::OWN_ONLY,
+ i::ENUMERABLE_STRINGS)
+ .ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
@@ -3778,63 +3795,13 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
- auto self = Utils::OpenHandle(this);
- auto isolate = self->GetIsolate();
- auto v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
- i::Handle<i::Object> name(self->class_name(), isolate);
- i::Handle<i::Object> tag;
-
- // Native implementation of Object.prototype.toString (v8natives.js):
- // var c = %_ClassOf(this);
- // if (c === 'Arguments') c = 'Object';
- // return "[object " + c + "]";
-
- if (!name->IsString()) {
- return v8::String::NewFromUtf8(v8_isolate, "[object ]",
- NewStringType::kNormal);
- }
- auto class_name = i::Handle<i::String>::cast(name);
- if (i::String::Equals(class_name, isolate->factory()->Arguments_string())) {
- return v8::String::NewFromUtf8(v8_isolate, "[object Object]",
- NewStringType::kNormal);
- }
- if (internal::FLAG_harmony_tostring) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString()", String);
- auto toStringTag = isolate->factory()->to_string_tag_symbol();
- has_pending_exception = !i::Runtime::GetObjectProperty(
- isolate, self, toStringTag).ToHandle(&tag);
- RETURN_ON_FAILED_EXECUTION(String);
- if (tag->IsString()) {
- class_name = Utils::OpenHandle(*handle_scope.Escape(
- Utils::ToLocal(i::Handle<i::String>::cast(tag))));
- }
- }
- const char* prefix = "[object ";
- Local<String> str = Utils::ToLocal(class_name);
- const char* postfix = "]";
-
- int prefix_len = i::StrLength(prefix);
- int str_len = str->Utf8Length();
- int postfix_len = i::StrLength(postfix);
-
- int buf_len = prefix_len + str_len + postfix_len;
- i::ScopedVector<char> buf(buf_len);
-
- // Write prefix.
- char* ptr = buf.start();
- i::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
- ptr += prefix_len;
-
- // Write real content.
- str->WriteUtf8(ptr, str_len);
- ptr += str_len;
-
- // Write postfix.
- i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
-
- // Copy the buffer into a heap-allocated string and return it.
- return v8::String::NewFromUtf8(v8_isolate, buf.start(),
- NewStringType::kNormal, buf_len);
+ PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString", String);
+ auto obj = Utils::OpenHandle(this);
+ Local<String> result;
+ has_pending_exception =
+ !ToLocal<String>(i::JSObject::ObjectProtoToString(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(result);
}
@@ -3846,7 +3813,7 @@ Local<String> v8::Object::ObjectProtoToString() {
Local<String> v8::Object::GetConstructorName() {
auto self = Utils::OpenHandle(this);
- i::Handle<i::String> name(self->constructor_name());
+ i::Handle<i::String> name = i::JSReceiver::GetConstructorName(self);
return Utils::ToLocal(name);
}
@@ -3855,12 +3822,11 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> obj;
- has_pending_exception =
- !i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY)
- .ToHandle(&obj);
+ Maybe<bool> result =
+ i::Runtime::DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(obj->IsTrue());
+ return result;
}
@@ -3913,11 +3879,10 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
bool);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> obj;
- has_pending_exception =
- !i::JSReceiver::DeleteElement(self, index).ToHandle(&obj);
+ Maybe<bool> result = i::JSReceiver::DeleteElement(self, index);
+ has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(obj->IsTrue());
+ return result;
}
@@ -3944,24 +3909,27 @@ bool v8::Object::Has(uint32_t index) {
template <typename Getter, typename Setter, typename Data>
-static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* obj,
+static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetAccessor()", bool);
+ if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false);
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
v8::Local<AccessorSignature> signature;
auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
signature);
if (info.is_null()) return Nothing<bool>();
- bool fast = Utils::OpenHandle(obj)->HasFastProperties();
+ bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
has_pending_exception =
- !i::JSObject::SetAccessor(Utils::OpenHandle(obj), info).ToHandle(&result);
+ !i::JSObject::SetAccessor(obj, info).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
if (result->IsUndefined()) return Nothing<bool>();
if (fast) {
- i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
+ i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
}
return Just(true);
}
@@ -4005,14 +3973,14 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return;
i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
i::Handle<i::Object> setter_i = v8::Utils::OpenHandle(*setter, true);
if (setter_i.is_null()) setter_i = isolate->factory()->null_value();
- i::JSObject::DefineAccessor(v8::Utils::OpenHandle(this),
- v8::Utils::OpenHandle(*name),
- getter_i,
- setter_i,
- static_cast<PropertyAttributes>(attribute));
+ i::JSObject::DefineAccessor(i::Handle<i::JSObject>::cast(self),
+ v8::Utils::OpenHandle(*name), getter_i, setter_i,
+ static_cast<i::PropertyAttributes>(attribute));
}
@@ -4040,8 +4008,10 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasRealNamedProperty()",
bool);
auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
- auto result = i::JSObject::HasRealNamedProperty(self, key_val);
+ auto result = i::JSObject::HasRealNamedProperty(
+ i::Handle<i::JSObject>::cast(self), key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4059,7 +4029,9 @@ Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(context,
"v8::Object::HasRealIndexedProperty()", bool);
auto self = Utils::OpenHandle(this);
- auto result = i::JSObject::HasRealElementProperty(self, index);
+ if (!self->IsJSObject()) return Just(false);
+ auto result = i::JSObject::HasRealElementProperty(
+ i::Handle<i::JSObject>::cast(self), index);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4077,8 +4049,10 @@ Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(
context, "v8::Object::HasRealNamedCallbackProperty()", bool);
auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
- auto result = i::JSObject::HasRealNamedCallbackProperty(self, key_val);
+ auto result = i::JSObject::HasRealNamedCallbackProperty(
+ i::Handle<i::JSObject>::cast(self), key_val);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@@ -4093,13 +4067,15 @@ bool v8::Object::HasRealNamedCallbackProperty(Local<String> key) {
bool v8::Object::HasNamedLookupInterceptor() {
auto self = Utils::OpenHandle(this);
- return self->HasNamedInterceptor();
+ return self->IsJSObject() &&
+ i::Handle<i::JSObject>::cast(self)->HasNamedInterceptor();
}
bool v8::Object::HasIndexedLookupInterceptor() {
auto self = Utils::OpenHandle(this);
- return self->HasIndexedInterceptor();
+ return self->IsJSObject() &&
+ i::Handle<i::JSObject>::cast(self)->HasIndexedInterceptor();
}
@@ -4107,7 +4083,8 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Local<Context> context, Local<Name> key) {
PREPARE_FOR_EXECUTION(
context, "v8::Object::GetRealNamedPropertyInPrototypeChain()", Value);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return MaybeLocal<Value>();
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return MaybeLocal<Value>();
@@ -4138,7 +4115,8 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
PREPARE_FOR_EXECUTION_PRIMITIVE(
context, "v8::Object::GetRealNamedPropertyAttributesInPrototypeChain()",
PropertyAttribute);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return Nothing<PropertyAttribute>();
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::PrototypeIterator iter(isolate, self);
if (iter.IsAtEnd()) return Nothing<PropertyAttribute>();
@@ -4147,10 +4125,11 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
isolate, self, key_obj, proto,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> result = i::JSReceiver::GetPropertyAttributes(&it);
+ Maybe<i::PropertyAttributes> result =
+ i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
- if (result.FromJust() == ABSENT) return Just(None);
+ if (result.FromJust() == i::ABSENT) return Just(None);
return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
@@ -4197,8 +4176,8 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
auto result = i::JSReceiver::GetPropertyAttributes(&it);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
if (!it.IsFound()) return Nothing<PropertyAttribute>();
- if (result.FromJust() == ABSENT) {
- return Just(static_cast<PropertyAttribute>(NONE));
+ if (result.FromJust() == i::ABSENT) {
+ return Just(static_cast<PropertyAttribute>(i::NONE));
}
return Just<PropertyAttribute>(
static_cast<PropertyAttribute>(result.FromJust()));
@@ -4213,7 +4192,7 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<v8::Object> v8::Object::Clone() {
- auto self = Utils::OpenHandle(this);
+ auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
ENTER_V8(isolate);
auto result = isolate->factory()->CopyJSObject(self);
@@ -4242,17 +4221,19 @@ bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return false;
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
if (value.IsEmpty()) {
- i::JSObject::DeleteHiddenProperty(self, key_string);
+ i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
+ key_string);
return true;
}
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_string, value_obj);
+ i::Handle<i::Object> result = i::JSObject::SetHiddenProperty(
+ i::Handle<i::JSObject>::cast(self), key_string, value_obj);
return *result == *self;
}
@@ -4260,11 +4241,14 @@ bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Local<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return v8::Local<v8::Value>();
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(key_string), isolate);
+ i::Handle<i::Object> result(
+ i::Handle<i::JSObject>::cast(self)->GetHiddenProperty(key_string),
+ isolate);
if (result->IsTheHole()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -4274,11 +4258,13 @@ bool v8::Object::DeleteHiddenValue(v8::Local<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return false;
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
- i::JSObject::DeleteHiddenProperty(self, key_string);
+ i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
+ key_string);
return true;
}
@@ -4346,8 +4332,9 @@ MaybeLocal<Function> Function::New(Local<Context> context,
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
LOG_API(isolate, "Function::New");
ENTER_V8(isolate);
- return FunctionTemplateNew(isolate, callback, data, Local<Signature>(),
- length, true)->GetFunction(context);
+ return FunctionTemplateNew(isolate, callback, nullptr, data,
+ Local<Signature>(), length, true)
+ ->GetFunction(context);
}
@@ -4421,25 +4408,40 @@ void Function::SetName(v8::Local<v8::String> name) {
Local<Value> Function::GetName() const {
auto self = Utils::OpenHandle(this);
+ if (self->IsJSBoundFunction()) {
+ auto func = i::Handle<i::JSBoundFunction>::cast(self);
+ return Utils::ToLocal(handle(func->name(), func->GetIsolate()));
+ }
+ if (self->IsJSFunction()) {
+ auto func = i::Handle<i::JSFunction>::cast(self);
+ return Utils::ToLocal(handle(func->shared()->name(), func->GetIsolate()));
+ }
+ return ToApiHandle<Primitive>(
+ self->GetIsolate()->factory()->undefined_value());
+}
+
+
+Local<Value> Function::GetInferredName() const {
+ auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return ToApiHandle<Primitive>(
self->GetIsolate()->factory()->undefined_value());
}
auto func = i::Handle<i::JSFunction>::cast(self);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
func->GetIsolate()));
}
-Local<Value> Function::GetInferredName() const {
+Local<Value> Function::GetDebugName() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return ToApiHandle<Primitive>(
self->GetIsolate()->factory()->undefined_value());
}
auto func = i::Handle<i::JSFunction>::cast(self);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
- func->GetIsolate()));
+ i::Handle<i::String> name = i::JSFunction::GetDebugName(func);
+ return Utils::ToLocal(i::Handle<i::Object>(*name, name->GetIsolate()));
}
@@ -4534,18 +4536,13 @@ int Function::ScriptId() const {
Local<v8::Value> Function::GetBoundFunction() const {
auto self = Utils::OpenHandle(this);
- if (!self->IsJSFunction()) {
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
+ if (self->IsJSBoundFunction()) {
+ auto bound_function = i::Handle<i::JSBoundFunction>::cast(self);
+ auto bound_target_function = i::handle(
+ bound_function->bound_target_function(), bound_function->GetIsolate());
+ return Utils::CallableToLocal(bound_target_function);
}
- auto func = i::Handle<i::JSFunction>::cast(self);
- if (!func->shared()->bound()) {
- return v8::Undefined(reinterpret_cast<v8::Isolate*>(func->GetIsolate()));
- }
- i::Handle<i::BindingsArray> bound_args = i::Handle<i::BindingsArray>(
- i::BindingsArray::cast(func->function_bindings()));
- i::Handle<i::Object> original(bound_args->bound_function(),
- func->GetIsolate());
- return Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(original));
+ return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
}
@@ -4805,7 +4802,7 @@ class Utf8LengthHelper : public i::AllStatic {
}
static int Calculate(i::ConsString* current, uint8_t* state_out) {
- using namespace internal;
+ using internal::ConsString;
int total_length = 0;
uint8_t state = kInitialState;
while (true) {
@@ -4905,26 +4902,22 @@ class Utf8WriterVisitor {
int remaining,
char* const buffer,
bool replace_invalid_utf8) {
- using namespace unibrow;
- DCHECK(remaining > 0);
+ DCHECK_GT(remaining, 0);
// We can't use a local buffer here because Encode needs to modify
// previous characters in the stream. We know, however, that
// exactly one character will be advanced.
- if (Utf16::IsSurrogatePair(last_character, character)) {
- int written = Utf8::Encode(buffer,
- character,
- last_character,
- replace_invalid_utf8);
- DCHECK(written == 1);
+ if (unibrow::Utf16::IsSurrogatePair(last_character, character)) {
+ int written = unibrow::Utf8::Encode(buffer, character, last_character,
+ replace_invalid_utf8);
+ DCHECK_EQ(written, 1);
return written;
}
// Use a scratch buffer to check the required characters.
- char temp_buffer[Utf8::kMaxEncodedSize];
+ char temp_buffer[unibrow::Utf8::kMaxEncodedSize];
// Can't encode using last_character as gcc has array bounds issues.
- int written = Utf8::Encode(temp_buffer,
- character,
- Utf16::kNoPreviousCharacter,
- replace_invalid_utf8);
+ int written = unibrow::Utf8::Encode(temp_buffer, character,
+ unibrow::Utf16::kNoPreviousCharacter,
+ replace_invalid_utf8);
// Won't fit.
if (written > remaining) return 0;
// Copy over the character from temp_buffer.
@@ -4946,13 +4939,13 @@ class Utf8WriterVisitor {
// unit, or all units have been written out.
template<typename Char>
void Visit(const Char* chars, const int length) {
- using namespace unibrow;
DCHECK(!early_termination_);
if (length == 0) return;
// Copy state to stack.
char* buffer = buffer_;
- int last_character =
- sizeof(Char) == 1 ? Utf16::kNoPreviousCharacter : last_character_;
+ int last_character = sizeof(Char) == 1
+ ? unibrow::Utf16::kNoPreviousCharacter
+ : last_character_;
int i = 0;
// Do a fast loop where there is no exit capacity check.
while (true) {
@@ -4962,7 +4955,8 @@ class Utf8WriterVisitor {
} else {
int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
// Need enough space to write everything but one character.
- STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
+ STATIC_ASSERT(unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ==
+ 3);
int max_size_per_char = sizeof(Char) == 1 ? 2 : 3;
int writable_length =
(remaining_capacity - max_size_per_char)/max_size_per_char;
@@ -4974,17 +4968,15 @@ class Utf8WriterVisitor {
// Write the characters to the stream.
if (sizeof(Char) == 1) {
for (; i < fast_length; i++) {
- buffer +=
- Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++));
+ buffer += unibrow::Utf8::EncodeOneByte(
+ buffer, static_cast<uint8_t>(*chars++));
DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
}
} else {
for (; i < fast_length; i++) {
uint16_t character = *chars++;
- buffer += Utf8::Encode(buffer,
- character,
- last_character,
- replace_invalid_utf8_);
+ buffer += unibrow::Utf8::Encode(buffer, character, last_character,
+ replace_invalid_utf8_);
last_character = character;
DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
}
@@ -5001,12 +4993,12 @@ class Utf8WriterVisitor {
DCHECK(!skip_capacity_check_);
// Slow loop. Must check capacity on each iteration.
int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
- DCHECK(remaining_capacity >= 0);
+ DCHECK_GE(remaining_capacity, 0);
for (; i < length && remaining_capacity > 0; i++) {
uint16_t character = *chars++;
// remaining_capacity is <= 3 bytes at this point, so we do not write out
// an umatched lead surrogate.
- if (replace_invalid_utf8_ && Utf16::IsLeadSurrogate(character)) {
+ if (replace_invalid_utf8_ && unibrow::Utf16::IsLeadSurrogate(character)) {
early_termination_ = true;
break;
}
@@ -5302,51 +5294,56 @@ uint32_t Uint32::Value() const {
int v8::Object::InternalFieldCount() {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- return obj->GetInternalFieldCount();
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return 0;
+ return i::Handle<i::JSObject>::cast(self)->GetInternalFieldCount();
}
-static bool InternalFieldOK(i::Handle<i::JSObject> obj,
- int index,
+static bool InternalFieldOK(i::Handle<i::JSReceiver> obj, int index,
const char* location) {
- return Utils::ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
+ return Utils::ApiCheck(
+ obj->IsJSObject() &&
+ (index < i::Handle<i::JSObject>::cast(obj)->GetInternalFieldCount()),
+ location, "Internal field out of bounds");
}
Local<Value> v8::Object::SlowGetInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetInternalField()";
if (!InternalFieldOK(obj, index, location)) return Local<Value>();
- i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
+ i::Handle<i::Object> value(
+ i::Handle<i::JSObject>::cast(obj)->GetInternalField(index),
+ obj->GetIsolate());
return Utils::ToLocal(value);
}
void v8::Object::SetInternalField(int index, v8::Local<Value> value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
- obj->SetInternalField(index, *val);
+ i::Handle<i::JSObject>::cast(obj)->SetInternalField(index, *val);
}
void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
if (!InternalFieldOK(obj, index, location)) return NULL;
- return DecodeSmiToAligned(obj->GetInternalField(index), location);
+ return DecodeSmiToAligned(
+ i::Handle<i::JSObject>::cast(obj)->GetInternalField(index), location);
}
void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
- obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
+ i::Handle<i::JSObject>::cast(obj)
+ ->SetInternalField(index, EncodeAlignedAsSmi(value, location));
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
@@ -5967,20 +5964,24 @@ double v8::NumberObject::ValueOf() const {
}
-Local<v8::Value> v8::BooleanObject::New(bool value) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "BooleanObject::New");
- ENTER_V8(isolate);
- i::Handle<i::Object> boolean(value
- ? isolate->heap()->true_value()
- : isolate->heap()->false_value(),
- isolate);
+Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, "BooleanObject::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
+ : i_isolate->heap()->false_value(),
+ i_isolate);
i::Handle<i::Object> obj =
- i::Object::ToObject(isolate, boolean).ToHandleChecked();
+ i::Object::ToObject(i_isolate, boolean).ToHandleChecked();
return Utils::ToLocal(obj);
}
+Local<v8::Value> v8::BooleanObject::New(bool value) {
+ return New(Isolate::GetCurrent(), value);
+}
+
+
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6038,8 +6039,9 @@ MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
}
PREPARE_FOR_EXECUTION(context, "Date::New", Value);
Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(i::Execution::NewDate(isolate, time), &result);
+ has_pending_exception = !ToLocal<Value>(
+ i::JSDate::New(isolate->date_function(), isolate->date_function(), time),
+ &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -6080,26 +6082,13 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
}
-static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
- i::Isolate* isolate = i::Isolate::Current();
- uint8_t flags_buf[3];
- int num_flags = 0;
- if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
- if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
- if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
- DCHECK(num_flags <= static_cast<int>(arraysize(flags_buf)));
- return isolate->factory()->InternalizeOneByteString(
- i::Vector<const uint8_t>(flags_buf, num_flags));
-}
-
-
MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
Local<String> pattern, Flags flags) {
PREPARE_FOR_EXECUTION(context, "RegExp::New", RegExp);
Local<v8::RegExp> result;
has_pending_exception =
- !ToLocal<RegExp>(i::Execution::NewJSRegExp(Utils::OpenHandle(*pattern),
- RegExpFlagsToString(flags)),
+ !ToLocal<RegExp>(i::JSRegExp::New(Utils::OpenHandle(*pattern),
+ static_cast<i::JSRegExp::Flags>(flags)),
&result);
RETURN_ON_FAILED_EXECUTION(RegExp);
RETURN_ESCAPED(result);
@@ -6121,20 +6110,20 @@ Local<v8::String> v8::RegExp::GetSource() const {
// Assert that the static flags cast in GetFlags is valid.
-#define REGEXP_FLAG_ASSERT_EQ(api_flag, internal_flag) \
- STATIC_ASSERT(static_cast<int>(v8::RegExp::api_flag) == \
- static_cast<int>(i::JSRegExp::internal_flag))
-REGEXP_FLAG_ASSERT_EQ(kNone, NONE);
-REGEXP_FLAG_ASSERT_EQ(kGlobal, GLOBAL);
-REGEXP_FLAG_ASSERT_EQ(kIgnoreCase, IGNORE_CASE);
-REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
-REGEXP_FLAG_ASSERT_EQ(kSticky, STICKY);
-REGEXP_FLAG_ASSERT_EQ(kUnicode, UNICODE_ESCAPES);
+#define REGEXP_FLAG_ASSERT_EQ(flag) \
+ STATIC_ASSERT(static_cast<int>(v8::RegExp::flag) == \
+ static_cast<int>(i::JSRegExp::flag))
+REGEXP_FLAG_ASSERT_EQ(kNone);
+REGEXP_FLAG_ASSERT_EQ(kGlobal);
+REGEXP_FLAG_ASSERT_EQ(kIgnoreCase);
+REGEXP_FLAG_ASSERT_EQ(kMultiline);
+REGEXP_FLAG_ASSERT_EQ(kSticky);
+REGEXP_FLAG_ASSERT_EQ(kUnicode);
#undef REGEXP_FLAG_ASSERT_EQ
v8::RegExp::Flags v8::RegExp::GetFlags() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
- return static_cast<RegExp::Flags>(obj->GetFlags().value());
+ return RegExp::Flags(static_cast<int>(obj->GetFlags()));
}
@@ -6180,10 +6169,7 @@ MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
}
-Local<Object> Array::CloneElementAt(uint32_t index) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(CloneElementAt(context, index), Object);
-}
+Local<Object> Array::CloneElementAt(uint32_t index) { return Local<Object>(); }
Local<v8::Map> v8::Map::New(Isolate* isolate) {
@@ -6286,23 +6272,6 @@ Local<Array> Map::AsArray() const {
}
-MaybeLocal<Map> Map::FromArray(Local<Context> context, Local<Array> array) {
- PREPARE_FOR_EXECUTION(context, "Map::FromArray", Map);
- if (array->Length() % 2 != 0) {
- return MaybeLocal<Map>();
- }
- i::Handle<i::Object> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*array)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->map_from_array(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(Map);
- RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
-}
-
-
Local<v8::Set> v8::Set::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "Set::New");
@@ -6387,26 +6356,6 @@ Local<Array> Set::AsArray() const {
}
-MaybeLocal<Set> Set::FromArray(Local<Context> context, Local<Array> array) {
- PREPARE_FOR_EXECUTION(context, "Set::FromArray", Set);
- i::Handle<i::Object> result;
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*array)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->set_from_array(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(Set);
- RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
-}
-
-
-bool Value::IsPromise() const {
- auto self = Utils::OpenHandle(this);
- return i::Object::IsPromise(self);
-}
-
-
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "Promise::Resolver::New", Resolver);
i::Handle<i::Object> result;
@@ -6426,7 +6375,7 @@ Local<Promise::Resolver> Promise::Resolver::New(Isolate* isolate) {
Local<Promise> Promise::Resolver::GetPromise() {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
return Local<Promise>::Cast(Utils::ToLocal(promise));
}
@@ -6473,10 +6422,12 @@ void Promise::Resolver::Reject(Local<Value> value) {
}
-MaybeLocal<Promise> Promise::Chain(Local<Context> context,
- Local<Function> handler) {
+namespace {
+
+MaybeLocal<Promise> DoChain(Value* value, Local<Context> context,
+ Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, "Promise::Chain", Promise);
- auto self = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(value);
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
i::Handle<i::Object> result;
has_pending_exception = !i::Execution::Call(isolate, isolate->promise_chain(),
@@ -6486,10 +6437,18 @@ MaybeLocal<Promise> Promise::Chain(Local<Context> context,
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
+} // namespace
+
+
+MaybeLocal<Promise> Promise::Chain(Local<Context> context,
+ Local<Function> handler) {
+ return DoChain(this, context, handler);
+}
+
Local<Promise> Promise::Chain(Local<Function> handler) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(Chain(context, handler), Promise);
+ RETURN_TO_LOCAL_UNCHECKED(DoChain(this, context, handler), Promise);
}
@@ -6534,7 +6493,7 @@ Local<Promise> Promise::Then(Local<Function> handler) {
bool Promise::HasHandler() {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
LOG_API(isolate, "Promise::HasRejectHandler");
ENTER_V8(isolate);
@@ -6543,6 +6502,44 @@ bool Promise::HasHandler() {
}
+Local<Object> Proxy::GetTarget() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ i::Handle<i::JSReceiver> target(self->target());
+ return Utils::ToLocal(target);
+}
+
+
+Local<Value> Proxy::GetHandler() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> handler(self->handler(), self->GetIsolate());
+ return Utils::ToLocal(handler);
+}
+
+
+bool Proxy::IsRevoked() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ return self->IsRevoked();
+}
+
+
+void Proxy::Revoke() {
+ i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
+ i::JSProxy::Revoke(self);
+}
+
+
+MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
+ Local<Object> local_handler) {
+ PREPARE_FOR_EXECUTION(context, "Proxy::New", Proxy);
+ i::Handle<i::JSReceiver> target = Utils::OpenHandle(*local_target);
+ i::Handle<i::JSReceiver> handler = Utils::OpenHandle(*local_handler);
+ Local<Proxy> result;
+ has_pending_exception =
+ !ToLocal<Proxy>(i::JSProxy::New(isolate, target, handler), &result);
+ RETURN_ON_FAILED_EXECUTION(Proxy);
+ RETURN_ESCAPED(result);
+}
+
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
@@ -7207,6 +7204,12 @@ void Isolate::Dispose() {
}
+void Isolate::DiscardThreadSpecificMetadata() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->DiscardPerThreadDataForThisThread();
+}
+
+
void Isolate::Enter() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->Enter();
@@ -7709,14 +7712,22 @@ DEFINE_ERROR(Error, error)
#undef DEFINE_ERROR
+Local<Message> Exception::CreateMessage(Isolate* isolate,
+ Local<Value> exception) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::HandleScope scope(i_isolate);
+ return Utils::MessageToLocal(
+ scope.CloseAndEscape(i_isolate->CreateMessage(obj, NULL)));
+}
+
+
Local<Message> Exception::CreateMessage(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsHeapObject()) return Local<Message>();
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- return Utils::MessageToLocal(
- scope.CloseAndEscape(isolate->CreateMessage(obj, NULL)));
+ return CreateMessage(reinterpret_cast<Isolate*>(isolate), exception);
}
@@ -7732,20 +7743,26 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
// --- D e b u g S u p p o r t ---
-bool Debug::SetDebugEventListener(EventCallback that, Local<Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
+bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
+ Local<Value> data) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::HandleScope scope(i_isolate);
+ i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
if (that != NULL) {
- foreign = isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+ foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
- isolate->debug()->SetEventListener(foreign,
- Utils::OpenHandle(*data, true));
+ i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
return true;
}
+bool Debug::SetDebugEventListener(EventCallback that, Local<Value> data) {
+ return SetDebugEventListener(
+ reinterpret_cast<Isolate*>(i::Isolate::Current()), that, data);
+}
+
+
void Debug::DebugBreak(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
}
@@ -7763,10 +7780,16 @@ bool Debug::CheckDebugBreak(Isolate* isolate) {
}
+void Debug::SetMessageHandler(Isolate* isolate,
+ v8::Debug::MessageHandler handler) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i_isolate->debug()->SetMessageHandler(handler);
+}
+
+
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- isolate->debug()->SetMessageHandler(handler);
+ SetMessageHandler(reinterpret_cast<Isolate*>(i::Isolate::Current()), handler);
}
@@ -7819,8 +7842,9 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
const int kArgc = 1;
v8::Local<v8::Value> argv[kArgc] = {obj};
Local<Value> result;
- has_pending_exception = !v8_fun->Call(context, Utils::ToLocal(debug), kArgc,
- argv).ToLocal(&result);
+ has_pending_exception =
+ !v8_fun->Call(context, Utils::ToLocal(debug), kArgc, argv)
+ .ToLocal(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -7831,15 +7855,25 @@ Local<Value> Debug::GetMirror(v8::Local<v8::Value> obj) {
}
+void Debug::ProcessDebugMessages(Isolate* isolate) {
+ reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
+}
+
+
void Debug::ProcessDebugMessages() {
- i::Isolate::Current()->debug()->ProcessDebugMessages(true);
+ ProcessDebugMessages(reinterpret_cast<Isolate*>(i::Isolate::Current()));
+}
+
+
+Local<Context> Debug::GetDebugContext(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
}
Local<Context> Debug::GetDebugContext() {
- i::Isolate* isolate = i::Isolate::Current();
- ENTER_V8(isolate);
- return Utils::ToLocal(isolate->debug()->GetDebugContext());
+ return GetDebugContext(reinterpret_cast<Isolate*>(i::Isolate::Current()));
}
@@ -7862,8 +7896,8 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
Local<String> CpuProfileNode::GetFunctionName() const {
- i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ i::Isolate* isolate = node->isolate();
const i::CodeEntry* entry = node->entry();
i::Handle<i::String> name =
isolate->factory()->InternalizeUtf8String(entry->name());
@@ -7887,8 +7921,8 @@ int CpuProfileNode::GetScriptId() const {
Local<String> CpuProfileNode::GetScriptResourceName() const {
- i::Isolate* isolate = i::Isolate::Current();
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+ i::Isolate* isolate = node->isolate();
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
node->entry()->resource_name()));
}
@@ -7958,16 +7992,17 @@ const std::vector<CpuProfileDeoptInfo>& CpuProfileNode::GetDeoptInfos() const {
void CpuProfile::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::CpuProfile* profile = reinterpret_cast<i::CpuProfile*>(this);
+ i::Isolate* isolate = profile->top_down()->isolate();
i::CpuProfiler* profiler = isolate->cpu_profiler();
DCHECK(profiler != NULL);
- profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
+ profiler->DeleteProfile(profile);
}
Local<String> CpuProfile::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
+ i::Isolate* isolate = profile->top_down()->isolate();
return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
profile->title()));
}
@@ -8010,7 +8045,7 @@ int CpuProfile::GetSamplesCount() const {
void CpuProfiler::SetSamplingInterval(int us) {
- DCHECK(us >= 0);
+ DCHECK_GE(us, 0);
return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
base::TimeDelta::FromMicroseconds(us));
}
@@ -8054,8 +8089,8 @@ HeapGraphEdge::Type HeapGraphEdge::GetType() const {
Local<Value> HeapGraphEdge::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
i::HeapGraphEdge* edge = ToInternal(this);
+ i::Isolate* isolate = edge->isolate();
switch (edge->type()) {
case i::HeapGraphEdge::kContextVariable:
case i::HeapGraphEdge::kInternal:
@@ -8098,7 +8133,7 @@ HeapGraphNode::Type HeapGraphNode::GetType() const {
Local<String> HeapGraphNode::GetName() const {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = ToInternal(this)->isolate();
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
@@ -8132,7 +8167,7 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
void HeapSnapshot::Delete() {
- i::Isolate* isolate = i::Isolate::Current();
+ i::Isolate* isolate = ToInternal(this)->profiler()->isolate();
if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
ToInternal(this)->Delete();
} else {
@@ -8328,11 +8363,10 @@ void Testing::PrepareStressRun(int run) {
}
-// TODO(svenpanne) Deprecate this.
-void Testing::DeoptimizeAll() {
- i::Isolate* isolate = i::Isolate::Current();
- i::HandleScope scope(isolate);
- internal::Deoptimizer::DeoptimizeAll(isolate);
+void Testing::DeoptimizeAll(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::HandleScope scope(i_isolate);
+ internal::Deoptimizer::DeoptimizeAll(i_isolate);
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 08fbd7ee8f..556765264a 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -142,10 +142,9 @@ class RegisteredExtension {
V(ObjectTemplate, ObjectTemplateInfo) \
V(Signature, FunctionTemplateInfo) \
V(AccessorSignature, FunctionTemplateInfo) \
- V(TypeSwitch, TypeSwitchInfo) \
V(Data, Object) \
V(RegExp, JSRegExp) \
- V(Object, JSObject) \
+ V(Object, JSReceiver) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
@@ -174,6 +173,7 @@ class RegisteredExtension {
V(External, Object) \
V(StackTrace, JSArray) \
V(StackFrame, JSObject) \
+ V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap)
class Utils {
@@ -201,6 +201,8 @@ class Utils {
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
+ v8::internal::Handle<v8::internal::JSReceiver> obj);
+ static inline Local<Object> ToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
@@ -208,13 +210,14 @@ class Utils {
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
+ static inline Local<Proxy> ToLocal(
+ v8::internal::Handle<v8::internal::JSProxy> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBufferView> obj);
static inline Local<DataView> ToLocal(
v8::internal::Handle<v8::internal::JSDataView> obj);
-
static inline Local<TypedArray> ToLocal(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Uint8Array> ToLocalUint8Array(
@@ -261,8 +264,6 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<AccessorSignature> AccessorSignatureToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
- static inline Local<TypeSwitch> ToLocal(
- v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<NativeWeakMap> NativeWeakMapToLocal(
@@ -353,10 +354,12 @@ MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
+MAKE_TO_LOCAL(ToLocal, JSReceiver, Object)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
+MAKE_TO_LOCAL(ToLocal, JSProxy, Proxy)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
@@ -369,7 +372,6 @@ MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
-MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 175a21df51..c9602ea028 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -104,7 +104,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -131,7 +132,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -257,7 +258,7 @@ void RelocInfo::WipeOut() {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -472,9 +473,9 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target) {
+ Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_embedded_constant_pool) {
- set_target_address_at(constant_pool_entry, code, target);
+ set_target_address_at(isolate, constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
}
@@ -482,7 +483,7 @@ void Assembler::deserialization_set_special_target_at(
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -572,15 +573,15 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
+ // Assembler::FlushICache(isolate, pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@@ -598,7 +599,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc, 2 * kInstrSize);
+ Assembler::FlushICache(isolate, pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@@ -618,7 +619,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc, 4 * kInstrSize);
+ Assembler::FlushICache(isolate, pc, 4 * kInstrSize);
}
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index e7b619debb..d2e3231bb8 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -843,8 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 1,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target24));
} else {
@@ -853,14 +852,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 1,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 1, CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
} else {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 2,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 2, CodePatcher::DONT_FLUSH);
patcher.masm()->movw(dst, target16_0);
patcher.masm()->movt(dst, target16_1);
}
@@ -870,15 +867,13 @@ void Assembler::target_at_put(int pos, int target_pos) {
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 2,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 2, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
} else {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
- 3,
- CodePatcher::DONT_FLUSH);
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
+ 3, CodePatcher::DONT_FLUSH);
patcher.masm()->mov(dst, Operand(target8_0));
patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
@@ -3362,6 +3357,20 @@ void Assembler::vmrs(Register dst, Condition cond) {
}
+void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
+ 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3376,6 +3385,20 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
+ vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3390,6 +3413,20 @@ void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
+ vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3404,6 +3441,20 @@ void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
+ // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+ // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
+ // M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
+ vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3418,6 +3469,20 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
}
+void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
+ const Condition cond) {
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
+ // Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
+ 0x5 * B9 | B7 | B6 | m * B5 | vm);
+}
+
+
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
@@ -3594,6 +3659,7 @@ void Assembler::GrowBuffer() {
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc.origin = this;
// Copy the data.
int pc_delta = desc.buffer - buffer_;
@@ -3669,7 +3735,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
data = RecordedAstId().ToInt();
ClearRecordedAstId();
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index db6adae57a..1abf1ab6a6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -671,19 +671,18 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- INLINE(static void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -697,11 +696,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target);
+ Isolate* isolate, Address constant_pool_entry, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
@@ -1211,10 +1211,16 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
// ARMv8 rounding instructions.
+ void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
+ void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
+ const Condition cond = al);
void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
@@ -1308,7 +1314,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 4464816f72..0c83f918ca 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -22,9 +22,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r0 : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- r1 : called function
+ // -- r1 : target
+ // -- r3 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -36,28 +35,29 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(r1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(r3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(r1, r3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects r0 to contain the number of arguments
- // including the receiver and the extra arguments. But r0 is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(r2);
- __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ mov(r0, r2, LeaveCC, ne);
+ // including the receiver and the extra arguments.
__ add(r0, r0, Operand(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
@@ -67,30 +67,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ // Load the InternalArray function from the current native context.
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ ldr(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ // Load the Array function from the current native context.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -152,6 +137,106 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into r0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lo, &no_arguments);
+ __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ Drop(2);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ Move(r0, Smi::FromInt(0));
+ __ Ret(1);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- r3 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r2 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lo, &no_arguments);
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ Move(r2, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r2 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(r2, &done_convert);
+ __ CompareObjectType(r2, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &done_convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3);
+ __ Move(r0, r2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(r2, r0);
+ __ Pop(r1, r3);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r1, r3);
+ __ b(ne, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r2, r1, r3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r2);
+ }
+ __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -201,7 +286,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(r0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -211,13 +296,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
- // -- r3 : original constructor
+ // -- r3 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r2 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r2 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -232,7 +320,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure r2 is a string.
+ // 3. Make sure r2 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(r2, &convert);
@@ -251,70 +339,45 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- r2 : the first argument
- // -- r1 : constructor function
- // -- r3 : original constructor
- // -- lr : return address
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r1, r3);
+ __ b(ne, &new_object);
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r1, r3);
- __ b(ne, &rt_call);
-
- __ Allocate(JSValue::kSize, r0, r3, r4, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in r0.
- __ LoadGlobalFunctionInitialMap(r1, r3, r4);
- __ str(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Move(r3, Smi::FromInt(JSValue::kSize));
- __ Push(r1, r2, r3);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(r1, r2);
- }
- __ b(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r2);
- __ Push(r1, r3); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(r1, r2);
- }
- __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r2, r1, r3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r2);
}
+ __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+ __ Ret();
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r1 : target function (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
__ push(r1);
+ __ push(r3);
// Push function as parameter to the runtime call.
__ Push(r1);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ pop(r3);
__ pop(r1);
}
@@ -353,12 +416,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r2 : allocation site or undefined
- // -- r3 : original constructor
+ // -- r3 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -374,178 +438,168 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r2);
__ SmiTag(r0);
__ push(r0);
- __ push(r1);
- __ push(r3);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(ne, &rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ CompareObjectType(r3, r5, r4, JS_FUNCTION_TYPE);
- __ b(ne, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r3: original constructor
- __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r5, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ ldr(r5, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
- __ cmp(r1, r5);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ ldr(r4, bit_field3);
- __ DecodeField<Map::Counter>(r3, r4);
- __ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
- __ b(lt, &allocate);
- // Decrease generous allocation count.
- __ sub(r4, r4, Operand(1 << Map::Counter::kShift));
- __ str(r4, bit_field3);
- __ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
- __ b(ne, &allocate);
-
- __ Push(r1, r2);
-
- __ push(r2); // r2 = intial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(r2);
- __ pop(r1);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- Label rt_call_reload_new_target;
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ Allocate(r3, r4, r5, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
- // Fill all the in-object properties with the appropriate filler.
- // r1: constructor function
- // r2: initial map
- // r3: object size
- // r4: JSObject (not tagged)
- // r5: First in-object property of JSObject (not tagged)
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
- __ DecodeField<Map::Counter>(ip);
- __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
- __ b(lt, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ Ubfx(r0, r0, Map::kInObjectPropertiesOrConstructorFunctionIndexByte *
- kBitsPerByte,
- kBitsPerByte);
- __ ldr(r2, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
- __ Ubfx(r2, r2, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r0, r0, Operand(r2));
- __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
- // r0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ cmp(r0, ip);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CompareObjectType(r3, r5, r4, JS_FUNCTION_TYPE);
+ __ b(ne, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // r3: new target
+ __ ldr(r2,
+ FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &rt_call);
+ __ CompareObjectType(r2, r5, r4, MAP_TYPE);
+ __ b(ne, &rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ ldr(r5, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r1, r5);
+ __ b(ne, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r1: constructor function
+ // r2: initial map
+ // r3: new target
+ __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
+ __ b(eq, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // r1: constructor function
+ // r2: initial map
+ // r3: new target
+ __ ldrb(r9, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+
+ __ Allocate(r9, r4, r9, r6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // r1: constructor function
+ // r2: initial map
+ // r3: new target
+ // r4: JSObject (not HeapObject tagged - the actual address).
+ // r9: start of next object
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(r5, r4);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ add(r4, r4, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r4: JSObject (tagged)
+ // r5: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // Check if slack tracking is enabled.
+ MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ ldr(r0, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(ip, r0);
+ // ip: slack tracking counter
+ __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
+ __ b(lt, &no_inobject_slack_tracking);
+ __ push(ip); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ sub(r0, r0, Operand(1 << Map::ConstructionCounter::kShift));
+ __ str(r0, bit_field3);
+
+ // Allocate object with a slack.
+ __ ldr(r0, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
+ __ Ubfx(r0, r0, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
+ kBitsPerByte);
+ __ sub(r0, r9, Operand(r0, LSL, kPointerSizeLog2));
+ // r0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r5, r0);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(r5, r0, r6);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r5, r9, r6);
+
+ __ pop(r0); // Restore allocation count value before decreasing.
+ __ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
+ __ b(ne, &allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(r1, r3, r4, r2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r1, r3, r4);
+
+ // Continue with JSObject being successfully allocated
+ // r1: constructor function
+ // r3: new target
+ // r4: JSObject
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(r5, r0, r6);
- // To allow for truncation.
- __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
+ __ InitializeFieldsWithFiller(r5, r9, r6);
+
+ // Continue with JSObject being successfully allocated
+ // r1: constructor function
+ // r3: new target
+ // r4: JSObject
+ __ jmp(&allocated);
}
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
+ // Allocate the new receiver object using the runtime call.
+ // r1: constructor function
+ // r3: new target
+ __ bind(&rt_call);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ add(r4, r4, Operand(kHeapObjectTag));
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(r1, r3);
+ __ Push(r1, r3); // constructor function, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(r4, r0);
+ __ Pop(r1, r3);
- // Continue with JSObject being successfully allocated
+ // Receiver for constructor call allocated.
+ // r1: constructor function
+ // r3: new target
// r4: JSObject
- __ jmp(&allocated);
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+ // Retrieve smi-tagged arguments count from the stack.
+ __ ldr(r0, MemOperand(sp));
}
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- // r3: original constructor
- __ bind(&rt_call);
-
- __ push(r1); // constructor function
- __ push(r3); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(r4, r0);
-
- // Receiver for constructor call allocated.
- // r4: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ pop(r3);
- __ pop(r1);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ ldr(r0, MemOperand(sp));
__ SmiUntag(r0);
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ push(r3);
- __ push(r4);
- __ push(r4);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(r4);
+ __ push(r4);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -554,24 +608,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r0: number of arguments
// r1: constructor function
// r2: address of last argument (caller sp)
- // r3: number of arguments (smi-tagged)
+ // r3: new target
+ // r4: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(r3, r0);
+ __ SmiTag(r4, r0);
__ b(&entry);
__ bind(&loop);
- __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
__ push(ip);
__ bind(&entry);
- __ sub(r3, r3, Operand(2), SetCC);
+ __ sub(r4, r4, Operand(2), SetCC);
__ b(ge, &loop);
// Call the function.
// r0: number of arguments
// r1: constructor function
+ // r3: new target
if (is_api_function) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code =
@@ -579,156 +634,85 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r1, r3, FIRST_JS_RECEIVER_TYPE);
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ ldr(r1, MemOperand(sp));
+ }
// Leave construct frame.
}
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
+ }
__ Jump(lr);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- r1 : constructor function
- // -- r2 : allocation site or undefined
- // -- r3 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(r2, r4);
- __ push(r2);
-
- __ mov(r4, r0);
- __ SmiTag(r4);
- __ push(r4); // Smi-tagged arguments count.
-
- // Push new.target.
- __ push(r3);
-
- // receiver is the hole.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ push(ip);
-
- // Set up pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r1: constructor function
- // r2: address of last argument (caller sp)
- // r4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, entry;
- __ b(&entry);
- __ bind(&loop);
- __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
- __ push(ip);
- __ bind(&entry);
- __ sub(r4, r4, Operand(2), SetCC);
- __ b(ge, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ mov(r2, Operand(debug_step_in_fp));
- __ ldr(r2, MemOperand(r2));
- __ tst(r2, r2);
- __ b(eq, &skip_step_in);
-
- __ Push(r0);
- __ Push(r1);
- __ Push(r1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(r1);
- __ Pop(r0);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // r0: number of arguments
- // r1: constructor function
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // r0: result
- // sp[0]: number of arguments (smi-tagged)
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Get arguments count, skipping over new.target.
- __ ldr(r1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame.
- }
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
- __ add(sp, sp, Operand(kPointerSize));
- __ Jump(lr);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(r1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -756,7 +740,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -863,6 +847,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o r1: the JS function object being called.
+// o r3: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
@@ -880,6 +865,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ push(r3);
+
+ // Push zero for bytecode array offset.
+ __ mov(r0, Operand(0));
+ __ push(r0);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -908,7 +898,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -939,7 +929,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -947,8 +937,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ sub(kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -1028,7 +1018,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (not including receiver)
- // -- r3 : original constructor
+ // -- r3 : new target
// -- r1 : constructor to call
// -- r2 : address of the first argument
// -----------------------------------
@@ -1038,47 +1028,114 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ sub(r4, r2, r4);
// Push a slot for the receiver to be constructed.
- __ push(r0);
+ __ mov(ip, Operand::Zero());
+ __ push(ip);
// Push the arguments.
Generate_InterpreterPushArgs(masm, r2, r4, r5);
// Call the constructor with r0, r1, and r3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ ldr(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ ldr(r1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ ldr(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
+ kPointerSizeLog2));
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(pc, ip);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ push(r1);
- // Push function as parameter to the runtime call.
- __ Push(r1);
- // Whether to compile in a background thread.
- __ LoadRoot(
- ip, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(ip);
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(r1);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1094,13 +1151,14 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
+ // r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
@@ -1127,13 +1185,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - isolate
+ // r3 - new target
FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
+ __ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
__ PushFixedFrame(r1);
@@ -1165,7 +1224,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
}
@@ -1191,7 +1250,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> r6.
@@ -1231,6 +1290,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Register scratch2,
+ Label* receiver_check_failed) {
+ Register signature = scratch0;
+ Register map = scratch1;
+ Register constructor = scratch2;
+
+ // If there is no signature, return the holder.
+ __ ldr(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ b(eq, &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, ip, ip);
+ __ cmp(ip, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ b(ne, &next_prototype);
+ Register type = constructor;
+ __ ldr(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(signature, type);
+ __ b(eq, &receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, ip, ip, FUNCTION_TEMPLATE_INFO_TYPE);
+
+ // Otherwise load the parent function template and iterate.
+ __ ldr(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset),
+ eq);
+ __ b(&function_template_loop, eq);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ b(eq, receiver_check_failed);
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldr(ip, FieldMemOperand(map, Map::kBitField3Offset));
+ __ tst(ip, Operand(Map::IsHiddenPrototype::kMask));
+ __ b(eq, receiver_check_failed);
+ // Iterate.
+ __ b(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments excluding receiver
+ // -- r1 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ CompatibleReceiverCheck(masm, r2, r3, r4, r5, r6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ ldr(r4, FieldMemOperand(r3, FunctionTemplateInfo::kCallCodeOffset));
+ __ ldr(r4, FieldMemOperand(r4, CallHandlerInfo::kFastHandlerOffset));
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r4);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver)
+ __ add(r0, r0, Operand(1));
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1238,7 +1400,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1282,7 +1444,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ b(hs, &ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1293,7 +1455,120 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into r0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(r0);
+ __ JumpIfSmi(r0, &receiver_not_date);
+ __ CompareObjectType(r0, r1, r2, JS_DATE_TYPE);
+ __ b(ne, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ ldr(r0, FieldMemOperand(r0, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(r1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ ldr(r1, MemOperand(r1));
+ __ ldr(ip, FieldMemOperand(r0, JSDate::kCacheStampOffset));
+ __ cmp(r1, ip);
+ __ b(ne, &stamp_mismatch);
+ __ ldr(r0, FieldMemOperand(
+ r0, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, r1);
+ __ mov(r1, Operand(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into r1, argArray into r0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ mov(r3, r2);
+ __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
+ __ sub(r4, r0, Operand(1), SetCC);
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArg
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argArray
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r2, MemOperand(sp, 0));
+ __ mov(r0, r3);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r0 : argArray
+ // -- r1 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(r1, &receiver_not_callable);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ b(eq, &receiver_not_callable);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(r0, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(r0, Operand(0));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ str(r1, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{
@@ -1336,185 +1611,128 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ ldr(key, MemOperand(fp, indexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array.
- __ bind(&loop);
- __ ldr(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
- __ ldr(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ push(r0);
-
- __ ldr(key, MemOperand(fp, indexOffset));
- __ add(key, key, Operand(1 << kSmiTagSize));
- __ str(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, limitOffset));
- __ cmp(key, r1);
- __ b(ne, &loop);
-
- // On exit, the pushed arguments count is in r0, untagged
- __ mov(r0, key);
- __ SmiUntag(r0);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(r1);
-
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ ldr(r1, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ Push(r0, r1);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- Generate_CheckStackOverflow(masm, r0, kArgcIsSmiTagged);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, r1);
+ __ mov(r3, r1);
+ __ sub(r4, r0, Operand(1), SetCC);
+ __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // thisArgument
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r2, MemOperand(sp, 0));
+ __ mov(r0, r3);
+ }
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ mov(r1, Operand::Zero());
- __ ldr(r2, MemOperand(fp, kReceiverOffset));
- __ Push(r0, r1, r2); // limit, initial index and receiver.
+ // ----------- S t a t e -------------
+ // -- r0 : argumentsList
+ // -- r1 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(r1, &target_not_callable);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ b(eq, &target_not_callable);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ str(r1, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ add(sp, sp, Operand(kStackSize * kPointerSize));
- __ Jump(lr);
}
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r1 (if present), argumentsList into r0 (if present),
+ // new.target into r3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(r1);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ ldr(r0, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(ne, &validate_arguments);
- __ ldr(r0, MemOperand(fp, kFunctionOffset));
- __ str(r0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(r0);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, r0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(r0); // limit
- __ mov(r1, Operand::Zero()); // initial index
- __ push(r1);
- // Push the constructor function as callee.
- __ ldr(r0, MemOperand(fp, kFunctionOffset));
- __ push(r0);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ ldr(r4, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ mov(r2, r1);
+ __ str(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // receiver
+ __ sub(r4, r0, Operand(1), SetCC);
+ __ ldr(r1, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // target
+ __ mov(r3, r1); // new.target defaults to target
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // argumentsList
+ __ sub(r4, r4, Operand(1), SetCC, ge);
+ __ ldr(r3, MemOperand(sp, r4, LSL, kPointerSizeLog2), ge); // new.target
+ __ add(sp, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ mov(r0, r2);
}
- __ add(sp, sp, Operand(kStackSize * kPointerSize));
- __ Jump(lr);
-}
+ // ----------- S t a t e -------------
+ // -- r0 : argumentsList
+ // -- r3 : new.target
+ // -- r1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(r1, &target_not_constructor);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &target_not_constructor);
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(r3, &new_target_not_constructor);
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &new_target_not_constructor);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ str(r1, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ str(r3, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1524,6 +1742,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
+ // -- r3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1565,6 +1784,130 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argumentsList
+ // -- r1 : target
+ // -- r3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(r0, &create_runtime);
+
+ // Load the map of argumentsList into r2.
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+ // Load native context into r4.
+ __ ldr(r4, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ ldr(ip, ContextMemOperand(r4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r2);
+ __ b(eq, &create_arguments);
+ __ ldr(ip, ContextMemOperand(r4, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r2);
+ __ b(eq, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(r2, ip, JS_ARRAY_TYPE);
+ __ b(eq, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r3, r0);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(r1, r3);
+ __ ldr(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ SmiUntag(r2);
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ ldr(r2,
+ FieldMemOperand(r0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ cmp(r2, ip);
+ __ b(ne, &create_runtime);
+ __ SmiUntag(r2);
+ __ mov(r0, r4);
+ __ b(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ ldr(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r2);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmp(r2, Operand(FAST_ELEMENTS));
+ __ b(hi, &create_runtime);
+ __ cmp(r2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ b(eq, &create_runtime);
+ __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ SmiUntag(r2);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ sub(ip, sp, ip);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ip, Operand(r2, LSL, kPointerSizeLog2));
+ __ b(gt, &done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r1 : target
+ // -- r0 : args (a FixedArray built from argumentsList)
+ // -- r2 : len (number of elements to push from args)
+ // -- r3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ mov(r4, Operand(0));
+ Label done, loop;
+ __ bind(&loop);
+ __ cmp(r4, r2);
+ __ b(eq, &done);
+ __ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
+ __ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
+ __ Push(ip);
+ __ add(r4, r4, Operand(1));
+ __ b(&loop);
+ __ bind(&done);
+ __ Move(r0, r4);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1655,17 +1998,121 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(r2);
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount actual(r0);
ParameterCount expected(r2);
- __ InvokeCode(r3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ push(r1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : target (checked to be a JSBoundFunction)
+ // -- r3 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into r2 and length of that into r4.
+ Label no_bound_arguments;
+ __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
+ __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ SmiUntag(r4);
+ __ cmp(r4, Operand(0));
+ __ b(eq, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : target (checked to be a JSBoundFunction)
+ // -- r2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- r3 : new.target (only in case of [[Construct]])
+ // -- r4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ sub(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ b(gt, &done); // Signed comparison.
+ // Restore the stack pointer.
+ __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(r5, Operand(0));
+ __ bind(&loop);
+ __ cmp(r5, r0);
+ __ b(gt, &done_loop);
+ __ ldr(ip, MemOperand(sp, r4, LSL, kPointerSizeLog2));
+ __ str(ip, MemOperand(sp, r5, LSL, kPointerSizeLog2));
+ __ add(r4, r4, Operand(1));
+ __ add(r5, r5, Operand(1));
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ SmiUntag(r4);
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ sub(r4, r4, Operand(1), SetCC);
+ __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2));
+ __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ add(r0, r0, Operand(1));
+ __ b(gt, &loop);
+ }
}
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(r1);
+
+ // Patch the receiver to [[BoundThis]].
+ __ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
+ __ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ ldr(ip, MemOperand(ip));
+ __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
}
@@ -1682,14 +2129,20 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
- __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmp(r5, Operand(JS_PROXY_TYPE));
__ b(ne, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ ldr(r1, FieldMemOperand(r1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(r1);
- __ b(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(r1);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ add(r0, r0, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1701,7 +2154,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1711,7 +2164,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1721,10 +2174,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (checked to be a JSFunction)
- // -- r3 : the original constructor (checked to be a JSFunction)
+ // -- r3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(r1);
- __ AssertFunction(r3);
// Calling convention for function specific ConstructStubs require
// r2 to contain either an AllocationSite or undefined.
@@ -1739,17 +2191,47 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSBoundFunction)
+ // -- r3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(r1);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ __ cmp(r1, r3);
+ __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
+ eq);
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ ldr(ip, MemOperand(ip));
+ __ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
- // -- r1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- r3 : the original constructor (either the same as the constructor or
+ // -- r1 : the constructor to call (checked to be a JSProxy)
+ // -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ ldr(r1, FieldMemOperand(r1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(r1);
+ __ Push(r3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ add(r0, r0, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1758,23 +2240,32 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (can be any Object)
- // -- r3 : the original constructor (either the same as the constructor or
+ // -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(r1, &non_constructor);
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Construct]] internal method.
__ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(r2, Operand(1 << Map::kIsConstructor));
__ b(eq, &non_constructor);
- // Dispatch based on instance type.
- __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET, eq);
- __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ cmp(r5, Operand(JS_PROXY_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1783,7 +2274,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1791,11 +2282,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1804,14 +2292,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r0 : actual number of arguments
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
+ // -- r3 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1820,12 +2306,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
__ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kPointerSize));
@@ -1835,7 +2322,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r0: copy start address
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
// r4: copy end address
Label copy;
@@ -1868,24 +2355,25 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
__ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
Label copy;
__ bind(&copy);
// Adjust load for return address and receiver.
@@ -1898,7 +2386,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r1: function
// r2: expected number of arguments
- // r3: code entry to call
+ // r3: new target (passed through to callee)
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
@@ -1917,7 +2405,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(r0, r2);
// r0 : expected number of arguments
// r1 : function (passed through to callee)
- __ Call(r3);
+ // r3 : new target (passed through to callee)
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(r4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1931,13 +2421,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(r3);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Jump(r4);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0);
}
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index c920725477..21413335ea 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -250,7 +250,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
@@ -271,7 +271,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
@@ -436,11 +436,11 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
+ // FIRST_JS_RECEIVER_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
@@ -453,7 +453,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -520,9 +520,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
- __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmp(r2, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_JS_RECEIVER_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -682,8 +682,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if (cc == lt || cc == le) {
@@ -697,9 +696,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -901,7 +899,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1346,16 +1344,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ b(ne, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ ldr(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(scratch, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- __ b(ne, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ ldr(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1380,27 +1368,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ Register const result = r0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &fast_runtime_fallback);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ b(eq, &fast_runtime_fallback);
+
+ __ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ b(eq, &done);
- __ cmp(object_prototype, null);
- __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ cmp(object, null);
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ b(ne, &loop);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ Move(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -1502,7 +1510,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1529,7 +1537,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(r1, r3, r2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1590,7 +1598,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r4, r9, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
@@ -1600,8 +1608,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
+ __ ldr(r4, NativeContextMemOperand());
__ cmp(r6, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
@@ -1730,7 +1737,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 = argument count (tagged)
__ bind(&runtime);
__ Push(r1, r3, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1749,7 +1756,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1793,10 +1800,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ldr(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
- __ ldr(r4, MemOperand(
- r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
__ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
@@ -1845,7 +1849,29 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(r1, r3, r2);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // r2 : number of parameters (tagged)
+ // r3 : parameters pointer
+ // r4 : rest parameter index (tagged)
+
+ Label runtime;
+ __ ldr(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ldr(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r3, r5, Operand::PointerOffsetFromSmiKey(r2));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ Push(r2, r3, r4);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1854,7 +1880,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2125,7 +2151,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ b(eq, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2220,7 +2246,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2263,33 +2289,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
- // r4 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
- if (is_super) {
- __ Push(r4);
- }
__ CallStub(stub);
- if (is_super) {
- __ Pop(r4);
- }
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2297,7 +2315,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
- // r4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2338,7 +2355,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ b(ne, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
__ jmp(&done);
@@ -2361,7 +2378,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &not_array_function);
@@ -2369,12 +2386,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -2384,7 +2401,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
- // r4 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2393,28 +2409,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into r2, or undefined.
- __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
- __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(eq, &feedback_register_initialized);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(r2, r5);
- }
+ __ AssertUndefinedOrAllocationSite(r2, r5);
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mov(r3, r4);
- } else {
- __ mov(r3, r1);
- }
+ // Pass function as new target.
+ __ mov(r3, r1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2433,7 +2443,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r3 - slot id
// r2 - vector
// r4 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, miss);
@@ -2457,11 +2467,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r1 - function
// r3 - slot id (Smi)
// r2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2498,9 +2504,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
- __ bind(&call);
+ __ bind(&call_function);
__ mov(r0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2535,14 +2542,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ ldr(r4, FieldMemOperand(r2, with_types_offset));
- __ sub(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, with_types_offset));
- __ ldr(r4, FieldMemOperand(r2, generic_offset));
- __ add(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, generic_offset));
- __ jmp(&call);
+
+ __ bind(&call);
+ __ mov(r0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2555,14 +2559,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ b(eq, &miss);
- // Update stats.
- __ ldr(r4, FieldMemOperand(r2, with_types_offset));
- __ add(r4, r4, Operand(Smi::FromInt(1)));
- __ str(r4, FieldMemOperand(r2, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
+ __ ldr(ip, NativeContextMemOperand());
+ __ cmp(r4, ip);
+ __ b(ne, &miss);
// Initialize the call counter.
__ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
@@ -2581,7 +2587,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r1);
}
- __ jmp(&call);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2599,7 +2605,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
@@ -2664,11 +2670,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2695,7 +2701,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -2734,7 +2740,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
@@ -2990,7 +2996,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// r0: original string
@@ -3030,7 +3036,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&slow_string);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3041,7 +3047,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3056,7 +3062,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3086,7 +3092,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3216,7 +3222,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// tagged as a small integer.
__ bind(&runtime);
__ Push(r1, r0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3258,7 +3264,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
@@ -3519,9 +3525,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3529,16 +3535,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
- __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &miss);
- __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, &miss);
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, &miss);
DCHECK(GetCondition() == eq);
__ sub(r0, r0, Operand(r1));
@@ -3549,7 +3556,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r2, r1, Operand(r0));
@@ -3566,7 +3573,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(r0, r0, Operand(r1));
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ mov(r2, Operand(Smi::FromInt(GREATER)));
@@ -3574,7 +3581,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ mov(r2, Operand(Smi::FromInt(LESS)));
}
__ Push(r1, r0, r2);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3590,7 +3597,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -4035,11 +4042,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4059,68 +4066,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : element value to store
- // -- r3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers r1, r2, r4
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
-
- __ CheckFastElements(r2, r5, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r0, &smi_element);
- __ CheckFastSmiElements(r2, r5, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r1, r3, r0);
- __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
- __ Push(r5, r4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
- __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(r0, MemOperand(r6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
- __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4835,7 +4780,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r0 : argc (only if argument_count() == ANY)
// -- r1 : constructor
// -- r2 : AllocationSite or undefined
- // -- r3 : original constructor
+ // -- r3 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -4856,6 +4801,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r2, r4);
}
+ // Enter the context of the Array function.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
Label subclassing;
__ cmp(r3, r1);
__ b(ne, &subclassing);
@@ -4875,25 +4823,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- __ push(r1);
- __ push(r3);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(r0, r0, Operand(2));
+ __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ add(r0, r0, Operand(3));
break;
case NONE:
- __ mov(r0, Operand(2));
+ __ str(r1, MemOperand(sp, 0 * kPointerSize));
+ __ mov(r0, Operand(3));
break;
case ONE:
- __ mov(r0, Operand(3));
+ __ str(r1, MemOperand(sp, 1 * kPointerSize));
+ __ mov(r0, Operand(4));
break;
}
-
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(r3, r2);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4979,13 +4925,13 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ ldr(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
- __ ldr(result, ContextOperand(result));
+ __ ldr(result, ContextMemOperand(result));
__ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
@@ -4995,7 +4941,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Fallback to runtime.
__ SmiTag(slot);
__ push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5021,13 +4967,13 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
- __ ldr(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
- __ ldr(cell, ContextOperand(cell));
+ __ ldr(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
@@ -5119,8 +5065,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5247,7 +5192,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 845e38a85e..30ae358eb0 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -109,9 +109,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 97f1034061..c34acd6a5b 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -18,23 +18,23 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
- fast_exp_arm_machine_code, x, 0);
+byte* fast_exp_arm_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)
+ ->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DwVfpRegister input = d0;
@@ -67,11 +67,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm_machine_code = buffer;
return &fast_exp_simulator;
@@ -79,7 +79,8 @@ UnaryMathFunction CreateExpFunction() {
}
#if defined(V8_HOST_ARCH_ARM)
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@@ -87,9 +88,10 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
Register dest = r0;
Register src = r1;
@@ -227,7 +229,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -236,7 +238,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
// Convert 8 to 16. The number of character to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- MemCopyUint16Uint8Function stub) {
+ Isolate* isolate, MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@@ -244,9 +246,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
Register dest = r0;
Register src = r1;
@@ -314,7 +317,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(&desc);
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
@@ -322,16 +325,17 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
#endif
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
__ vsqrt(d0, d0);
@@ -342,9 +346,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
@@ -882,15 +886,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r1);
patcher->masm()->nop(ip.code());
@@ -937,7 +943,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->emit_code_stub_address(stub);
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index f54fb71d0a..880825a1be 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -5,7 +5,7 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 43f2fb3463..38635ea3cf 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -40,14 +40,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->bkpt(0);
}
}
@@ -72,7 +73,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 9d86579f28..66b7f45849 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1781,28 +1781,28 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
if (dp_operation) {
Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrinta.f32.f32 'Sd, 'Sm");
}
break;
case 0x1:
if (dp_operation) {
Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrintn.f32.f32 'Sd, 'Sm");
}
break;
case 0x2:
if (dp_operation) {
Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrintp.f32.f32 'Sd, 'Sm");
}
break;
case 0x3:
if (dp_operation) {
Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
} else {
- Unknown(instr);
+ Format(instr, "vrintm.f32.f32 'Sd, 'Sm");
}
break;
default:
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 963b77782a..b7fad7bee6 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -65,6 +65,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return r2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r3; }
+const Register RestParamAccessDescriptor::parameter_count() { return r2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return r3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return r4; }
+
+
const Register ApiGetterDescriptor::function_address() { return r2; }
@@ -127,6 +132,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r3, r2, r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1};
@@ -189,7 +201,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r1 : the function to call
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
- // r4 : original constructor (for IsSuperConstructorCall)
+ // r4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r0, r1, r4, r2};
@@ -206,6 +218,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : allocation site or undefined
+ Register registers[] = {r1, r3, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ Register registers[] = {r1, r3, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r1, r0};
@@ -358,6 +391,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r1, // JSFunction
+ r3, // the new target
r0, // actual number of arguments
r2, // expected number of arguments
};
@@ -399,27 +433,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // math rounding function
- r3, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // math rounding function
- r3, // vector slot id
- r4, // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -435,7 +448,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
- r3, // original constructor
+ r3, // new target
r1, // constructor to call
r2 // address of the first argument
};
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 456bfd5629..57fa3f5804 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -19,11 +19,12 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -1236,8 +1237,6 @@ void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1257,7 +1256,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(r0));
DCHECK(expected.is_immediate() || expected.reg().is(r2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1289,11 +1287,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r3, Operand(code_constant));
- add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -1311,20 +1304,79 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ mov(r4, Operand(step_in_enabled));
+ ldrb(r4, MemOperand(r4));
+ cmp(r4, Operand(0));
+ b(eq, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(r1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = r4;
+ ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -1342,6 +1394,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register fun,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -1352,19 +1405,17 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(r1));
Register expected_reg = r2;
- Register code_reg = r3;
+ Register temp_reg = r4;
- ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ldr(expected_reg,
- FieldMemOperand(code_reg,
+ FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(expected_reg);
- ldr(code_reg,
- FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}
@@ -1382,11 +1433,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(r1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -1474,10 +1521,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ldr(scratch, FieldMemOperand(scratch, offset));
- ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1661,11 +1705,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1682,48 +1722,46 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
// Set up allocation top address register.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ ldr(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ ldr(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
- cmp(result, Operand(ip));
+ cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
@@ -1743,15 +1781,15 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(scratch2, source, bits_operand, SetCC, cond);
- source = scratch2;
+ add(result_end, source, bits_operand, SetCC, cond);
+ source = result_end;
cond = cc;
}
}
b(cs, gc_required);
- cmp(scratch2, Operand(ip));
+ cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
- str(scratch2, MemOperand(topaddr));
+ str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1760,32 +1798,25 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
mov(result, Operand(0x7091));
- mov(scratch1, Operand(0x7191));
- mov(scratch2, Operand(0x7291));
+ mov(scratch, Operand(0x7191));
+ mov(result_end, Operand(0x7291));
}
jmp(gc_required);
return;
}
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(ip));
- DCHECK(!result.is(ip));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.
@@ -1795,48 +1826,45 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
DCHECK(result.code() < ip.code());
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- ldm(ia, topaddr, result.bit() | ip.bit());
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ldm(ia, top_address, result.bit() | alloc_limit.bit());
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- ldr(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ ldr(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- ldr(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ ldr(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
if ((flags & PRETENURE) != 0) {
- cmp(result, Operand(ip));
+ cmp(result, Operand(alloc_limit));
b(hs, gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
@@ -1844,20 +1872,20 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
+ add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
- add(scratch2, result, Operand(object_size), SetCC);
+ add(result_end, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
- cmp(scratch2, Operand(ip));
+ cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- tst(scratch2, Operand(kObjectAlignmentMask));
+ tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
- str(scratch2, MemOperand(topaddr));
+ str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -2063,6 +2091,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
@@ -2452,24 +2481,17 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r0, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -2489,35 +2511,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(r2, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(r2));
- Call(r2);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(r2);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- ldr(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- ldr(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(r1));
- GetBuiltinFunction(r1, native_context_index);
- // Load the code entry point from the builtins object.
- ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, r1);
+ InvokeFunctionCode(r1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -2651,49 +2648,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- ldr(dst, GlobalObjectOperand());
- ldr(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ldr(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- ldr(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(ip, FieldMemOperand(scratch, offset));
+ ldr(scratch, NativeContextMemOperand());
+ ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ldr(map_in_out, FieldMemOperand(scratch, offset));
+ ldr(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ldr(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ldr(function,
- FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ldr(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ ldr(dst, NativeContextMemOperand());
+ ldr(dst, ContextMemOperand(dst, index));
}
@@ -2831,6 +2809,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotABoundFunction);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2945,27 +2936,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- LowDwVfpRegister double_scratch,
- int field_count) {
- int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
- for (int i = 0; i < double_count; i++) {
- vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
- vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
- }
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
- STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
- if (remain != 0) {
- vldr(double_scratch.low(),
- FieldMemOperand(src, (field_count - 1) * kPointerSize));
- vstr(double_scratch.low(),
- FieldMemOperand(dst, (field_count - 1) * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ str(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -3022,15 +3011,15 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
b(&entry);
bind(&loop);
- str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+ str(filler, MemOperand(current_address, kPointerSize, PostIndex));
bind(&entry);
- cmp(start_offset, end_offset);
+ cmp(current_address, end_address);
b(lo, &loop);
}
@@ -3281,8 +3270,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -3315,27 +3304,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- b(eq, &is_data_object);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, not_data_object);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -3350,96 +3318,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
- b(ne, &done);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- tst(load_scratch, Operand(mask_scratch, LSL, 1));
- b(eq, &ok);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
- b(eq, &is_data_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- b(ne, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- tst(instance_type, Operand(kExternalStringTag));
- mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
- b(ne, &is_data_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- ldr(ip, FieldMemOperand(value, String::kLengthOffset));
- tst(instance_type, Operand(kStringEncodingMask));
- mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
- add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orr(ip, ip, Operand(mask_scratch));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, Operand(length));
- str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ b(eq, value_is_white);
}
@@ -3661,12 +3556,11 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
@@ -3678,7 +3572,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 8ab676f39b..26811b988c 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -24,6 +24,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r0};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r0};
@@ -87,11 +88,8 @@ enum TargetAddressStorageMode {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Returns the size of a call in instructions. Note, the value returned is
@@ -244,22 +242,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -610,8 +596,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -624,7 +617,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -642,15 +635,19 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -763,12 +760,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -813,11 +806,11 @@ class MacroAssembler: public Assembler {
Register heap_number_map,
Label* gc_required);
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst,
- Register src,
- LowDwVfpRegister double_scratch,
- int field_count);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -827,12 +820,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -1079,33 +1071,30 @@ class MacroAssembler: public Assembler {
void CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1158,13 +1147,6 @@ class MacroAssembler: public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1312,6 +1294,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1462,8 +1448,6 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1516,8 +1500,7 @@ class CodePatcher {
DONT_FLUSH
};
- CodePatcher(byte* address,
- int instructions,
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
@@ -1545,13 +1528,13 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index = 0) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 716e804e3a..6e193885b0 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -390,7 +390,7 @@ void ArmDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -785,12 +785,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
- swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
+ swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -806,9 +806,8 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -816,7 +815,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -861,9 +860,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -3157,14 +3157,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
+ lazily_initialize_fast_sqrt(isolate_);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
- double dd_value = fast_sqrt(dm_value);
+ double dd_value = fast_sqrt(dm_value, isolate_);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
- float sd_value = fast_sqrt(sm_value);
+ float sd_value = fast_sqrt(sm_value, isolate_);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
@@ -3177,10 +3178,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
- double dm_value = get_double_from_d_register(vm);
- double dd_value = trunc(dm_value);
- dd_value = canonicalizeNaN(dd_value);
- set_d_register_from_double(vd, dd_value);
+ if (instr->SzValue() == 0x1) {
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = trunc(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ float sm_value = get_float_from_s_register(m);
+ float sd_value = truncf(sm_value);
+ sd_value = canonicalizeNaN(sd_value);
+ set_s_register_from_float(d, sd_value);
+ }
} else {
UNREACHABLE(); // Not used by V8.
}
@@ -3869,44 +3877,60 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
case 0x1D:
if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
- instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2 &&
- instr->Bit(8) == 0x1) {
- int vm = instr->VFPMRegValue(kDoublePrecision);
- int vd = instr->VFPDRegValue(kDoublePrecision);
- double dm_value = get_double_from_d_register(vm);
- double dd_value = 0.0;
- int rounding_mode = instr->Bits(17, 16);
- switch (rounding_mode) {
- case 0x0: // vrinta - round with ties to away from zero
- dd_value = round(dm_value);
- break;
- case 0x1: { // vrintn - round with ties to even
- dd_value = std::floor(dm_value);
- double error = dm_value - dd_value;
- // Take care of correctly handling the range [-0.5, -0.0], which
- // must yield -0.0.
- if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
- dd_value = -0.0;
- // If the error is greater than 0.5, or is equal to 0.5 and the
- // integer result is odd, round up.
- } else if ((error > 0.5) ||
- ((error == 0.5) && (fmod(dd_value, 2) != 0))) {
- dd_value++;
+ instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2) {
+ if (instr->SzValue() == 0x1) {
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ double dm_value = get_double_from_d_register(vm);
+ double dd_value = 0.0;
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0: // vrinta - round with ties to away from zero
+ dd_value = round(dm_value);
+ break;
+ case 0x1: { // vrintn - round with ties to even
+ dd_value = nearbyint(dm_value);
+ break;
}
- break;
+ case 0x2: // vrintp - ceil
+ dd_value = ceil(dm_value);
+ break;
+ case 0x3: // vrintm - floor
+ dd_value = floor(dm_value);
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
}
- case 0x2: // vrintp - ceil
- dd_value = std::ceil(dm_value);
- break;
- case 0x3: // vrintm - floor
- dd_value = std::floor(dm_value);
- break;
- default:
- UNREACHABLE(); // Case analysis is exhaustive.
- break;
+ dd_value = canonicalizeNaN(dd_value);
+ set_d_register_from_double(vd, dd_value);
+ } else {
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float sm_value = get_float_from_s_register(m);
+ float sd_value = 0.0;
+ int rounding_mode = instr->Bits(17, 16);
+ switch (rounding_mode) {
+ case 0x0: // vrinta - round with ties to away from zero
+ sd_value = roundf(sm_value);
+ break;
+ case 0x1: { // vrintn - round with ties to even
+ sd_value = nearbyintf(sm_value);
+ break;
+ }
+ case 0x2: // vrintp - ceil
+ sd_value = ceilf(sm_value);
+ break;
+ case 0x3: // vrintm - floor
+ sd_value = floorf(sm_value);
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ sd_value = canonicalizeNaN(sd_value);
+ set_s_register_from_float(d, sd_value);
}
- dd_value = canonicalizeNaN(dd_value);
- set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 0c6aaf8c24..6567607bb8 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
@@ -33,9 +33,10 @@ typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+ p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
@@ -48,11 +49,15 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
} // namespace internal
@@ -344,7 +349,7 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(
- void* external_function,
+ Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -426,17 +431,17 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_FP_INT(entry, p0, p1) \
- Simulator::current(Isolate::Current())->CallFPReturnsInt( \
- FUNCTION_ADDR(entry), p0, p1)
+#define CALL_GENERATED_FP_INT(isolate, entry, p0, p1) \
+ Simulator::current(isolate)->CallFPReturnsInt(FUNCTION_ADDR(entry), p0, p1)
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate) \
+ ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -450,13 +455,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 6de7fb1b2a..d7769791ef 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -31,7 +31,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -648,24 +649,24 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target) {
+ Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
+ // Assembler::FlushICache(isolate(), pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@@ -674,12 +675,11 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
-void Assembler::set_target_address_at(Address pc,
- Code* code,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@@ -725,7 +725,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -832,7 +832,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
STATIC_ASSERT(Assembler::kPatchDebugBreakSlotAddressOffset == 0);
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -848,7 +848,7 @@ void RelocInfo::WipeOut() {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index d981f635ba..ea7a732f8a 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -511,7 +511,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
}
assm_->dc64(data);
}
@@ -527,7 +527,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
assm_->dc64(unique_it->first);
}
unique_entries_.clear();
@@ -589,6 +589,7 @@ void Assembler::GetCode(CodeDesc* desc) {
static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
}
@@ -657,22 +658,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
- prev_link->SetImmPCOffsetTarget(next_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), next_link);
} else if (label_veneer != NULL) {
// Use the veneer for all previous links in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
- link->SetImmPCOffsetTarget(label_veneer);
+ link->SetImmPCOffsetTarget(isolate(), label_veneer);
link = next_link;
}
} else {
@@ -743,10 +744,11 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
- PatchingAssembler patcher(link, 2);
+ PatchingAssembler patcher(isolate(), link, 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
- link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ link->SetImmPCOffsetTarget(isolate(),
+ reinterpret_cast<Instruction*>(pc_));
}
// Link the label to the previous link in the chain.
@@ -2829,6 +2831,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -2866,9 +2869,9 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::COMMENT) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL)) ||
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
@@ -2895,8 +2898,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(
- reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
+ rmode, RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2985,9 +2988,8 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
- RelocInfo rinfo(buffer_ + location_offset,
- RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
- NULL);
+ RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
+ static_cast<intptr_t>(size), NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -3029,7 +3031,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(veneer);
+ branch->SetImmPCOffsetTarget(isolate(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 41060122d8..5854704b68 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -799,14 +799,12 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
// that call in the instruction stream.
@@ -819,11 +817,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address constant_pool_entry, Code* code, Address target);
+ Isolate* isolate, Address constant_pool_entry, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@@ -934,7 +933,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the emission of a constant pool.
//
@@ -2150,15 +2149,14 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
- PatchingAssembler(Instruction* start, unsigned count)
- : Assembler(NULL,
- reinterpret_cast<byte*>(start),
- count * kInstructionSize + kGap) {
+ PatchingAssembler(Isolate* isolate, Instruction* start, unsigned count)
+ : Assembler(isolate, reinterpret_cast<byte*>(start),
+ count * kInstructionSize + kGap) {
StartBlockPools();
}
- PatchingAssembler(byte* start, unsigned count)
- : Assembler(NULL, start, count * kInstructionSize + kGap) {
+ PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
+ : Assembler(isolate, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
@@ -2173,7 +2171,7 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- Assembler::FlushICacheWithoutIsolate(buffer_, length);
+ Assembler::FlushICache(isolate(), buffer_, length);
}
// See definition of PatchAdrFar() for details.
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index f7ea89d807..b6bae4ad0e 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -20,25 +20,16 @@ namespace internal {
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ Ldr(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ Ldr(result, ContextMemOperand(result,
- Context::INTERNAL_ARRAY_FUNCTION_INDEX));
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
@@ -47,9 +38,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- x0 : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- x1 : called function
+ // -- x1 : target
+ // -- x3 : new target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -61,27 +51,29 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ Push(x1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(x1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(x3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(x1, x3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects x0 to contain the number of arguments
- // including the receiver and the extra arguments. But x0 is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(
- x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ Csel(x0, x0, x2, eq);
+ // including the receiver and the extra arguments.
__ Add(x0, x0, num_extra_args + 1);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
@@ -146,6 +138,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_NumberConstructor");
+
+ // 1. Load the first argument into x0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Cbz(x0, &no_arguments);
+ __ Sub(x0, x0, 1);
+ __ Drop(x0);
+ __ Ldr(x0, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ }
+
+ // 2a. Convert first argument to number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in x0).
+ __ Bind(&no_arguments);
+ __ Drop(1);
+ __ Ret();
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x3 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_NumberConstructor_ConstructStub");
+
+ // 1. Make sure we operate in the context of the called function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into x2 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Cbz(x0, &no_arguments);
+ __ Sub(x0, x0, 1);
+ __ Drop(x0);
+ __ Ldr(x2, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ B(&done);
+ __ Bind(&no_arguments);
+ __ Drop(1);
+ __ Mov(x2, Smi::FromInt(0));
+ __ Bind(&done);
+ }
+
+ // 3. Make sure x2 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(x2, &done_convert);
+ __ JumpIfObjectType(x2, x4, x4, HEAP_NUMBER_TYPE, &done_convert, eq);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x3);
+ __ Move(x0, x2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(x2, x0);
+ __ Pop(x3, x1);
+ }
+ __ Bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Cmp(x1, x3);
+ __ B(ne, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x2, x1, x3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(x2);
+ }
+ __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -197,7 +290,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ Bind(&symbol_descriptive_string);
{
__ Push(x0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -207,14 +300,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
- // -- x3 : original constructor
+ // -- x3 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_StringConstructor_ConstructStub");
- // 1. Load the first argument into x2 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into x2 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -229,7 +325,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Bind(&done);
}
- // 2. Make sure x2 is a string.
+ // 3. Make sure x2 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(x2, &convert);
@@ -242,75 +338,49 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Move(x0, x2);
__ CallStub(&stub);
__ Move(x2, x0);
- __ Pop(x1, x3);
+ __ Pop(x3, x1);
}
__ Bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- x2 : the first argument
- // -- x1 : constructor function
- // -- x3 : original constructor
- // -- lr : return address
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Cmp(x1, x3);
+ __ B(ne, &new_object);
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(x1, x3);
- __ B(ne, &rt_call);
-
- __ Allocate(JSValue::kSize, x0, x3, x4, &allocate, TAG_OBJECT);
- __ Bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(x1, x3, x4);
- __ Str(x3, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
- __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fallback to the runtime to allocate in new space.
- __ Bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x2);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(x2, x1);
- }
- __ B(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x2, x1, x3); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(x2, x1);
- }
- __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x2, x1, x3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(x2);
}
+ __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+ __ Ret();
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- x1 : target function (preserved for callee)
+ // -- x3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // - Push a copy of the function onto the stack.
- // - Push another copy as a parameter to the runtime call.
- __ Push(x1, x1);
+ // Push a copy of the target function and the new target.
+ // Push another copy as a parameter to the runtime call.
+ __ Push(x1, x3, x1);
__ CallRuntime(function_id, 1);
- // - Restore receiver.
- __ Pop(x1);
+ // Restore target function and new target.
+ __ Pop(x3, x1);
}
@@ -347,12 +417,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x2 : allocation site or undefined
- // -- x3 : original constructor
+ // -- x3 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -369,194 +440,173 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0;
Register constructor = x1;
Register allocation_site = x2;
- Register original_constructor = x3;
+ Register new_target = x3;
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ SmiTag(argc);
- __ Push(allocation_site, argc, constructor, original_constructor);
- // sp[0]: new.target
- // sp[1]: Constructor function.
- // sp[2]: number of arguments (smi-tagged)
- // sp[3]: allocation site
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ Mov(x2, Operand(debug_step_in_fp));
- __ Ldr(x2, MemOperand(x2));
- __ Cbnz(x2, &rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ JumpIfNotObjectType(original_constructor, x10, x11, JS_FUNCTION_TYPE,
- &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- Register init_map = x2;
- __ Ldr(init_map,
- FieldMemOperand(original_constructor,
- JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(init_map, &rt_call);
- __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ Ldr(x10,
- FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset));
- __ Cmp(constructor, x10);
- __ B(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the initial
- // map's instance type would be JS_FUNCTION_TYPE.
- __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
- __ B(eq, &rt_call);
-
- Register constructon_count = x14;
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 =
- FieldMemOperand(init_map, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ Ldr(x4, bit_field3);
- __ DecodeField<Map::Counter>(constructon_count, x4);
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(lt, &allocate);
- // Decrease generous allocation count.
- __ Subs(x4, x4, Operand(1 << Map::Counter::kShift));
- __ Str(x4, bit_field3);
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(ne, &allocate);
-
- // Push the constructor and map to the stack, and the map again
- // as argument to the runtime call.
- __ Push(constructor, init_map, init_map);
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
- __ Pop(init_map, constructor);
- __ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
- __ Bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- Label rt_call_reload_new_target;
- Register obj_size = x3;
- Register new_obj = x4;
- __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
- __ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
- SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // NB. the object pointer is not tagged, so MemOperand is used.
- Register empty = x5;
- __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
- __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset));
- STATIC_ASSERT(JSObject::kElementsOffset ==
- (JSObject::kPropertiesOffset + kPointerSize));
- __ Stp(empty, empty, MemOperand(new_obj, JSObject::kPropertiesOffset));
-
- Register first_prop = x5;
- __ Add(first_prop, new_obj, JSObject::kHeaderSize);
-
- // Fill all of the in-object properties with the appropriate filler.
- Register filler = x7;
- __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
-
- // Obtain number of pre-allocated property fields and in-object
- // properties.
- Register unused_props = x10;
- Register inobject_props = x11;
- Register inst_sizes_or_attrs = x11;
- Register prealloc_fields = x10;
- __ Ldr(inst_sizes_or_attrs,
- FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
- __ Ubfx(unused_props, inst_sizes_or_attrs,
- Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
- __ Ldr(inst_sizes_or_attrs,
- FieldMemOperand(init_map, Map::kInstanceSizesOffset));
- __ Ubfx(
- inobject_props, inst_sizes_or_attrs,
- Map::kInObjectPropertiesOrConstructorFunctionIndexByte * kBitsPerByte,
- kBitsPerByte);
- __ Sub(prealloc_fields, inobject_props, unused_props);
-
- // Calculate number of property fields in the object.
- Register prop_fields = x6;
- __ Sub(prop_fields, obj_size, JSObject::kHeaderSize / kPointerSize);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(lt, &no_inobject_slack_tracking);
- constructon_count = NoReg;
-
- // Fill the pre-allocated fields with undef.
- __ FillFields(first_prop, prealloc_fields, filler);
-
- // Update first_prop register to be the offset of the first field after
- // pre-allocated fields.
- __ Add(first_prop, first_prop,
- Operand(prealloc_fields, LSL, kPointerSizeLog2));
-
- if (FLAG_debug_code) {
- Register obj_end = x14;
- __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
- __ Cmp(first_prop, obj_end);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ __ Push(allocation_site, argc);
+
+ if (create_implicit_receiver) {
+ // sp[0]: new.target
+ // sp[1]: Constructor function.
+ // sp[2]: number of arguments (smi-tagged)
+ // sp[3]: allocation site
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ JumpIfNotObjectType(new_target, x10, x11, JS_FUNCTION_TYPE,
+ &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ Register init_map = x2;
+ __ Ldr(init_map,
+ FieldMemOperand(new_target,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(init_map, &rt_call);
+ __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ Ldr(x10,
+ FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset));
+ __ Cmp(constructor, x10);
+ __ B(ne, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial
+ // map's instance type would be JS_FUNCTION_TYPE.
+ __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
+ __ B(eq, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ Register obj_size = x10;
+ Register new_obj = x4;
+ Register next_obj = obj_size; // May overlap.
+ __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
+ __ Allocate(obj_size, new_obj, next_obj, x11, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // NB. the object pointer is not tagged, so MemOperand is used.
+ Register write_address = x5;
+ Register empty = x7;
+ __ Mov(write_address, new_obj);
+ __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ Str(init_map, MemOperand(write_address, kPointerSize, PostIndex));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ Stp(empty, empty,
+ MemOperand(write_address, 2 * kPointerSize, PostIndex));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ Add(new_obj, new_obj, kHeapObjectTag);
+
+ // Fill all of the in-object properties with the appropriate filler.
+ Register filler = x7;
+ __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ Register constructon_count = x14;
+ MemOperand bit_field3 =
+ FieldMemOperand(init_map, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ Ldr(x11, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(constructon_count, x11);
+ __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
+ __ B(lt, &no_inobject_slack_tracking);
+ // Decrease generous allocation count.
+ __ Subs(x11, x11, Operand(1 << Map::ConstructionCounter::kShift));
+ __ Str(x11, bit_field3);
+
+ // Allocate object with a slack.
+ Register unused_props = x11;
+ __ Ldr(unused_props,
+ FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
+ __ Ubfx(unused_props, unused_props,
+ Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
+
+ Register end_of_pre_allocated = x11;
+ __ Sub(end_of_pre_allocated, next_obj,
+ Operand(unused_props, LSL, kPointerSizeLog2));
+ unused_props = NoReg;
+
+ if (FLAG_debug_code) {
+ __ Cmp(write_address, end_of_pre_allocated);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+
+ // Fill the pre-allocated fields with undef.
+ __ InitializeFieldsWithFiller(write_address, end_of_pre_allocated,
+ filler);
+
+ // Fill the remaining fields with one pointer filler map.
+ __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(write_address, next_obj, filler);
+
+ __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
+ __ B(ne, &allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(constructor, new_target, new_obj, init_map);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(new_obj, new_target, constructor);
+
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- // Fill the remaining fields with one pointer filler map.
- __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
- __ Sub(prop_fields, prop_fields, prealloc_fields);
+ __ InitializeFieldsWithFiller(write_address, next_obj, filler);
- __ bind(&no_inobject_slack_tracking);
+ // Continue with JSObject being successfully allocated.
+ __ B(&allocated);
}
- // Fill all of the property fields with undef.
- __ FillFields(first_prop, prop_fields, filler);
- first_prop = NoReg;
- prop_fields = NoReg;
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Add(new_obj, new_obj, kHeapObjectTag);
-
- // Continue with JSObject being successfully allocated.
- __ B(&allocated);
-
- // Reload the original constructor and fall-through.
- __ Bind(&rt_call_reload_new_target);
- __ Peek(x3, 0 * kXRegSize);
+ // Allocate the new receiver object using the runtime call.
+ // x1: constructor function
+ // x3: new target
+ __ Bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(constructor, new_target, constructor, new_target);
+ __ CallRuntime(Runtime::kNewObject);
+ __ Mov(x4, x0);
+ __ Pop(new_target, constructor);
+
+ // Receiver for constructor call allocated.
+ // x1: constructor function
+ // x3: new target
+ // x4: JSObject
+ __ Bind(&allocated);
+
+ // Reload the number of arguments from the stack.
+ // Set it up in x0 for the function call below.
+ // jssp[0]: number of arguments (smi-tagged)
+ __ Peek(argc, 0); // Load number of arguments.
}
- // Allocate the new receiver object using the runtime call.
- // x1: constructor function
- // x3: original constructor
- __ Bind(&rt_call);
- __ Push(constructor, original_constructor); // arguments 1-2
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Mov(x4, x0);
-
- // Receiver for constructor call allocated.
- // x4: JSObject
- __ Bind(&allocated);
-
- // Restore the parameters.
- __ Pop(original_constructor);
- __ Pop(constructor);
-
- // Reload the number of arguments from the stack.
- // Set it up in x0 for the function call below.
- // jssp[0]: number of arguments (smi-tagged)
- __ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
- __ Push(original_constructor, x4, x4);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(x4, x4);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@@ -566,19 +616,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x0: number of arguments
// x1: constructor function
// x2: address of last argument (caller sp)
+ // x3: new target
// jssp[0]: receiver
// jssp[1]: receiver
- // jssp[2]: new.target
- // jssp[3]: number of arguments (smi-tagged)
+ // jssp[2]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
- __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
+ __ Add(x4, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
__ B(&entry);
__ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex));
+ __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
__ Push(x11, x10);
__ Bind(&entry);
- __ Cmp(x3, x2);
+ __ Cmp(x4, x2);
__ B(gt, &loop);
// Because we copied values 2 by 2 we may have copied one extra value.
// Drop it if that is the case.
@@ -589,6 +639,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
// x0: number of arguments
// x1: constructor function
+ // x3: new target
if (is_api_function) {
__ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
Handle<Code> code =
@@ -596,155 +647,84 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(argc);
- __ InvokeFunction(constructor, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
- // jssp[1]: new.target
- // jssp[2]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // x0: result
- // jssp[0]: receiver (newly allocated object)
// jssp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(x0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ Bind(&use_receiver);
- __ Peek(x0, 0);
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Remove the receiver from the stack, remove caller arguments, and
- // return.
- __ Bind(&exit);
- // x0: result
- // jssp[0]: receiver (newly allocated object)
- // jssp[1]: new.target (original constructor)
- // jssp[2]: number of arguments (smi-tagged)
- __ Peek(x1, 2 * kXRegSize);
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(x0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ JumpIfObjectType(x0, x1, x3, FIRST_JS_RECEIVER_TYPE, &exit, ge);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0);
+
+ // Remove the receiver from the stack, remove caller arguments, and
+ // return.
+ __ Bind(&exit);
+ // x0: result
+ // jssp[0]: receiver (newly allocated object)
+ // jssp[1]: number of arguments (smi-tagged)
+ __ Peek(x1, 1 * kXRegSize);
+ } else {
+ __ Peek(x1, 0);
+ }
// Leave construct frame.
}
__ DropBySMI(x1);
__ Drop(1);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
+ }
__ Ret();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- x1 : constructor function
- // -- x2 : allocation site or undefined
- // -- x3 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_JSConstructStubForDerived");
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(x2, x10);
- __ Mov(x4, x0);
- __ SmiTag(x4);
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Push(x2, x4, x3, x10);
- // sp[0]: receiver (the hole)
- // sp[1]: new.target
- // sp[2]: number of arguments
- // sp[3]: allocation site
-
- // Set up pointer to last argument.
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
-
- // Copy arguments and receiver to the expression stack.
- // Copy 2 values every loop to use ldp/stp.
- // x0: number of arguments
- // x1: constructor function
- // x2: address of last argument (caller sp)
- // jssp[0]: receiver
- // jssp[1]: new.target
- // jssp[2]: number of arguments (smi-tagged)
- // Compute the start address of the copy in x4.
- __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
- Label loop, entry, done_copying_arguments;
- __ B(&entry);
- __ Bind(&loop);
- __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
- __ Push(x11, x10);
- __ Bind(&entry);
- __ Cmp(x4, x2);
- __ B(gt, &loop);
- // Because we copied values 2 by 2 we may have copied one extra value.
- // Drop it if that is the case.
- __ B(eq, &done_copying_arguments);
- __ Drop(1);
- __ Bind(&done_copying_arguments);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ Mov(x2, Operand(debug_step_in_fp));
- __ Ldr(x2, MemOperand(x2));
- __ Cbz(x2, &skip_step_in);
-
- __ Push(x0, x1, x1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(x1, x0);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // x0: number of arguments
- // x1: constructor function
- ParameterCount actual(x0);
- __ InvokeFunction(x1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore the context from the frame.
- // x0: result
- // jssp[0]: number of arguments (smi-tagged)
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Load number of arguments (smi), skipping over new.target.
- __ Peek(x1, kPointerSize);
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame
- }
- __ DropBySMI(x1);
- __ Drop(1);
- __ Ret();
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -771,7 +751,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Cmp(x10, Operand(argc, LSL, kPointerSizeLog2));
}
__ B(gt, &enough_stack_space);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
// We should never return from the APPLY_OVERFLOW builtin.
if (__ emit_debug_code()) {
__ Unreachable();
@@ -891,6 +871,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// - x1: the JS function object being called.
+// - x3: the new target
// - cp: our context.
// - fp: our caller's frame pointer.
// - jssp: stack pointer.
@@ -907,6 +888,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Push(x3);
+
+ // Push zero for bytecode array offset.
+ __ Mov(x0, Operand(0));
+ __ Push(x0);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -935,7 +921,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sub(x10, jssp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -963,7 +949,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
__ Push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ Pop(kInterpreterBytecodeArrayRegister);
__ Bind(&ok);
}
@@ -971,8 +957,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Sub(kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -1012,36 +998,103 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ Add(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ Ldr(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ Ldr(x1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+ kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ Ldr(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
+ __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip0);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- Register function = x1;
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
- // Preserve function. At the same time, push arguments for
- // kCompileOptimized.
- __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
- __ Push(function, function, x10);
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(function);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1057,16 +1110,17 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// calling through to the runtime:
// x0 - The address from which to resume execution.
// x1 - isolate
+ // x3 - new target
// lr - The return address for the JSFunction itself. It has not yet been
// preserved on the stack because the frame setup code was replaced
// with a call to this stub, to handle code ageing.
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0, x1, fp, lr);
+ __ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ Pop(lr, fp, x1, x0);
+ __ Pop(lr, fp, x3, x1, x0);
}
// The calling function has been made young again, so return to execute the
@@ -1097,17 +1151,18 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// calling through to the runtime:
// x0 - The address from which to resume execution.
// x1 - isolate
+ // x3 - new target
// lr - The return address for the JSFunction itself. It has not yet been
// preserved on the stack because the frame setup code was replaced
// with a call to this stub, to handle code ageing.
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(x0, x1, fp, lr);
+ __ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(
masm->isolate()), 2);
- __ Pop(lr, fp, x1, x0);
+ __ Pop(lr, fp, x3, x1, x0);
// Perform prologue operations usually performed by the young code stub.
__ EmitFrameSetupForCodeAgePatching(masm);
@@ -1142,7 +1197,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// preserve the registers with parameters.
__ PushXRegList(kSafepointSavedRegisters);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ PopXRegList(kSafepointSavedRegisters);
}
@@ -1172,7 +1227,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Mov(x0, Smi::FromInt(static_cast<int>(type)));
__ Push(x0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it.
@@ -1214,6 +1269,109 @@ void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
}
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Register scratch2,
+ Label* receiver_check_failed) {
+ Register signature = scratch0;
+ Register map = scratch1;
+ Register constructor = scratch2;
+
+ // If there is no signature, return the holder.
+ __ Ldr(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ B(eq, &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ Bind(&prototype_loop_start);
+
+ // Get the constructor, if any
+ __ GetMapConstructor(constructor, map, x16, x16);
+ __ cmp(x16, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ B(ne, &next_prototype);
+ Register type = constructor;
+ __ Ldr(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ Bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ Cmp(signature, type);
+ __ B(eq, &receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, x16, x17, FUNCTION_TEMPLATE_INFO_TYPE);
+ __ B(ne, &next_prototype);
+
+ // Otherwise load the parent function template and iterate.
+ __ Ldr(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ B(&function_template_loop);
+
+ // Load the next prototype.
+ __ Bind(&next_prototype);
+ __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ B(eq, receiver_check_failed);
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldr(x16, FieldMemOperand(map, Map::kBitField3Offset));
+ __ Tst(x16, Operand(Map::IsHiddenPrototype::kMask));
+ __ B(eq, receiver_check_failed);
+ // Iterate.
+ __ B(&prototype_loop_start);
+
+ __ Bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments excluding receiver
+ // -- x1 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[8 * (argc - 1)] : first argument
+ // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
+ CompatibleReceiverCheck(masm, x2, x3, x4, x5, x6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ Ldr(x4, FieldMemOperand(x3, FunctionTemplateInfo::kCallCodeOffset));
+ __ Ldr(x4, FieldMemOperand(x4, CallHandlerInfo::kFastHandlerOffset));
+ __ Add(x4, x4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(x4);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ Bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver)
+ __ add(x0, x0, Operand(1));
+ __ Drop(x0);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1221,7 +1379,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ Push(x0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1257,7 +1415,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ B(hs, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1267,13 +1425,149 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+// static
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- jssp[0] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_DatePrototype_GetField");
+
+ // 1. Pop receiver into x0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(x0);
+ __ JumpIfSmi(x0, &receiver_not_date);
+ __ JumpIfNotObjectType(x0, x1, x2, JS_DATE_TYPE, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ Ldr(x0, FieldMemOperand(x0, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ Mov(x1, ExternalReference::date_cache_stamp(masm->isolate()));
+ __ Ldr(x1, MemOperand(x1));
+ __ Ldr(x2, FieldMemOperand(x0, JSDate::kCacheStampOffset));
+ __ Cmp(x1, x2);
+ __ B(ne, &stamp_mismatch);
+ __ Ldr(x0, FieldMemOperand(
+ x0, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ Bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Mov(x1, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ Bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argArray (if argc == 2)
+ // -- jssp[8] : thisArg (if argc >= 1)
+ // -- jssp[16] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
+
+ Register argc = x0;
+ Register arg_array = x0;
+ Register receiver = x1;
+ Register this_arg = x2;
+ Register undefined_value = x3;
+ Register null_value = x4;
+
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+
+ // 1. Load receiver into x1, argArray into x0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+ __ Claim(2);
+ __ Drop(argc);
+
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argArray (dummy value if argc <= 1)
+ // -- jssp[8] : thisArg (dummy value if argc == 0)
+ // -- jssp[16] : receiver
+ // -----------------------------------
+ __ Cmp(argc, 1);
+ __ Pop(arg_array, this_arg); // Overwrites argc.
+ __ CmovX(this_arg, undefined_value, lo); // undefined if argc == 0.
+ __ CmovX(arg_array, undefined_value, ls); // undefined if argc <= 1.
+
+ __ Peek(receiver, 0);
+ __ Poke(this_arg, 0);
+ }
+
+ // ----------- S t a t e -------------
+ // -- x0 : argArray
+ // -- x1 : receiver
+ // -- x3 : undefined root value
+ // -- jssp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(receiver, &receiver_not_callable);
+ __ Ldr(x10, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ldrb(w10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable,
+ &receiver_not_callable);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ Cmp(arg_array, null_value);
+ __ Ccmp(arg_array, undefined_value, ZFlag, ne);
+ __ B(eq, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target in x3).
+ DCHECK(undefined_value.Is(x3));
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ Bind(&no_arguments);
+ {
+ __ Mov(x0, 0);
+ DCHECK(receiver.Is(x1));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ Bind(&receiver_not_callable);
+ {
+ __ Poke(receiver, 0);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register argc = x0;
Register function = x1;
Register scratch1 = x10;
Register scratch2 = x11;
- ASM_LOCATION("Builtins::Generate_FunctionCall");
+ ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
+
// 1. Make sure we have at least one argument.
{
Label done;
@@ -1312,205 +1606,161 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Ldr(key, MemOperand(fp, indexOffset));
- __ B(&entry);
-
- // Load the current argument from the arguments array.
- __ Bind(&loop);
- __ Ldr(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
- __ Ldr(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ Push(x0);
-
- __ Ldr(key, MemOperand(fp, indexOffset));
- __ Add(key, key, Smi::FromInt(1));
- __ Str(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ Bind(&entry);
- __ Ldr(x1, MemOperand(fp, limitOffset));
- __ Cmp(key, x1);
- __ B(ne, &loop);
-
- // On exit, the pushed arguments count is in x0, untagged
- __ Mov(x0, key);
- __ SmiUntag(x0);
-}
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argumentsList (if argc == 3)
+ // -- jssp[8] : thisArgument (if argc >= 2)
+ // -- jssp[16] : target (if argc >= 1)
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ReflectApply");
+ Register argc = x0;
+ Register arguments_list = x0;
+ Register target = x1;
+ Register this_argument = x2;
+ Register undefined_value = x3;
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
+ // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
-
- Register args = x12;
- Register receiver = x14;
- Register function = x15;
- Register apply_function = x1;
-
- // Push the vector.
- __ Ldr(
- apply_function,
- FieldMemOperand(apply_function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(apply_function,
- FieldMemOperand(apply_function,
- SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(apply_function);
-
- // Get the length of the arguments via a builtin call.
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- __ Ldr(args, MemOperand(fp, kArgumentsOffset));
- __ Push(function, args);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
- Register argc = x0;
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+ __ Claim(3);
+ __ Drop(argc);
- Generate_CheckStackOverflow(masm, argc, kArgcIsSmiTagged);
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : argumentsList (dummy value if argc <= 2)
+ // -- jssp[8] : thisArgument (dummy value if argc <= 1)
+ // -- jssp[16] : target (dummy value if argc == 0)
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ __ Adds(x10, argc, 0); // Preserve argc, and set the Z flag if it is zero.
+ __ Pop(arguments_list, this_argument, target); // Overwrites argc.
+ __ CmovX(target, undefined_value, eq); // undefined if argc == 0.
+ __ Cmp(x10, 2);
+ __ CmovX(this_argument, undefined_value, lo); // undefined if argc <= 1.
+ __ CmovX(arguments_list, undefined_value, ls); // undefined if argc <= 2.
+
+ __ Poke(this_argument, 0); // Overwrite receiver.
+ }
- // Push current limit, index and receiver.
- __ Mov(x1, 0); // Initial index.
- __ Ldr(receiver, MemOperand(fp, kReceiverOffset));
- __ Push(argc, x1, receiver);
+ // ----------- S t a t e -------------
+ // -- x0 : argumentsList
+ // -- x1 : target
+ // -- jssp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(target, &target_not_callable);
+ __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ Ldr(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsCallable, &target_not_callable);
- // At the end of the loop, the number of arguments is stored in x0, untagged
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target in x3).
+ DCHECK(undefined_value.Is(x3));
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ Ldr(x1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ Bind(&target_not_callable);
+ {
+ __ Poke(target, 0);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ Drop(kStackSize);
- __ Ret();
}
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
-
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
-
- // Is x11 safe to use?
- Register newTarget = x11;
- Register args = x12;
- Register function = x15;
- Register construct_function = x1;
-
- // Push the vector.
- __ Ldr(construct_function,
- FieldMemOperand(construct_function,
- JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(construct_function,
- FieldMemOperand(construct_function,
- SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(construct_function);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ Ldr(x0, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
- __ B(ne, &validate_arguments);
- __ Ldr(x0, MemOperand(fp, kFunctionOffset));
- __ Str(x0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ Bind(&validate_arguments);
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- __ Ldr(args, MemOperand(fp, kArgumentsOffset));
- __ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
- __ Push(function, args, newTarget);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- Register argc = x0;
-
- Generate_CheckStackOverflow(masm, argc, kArgcIsSmiTagged);
-
- // Push current limit and index & constructor function as callee.
- __ Mov(x1, 0); // Initial index.
- __ Push(argc, x1, function);
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : new.target (optional)
+ // -- jssp[8] : argumentsList
+ // -- jssp[16] : target
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_ReflectConstruct");
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ Register argc = x0;
+ Register arguments_list = x0;
+ Register target = x1;
+ Register new_target = x3;
+ Register undefined_value = x4;
- // Use undefined feedback vector
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ Ldr(x1, MemOperand(fp, kFunctionOffset));
- __ Ldr(x4, MemOperand(fp, kNewTargetOffset));
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ // 1. Load target into x1 (if present), argumentsList into x0 (if present),
+ // new.target into x3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+ __ Claim(3);
+ __ Drop(argc);
- // Leave internal frame.
+ // ----------- S t a t e -------------
+ // -- x0 : argc
+ // -- jssp[0] : new.target (dummy value if argc <= 2)
+ // -- jssp[8] : argumentsList (dummy value if argc <= 1)
+ // -- jssp[16] : target (dummy value if argc == 0)
+ // -- jssp[24] : receiver
+ // -----------------------------------
+ __ Adds(x10, argc, 0); // Preserve argc, and set the Z flag if it is zero.
+ __ Pop(new_target, arguments_list, target); // Overwrites argc.
+ __ CmovX(target, undefined_value, eq); // undefined if argc == 0.
+ __ Cmp(x10, 2);
+ __ CmovX(arguments_list, undefined_value, lo); // undefined if argc <= 1.
+ __ CmovX(new_target, target, ls); // target if argc <= 2.
+
+ __ Poke(undefined_value, 0); // Overwrite receiver.
}
- __ Drop(kStackSize);
- __ Ret();
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_FunctionApply");
- Generate_ApplyHelper(masm, false);
-}
+ // ----------- S t a t e -------------
+ // -- x0 : argumentsList
+ // -- x1 : target
+ // -- x3 : new.target
+ // -- jssp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_ReflectApply");
- Generate_ApplyHelper(masm, true);
-}
-
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(target, &target_not_constructor);
+ __ Ldr(x10, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
+ &target_not_constructor);
+
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(new_target, &new_target_not_constructor);
+ __ Ldr(x10, FieldMemOperand(new_target, HeapObject::kMapOffset));
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x10, 1 << Map::kIsConstructor,
+ &new_target_not_constructor);
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ Bind(&target_not_constructor);
+ {
+ __ Poke(target, 0);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_ReflectConstruct");
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ Bind(&new_target_not_constructor);
+ {
+ __ Poke(new_target, 0);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1520,6 +1770,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- x0 : actual number of arguments
// -- x1 : function (passed through to callee)
// -- x2 : expected number of arguments
+ // -- x3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
@@ -1561,8 +1812,152 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argumentsList
+ // -- x1 : target
+ // -- x3 : new.target (checked to be constructor or undefined)
+ // -- jssp[0] : thisArgument
+ // -----------------------------------
+
+ Register arguments_list = x0;
+ Register target = x1;
+ Register new_target = x3;
+
+ Register args = x0;
+ Register len = x2;
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(arguments_list, &create_runtime);
+
+ // Load native context.
+ Register native_context = x4;
+ __ Ldr(native_context, NativeContextMemOperand());
+
+ // Load the map of argumentsList.
+ Register arguments_list_map = x2;
+ __ Ldr(arguments_list_map,
+ FieldMemOperand(arguments_list, HeapObject::kMapOffset));
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ Ldr(x10, ContextMemOperand(native_context,
+ Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Ldr(x11, ContextMemOperand(native_context,
+ Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Cmp(arguments_list_map, x10);
+ __ Ccmp(arguments_list_map, x11, ZFlag, ne);
+ __ B(eq, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(arguments_list_map, native_context, JS_ARRAY_TYPE);
+ __ B(eq, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ Bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(target, new_target, arguments_list);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(new_target, target);
+ __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
+ FixedArray::kLengthOffset));
+ }
+ __ B(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ Bind(&create_arguments);
+ __ Ldrsw(len, UntagSmiFieldMemOperand(
+ arguments_list,
+ JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+ __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
+ __ CompareAndBranch(len, x11, ne, &create_runtime);
+ __ Mov(args, x10);
+ __ B(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ Bind(&create_array);
+ __ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(x10);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ // Branch for anything that's not FAST_{SMI_}ELEMENTS.
+ __ TestAndBranchIfAnySet(x10, ~FAST_ELEMENTS, &create_runtime);
+ __ Ldrsw(len,
+ UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+
+ __ Bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ __ Sub(x10, masm->StackPointer(), x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ B(gt, &done); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ Bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- x0 : args (a FixedArray built from argumentsList)
+ // -- x1 : target
+ // -- x2 : len (number of elements to push from args)
+ // -- x3 : new.target (checked to be constructor or undefined)
+ // -- jssp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label done, loop;
+ Register src = x4;
+
+ __ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Mov(x0, len); // The 'len' argument for Call() or Construct().
+ __ Cbz(len, &done);
+ __ Claim(len);
+ __ Bind(&loop);
+ __ Sub(len, len, 1);
+ __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
+ __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
+ __ Cbnz(len, &loop);
+ __ Bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (len)
+ // -- x1 : target
+ // -- x3 : new.target (checked to be constructor or undefined)
+ // -- jssp[0] : args[len-1]
+ // -- jssp[8] : args[len-2]
+ // ... : ...
+ // -- jssp[8*(len-2)] : args[1]
+ // -- jssp[8*(len-1)] : args[0]
+ // -----------------------------------
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(new_target, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
+ ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSFunction)
@@ -1651,17 +2046,121 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
ParameterCount actual(x0);
ParameterCount expected(x2);
- __ InvokeCode(x3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ Push(x1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : target (checked to be a JSBoundFunction)
+ // -- x3 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into x2 and length of that into x4.
+ Label no_bound_arguments;
+ __ Ldr(x2, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
+ __ Cmp(x4, 0);
+ __ B(eq, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : target (checked to be a JSBoundFunction)
+ // -- x2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- x3 : new.target (only in case of [[Construct]])
+ // -- x4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ Claim(x4);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(jssp, Heap::kRealStackLimitRootIndex);
+ __ B(gt, &done); // Signed comparison.
+ // Restore the stack pointer.
+ __ Drop(x4);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ Bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ Mov(x5, 0);
+ __ Bind(&loop);
+ __ Cmp(x5, x0);
+ __ B(gt, &done_loop);
+ __ Peek(x10, Operand(x4, LSL, kPointerSizeLog2));
+ __ Poke(x10, Operand(x5, LSL, kPointerSizeLog2));
+ __ Add(x4, x4, 1);
+ __ Add(x5, x5, 1);
+ __ B(&loop);
+ __ Bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ Ldrsw(x4, UntagSmiFieldMemOperand(x2, FixedArray::kLengthOffset));
+ __ Add(x2, x2, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Bind(&loop);
+ __ Sub(x4, x4, 1);
+ __ Ldr(x10, MemOperand(x2, x4, LSL, kPointerSizeLog2));
+ __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x0, x0, 1);
+ __ Cmp(x4, 0);
+ __ B(gt, &loop);
+ }
}
+ __ Bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(x1);
+
+ // Patch the receiver to [[BoundThis]].
+ __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+ __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Mov(x10,
+ ExternalReference(Builtins::kCall_ReceiverIsAny, masm->isolate()));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x12);
}
@@ -1678,14 +2177,20 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
- __ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
+ __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ Cmp(x5, JS_PROXY_TYPE);
__ B(ne, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ Ldr(x1, FieldMemOperand(x1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(x1);
- __ B(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(x1);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ Add(x0, x0, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1696,7 +2201,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1706,7 +2211,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1716,10 +2221,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (checked to be a JSFunction)
- // -- x3 : the original constructor (checked to be a JSFunction)
+ // -- x3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(x1);
- __ AssertFunction(x3);
// Calling convention for function specific ConstructStubs require
// x2 to contain either an AllocationSite or undefined.
@@ -1735,17 +2239,53 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSBoundFunction)
+ // -- x3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(x1);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ Cmp(x1, x3);
+ __ B(ne, &done);
+ __ Ldr(x3,
+ FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Mov(x10, ExternalReference(Builtins::kConstruct, masm->isolate()));
+ __ Ldr(x11, MemOperand(x10));
+ __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x12);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
- // -- x1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- x3 : the original constructor (either the same as the constructor or
+ // -- x1 : the constructor to call (checked to be a JSProxy)
+ // -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ Ldr(x1, FieldMemOperand(x1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(x1);
+ __ Push(x3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ Add(x0, x0, 3);
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1754,22 +2294,31 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (can be any Object)
- // -- x3 : the original constructor (either the same as the constructor or
+ // -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(x1, &non_constructor);
- __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
- __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
// Dispatch based on instance type.
- __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
+ __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq);
- __ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
+
+ // Check if target has a [[Construct]] internal method.
+ __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ Cmp(x5, JS_PROXY_TYPE);
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1778,7 +2327,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1786,11 +2335,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1831,7 +2377,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (not including receiver)
- // -- x3 : original constructor
+ // -- x3 : new target
// -- x1 : constructor to call
// -- x2 : address of the first argument
// -----------------------------------
@@ -1860,7 +2406,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ B(gt, &loop_header);
// Call the constructor with x0, x1, and x3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1870,20 +2416,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- x0 : actual number of arguments
// -- x1 : function (passed through to callee)
// -- x2 : expected number of arguments
+ // -- x3 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
-
Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
- Register code_entry = x3;
+ Register code_entry = x10;
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Cmp(argc_actual, argc_expected);
__ B(lt, &too_few);
__ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1891,6 +2434,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
Register copy_start = x10;
Register copy_end = x11;
@@ -1956,11 +2500,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
__ Lsl(scratch2, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
@@ -2010,6 +2555,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Mov(argc_actual, argc_expected);
// x0 : expected number of arguments
// x1 : function (passed through to callee)
+ // x3 : new target (passed through to callee)
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Call(code_entry);
// Store offset of return address for deoptimizer.
@@ -2021,13 +2568,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point without adapting the arguments.
__ Bind(&dont_adapt_arguments);
+ __ Ldr(code_entry, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
__ Jump(code_entry);
__ Bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ Unreachable();
}
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 751d8aebde..a1e920755d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -223,7 +223,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
// Call runtime on identical JSObjects. Otherwise return equal.
- __ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
+ __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
slow, ge);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
@@ -245,7 +245,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
__ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
&heap_number);
// Comparing JS objects with <=, >= is complicated.
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
@@ -336,10 +336,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are not
// equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label right_non_object;
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
__ B(lt, &right_non_object);
// Return non-zero - x0 already contains a non-zero pointer.
@@ -356,9 +356,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If right is not ODDBALL, test left. Otherwise, set eq condition.
__ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
- // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE.
+ // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
// Otherwise, right or left is ODDBALL, so set a ge condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne);
+ __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);
__ B(ge, &return_not_equal);
@@ -471,11 +471,11 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Bind(&object_test);
- __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
- // If right >= FIRST_SPEC_OBJECT_TYPE, test left.
- // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition.
- __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge);
+ // If right >= FIRST_JS_RECEIVER_TYPE, test left.
+ // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
+ __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);
__ B(lt, not_both_strings);
@@ -653,8 +653,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cond == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
@@ -668,9 +667,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ Bind(&miss);
@@ -966,7 +964,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Bind(&call_runtime);
// Put the arguments back on the stack.
__ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// Return.
__ Bind(&done);
@@ -1550,17 +1548,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
__ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- Register const scratch_w = scratch.W();
- __ Ldr(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // On 64-bit platforms, compiler hints field is not a smi. See definition of
- // kCompilerHintsOffset in src/objects.h.
- __ Ldr(scratch_w, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(scratch_w, SharedFunctionInfo::kBoundFunction, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ Ldr(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1585,27 +1572,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(x0, Heap::kTrueValueRootIndex);
+ Register const result = x0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ Bind(&loop);
- __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
+ &fast_runtime_fallback);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ B(eq, &fast_runtime_fallback);
+
+ __ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Cmp(object, function_prototype);
__ B(eq, &done);
- __ Cmp(object_prototype, null);
- __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ Cmp(object, null);
+ __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ B(ne, &loop);
- __ LoadRoot(x0, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ Bind(&done);
- __ StoreRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime
+ __ Bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ Move(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -1656,7 +1663,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// the runtime system.
__ Bind(&slow);
__ Push(key);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1687,7 +1694,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ Bind(&runtime);
__ Push(x1, x3, x2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1801,13 +1808,10 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x11 sloppy_args_map offset to args (or aliased args) map (uninit)
// x14 arg_count number of function arguments
- Register global_object = x10;
Register global_ctx = x10;
Register sloppy_args_map = x11;
Register aliased_args_map = x10;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx,
- FieldMemOperand(global_object, JSGlobalObject::kNativeContextOffset));
+ __ Ldr(global_ctx, NativeContextMemOperand());
__ Ldr(sloppy_args_map,
ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
@@ -1965,7 +1969,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1981,7 +1985,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ Push(receiver, key);
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ Bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2047,14 +2051,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (native) context.
- Register global_object = x10;
- Register global_ctx = x10;
Register strict_args_map = x4;
- __ Ldr(global_object, GlobalObjectMemOperand());
- __ Ldr(global_ctx,
- FieldMemOperand(global_object, JSGlobalObject::kNativeContextOffset));
- __ Ldr(strict_args_map,
- ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
+ strict_args_map);
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
@@ -2118,13 +2117,61 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ Bind(&runtime);
__ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
+ // x4 : rest parameter index (tagged)
+ //
+ // Returns pointer to result object in x0.
+
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
+ DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
+
+ // Get the stub arguments from the frame, and make an untagged copy of the
+ // parameter count.
+ Register rest_index_smi = x4;
+ Register param_count_smi = x2;
+ Register params = x3;
+ Register param_count = x13;
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Test if arguments adaptor needed.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx,
+ MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &runtime);
+
+ // x4 rest_index_smi index of rest parameter
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
+ // x11 caller_fp caller's frame pointer
+ // x13 param_count number of parameters passed to function
+
+ // Patch the argument length and parameters pointer.
+ __ Ldr(param_count_smi,
+ MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(param_count, param_count_smi);
+ __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
+ __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
+
+ __ Bind(&runtime);
+ __ Push(param_count_smi, params, rest_index_smi);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2565,7 +2612,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ B(eq, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
@@ -2574,7 +2621,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2622,25 +2669,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
Register feedback_vector, Register index,
- Register orig_construct, bool is_super) {
+ Register new_target) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
- if (is_super) {
- __ Push(argc, function, feedback_vector, index, orig_construct);
- } else {
- __ Push(argc, function, feedback_vector, index);
- }
+ __ Push(argc, function, feedback_vector, index);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
- if (is_super) {
- __ Pop(orig_construct, index, feedback_vector, function, argc);
- } else {
- __ Pop(index, feedback_vector, function, argc);
- }
+ __ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
@@ -2648,19 +2687,17 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Register function,
Register feedback_vector, Register index,
- Register orig_construct, Register scratch1,
- Register scratch2, Register scratch3,
- bool is_super) {
+ Register new_target, Register scratch1,
+ Register scratch2, Register scratch3) {
ASM_LOCATION("GenerateRecordCallTarget");
DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
- feedback_vector, index, orig_construct));
+ feedback_vector, index, new_target));
// Cache the called function in a feedback vector slot. Cache states are
// uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
// argc : number of arguments to the construct function
// function : the function to call
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
- // orig_construct : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2703,7 +2740,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
__ B(&done);
@@ -2727,7 +2764,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ Bind(&initialize);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &not_array_function);
@@ -2736,13 +2773,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index, orig_construct, is_super);
+ feedback_vector, index, new_target);
__ B(&done);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
- feedback_vector, index, orig_construct, is_super);
+ feedback_vector, index, new_target);
__ Bind(&done);
}
@@ -2753,7 +2790,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
- // x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
Label non_function;
@@ -2764,28 +2800,21 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
&non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
- IsSuperConstructorCall());
-
- __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into x2, or undefined.
- __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
- __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
- __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
- &feedback_register_initialized);
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
-
- __ AssertUndefinedOrAllocationSite(x2, x5);
- }
+ GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
- if (IsSuperConstructorCall()) {
- __ Mov(x3, x4);
- } else {
- __ Mov(x3, function);
- }
+ __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+
+ __ AssertUndefinedOrAllocationSite(x2, x5);
+
+ __ Mov(x3, function);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2811,7 +2840,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
Register allocation_site = x4;
Register scratch = x5;
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
__ Cmp(function, scratch);
__ B(ne, miss);
@@ -2828,9 +2857,9 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Set up arguments for the array constructor stub.
Register allocation_site_arg = feedback_vector;
- Register original_constructor_arg = index;
+ Register new_target_arg = index;
__ Mov(allocation_site_arg, allocation_site);
- __ Mov(original_constructor_arg, function);
+ __ Mov(new_target_arg, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
}
@@ -2842,11 +2871,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x1 - function
// x3 - slot id (Smi)
// x2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2890,9 +2915,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
- __ bind(&call);
+ __ Bind(&call_function);
__ Mov(x0, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2922,14 +2948,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Subs(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
- __ B(&call);
+
+ __ Bind(&call);
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2941,14 +2964,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
__ Cmp(function, x5);
__ B(eq, &miss);
- // Update stats.
- __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
- __ Adds(x4, x4, Operand(Smi::FromInt(1)));
- __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
+ __ Ldr(x5, NativeContextMemOperand());
+ __ Cmp(x4, x5);
+ __ B(ne, &miss);
// Initialize the call counter.
__ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
@@ -2968,7 +2993,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(function);
}
- __ B(&call);
+ __ B(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2988,7 +3013,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ Mov(x1, x0);
@@ -3046,11 +3071,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -3078,7 +3103,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3109,7 +3134,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Mov(result_, x0);
call_helper.AfterCall(masm);
__ B(&exit_);
@@ -3127,7 +3152,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
@@ -3381,9 +3406,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ Bind(&runtime);
__ Push(lhs, rhs);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ Bind(&miss);
@@ -3391,9 +3416,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
- ASM_LOCATION("CompareICStub[Objects]");
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
+ ASM_LOCATION("CompareICStub[Receivers]");
Label miss;
@@ -3403,10 +3428,11 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
__ JumpIfEitherSmi(rhs, lhs, &miss);
- __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
- __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
+ __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Sub(result, rhs, lhs);
__ Ret();
@@ -3415,8 +3441,8 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
- ASM_LOCATION("CompareICStub[KnownObjects]");
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
+ ASM_LOCATION("CompareICStub[KnownReceivers]");
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
@@ -3442,7 +3468,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Sub(result, rhs, lhs);
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
Register ncr = x2;
if (op() == Token::LT || op() == Token::LTE) {
@@ -3451,7 +3477,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Mov(ncr, Smi::FromInt(LESS));
}
__ Push(lhs, rhs, ncr);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ Bind(&miss);
@@ -3479,7 +3505,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(left, right, op);
// Call the miss handler. This also pops the arguments.
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
@@ -3725,7 +3751,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// x1: result_length
@@ -3771,7 +3797,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&slow_string);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ Bind(&not_string);
Label not_oddball;
@@ -3782,7 +3808,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3797,7 +3823,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ Bind(&not_smi);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3829,7 +3855,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3973,7 +3999,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
__ Bind(&runtime);
__ Push(x1, x0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -4115,12 +4141,12 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
- __ EnsureNotWhite(val,
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- regs_.scratch2(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(val,
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ regs_.scratch2(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm); // Restore the extra scratch registers we used.
@@ -4170,76 +4196,6 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // x0 value element value to store
- // x3 index_smi element index as smi
- // sp[0] array_index_smi array literal index in function as smi
- // sp[1] array array literal
-
- Register value = x0;
- Register index_smi = x3;
-
- Register array = x1;
- Register array_map = x2;
- Register array_index_smi = x4;
- __ PeekPair(array_index_smi, array, 0);
- __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));
-
- Label double_elements, smi_element, fast_elements, slow_elements;
- Register bitfield2 = x10;
- __ Ldrb(bitfield2, FieldMemOperand(array_map, Map::kBitField2Offset));
-
- // Jump if array's ElementsKind is not FAST*_SMI_ELEMENTS, FAST_ELEMENTS or
- // FAST_HOLEY_ELEMENTS.
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- __ Cmp(bitfield2, Map::kMaximumBitField2FastHoleyElementValue);
- __ B(hi, &double_elements);
-
- __ JumpIfSmi(value, &smi_element);
-
- // Jump if array's ElementsKind is not FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
- __ Tbnz(bitfield2, MaskToBit(FAST_ELEMENTS << Map::ElementsKindBits::kShift),
- &fast_elements);
-
- // Store into the array literal requires an elements transition. Call into
- // the runtime.
- __ Bind(&slow_elements);
- __ Push(array, index_smi, value);
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- __ Push(x11, array_index_smi);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ Bind(&fast_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Str(value, MemOperand(x11));
- // Update the write barrier for the array store.
- __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ Bind(&smi_element);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
- __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
- __ Ret();
-
- __ Bind(&double_elements);
- __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0,
- &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5230,12 +5186,12 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
- // -- x3 : original constructor
+ // -- x3 : new target
// -- sp[0] : last argument
// -----------------------------------
Register constructor = x1;
Register allocation_site = x2;
- Register original_constructor = x3;
+ Register new_target = x3;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
@@ -5257,8 +5213,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
}
+ // Enter the context of the Array function.
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
Label subclassing;
- __ Cmp(original_constructor, constructor);
+ __ Cmp(new_target, constructor);
__ B(ne, &subclassing);
Register kind = x3;
@@ -5277,22 +5236,23 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing support.
__ Bind(&subclassing);
- __ Push(constructor, original_constructor);
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(x0, x0, Operand(2));
+ __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x0, x0, Operand(3));
break;
case NONE:
- __ Mov(x0, Operand(2));
+ __ Poke(constructor, 0 * kPointerSize);
+ __ Mov(x0, Operand(3));
break;
case ONE:
- __ Mov(x0, Operand(3));
+ __ Poke(constructor, 1 * kPointerSize);
+ __ Mov(x0, Operand(4));
break;
}
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(new_target, allocation_site);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5408,7 +5368,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Bind(&slow_case);
__ SmiTag(slot);
__ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5528,8 +5488,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5682,7 +5641,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ Bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ Bind(&delete_allocated_handles);
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 341153380d..4b56b5468f 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -131,6 +131,7 @@ class RecordWriteStub: public PlatformCodeStub {
static void Patch(Code* stub, Mode mode) {
// We are going to patch the two first instructions of the stub.
PatchingAssembler patcher(
+ stub->GetIsolate(),
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 8e927bfd90..c2073f1f4b 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -16,9 +16,9 @@ namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
-byte* fast_exp_arm64_machine_code = NULL;
-double fast_exp_simulator(double x) {
- Simulator * simulator = Simulator::current(Isolate::Current());
+byte* fast_exp_arm64_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ Simulator * simulator = Simulator::current(isolate);
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
@@ -28,19 +28,18 @@ double fast_exp_simulator(double x) {
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
-
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
// Use the Math.exp implemetation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
@@ -64,11 +63,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
@@ -76,8 +75,8 @@ UnaryMathFunction CreateExpFunction() {
}
-UnaryMathFunction CreateSqrtFunction() {
- return &std::sqrt;
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
+ return nullptr;
}
@@ -368,12 +367,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
}
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
- PatchingAssembler patcher(young_sequence_.start(),
+ PatchingAssembler patcher(isolate, young_sequence_.start(),
young_sequence_.length() / kInstructionSize);
// The young sequence is the frame setup code for FUNCTION code types. It is
// generated by FullCodeGenerator::Generate.
@@ -382,7 +382,7 @@ CodeAgingHelper::CodeAgingHelper() {
#ifdef DEBUG
const int length = kCodeAgeStubEntryOffset / kInstructionSize;
DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
- PatchingAssembler patcher_old(old_sequence_.start(), length);
+ PatchingAssembler patcher_old(isolate, old_sequence_.start(), length);
MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}
@@ -417,7 +417,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
- PatchingAssembler patcher(sequence,
+ PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
index 7100ef1134..573f6fe159 100644
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -5,7 +5,7 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 43a375d953..00b24e9375 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -9,11 +9,11 @@
#include "src/globals.h"
// Assert that this is an LP64 system.
-STATIC_ASSERT(sizeof(int) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
-STATIC_ASSERT(sizeof(void *) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(1) == sizeof(int32_t)); // NOLINT(runtime/sizeof)
-STATIC_ASSERT(sizeof(1L) == sizeof(int64_t)); // NOLINT(runtime/sizeof)
+STATIC_ASSERT(sizeof(void *) == sizeof(int64_t));
+STATIC_ASSERT(sizeof(1) == sizeof(int32_t));
+STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
// Get the standard printf format macros for C99 stdint types.
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 19ee123b36..118c5dfa8d 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -49,7 +49,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
- PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
+ PatchingAssembler patcher(isolate, call_address,
+ patch_size() / kInstructionSize);
patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 60243d8306..d23533d8bc 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -219,13 +219,13 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
}
-void Instruction::SetImmPCOffsetTarget(Instruction* target) {
+void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) {
if (IsPCRelAddressing()) {
- SetPCRelImmTarget(target);
+ SetPCRelImmTarget(isolate, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
- SetUnresolvedInternalReferenceImmTarget(target);
+ SetUnresolvedInternalReferenceImmTarget(isolate, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
@@ -233,7 +233,7 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
}
-void Instruction::SetPCRelImmTarget(Instruction* target) {
+void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@@ -243,7 +243,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
- PatchingAssembler patcher(this,
+ PatchingAssembler patcher(isolate, this,
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@@ -284,7 +284,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
-void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
+void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
+ Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@@ -293,7 +294,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
- PatchingAssembler patcher(this, 2);
+ PatchingAssembler patcher(isolate, this, 2);
patcher.brk(high16);
patcher.brk(low16);
}
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 5c652e3ec8..db4e3d03a8 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -373,8 +373,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
- void SetImmPCOffsetTarget(Instruction* target);
- void SetUnresolvedInternalReferenceImmTarget(Instruction* target);
+ void SetImmPCOffsetTarget(Isolate* isolate, Instruction* target);
+ void SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
+ Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@@ -410,7 +411,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
- void SetPCRelImmTarget(Instruction* target);
+ void SetPCRelImmTarget(Isolate* isolate, Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 4e1b818065..485aa780e3 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -65,6 +65,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; }
+const Register RestParamAccessDescriptor::parameter_count() { return x2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return x3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return x4; }
+
+
const Register ApiGetterDescriptor::function_address() { return x2; }
@@ -129,9 +134,20 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: closure
+ // x2: object literal index
+ // x1: constant properties
+ // x0: object literal flags
+ Register registers[] = {x3, x2, x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x3: array literals array
+ // x3: closure
// x2: array literal index
// x1: constant elements
Register registers[] = {x3, x2, x1};
@@ -141,7 +157,7 @@ void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x3: object literals array
+ // x3: closure
// x2: object literal index
// x1: constant properties
// x0: object literal flags
@@ -204,7 +220,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
- // x4 : original constructor (for IsSuperConstructorCall)
+ // x4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {x0, x1, x4, x2};
@@ -221,6 +237,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ // x2: allocation site or undefined
+ Register registers[] = {x1, x3, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ Register registers[] = {x1, x3, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: length
@@ -387,6 +424,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x1, // JSFunction
+ x3, // the new target
x0, // actual number of arguments
x2, // expected number of arguments
};
@@ -428,27 +466,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // math rounding function
- x3, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // math rounding function
- x3, // vector slot id
- x4, // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -464,7 +481,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
- x3, // original constructor
+ x3, // new target
x1, // constructor to call
x2 // address of the first argument
};
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 9b4abe5514..60418ad839 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -1434,32 +1434,6 @@ void MacroAssembler::IsObjectNameType(Register object,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- // If cmp result is lt, the following ccmp will clear all flags.
- // Z == 0, N == V implies gt condition.
- Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);
-
- // If we didn't get a valid label object just fall through and leave the
- // flags updated.
- if (fail != NULL) {
- B(gt, fail);
- }
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object,
Register type,
Label* not_string,
@@ -1488,7 +1462,8 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
-void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
+void MacroAssembler::Claim(int64_t count, uint64_t unit_size) {
+ DCHECK(count >= 0);
uint64_t size = count * unit_size;
if (size == 0) {
@@ -1516,6 +1491,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
return;
}
+ AssertPositiveOrZero(count);
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
@@ -1543,7 +1519,8 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
}
-void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
+void MacroAssembler::Drop(int64_t count, uint64_t unit_size) {
+ DCHECK(count >= 0);
uint64_t size = count * unit_size;
if (size == 0) {
@@ -1574,6 +1551,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
return;
}
+ AssertPositiveOrZero(count);
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 5b941a2a5a..fbf459db46 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -22,9 +22,9 @@ namespace internal {
#define __
-MacroAssembler::MacroAssembler(Isolate* arg_isolate,
- byte * buffer,
- unsigned buffer_size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
+ unsigned buffer_size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, buffer_size),
generating_stub_(false),
#if DEBUG
@@ -35,7 +35,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate,
sp_(jssp),
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -1343,6 +1343,8 @@ void MacroAssembler::AssertStackConsistency() {
// Avoid generating AssertStackConsistency checks for the Push in Abort.
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
+ // Restore StackPointer().
+ sub(StackPointer(), csp, StackPointer());
Abort(kTheCurrentStackPointerIsBelowCsp);
}
@@ -1626,6 +1628,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -1654,6 +1669,17 @@ void MacroAssembler::AssertString(Register object) {
}
+void MacroAssembler::AssertPositiveOrZero(Register value) {
+ if (emit_debug_code()) {
+ Label done;
+ int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
+ Tbz(value, sign_bit, &done);
+ Abort(kUnexpectedNegativeValue);
+ Bind(&done);
+ }
+}
+
+
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -1701,62 +1727,30 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- Ldr(target, GlobalObjectMemOperand());
- Ldr(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- Ldr(target, ContextMemOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target, Register function,
- int native_context_index) {
- DCHECK(!AreAliased(target, function));
- GetBuiltinFunction(function, native_context_index);
- // Load the code entry point from the builtins object.
- Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
-}
-
-
void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Get the builtin entry in x2 and setup the function object in x1.
- GetBuiltinEntry(x2, x1, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(x2));
- Call(x2);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(x2);
- }
-}
-
-
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Mov(x0, num_arguments);
- JumpToExternalReference(ext);
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, x1);
+ InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
}
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ Mov(x0, function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -2153,152 +2147,6 @@ void MacroAssembler::ClampDoubleToUint8(Register output,
}
-void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Register scratch5) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in a tight loop.
- DCHECK(!AreAliased(dst, src,
- scratch1, scratch2, scratch3, scratch4, scratch5));
- DCHECK(count >= 2);
-
- const Register& remaining = scratch3;
- Mov(remaining, count / 2);
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- Sub(dst_untagged, dst, kHeapObjectTag);
- Sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields in pairs.
- Label loop;
- Bind(&loop);
- Ldp(scratch4, scratch5,
- MemOperand(src_untagged, kXRegSize* 2, PostIndex));
- Stp(scratch4, scratch5,
- MemOperand(dst_untagged, kXRegSize* 2, PostIndex));
- Sub(remaining, remaining, 1);
- Cbnz(remaining, &loop);
-
- // Handle the leftovers.
- if (count & 1) {
- Ldr(scratch4, MemOperand(src_untagged));
- Str(scratch4, MemOperand(dst_untagged));
- }
-}
-
-
-void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- sub(dst_untagged, dst, kHeapObjectTag);
- sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields in pairs.
- for (unsigned i = 0; i < count / 2; i++) {
- Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
- Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
- }
-
- // Handle the leftovers.
- if (count & 1) {
- Ldr(scratch3, MemOperand(src_untagged));
- Str(scratch3, MemOperand(dst_untagged));
- }
-}
-
-
-void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
- Register src,
- unsigned count,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
-
- const Register& dst_untagged = scratch1;
- const Register& src_untagged = scratch2;
- Sub(dst_untagged, dst, kHeapObjectTag);
- Sub(src_untagged, src, kHeapObjectTag);
-
- // Copy fields one by one.
- for (unsigned i = 0; i < count; i++) {
- Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
- Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
- }
-}
-
-
-void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
- unsigned count) {
- // One of two methods is used:
- //
- // For high 'count' values where many scratch registers are available:
- // Untag src and dst into scratch registers.
- // Copy src->dst in a tight loop.
- //
- // For low 'count' values or where few scratch registers are available:
- // Untag src and dst into scratch registers.
- // Copy src->dst in an unrolled loop.
- //
- // In both cases, fields are copied in pairs if possible, and left-overs are
- // handled separately.
- DCHECK(!AreAliased(dst, src));
- DCHECK(!temps.IncludesAliasOf(dst));
- DCHECK(!temps.IncludesAliasOf(src));
- DCHECK(!temps.IncludesAliasOf(xzr));
-
- if (emit_debug_code()) {
- Cmp(dst, src);
- Check(ne, kTheSourceAndDestinationAreTheSame);
- }
-
- // The value of 'count' at which a loop will be generated (if there are
- // enough scratch registers).
- static const unsigned kLoopThreshold = 8;
-
- UseScratchRegisterScope masm_temps(this);
- if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
- CopyFieldsLoopPairsHelper(dst, src, count,
- Register(temps.PopLowestIndex()),
- Register(temps.PopLowestIndex()),
- Register(temps.PopLowestIndex()),
- masm_temps.AcquireX(),
- masm_temps.AcquireX());
- } else if (temps.Count() >= 2) {
- CopyFieldsUnrolledPairsHelper(dst, src, count,
- Register(temps.PopLowestIndex()),
- Register(temps.PopLowestIndex()),
- masm_temps.AcquireX(),
- masm_temps.AcquireX());
- } else if (temps.Count() == 1) {
- CopyFieldsUnrolledHelper(dst, src, count,
- Register(temps.PopLowestIndex()),
- masm_temps.AcquireX(),
- masm_temps.AcquireX());
- } else {
- UNREACHABLE();
- }
-}
-
-
void MacroAssembler::CopyBytes(Register dst,
Register src,
Register length,
@@ -2354,38 +2202,35 @@ void MacroAssembler::CopyBytes(Register dst,
}
-void MacroAssembler::FillFields(Register dst,
- Register field_count,
- Register filler) {
- DCHECK(!dst.Is(csp));
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
+ Register filler) {
+ DCHECK(!current_address.Is(csp));
UseScratchRegisterScope temps(this);
- Register field_ptr = temps.AcquireX();
- Register counter = temps.AcquireX();
+ Register distance_in_words = temps.AcquireX();
Label done;
- // Decrement count. If the result < zero, count was zero, and there's nothing
- // to do. If count was one, flags are set to fail the gt condition at the end
- // of the pairs loop.
- Subs(counter, field_count, 1);
- B(lt, &done);
+ // Calculate the distance. If it's <= zero then there's nothing to do.
+ Subs(distance_in_words, end_address, current_address);
+ B(le, &done);
// There's at least one field to fill, so do this unconditionally.
- Str(filler, MemOperand(dst, kPointerSize, PostIndex));
+ Str(filler, MemOperand(current_address));
- // If the bottom bit of counter is set, there are an even number of fields to
- // fill, so pull the start pointer back by one field, allowing the pairs loop
- // to overwrite the field that was stored above.
- And(field_ptr, counter, 1);
- Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2));
+  // If distance_in_words is an odd number of words, advance current_address
+  // by one word; otherwise the pairs loop below would overwrite the field
+  // that was stored above.
+ And(distance_in_words, distance_in_words, kPointerSize);
+ Add(current_address, current_address, distance_in_words);
// Store filler to memory in pairs.
- Label entry, loop;
+ Label loop, entry;
B(&entry);
Bind(&loop);
- Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex));
- Subs(counter, counter, 2);
+ Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
Bind(&entry);
- B(gt, &loop);
+ Cmp(current_address, end_address);
+ B(lo, &loop);
Bind(&done);
}
@@ -2481,8 +2326,6 @@ void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
@@ -2502,7 +2345,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(x0));
DCHECK(expected.is_immediate() || expected.reg().is(x2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -2537,11 +2379,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// If the argument counts may mismatch, generate a call to the argument
// adaptor.
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- Mov(x3, Operand(code_constant));
- Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -2550,7 +2387,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
call_wrapper.AfterCall();
if (!*definitely_mismatches) {
// If the arg counts don't match, no extra code is emitted by
- // MAsm::InvokeCode and we can just fall through.
+ // MAsm::InvokeFunctionCode and we can just fall through.
B(done);
}
} else {
@@ -2561,24 +2398,80 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ Mov(x4, Operand(step_in_enabled));
+ ldrb(x4, MemOperand(x4));
+ CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(x1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
- Label done;
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+ }
+
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- &definitely_mismatches, call_wrapper);
+ InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
+ call_wrapper);
// If we are certain that actual != expected, then we know InvokePrologue will
// have handled the call through the argument adaptor mechanism.
// The called function expects the call kind in x5.
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = x4;
+ Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -2596,6 +2489,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2607,7 +2501,6 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(x1));
Register expected_reg = x2;
- Register code_reg = x3;
Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
// The number of arguments is stored as an int32_t, and -1 is a marker
@@ -2618,11 +2511,10 @@ void MacroAssembler::InvokeFunction(Register function,
Ldrsw(expected_reg,
FieldMemOperand(expected_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- Ldr(code_reg,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, new_target, expected, actual, flag,
+ call_wrapper);
}
@@ -2638,16 +2530,10 @@ void MacroAssembler::InvokeFunction(Register function,
// (See FullCodeGenerator::Generate().)
DCHECK(function.Is(x1));
- Register code_reg = x3;
-
// Set up the context.
Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
}
@@ -2760,14 +2646,13 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
void MacroAssembler::StubPrologue() {
- DCHECK(StackPointer().Is(jssp));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
__ Mov(temp, Smi::FromInt(StackFrame::STUB));
// Compiled stubs don't age, and so they don't need the predictable code
// ageing sequence.
__ Push(lr, fp, cp, temp);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ __ Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}
@@ -3000,12 +2885,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- Ldr(dst, GlobalObjectMemOperand());
- Ldr(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
@@ -3084,23 +2963,24 @@ void MacroAssembler::Allocate(int object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
+ // Set up allocation top address and allocation limit registers.
Register top_address = scratch1;
- Register allocation_limit = scratch2;
+ Register alloc_limit = scratch2;
+ Register result_end = scratch3;
Mov(top_address, Operand(heap_allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and the allocation limit.
- Ldp(result, allocation_limit, MemOperand(top_address));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- Ldr(scratch3, MemOperand(top_address));
- Cmp(result, scratch3);
+ Ldr(alloc_limit, MemOperand(top_address));
+ Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load the allocation limit. 'result' already contains the allocation top.
- Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ Ldr(alloc_limit, MemOperand(top_address, limit - top));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -3108,10 +2988,10 @@ void MacroAssembler::Allocate(int object_size,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
// Calculate new top and bail out if new space is exhausted.
- Adds(scratch3, result, object_size);
- Ccmp(scratch3, allocation_limit, CFlag, cc);
+ Adds(result_end, result, object_size);
+ Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
- Str(scratch3, MemOperand(top_address));
+ Str(result_end, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3120,30 +3000,29 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
// We apply salt to the original zap value to easily spot the values.
Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
- Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
- Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
+ Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
}
B(gc_required);
return;
}
UseScratchRegisterScope temps(this);
- Register scratch3 = temps.AcquireX();
+ Register scratch2 = temps.AcquireX();
- DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
- DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
- scratch1.Is64Bits() && scratch2.Is64Bits());
+ // |object_size| and |result_end| may overlap, other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, scratch2));
+ DCHECK(!AreAliased(result_end, result, scratch, scratch2));
+ DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
+ result_end.Is64Bits());
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDP.
@@ -3155,23 +3034,23 @@ void MacroAssembler::Allocate(Register object_size,
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register top_address = scratch1;
- Register allocation_limit = scratch2;
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
+ Register alloc_limit = scratch2;
Mov(top_address, heap_allocation_top);
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and the allocation limit.
- Ldp(result, allocation_limit, MemOperand(top_address));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ Ldp(result, alloc_limit, MemOperand(top_address));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- Ldr(scratch3, MemOperand(top_address));
- Cmp(result, scratch3);
+ Ldr(alloc_limit, MemOperand(top_address));
+ Cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load the allocation limit. 'result' already contains the allocation top.
- Ldr(allocation_limit, MemOperand(top_address, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ Ldr(alloc_limit, MemOperand(top_address, limit - top));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -3180,19 +3059,19 @@ void MacroAssembler::Allocate(Register object_size,
// Calculate new top and bail out if new space is exhausted
if ((flags & SIZE_IN_WORDS) != 0) {
- Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
+ Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
- Adds(scratch3, result, object_size);
+ Adds(result_end, result, object_size);
}
if (emit_debug_code()) {
- Tst(scratch3, kObjectAlignmentMask);
+ Tst(result_end, kObjectAlignmentMask);
Check(eq, kUnalignedAllocationInNewSpace);
}
- Ccmp(scratch3, allocation_limit, CFlag, cc);
+ Ccmp(result_end, alloc_limit, CFlag, cc);
B(hi, gc_required);
- Str(scratch3, MemOperand(top_address));
+ Str(result_end, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3390,6 +3269,28 @@ void MacroAssembler::JumpIfObjectType(Register object,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ Str(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
void MacroAssembler::JumpIfNotObjectType(Register object,
Register map,
Register type_reg,
@@ -3779,11 +3680,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- Ldr(scratch1, FieldMemOperand(scratch1, offset));
- Ldr(scratch1,
- FieldMemOperand(scratch1, JSGlobalObject::kNativeContextOffset));
+ Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -4314,8 +4211,8 @@ void MacroAssembler::HasColor(Register object,
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
// Check for the color.
if (first_bit == 0) {
@@ -4343,8 +4240,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
}
@@ -4380,21 +4277,18 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register shift_scratch,
- Register load_scratch,
- Register length_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register shift_scratch, Register load_scratch,
+ Register length_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@@ -4405,71 +4299,7 @@ void MacroAssembler::EnsureNotWhite(
// If the value is black or grey we don't need to do anything.
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- Label done;
- Tbnz(load_scratch, 0, &done);
-
- // Value is white. We check whether it is data that doesn't need scanning.
- Register map = load_scratch; // Holds map while checking type.
- Label is_data_object;
-
- // Check for heap-number.
- Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- Mov(length_scratch, HeapNumber::kSize);
- JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- TestAndBranchIfAnySet(instance_type,
- kIsIndirectStringMask | kIsNotStringMask,
- value_is_white_and_not_data);
-
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- Mov(length_scratch, ExternalString::kSize);
- TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
- String::kLengthOffset));
- Tst(instance_type, kStringEncodingMask);
- Cset(load_scratch, eq);
- Lsl(length_scratch, length_scratch, load_scratch);
- Add(length_scratch,
- length_scratch,
- SeqString::kHeaderSize + kObjectAlignmentMask);
- Bic(length_scratch, length_scratch, kObjectAlignmentMask);
-
- Bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- Register mask = shift_scratch;
- Mov(load_scratch, 1);
- Lsl(mask, load_scratch, shift_scratch);
-
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Orr(load_scratch, load_scratch, mask);
- Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
- Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Add(load_scratch, load_scratch, length_scratch);
- Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- Bind(&done);
+ Tbz(load_scratch, 0, value_is_white);
}
@@ -4615,32 +4445,25 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register scratch1,
Register scratch2,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- Ldr(scratch1, GlobalObjectMemOperand());
- Ldr(scratch1,
- FieldMemOperand(scratch1, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
- int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
- Ldr(scratch2, FieldMemOperand(scratch1, offset));
+ Ldr(scratch1, NativeContextMemOperand());
+ Ldr(scratch2,
+ ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
// Use the transitioned cached map.
- offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
- Ldr(map_in_out, FieldMemOperand(scratch1, offset));
+ Ldr(map_in_out,
+ ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- Ldr(function, GlobalObjectMemOperand());
- // Load the native context from the global or builtins object.
- Ldr(function,
- FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- Ldr(function, ContextMemOperand(function, index));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ Ldr(dst, NativeContextMemOperand());
+ Ldr(dst, ContextMemOperand(dst, index));
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 2747397993..78997d6d02 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -45,6 +45,7 @@ namespace internal {
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
+#define kJavaScriptCallNewTargetRegister x3
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@@ -145,7 +146,8 @@ enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
class MacroAssembler : public Assembler {
public:
- MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
+ MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
+ CodeObjectRequired create_code_object);
inline Handle<Object> CodeObject();
@@ -722,10 +724,10 @@ class MacroAssembler : public Assembler {
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
- inline void Claim(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
inline void Claim(const Register& count,
uint64_t unit_size = kXRegSize);
- inline void Drop(uint64_t count, uint64_t unit_size = kXRegSize);
+ inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count,
uint64_t unit_size = kXRegSize);
@@ -893,6 +895,7 @@ class MacroAssembler : public Assembler {
// This is required for compatibility with architecture independant code.
// Remove if not needed.
inline void Move(Register dst, Register src) { Mov(dst, src); }
+ inline void Move(Register dst, Smi* src) { Mov(dst, src); }
void LoadInstanceDescriptors(Register map,
Register descriptors);
@@ -963,6 +966,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -970,6 +977,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object);
+ // Abort execution if argument is not a positive or zero integer, enabled via
+ // --debug-code.
+ void AssertPositiveOrZero(Register value);
+
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@@ -1027,22 +1038,11 @@ class MacroAssembler : public Assembler {
// ---- Object Utilities ----
- // Copy fields from 'src' to 'dst', where both are tagged objects.
- // The 'temps' list is a list of X registers which can be used for scratch
- // values. The temps list must include at least one register.
- //
- // Currently, CopyFields cannot make use of more than three registers from
- // the 'temps' list.
- //
- // CopyFields expects to be able to take at least two registers from
- // MacroAssembler::TmpList().
- void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
-
- // Starting at address in dst, initialize field_count 64-bit fields with
- // 64-bit value in register filler. Register dst is corrupted.
- void FillFields(Register dst,
- Register field_count,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// Copies a number of bytes from src to dst. All passed registers are
// clobbered. On exit src and dst will point to the place just after where the
@@ -1094,20 +1094,25 @@ class MacroAssembler : public Assembler {
int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int ActivationFrameAlignment();
@@ -1127,12 +1132,8 @@ class MacroAssembler : public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
+
+ // Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
@@ -1141,14 +1142,6 @@ class MacroAssembler : public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in the function register.
- void GetBuiltinEntry(Register target, Register function,
- int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -1179,20 +1172,21 @@ class MacroAssembler : public Assembler {
// 'call_kind' must be x5.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1297,12 +1291,8 @@ class MacroAssembler : public Assembler {
// If the new space is exhausted control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
// If flags includes TAG_OBJECT, the result is tagged as as a heap object.
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void Allocate(int object_size,
Register result,
@@ -1350,6 +1340,12 @@ class MacroAssembler : public Assembler {
CPURegister heap_number_map = NoReg,
MutableMode mode = IMMUTABLE);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -1485,20 +1481,6 @@ class MacroAssembler : public Assembler {
// Fall-through if the object was a string and jump on fail otherwise.
inline void IsObjectNameType(Register object, Register type, Label* fail);
- inline void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // Check the instance type in the given map to see if it corresponds to a
- // JS object type. Jump to the fail label if this is not the case and fall
- // through otherwise. However if fail label is NULL, no branch will be
- // performed and the flag will be updated. You can test the flag for "le"
- // condition to test if it is a valid JS object type.
- inline void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// The object and type arguments can be the same register; in that case it
@@ -1688,8 +1670,15 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Emit code for a truncating division by a constant. The dividend register is
// unchanged. Dividend and result must be different.
@@ -1825,23 +1814,10 @@ class MacroAssembler : public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4, Label* value_is_white);
// Helper for finding the mark bits for an address.
// Note that the behaviour slightly differs from other architectures.
@@ -1911,7 +1887,7 @@ class MacroAssembler : public Assembler {
Register scratch2,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
// map can be the same, function is then overwritten.
@@ -2013,19 +1989,6 @@ class MacroAssembler : public Assembler {
void PopPostamble(int count, int size) { PopPostamble(count * size); }
private:
- // Helpers for CopyFields.
- // These each implement CopyFields in a different way.
- void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3, Register scratch4,
- Register scratch5);
- void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3, Register scratch4);
- void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
- Register scratch1, Register scratch2,
- Register scratch3);
-
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
// (Push|Pop)CPURegList to bundle together run-time assertions for a large
@@ -2229,8 +2192,8 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectMemOperand() {
- return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 4e6a9d91e1..8f72669f49 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -462,13 +462,11 @@ void Simulator::RunFrom(Instruction* start) {
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
- : external_function_(external_function),
- type_(type),
- next_(NULL) {
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
+ : external_function_(external_function), type_(type), next_(NULL) {
redirect_call_.SetInstructionBits(
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
// TODO(all): Simulator flush I cache
isolate->set_simulator_redirection(this);
@@ -483,9 +481,8 @@ class Redirection {
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -493,7 +490,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromHltInstruction(Instruction* redirect_call) {
@@ -748,9 +745,10 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_redirect_call();
}
@@ -2761,7 +2759,7 @@ double Simulator::FPRoundInt(double value, FPRounding round_mode) {
// If the error is greater than 0.5, or is equal to 0.5 and the integer
// result is odd, round up.
} else if ((error > 0.5) ||
- ((error == 0.5) && (fmod(int_result, 2) != 0))) {
+ ((error == 0.5) && (modulo(int_result, 2) != 0))) {
int_result++;
}
break;
@@ -3107,7 +3105,8 @@ T Simulator::FPSqrt(T op) {
} else if (op < 0.0) {
return FPDefaultNaN<T>();
} else {
- return fast_sqrt(op);
+ lazily_initialize_fast_sqrt(isolate_);
+ return fast_sqrt(op, isolate_);
}
}
@@ -3510,7 +3509,7 @@ void Simulator::Debug() {
reinterpret_cast<uint64_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 3d7c15cfd0..724c767ab7 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -24,7 +24,7 @@ namespace internal {
// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*arm64_regexp_matcher)(String* input,
@@ -42,24 +42,29 @@ typedef int (*arm64_regexp_matcher)(String* input,
// should act as a function matching the type arm64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<arm64_regexp_matcher>(entry)( \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ NULL, p8))
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
- uintptr_t c_limit) {
+ uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
- static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static void UnregisterCTryCatch() { }
+ static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
#else // !defined(USE_SIMULATOR)
@@ -272,7 +277,8 @@ class Simulator : public DecoderVisitor {
void ResetState();
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type);
void DoRuntimeCall(Instruction* instr);
@@ -871,15 +877,14 @@ class Simulator : public DecoderVisitor {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
- FUNCTION_ADDR(entry), \
- p0, p1, p2, p3, p4))
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->CallJS( \
+ FUNCTION_ADDR(entry), p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>( \
- Simulator::current(Isolate::Current()) \
- ->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ static_cast<int>(Simulator::current(isolate)->CallRegExp( \
+ entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -893,13 +898,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 8571f33176..4aac08d541 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -51,6 +51,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ostreams.h"
+#include "src/parsing/token.h"
#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -59,7 +60,6 @@
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serialize.h"
-#include "src/token.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32-inl.h" // NOLINT
@@ -173,7 +173,8 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
// We may use the assembler without an isolate.
serializer_enabled_(isolate && isolate->serializer_enabled()),
constant_pool_available_(false) {
- if (FLAG_mask_constants_with_cookie && isolate != NULL) {
+ DCHECK_NOT_NULL(isolate);
+ if (FLAG_mask_constants_with_cookie) {
jit_cookie_ = isolate->random_number_generator()->NextInt();
}
own_buffer_ = buffer == NULL;
@@ -204,19 +205,6 @@ void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
}
-void AssemblerBase::FlushICacheWithoutIsolate(void* start, size_t size) {
- // Ideally we would just call Isolate::Current() here. However, this flushes
- // out issues because we usually only need the isolate when in the simulator.
- Isolate* isolate;
-#if defined(USE_SIMULATOR)
- isolate = Isolate::Current();
-#else
- isolate = nullptr;
-#endif // USE_SIMULATOR
- FlushICache(isolate, start, size);
-}
-
-
void AssemblerBase::Print() {
OFStream os(stdout);
v8::internal::Disassembler::Decode(isolate(), &os, buffer_, pc_, nullptr);
@@ -520,8 +508,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode) ||
- RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
+ RelocInfo::IsVeneerPool(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@@ -712,8 +699,7 @@ void RelocIterator::next() {
Advance(kIntSize);
}
} else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode) ||
- RelocInfo::IsDebugBreakSlotAtCall(rmode)) {
+ RelocInfo::IsVeneerPool(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -738,7 +724,8 @@ void RelocIterator::next() {
}
-RelocIterator::RelocIterator(Code* code, int mode_mask) {
+RelocIterator::RelocIterator(Code* code, int mode_mask)
+ : rinfo_(code->map()->GetIsolate()) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@@ -763,7 +750,8 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
}
-RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
+RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
+ : rinfo_(desc.origin->isolate()) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
// Relocation info is read backwards.
@@ -807,8 +795,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc 64";
case EMBEDDED_OBJECT:
return "embedded object";
- case CONSTRUCT_CALL:
- return "code target (js construct call)";
case DEBUGGER_STATEMENT:
return "debugger statement";
case CODE_TARGET:
@@ -843,8 +829,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at return";
case DEBUG_BREAK_SLOT_AT_CALL:
return "debug break slot at call";
- case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
- return "debug break slot at construct call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
case GENERATOR_CONTINUATION:
@@ -909,7 +893,6 @@ void RelocInfo::Verify(Isolate* isolate) {
Object::VerifyPointer(target_cell());
break;
case DEBUGGER_STATEMENT:
- case CONSTRUCT_CALL:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@@ -942,7 +925,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_POSITION:
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
- case DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL:
case GENERATOR_CONTINUATION:
case NONE32:
case NONE64:
@@ -959,12 +941,6 @@ void RelocInfo::Verify(Isolate* isolate) {
#endif // VERIFY_HEAP
-int RelocInfo::DebugBreakCallArgumentsCount(intptr_t data) {
- return static_cast<int>(data);
-}
-
-
-// -----------------------------------------------------------------------------
// Implementation of ExternalReference
void ExternalReference::SetUp() {
@@ -1441,14 +1417,6 @@ ExternalReference ExternalReference::debug_after_break_target_address(
}
-ExternalReference
- ExternalReference::debug_restarter_frame_function_pointer_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->debug()->restarter_frame_function_pointer_address());
-}
-
-
ExternalReference ExternalReference::virtual_handler_register(
Isolate* isolate) {
return ExternalReference(isolate->virtual_handler_register_address());
@@ -1467,17 +1435,20 @@ ExternalReference ExternalReference::runtime_function_table_address(
}
-double power_helper(double x, double y) {
+double power_helper(Isolate* isolate, double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1 if exponent is 0.
}
if (y == 0.5) {
+ lazily_initialize_fast_sqrt(isolate);
return (std::isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0); // Convert -0 to +0.
+ : fast_sqrt(x + 0.0, isolate); // Convert -0 to +0.
}
if (y == -0.5) {
- return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
+ lazily_initialize_fast_sqrt(isolate);
+ return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0,
+ isolate); // Convert -0 to +0.
}
return power_double_double(x, y);
}
@@ -1575,9 +1546,9 @@ ExternalReference ExternalReference::mod_two_doubles_operation(
}
-ExternalReference ExternalReference::debug_step_in_fp_address(
+ExternalReference ExternalReference::debug_step_in_enabled_address(
Isolate* isolate) {
- return ExternalReference(isolate->debug()->step_in_fp_addr());
+ return ExternalReference(isolate->debug()->step_in_enabled_address());
}
@@ -1891,11 +1862,10 @@ void Assembler::RecordGeneratorContinuation() {
}
-void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode, int call_argc) {
+void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
- intptr_t data = static_cast<intptr_t>(call_argc);
- RecordRelocInfo(mode, data);
+ RecordRelocInfo(mode);
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 1243adf468..08c6b38541 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -38,8 +38,8 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
-#include "src/token.h"
namespace v8 {
@@ -55,6 +55,9 @@ class StatsCounter;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
+enum class CodeObjectRequired { kNo, kYes };
+
+
class AssemblerBase: public Malloced {
public:
AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
@@ -107,9 +110,6 @@ class AssemblerBase: public Malloced {
static void FlushICache(Isolate* isolate, void* start, size_t size);
- // TODO(all): Help get rid of this one.
- static void FlushICacheWithoutIsolate(void* start, size_t size);
-
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
@@ -233,17 +233,18 @@ class CpuFeatures : public AllStatic {
static void PrintTarget();
static void PrintFeatures();
+ private:
+ friend class ExternalReference;
+ friend class AssemblerBase;
// Flush instruction cache.
static void FlushICache(void* start, size_t size);
- private:
// Platform-dependent implementation.
static void ProbeImpl(bool cross_compile);
static unsigned supported_;
static unsigned cache_line_size_;
static bool initialized_;
- friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -376,7 +377,6 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
- CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
CELL,
@@ -391,7 +391,6 @@ class RelocInfo {
DEBUG_BREAK_SLOT_AT_POSITION,
DEBUG_BREAK_SLOT_AT_RETURN,
DEBUG_BREAK_SLOT_AT_CALL,
- DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL,
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
@@ -428,19 +427,19 @@ class RelocInfo {
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
- RelocInfo() {}
+ explicit RelocInfo(Isolate* isolate) : isolate_(isolate) {
+ DCHECK_NOT_NULL(isolate);
+ }
- RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
- : pc_(pc), rmode_(rmode), data_(data), host_(host) {
+ RelocInfo(Isolate* isolate, byte* pc, Mode rmode, intptr_t data, Code* host)
+ : isolate_(isolate), pc_(pc), rmode_(rmode), data_(data), host_(host) {
+ DCHECK_NOT_NULL(isolate);
}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
- static inline bool IsConstructCall(Mode mode) {
- return mode == CONSTRUCT_CALL;
- }
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
}
@@ -484,8 +483,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
- IsDebugBreakSlotAtCall(mode) ||
- IsDebugBreakSlotAtConstructCall(mode);
+ IsDebugBreakSlotAtCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@@ -496,9 +494,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
- static inline bool IsDebugBreakSlotAtConstructCall(Mode mode) {
- return mode == DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
- }
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUGGER_STATEMENT;
}
@@ -514,6 +509,7 @@ class RelocInfo {
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
+ Isolate* isolate() const { return isolate_; }
byte* pc() const { return pc_; }
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
@@ -536,9 +532,6 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
- static int DebugBreakCallArgumentsCount(intptr_t data);
-
- // Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
@@ -621,9 +614,6 @@ class RelocInfo {
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
- // Patch the code with a call.
- void PatchCodeWithCall(Address target, int guard_bytes);
-
// Check whether this return sequence has been patched
// with a call to the debugger.
INLINE(bool IsPatchedReturnSequence());
@@ -651,12 +641,13 @@ class RelocInfo {
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
static const int kDataMask =
(1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
- static const int kDebugBreakSlotMask =
- 1 << DEBUG_BREAK_SLOT_AT_POSITION | 1 << DEBUG_BREAK_SLOT_AT_RETURN |
- 1 << DEBUG_BREAK_SLOT_AT_CALL | 1 << DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL;
+ static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
+ 1 << DEBUG_BREAK_SLOT_AT_RETURN |
+ 1 << DEBUG_BREAK_SLOT_AT_CALL;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
+ Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
// to be relocated and not the address of the instruction
// referencing the constant pool entry (except when rmode_ ==
@@ -866,7 +857,8 @@ class ExternalReference BASE_EMBEDDED {
static void InitializeMathExpData();
static void TearDownMathExpData();
- typedef void* ExternalReferenceRedirector(void* original, Type type);
+ typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
+ Type type);
ExternalReference() : address_(NULL) {}
@@ -984,8 +976,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
- static ExternalReference debug_restarter_frame_function_pointer_address(
- Isolate* isolate);
static ExternalReference is_profiling_address(Isolate* isolate);
static ExternalReference invoke_function_callback(Isolate* isolate);
@@ -999,7 +989,7 @@ class ExternalReference BASE_EMBEDDED {
Address address() const { return reinterpret_cast<Address>(address_); }
// Used to check if single stepping is enabled in generated code.
- static ExternalReference debug_step_in_fp_address(Isolate* isolate);
+ static ExternalReference debug_step_in_enabled_address(Isolate* isolate);
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
@@ -1043,9 +1033,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
- void* answer = (redirector == NULL) ?
- address :
- (*redirector)(address, type);
+ void* answer =
+ (redirector == NULL) ? address : (*redirector)(isolate, address, type);
return answer;
}
@@ -1134,7 +1123,7 @@ inline int NumberOfBitsSet(uint32_t x) {
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
-double power_helper(double x, double y);
+double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
@@ -1150,8 +1139,11 @@ class CallWrapper {
virtual void BeforeCall(int call_size) const = 0;
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
+ // Return whether call needs to check for debug stepping.
+ virtual bool NeedsDebugStepCheck() const { return false; }
};
+
class NullCallWrapper : public CallWrapper {
public:
NullCallWrapper() { }
@@ -1161,6 +1153,16 @@ class NullCallWrapper : public CallWrapper {
};
+class CheckDebugStepCallWrapper : public CallWrapper {
+ public:
+ CheckDebugStepCallWrapper() {}
+ virtual ~CheckDebugStepCallWrapper() {}
+ virtual void BeforeCall(int call_size) const {}
+ virtual void AfterCall() const {}
+ virtual bool NeedsDebugStepCheck() const { return true; }
+};
+
+
// -----------------------------------------------------------------------------
// Constant pool support
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
new file mode 100644
index 0000000000..7cd947998d
--- /dev/null
+++ b/deps/v8/src/ast/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+adamk@chromium.org
+bmeurer@chromium.org
+littledan@chromium.org
+mstarzinger@chromium.org
+rossberg@chromium.org
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
new file mode 100644
index 0000000000..49cc7f6ff4
--- /dev/null
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -0,0 +1,409 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-rewriter.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Implementation of AstExpressionRewriter
+// The AST is traversed but no actual rewriting takes place, unless the
+// Visit methods are overriden in subclasses.
+
+#define REWRITE_THIS(node) \
+ do { \
+ if (!RewriteExpression(node)) return; \
+ } while (false)
+#define NOTHING() DCHECK_NULL(replacement_)
+
+
+void AstExpressionRewriter::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
+ for (int i = 0; i < declarations->length(); i++) {
+ AST_REWRITE_LIST_ELEMENT(Declaration, declarations, i);
+ }
+}
+
+
+void AstExpressionRewriter::VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ AST_REWRITE_LIST_ELEMENT(Statement, statements, i);
+ // Not stopping when a jump statement is found.
+ }
+}
+
+
+void AstExpressionRewriter::VisitExpressions(
+ ZoneList<Expression*>* expressions) {
+ for (int i = 0; i < expressions->length(); i++) {
+ // The variable statement visiting code may pass NULL expressions
+ // to this code. Maybe this should be handled by introducing an
+ // undefined expression or literal? Revisit this code if this
+ // changes
+ if (expressions->at(i) != nullptr) {
+ AST_REWRITE_LIST_ELEMENT(Expression, expressions, i);
+ }
+ }
+}
+
+
+void AstExpressionRewriter::VisitVariableDeclaration(
+ VariableDeclaration* node) {
+ // Not visiting `proxy_`.
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitFunctionDeclaration(
+ FunctionDeclaration* node) {
+ // Not visiting `proxy_`.
+ AST_REWRITE_PROPERTY(FunctionLiteral, node, fun);
+}
+
+
+void AstExpressionRewriter::VisitImportDeclaration(ImportDeclaration* node) {
+ // Not visiting `proxy_`.
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitExportDeclaration(ExportDeclaration* node) {
+ // Not visiting `proxy_`.
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitBlock(Block* node) {
+ VisitStatements(node->statements());
+}
+
+
+void AstExpressionRewriter::VisitExpressionStatement(
+ ExpressionStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitEmptyStatement(EmptyStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ AST_REWRITE_PROPERTY(Statement, node, statement);
+}
+
+
+void AstExpressionRewriter::VisitIfStatement(IfStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, condition);
+ AST_REWRITE_PROPERTY(Statement, node, then_statement);
+ AST_REWRITE_PROPERTY(Statement, node, else_statement);
+}
+
+
+void AstExpressionRewriter::VisitContinueStatement(ContinueStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitBreakStatement(BreakStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitReturnStatement(ReturnStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+ AST_REWRITE_PROPERTY(Statement, node, statement);
+}
+
+
+void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, tag);
+ ZoneList<CaseClause*>* clauses = node->cases();
+ for (int i = 0; i < clauses->length(); i++) {
+ AST_REWRITE_LIST_ELEMENT(CaseClause, clauses, i);
+ }
+}
+
+
+void AstExpressionRewriter::VisitDoWhileStatement(DoWhileStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, cond);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitWhileStatement(WhileStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, cond);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitForStatement(ForStatement* node) {
+ if (node->init() != nullptr) {
+ AST_REWRITE_PROPERTY(Statement, node, init);
+ }
+ if (node->cond() != nullptr) {
+ AST_REWRITE_PROPERTY(Expression, node, cond);
+ }
+ if (node->next() != nullptr) {
+ AST_REWRITE_PROPERTY(Statement, node, next);
+ }
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitForInStatement(ForInStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, each);
+ AST_REWRITE_PROPERTY(Expression, node, subject);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
+ AST_REWRITE_PROPERTY(Expression, node, each);
+ AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
+ AST_REWRITE_PROPERTY(Expression, node, next_result);
+ AST_REWRITE_PROPERTY(Expression, node, result_done);
+ AST_REWRITE_PROPERTY(Expression, node, assign_each);
+ AST_REWRITE_PROPERTY(Expression, node, subject);
+ AST_REWRITE_PROPERTY(Statement, node, body);
+}
+
+
+void AstExpressionRewriter::VisitTryCatchStatement(TryCatchStatement* node) {
+ AST_REWRITE_PROPERTY(Block, node, try_block);
+ // Not visiting the variable.
+ AST_REWRITE_PROPERTY(Block, node, catch_block);
+}
+
+
+void AstExpressionRewriter::VisitTryFinallyStatement(
+ TryFinallyStatement* node) {
+ AST_REWRITE_PROPERTY(Block, node, try_block);
+ AST_REWRITE_PROPERTY(Block, node, finally_block);
+}
+
+
+void AstExpressionRewriter::VisitDebuggerStatement(DebuggerStatement* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitFunctionLiteral(FunctionLiteral* node) {
+ REWRITE_THIS(node);
+ VisitDeclarations(node->scope()->declarations());
+ ZoneList<Statement*>* body = node->body();
+ if (body != nullptr) VisitStatements(body);
+}
+
+
+void AstExpressionRewriter::VisitClassLiteral(ClassLiteral* node) {
+ REWRITE_THIS(node);
+ // Not visiting `class_variable_proxy_`.
+ if (node->extends() != nullptr) {
+ AST_REWRITE_PROPERTY(Expression, node, extends);
+ }
+ AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
+ ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
+ for (int i = 0; i < properties->length(); i++) {
+ VisitObjectLiteralProperty(properties->at(i));
+ }
+}
+
+
+void AstExpressionRewriter::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitConditional(Conditional* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, condition);
+ AST_REWRITE_PROPERTY(Expression, node, then_expression);
+ AST_REWRITE_PROPERTY(Expression, node, else_expression);
+}
+
+
+void AstExpressionRewriter::VisitVariableProxy(VariableProxy* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitLiteral(Literal* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitRegExpLiteral(RegExpLiteral* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitObjectLiteral(ObjectLiteral* node) {
+ REWRITE_THIS(node);
+ ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
+ for (int i = 0; i < properties->length(); i++) {
+ VisitObjectLiteralProperty(properties->at(i));
+ }
+}
+
+
+void AstExpressionRewriter::VisitObjectLiteralProperty(
+ ObjectLiteralProperty* property) {
+ if (property == nullptr) return;
+ AST_REWRITE_PROPERTY(Expression, property, key);
+ AST_REWRITE_PROPERTY(Expression, property, value);
+}
+
+
+void AstExpressionRewriter::VisitArrayLiteral(ArrayLiteral* node) {
+ REWRITE_THIS(node);
+ VisitExpressions(node->values());
+}
+
+
+void AstExpressionRewriter::VisitAssignment(Assignment* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, target);
+ AST_REWRITE_PROPERTY(Expression, node, value);
+}
+
+
+void AstExpressionRewriter::VisitYield(Yield* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, generator_object);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitThrow(Throw* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, exception);
+}
+
+
+void AstExpressionRewriter::VisitProperty(Property* node) {
+ REWRITE_THIS(node);
+ if (node == nullptr) return;
+ AST_REWRITE_PROPERTY(Expression, node, obj);
+ AST_REWRITE_PROPERTY(Expression, node, key);
+}
+
+
+void AstExpressionRewriter::VisitCall(Call* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+ VisitExpressions(node->arguments());
+}
+
+
+void AstExpressionRewriter::VisitCallNew(CallNew* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+ VisitExpressions(node->arguments());
+}
+
+
+void AstExpressionRewriter::VisitCallRuntime(CallRuntime* node) {
+ REWRITE_THIS(node);
+ VisitExpressions(node->arguments());
+}
+
+
+void AstExpressionRewriter::VisitUnaryOperation(UnaryOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitCountOperation(CountOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitBinaryOperation(BinaryOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, left);
+ AST_REWRITE_PROPERTY(Expression, node, right);
+}
+
+
+void AstExpressionRewriter::VisitCompareOperation(CompareOperation* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, left);
+ AST_REWRITE_PROPERTY(Expression, node, right);
+}
+
+
+void AstExpressionRewriter::VisitSpread(Spread* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+void AstExpressionRewriter::VisitThisFunction(ThisFunction* node) {
+ REWRITE_THIS(node);
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitSuperPropertyReference(
+ SuperPropertyReference* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
+ AST_REWRITE_PROPERTY(Expression, node, home_object);
+}
+
+
+void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
+ AST_REWRITE_PROPERTY(VariableProxy, node, new_target_var);
+ AST_REWRITE_PROPERTY(VariableProxy, node, this_function_var);
+}
+
+
+void AstExpressionRewriter::VisitCaseClause(CaseClause* node) {
+ if (!node->is_default()) {
+ AST_REWRITE_PROPERTY(Expression, node, label);
+ }
+ VisitStatements(node->statements());
+}
+
+
+void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
+ NOTHING();
+}
+
+
+void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Block, node, block);
+ AST_REWRITE_PROPERTY(VariableProxy, node, result);
+}
+
+
+void AstExpressionRewriter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ REWRITE_THIS(node);
+ AST_REWRITE_PROPERTY(Expression, node, expression);
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
new file mode 100644
index 0000000000..916842ab20
--- /dev/null
+++ b/deps/v8/src/ast/ast-expression-rewriter.h
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_EXPRESSION_REWRITER_H_
+#define V8_AST_AST_EXPRESSION_REWRITER_H_
+
+#include "src/allocation.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/effects.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// A rewriting Visitor over a CompilationInfo's AST that invokes
+// VisitExpression on each expression node.
+
+class AstExpressionRewriter : public AstVisitor {
+ public:
+ explicit AstExpressionRewriter(Isolate* isolate) : AstVisitor() {
+ InitializeAstRewriter(isolate);
+ }
+ explicit AstExpressionRewriter(uintptr_t stack_limit) : AstVisitor() {
+ InitializeAstRewriter(stack_limit);
+ }
+ ~AstExpressionRewriter() override {}
+
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statements) override;
+ void VisitExpressions(ZoneList<Expression*>* expressions) override;
+
+ virtual void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
+
+ protected:
+ virtual bool RewriteExpression(Expression* expr) = 0;
+
+ private:
+ DEFINE_AST_REWRITER_SUBCLASS_MEMBERS();
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(AstExpressionRewriter);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_EXPRESSION_REWRITER_H_
diff --git a/deps/v8/src/ast-expression-visitor.cc b/deps/v8/src/ast/ast-expression-visitor.cc
index e38b444699..6b2550c541 100644
--- a/deps/v8/src/ast-expression-visitor.cc
+++ b/deps/v8/src/ast/ast-expression-visitor.cc
@@ -4,11 +4,11 @@
#include "src/v8.h"
-#include "src/ast-expression-visitor.h"
+#include "src/ast/ast-expression-visitor.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -171,6 +171,11 @@ void AstExpressionVisitor::VisitForInStatement(ForInStatement* stmt) {
void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
RECURSE(Visit(stmt->iterable()));
+ RECURSE(Visit(stmt->each()));
+ RECURSE(Visit(stmt->assign_iterator()));
+ RECURSE(Visit(stmt->next_result()));
+ RECURSE(Visit(stmt->result_done()));
+ RECURSE(Visit(stmt->assign_each()));
RECURSE(Visit(stmt->body()));
}
@@ -209,9 +214,10 @@ void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
void AstExpressionVisitor::VisitConditional(Conditional* expr) {
- RECURSE(Visit(expr->condition()));
- RECURSE(Visit(expr->then_expression()));
- RECURSE(Visit(expr->else_expression()));
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->condition()));
+ RECURSE_EXPRESSION(Visit(expr->then_expression()));
+ RECURSE_EXPRESSION(Visit(expr->else_expression()));
}
@@ -393,5 +399,12 @@ void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
}
+void AstExpressionVisitor::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ VisitExpression(expr);
+ RECURSE(Visit(expr->expression()));
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast-expression-visitor.h b/deps/v8/src/ast/ast-expression-visitor.h
index a4bf34d63f..cda624d5b7 100644
--- a/deps/v8/src/ast-expression-visitor.h
+++ b/deps/v8/src/ast/ast-expression-visitor.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_EXPRESSION_VISITOR_H_
-#define V8_AST_EXPRESSION_VISITOR_H_
+#ifndef V8_AST_AST_EXPRESSION_VISITOR_H_
+#define V8_AST_AST_EXPRESSION_VISITOR_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/effects.h"
-#include "src/scopes.h"
#include "src/type-info.h"
#include "src/types.h"
#include "src/zone.h"
@@ -47,4 +47,4 @@ class AstExpressionVisitor : public AstVisitor {
} // namespace internal
} // namespace v8
-#endif // V8_AST_EXPRESSION_VISITOR_H_
+#endif // V8_AST_AST_EXPRESSION_VISITOR_H_
diff --git a/deps/v8/src/ast-literal-reindexer.cc b/deps/v8/src/ast/ast-literal-reindexer.cc
index 5987399f97..fce33e70b8 100644
--- a/deps/v8/src/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast/ast-literal-reindexer.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast-literal-reindexer.h"
+#include "src/ast/ast-literal-reindexer.h"
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -76,6 +76,12 @@ void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
}
+void AstLiteralReindexer::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
void AstLiteralReindexer::VisitImportDeclaration(ImportDeclaration* node) {
VisitVariableProxy(node->proxy());
}
diff --git a/deps/v8/src/ast-literal-reindexer.h b/deps/v8/src/ast/ast-literal-reindexer.h
index 14f64f6ef1..e2a71d3c47 100644
--- a/deps/v8/src/ast-literal-reindexer.h
+++ b/deps/v8/src/ast/ast-literal-reindexer.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_LITERAL_REINDEXER
-#define V8_AST_LITERAL_REINDEXER
+#ifndef V8_AST_AST_LITERAL_REINDEXER
+#define V8_AST_AST_LITERAL_REINDEXER
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -17,7 +17,6 @@ class AstLiteralReindexer final : public AstVisitor {
int count() const { return next_index_; }
void Reindex(Expression* pattern);
- int NextIndex() { return next_index_++; }
private:
#define DEFINE_VISIT(type) void Visit##type(type* node) override;
@@ -42,4 +41,4 @@ class AstLiteralReindexer final : public AstVisitor {
} // namespace internal
} // namespace v8
-#endif // V8_AST_LITERAL_REINDEXER
+#endif // V8_AST_AST_LITERAL_REINDEXER
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 8479191b5e..6c2b696a5d 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -181,7 +181,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
- DisableOptimization(kSuperReference);
+ DisableCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
Visit(node->this_var());
Visit(node->home_object());
@@ -190,7 +190,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
- DisableOptimization(kSuperReference);
+ DisableCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
Visit(node->this_var());
Visit(node->new_target_var());
@@ -348,6 +348,7 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
+
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
VisitReference(node->target());
Visit(node->value());
@@ -373,7 +374,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
- DisableOptimization(kSpread);
+ DisableCrankshaft(kSpread);
Visit(node->expression());
}
@@ -556,6 +557,14 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
}
+void AstNumberingVisitor::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ IncrementNodeCount();
+ node->set_base_id(ReserveIdRange(RewritableAssignmentExpression::num_ids()));
+ Visit(node->expression());
+}
+
+
bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
@@ -571,11 +580,17 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DisableOptimization(kFunctionWithIllegalRedeclaration);
return Finish(node);
}
+ if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
DisableCrankshaft(kContextAllocatedArguments);
}
+ int rest_index;
+ if (scope->rest_parameter(&rest_index)) {
+ DisableCrankshaft(kRestParameter);
+ }
+
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
diff --git a/deps/v8/src/ast-numbering.h b/deps/v8/src/ast/ast-numbering.h
index 57c750cf64..0ac1ef2134 100644
--- a/deps/v8/src/ast-numbering.h
+++ b/deps/v8/src/ast/ast-numbering.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_NUMBERING_H_
-#define V8_AST_NUMBERING_H_
+#ifndef V8_AST_AST_NUMBERING_H_
+#define V8_AST_AST_NUMBERING_H_
namespace v8 {
namespace internal {
@@ -22,4 +22,4 @@ bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
} // namespace internal
} // namespace v8
-#endif // V8_AST_NUMBERING_H_
+#endif // V8_AST_AST_NUMBERING_H_
diff --git a/deps/v8/src/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 8a4a4daf0c..2e17fbcfaf 100644
--- a/deps/v8/src/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
#include "src/api.h"
#include "src/objects.h"
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 645b8b6631..4ae912ea82 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_AST_VALUE_FACTORY_H_
-#define V8_AST_VALUE_FACTORY_H_
+#ifndef V8_AST_AST_VALUE_FACTORY_H_
+#define V8_AST_AST_VALUE_FACTORY_H_
#include "src/api.h"
#include "src/hashmap.h"
@@ -62,7 +62,7 @@ class AstString : public ZoneObject {
};
-class AstRawString : public AstString {
+class AstRawString final : public AstString {
public:
int length() const override {
if (is_one_byte_)
@@ -115,19 +115,17 @@ class AstRawString : public AstString {
};
-class AstConsString : public AstString {
+class AstConsString final : public AstString {
public:
AstConsString(const AstString* left, const AstString* right)
- : left_(left),
- right_(right) {}
+ : length_(left->length() + right->length()), left_(left), right_(right) {}
- int length() const override { return left_->length() + right_->length(); }
+ int length() const override { return length_; }
void Internalize(Isolate* isolate) override;
private:
- friend class AstValueFactory;
-
+ const int length_;
const AstString* left_;
const AstString* right_;
};
@@ -252,12 +250,12 @@ class AstValue : public ZoneObject {
F(dot_generator, ".generator") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
- F(dot_module, ".module") \
F(dot_result, ".result") \
F(dot_switch_tag, ".switch_tag") \
F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
+ F(get_space, "get ") \
F(let, "let") \
F(native, "native") \
F(new_target, ".new.target") \
@@ -265,6 +263,7 @@ class AstValue : public ZoneObject {
F(proto, "__proto__") \
F(prototype, "prototype") \
F(rest_parameter, ".rest_parameter") \
+ F(set_space, "set ") \
F(this, "this") \
F(this_function, ".this_function") \
F(undefined, "undefined") \
@@ -373,4 +372,4 @@ class AstValueFactory {
#undef STRING_CONSTANTS
#undef OTHER_CONSTANTS
-#endif // V8_AST_VALUE_FACTORY_H_
+#endif // V8_AST_AST_VALUE_FACTORY_H_
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast/ast.cc
index b5c6cf57ea..69e7351a7d 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include <cmath> // For isfinite.
+#include "src/ast/scopes.h"
#include "src/builtins.h"
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
#include "src/hashmap.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/property.h"
#include "src/property-details.h"
-#include "src/scopes.h"
#include "src/string-stream.h"
#include "src/type-info.h"
@@ -120,18 +120,16 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
FeedbackVectorSlot* out_slot) {
- if (FLAG_vector_stores) {
- Property* property = expr->AsProperty();
- LhsKind assign_type = Property::GetAssignType(property);
- if ((assign_type == VARIABLE &&
- expr->AsVariableProxy()->var()->IsUnallocated()) ||
- assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
- // TODO(ishell): consider using ICSlotCache for variables here.
- FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
- ? FeedbackVectorSlotKind::KEYED_STORE_IC
- : FeedbackVectorSlotKind::STORE_IC;
- *out_slot = spec->AddSlot(kind);
- }
+ Property* property = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+ if ((assign_type == VARIABLE &&
+ expr->AsVariableProxy()->var()->IsUnallocated()) ||
+ assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
+ // TODO(ishell): consider using ICSlotCache for variables here.
+ FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
+ ? FeedbackVectorSlotKind::KEYED_STORE_IC
+ : FeedbackVectorSlotKind::STORE_IC;
+ *out_slot = spec->AddSlot(kind);
}
}
@@ -139,6 +137,10 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
void ForEachStatement::AssignFeedbackVectorSlots(
Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
+ // TODO(adamk): for-of statements do not make use of this feedback slot.
+ // The each_slot_ should be specific to ForInStatement, and this work moved
+ // there.
+ if (IsForOfStatement()) return;
AssignVectorSlots(each(), spec, &each_slot_);
}
@@ -256,8 +258,6 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
- if (!FLAG_vector_stores) return;
-
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitClassLiteral.
if (NeedsProxySlot()) {
@@ -294,8 +294,6 @@ bool ObjectLiteral::Property::emit_store() {
void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
- if (!FLAG_vector_stores) return;
-
// This logic that computes the number of slots needed for vector store
// ics must mirror FullCodeGenerator::VisitObjectLiteral.
int property_index = 0;
@@ -550,8 +548,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
- if (!FLAG_vector_stores) return;
-
// This logic that computes the number of slots needed for vector store
// ics must mirror FullCodeGenerator::VisitArrayLiteral.
int array_index = 0;
@@ -721,9 +717,6 @@ bool Call::IsUsingCallFeedbackICSlot(Isolate* isolate) const {
if (call_type == POSSIBLY_EVAL_CALL) {
return false;
}
- if (call_type == SUPER_CALL && !FLAG_vector_stores) {
- return false;
- }
return true;
}
@@ -805,334 +798,6 @@ void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
}
-// ----------------------------------------------------------------------------
-// Regular expressions
-
-#define MAKE_ACCEPT(Name) \
- void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) { \
- return visitor->Visit##Name(this, data); \
- }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
-#undef MAKE_ACCEPT
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExpTree::As##Name() { \
- return NULL; \
- } \
- bool RegExpTree::Is##Name() { return false; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-#define MAKE_TYPE_CASE(Name) \
- RegExp##Name* RegExp##Name::As##Name() { \
- return this; \
- } \
- bool RegExp##Name::Is##Name() { return true; }
-FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
-#undef MAKE_TYPE_CASE
-
-
-static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
- Interval result = Interval::Empty();
- for (int i = 0; i < children->length(); i++)
- result = result.Union(children->at(i)->CaptureRegisters());
- return result;
-}
-
-
-Interval RegExpAlternative::CaptureRegisters() {
- return ListCaptureRegisters(nodes());
-}
-
-
-Interval RegExpDisjunction::CaptureRegisters() {
- return ListCaptureRegisters(alternatives());
-}
-
-
-Interval RegExpLookahead::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-Interval RegExpCapture::CaptureRegisters() {
- Interval self(StartRegister(index()), EndRegister(index()));
- return self.Union(body()->CaptureRegisters());
-}
-
-
-Interval RegExpQuantifier::CaptureRegisters() {
- return body()->CaptureRegisters();
-}
-
-
-bool RegExpAssertion::IsAnchoredAtStart() {
- return assertion_type() == RegExpAssertion::START_OF_INPUT;
-}
-
-
-bool RegExpAssertion::IsAnchoredAtEnd() {
- return assertion_type() == RegExpAssertion::END_OF_INPUT;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtStart()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpAlternative::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* nodes = this->nodes();
- for (int i = nodes->length() - 1; i >= 0; i--) {
- RegExpTree* node = nodes->at(i);
- if (node->IsAnchoredAtEnd()) { return true; }
- if (node->max_match() > 0) { return false; }
- }
- return false;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtStart() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtStart())
- return false;
- }
- return true;
-}
-
-
-bool RegExpDisjunction::IsAnchoredAtEnd() {
- ZoneList<RegExpTree*>* alternatives = this->alternatives();
- for (int i = 0; i < alternatives->length(); i++) {
- if (!alternatives->at(i)->IsAnchoredAtEnd())
- return false;
- }
- return true;
-}
-
-
-bool RegExpLookahead::IsAnchoredAtStart() {
- return is_positive() && body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtStart() {
- return body()->IsAnchoredAtStart();
-}
-
-
-bool RegExpCapture::IsAnchoredAtEnd() {
- return body()->IsAnchoredAtEnd();
-}
-
-
-// Convert regular expression trees to a simple sexp representation.
-// This representation should be different from the input grammar
-// in as many cases as possible, to make it more difficult for incorrect
-// parses to look as correct ones which is likely if the input and
-// output formats are alike.
-class RegExpUnparser final : public RegExpVisitor {
- public:
- RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
- void VisitCharacterRange(CharacterRange that);
-#define MAKE_CASE(Name) void* Visit##Name(RegExp##Name*, void* data) override;
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
- private:
- std::ostream& os_;
- Zone* zone_;
-};
-
-
-void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
- os_ << "(|";
- for (int i = 0; i < that->alternatives()->length(); i++) {
- os_ << " ";
- that->alternatives()->at(i)->Accept(this, data);
- }
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
- os_ << "(:";
- for (int i = 0; i < that->nodes()->length(); i++) {
- os_ << " ";
- that->nodes()->at(i)->Accept(this, data);
- }
- os_ << ")";
- return NULL;
-}
-
-
-void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
- os_ << AsUC16(that.from());
- if (!that.IsSingleton()) {
- os_ << "-" << AsUC16(that.to());
- }
-}
-
-
-
-void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
- void* data) {
- if (that->is_negated()) os_ << "^";
- os_ << "[";
- for (int i = 0; i < that->ranges(zone_)->length(); i++) {
- if (i > 0) os_ << " ";
- VisitCharacterRange(that->ranges(zone_)->at(i));
- }
- os_ << "]";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
- switch (that->assertion_type()) {
- case RegExpAssertion::START_OF_INPUT:
- os_ << "@^i";
- break;
- case RegExpAssertion::END_OF_INPUT:
- os_ << "@$i";
- break;
- case RegExpAssertion::START_OF_LINE:
- os_ << "@^l";
- break;
- case RegExpAssertion::END_OF_LINE:
- os_ << "@$l";
- break;
- case RegExpAssertion::BOUNDARY:
- os_ << "@b";
- break;
- case RegExpAssertion::NON_BOUNDARY:
- os_ << "@B";
- break;
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
- os_ << "'";
- Vector<const uc16> chardata = that->data();
- for (int i = 0; i < chardata.length(); i++) {
- os_ << AsUC16(chardata[i]);
- }
- os_ << "'";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
- if (that->elements()->length() == 1) {
- that->elements()->at(0).tree()->Accept(this, data);
- } else {
- os_ << "(!";
- for (int i = 0; i < that->elements()->length(); i++) {
- os_ << " ";
- that->elements()->at(i).tree()->Accept(this, data);
- }
- os_ << ")";
- }
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
- os_ << "(# " << that->min() << " ";
- if (that->max() == RegExpTree::kInfinity) {
- os_ << "- ";
- } else {
- os_ << that->max() << " ";
- }
- os_ << (that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
- that->body()->Accept(this, data);
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
- os_ << "(^ ";
- that->body()->Accept(this, data);
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) {
- os_ << "(-> " << (that->is_positive() ? "+ " : "- ");
- that->body()->Accept(this, data);
- os_ << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
- void* data) {
- os_ << "(<- " << that->index() << ")";
- return NULL;
-}
-
-
-void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
- os_ << '%';
- return NULL;
-}
-
-
-std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
- RegExpUnparser unparser(os, zone);
- Accept(&unparser, NULL);
- return os;
-}
-
-
-RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
- : alternatives_(alternatives) {
- DCHECK(alternatives->length() > 1);
- RegExpTree* first_alternative = alternatives->at(0);
- min_match_ = first_alternative->min_match();
- max_match_ = first_alternative->max_match();
- for (int i = 1; i < alternatives->length(); i++) {
- RegExpTree* alternative = alternatives->at(i);
- min_match_ = Min(min_match_, alternative->min_match());
- max_match_ = Max(max_match_, alternative->max_match());
- }
-}
-
-
-static int IncreaseBy(int previous, int increase) {
- if (RegExpTree::kInfinity - previous < increase) {
- return RegExpTree::kInfinity;
- } else {
- return previous + increase;
- }
-}
-
-RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
- : nodes_(nodes) {
- DCHECK(nodes->length() > 1);
- min_match_ = 0;
- max_match_ = 0;
- for (int i = 0; i < nodes->length(); i++) {
- RegExpTree* node = nodes->at(i);
- int node_min_match = node->min_match();
- min_match_ = IncreaseBy(min_match_, node_min_match);
- int node_max_match = node->max_match();
- max_match_ = IncreaseBy(max_match_, node_max_match);
- }
-}
-
-
CaseClause::CaseClause(Zone* zone, Expression* label,
ZoneList<Statement*>* statements, int pos)
: Expression(zone, pos),
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast/ast.h
index 14f71a6cc2..7f00955a64 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -2,25 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_AST_H_
-#define V8_AST_H_
+#ifndef V8_AST_AST_H_
+#define V8_AST_AST_H_
#include "src/assembler.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/modules.h"
+#include "src/ast/variables.h"
#include "src/bailout-reason.h"
#include "src/base/flags.h"
#include "src/base/smart-pointers.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list.h"
-#include "src/modules.h"
-#include "src/regexp/jsregexp.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
-#include "src/token.h"
#include "src/types.h"
#include "src/utils.h"
-#include "src/variables.h"
namespace v8 {
namespace internal {
@@ -91,7 +90,8 @@ namespace internal {
V(SuperCallReference) \
V(CaseClause) \
V(EmptyParentheses) \
- V(DoExpression)
+ V(DoExpression) \
+ V(RewritableAssignmentExpression)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@@ -110,19 +110,6 @@ class MaterializedLiteral;
class Statement;
class TypeFeedbackOracle;
-class RegExpAlternative;
-class RegExpAssertion;
-class RegExpAtom;
-class RegExpBackReference;
-class RegExpCapture;
-class RegExpCharacterClass;
-class RegExpCompiler;
-class RegExpDisjunction;
-class RegExpEmpty;
-class RegExpLookahead;
-class RegExpQuantifier;
-class RegExpText;
-
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@@ -250,6 +237,7 @@ class Statement : public AstNode {
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
+ virtual void MarkTail() {}
};
@@ -314,6 +302,9 @@ class Expression : public AstNode {
kTest
};
+ // Mark this expression as being in tail position.
+ virtual void MarkTail() {}
+
// True iff the expression is a valid reference expression.
virtual bool IsValidReferenceExpression() const { return false; }
@@ -374,6 +365,14 @@ class Expression : public AstNode {
BailoutId id() const { return BailoutId(local_id(0)); }
TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
+ // Parenthesized expressions in the form `( Expression )`.
+ void set_is_parenthesized() {
+ bit_field_ = ParenthesizedField::update(bit_field_, true);
+ }
+ bool is_parenthesized() const {
+ return ParenthesizedField::decode(bit_field_);
+ }
+
protected:
Expression(Zone* zone, int pos)
: AstNode(pos),
@@ -396,6 +395,8 @@ class Expression : public AstNode {
int base_id_;
Bounds bounds_;
class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
+ class ParenthesizedField
+ : public BitField16<bool, ToBooleanTypesField::kNext, 1> {};
uint16_t bit_field_;
// Ends with 16-bit field; deriving classes in turn begin with
// 16-bit fields for optimum packing efficiency.
@@ -470,6 +471,10 @@ class Block final : public BreakableStatement {
&& labels() == NULL; // Good enough as an approximation...
}
+ void MarkTail() override {
+ if (!statements_.is_empty()) statements_.last()->MarkTail();
+ }
+
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -496,7 +501,11 @@ class DoExpression final : public Expression {
DECLARE_NODE_TYPE(DoExpression)
Block* block() { return block_; }
+ void set_block(Block* b) { block_ = b; }
VariableProxy* result() { return result_; }
+ void set_result(VariableProxy* v) { result_ = v; }
+
+ void MarkTail() override { block_->MarkTail(); }
protected:
DoExpression(Zone* zone, Block* block, VariableProxy* result, int pos)
@@ -572,6 +581,7 @@ class FunctionDeclaration final : public Declaration {
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
+ void set_fun(FunctionLiteral* f) { fun_ = f; }
InitializationFlag initialization() const override {
return kCreatedInitialized;
}
@@ -695,6 +705,7 @@ class DoWhileStatement final : public IterationStatement {
}
Expression* cond() const { return cond_; }
+ void set_cond(Expression* e) { cond_ = e; }
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ContinueId() const override { return BailoutId(local_id(0)); }
@@ -723,6 +734,7 @@ class WhileStatement final : public IterationStatement {
}
Expression* cond() const { return cond_; }
+ void set_cond(Expression* e) { cond_ = e; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId ContinueId() const override { return EntryId(); }
@@ -759,6 +771,10 @@ class ForStatement final : public IterationStatement {
Expression* cond() const { return cond_; }
Statement* next() const { return next_; }
+ void set_init(Statement* s) { init_ = s; }
+ void set_cond(Expression* e) { cond_ = e; }
+ void set_next(Statement* s) { next_ = s; }
+
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ContinueId() const override { return BailoutId(local_id(0)); }
BailoutId StackCheckId() const override { return BodyId(); }
@@ -797,6 +813,9 @@ class ForEachStatement : public IterationStatement {
Expression* each() const { return each_; }
Expression* subject() const { return subject_; }
+ void set_each(Expression* e) { each_ = e; }
+ void set_subject(Expression* e) { subject_ = e; }
+
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) override;
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
@@ -901,6 +920,11 @@ class ForOfStatement final : public ForEachStatement {
return assign_each_;
}
+ void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
+ void set_next_result(Expression* e) { next_result_ = e; }
+ void set_result_done(Expression* e) { result_done_ = e; }
+ void set_assign_each(Expression* e) { assign_each_ = e; }
+
BailoutId ContinueId() const override { return EntryId(); }
BailoutId StackCheckId() const override { return BackEdgeId(); }
@@ -933,6 +957,7 @@ class ExpressionStatement final : public Statement {
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
bool IsJump() const override { return expression_->IsThrow(); }
+ void MarkTail() override { expression_->MarkTail(); }
protected:
ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -988,6 +1013,8 @@ class ReturnStatement final : public JumpStatement {
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
+
protected:
explicit ReturnStatement(Zone* zone, Expression* expression, int pos)
: JumpStatement(zone, pos), expression_(expression) { }
@@ -1003,12 +1030,16 @@ class WithStatement final : public Statement {
Scope* scope() { return scope_; }
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
Statement* statement() const { return statement_; }
void set_statement(Statement* s) { statement_ = s; }
void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId EntryId() const { return BailoutId(local_id(0)); }
+ static int num_ids() { return parent_num_ids() + 2; }
+ BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
+ BailoutId EntryId() const { return BailoutId(local_id(1)); }
+
+ void MarkTail() override { statement_->MarkTail(); }
protected:
WithStatement(Zone* zone, Scope* scope, Expression* expression,
@@ -1044,6 +1075,7 @@ class CaseClause final : public Expression {
CHECK(!is_default());
return label_;
}
+ void set_label(Expression* e) { label_ = e; }
Label* body_target() { return &body_target_; }
ZoneList<Statement*>* statements() const { return statements_; }
@@ -1051,6 +1083,10 @@ class CaseClause final : public Expression {
BailoutId EntryId() const { return BailoutId(local_id(0)); }
TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
+ void MarkTail() override {
+ if (!statements_->is_empty()) statements_->last()->MarkTail();
+ }
+
Type* compare_type() { return compare_type_; }
void set_compare_type(Type* type) { compare_type_ = type; }
@@ -1081,6 +1117,12 @@ class SwitchStatement final : public BreakableStatement {
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
+ void set_tag(Expression* t) { tag_ = t; }
+
+ void MarkTail() override {
+ if (!cases_->is_empty()) cases_->last()->MarkTail();
+ }
+
protected:
SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
: BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
@@ -1109,6 +1151,7 @@ class IfStatement final : public Statement {
Statement* then_statement() const { return then_statement_; }
Statement* else_statement() const { return else_statement_; }
+ void set_condition(Expression* e) { condition_ = e; }
void set_then_statement(Statement* s) { then_statement_ = s; }
void set_else_statement(Statement* s) { else_statement_ = s; }
@@ -1117,6 +1160,11 @@ class IfStatement final : public Statement {
&& HasElseStatement() && else_statement()->IsJump();
}
+ void MarkTail() override {
+ then_statement_->MarkTail();
+ else_statement_->MarkTail();
+ }
+
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 3; }
BailoutId IfId() const { return BailoutId(local_id(0)); }
@@ -1186,6 +1234,8 @@ class TryCatchStatement final : public TryStatement {
Block* catch_block() const { return catch_block_; }
void set_catch_block(Block* b) { catch_block_ = b; }
+ void MarkTail() override { catch_block_->MarkTail(); }
+
protected:
TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
Variable* variable, Block* catch_block, int pos)
@@ -1208,6 +1258,8 @@ class TryFinallyStatement final : public TryStatement {
Block* finally_block() const { return finally_block_; }
void set_finally_block(Block* b) { finally_block_ = b; }
+ void MarkTail() override { finally_block_->MarkTail(); }
+
protected:
TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
int pos)
@@ -1394,6 +1446,9 @@ class ObjectLiteralProperty final : public ZoneObject {
Expression* value() { return value_; }
Kind kind() { return kind_; }
+ void set_key(Expression* e) { key_ = e; }
+ void set_value(Expression* e) { value_ = e; }
+
// Type feedback information.
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
@@ -1501,11 +1556,18 @@ class ObjectLiteral final : public MaterializedLiteral {
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
// Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 1)); }
+ BailoutId GetIdForPropertyName(int i) {
+ return BailoutId(local_id(2 * i + 1));
+ }
+ BailoutId GetIdForPropertySet(int i) {
+ return BailoutId(local_id(2 * i + 2));
+ }
// Unlike other AST nodes, this number of bailout IDs allocated for an
// ObjectLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
+ int num_ids() const {
+ return parent_num_ids() + 1 + 2 * properties()->length();
+ }
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
@@ -1565,12 +1627,11 @@ class RegExpLiteral final : public MaterializedLiteral {
DECLARE_NODE_TYPE(RegExpLiteral)
Handle<String> pattern() const { return pattern_->string(); }
- Handle<String> flags() const { return flags_->string(); }
+ int flags() const { return flags_; }
protected:
- RegExpLiteral(Zone* zone, const AstRawString* pattern,
- const AstRawString* flags, int literal_index, bool is_strong,
- int pos)
+ RegExpLiteral(Zone* zone, const AstRawString* pattern, int flags,
+ int literal_index, bool is_strong, int pos)
: MaterializedLiteral(zone, literal_index, is_strong, pos),
pattern_(pattern),
flags_(flags) {
@@ -1578,8 +1639,8 @@ class RegExpLiteral final : public MaterializedLiteral {
}
private:
- const AstRawString* pattern_;
- const AstRawString* flags_;
+ const AstRawString* const pattern_;
+ int const flags_;
};
@@ -1761,6 +1822,9 @@ class Property final : public Expression {
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
+ void set_obj(Expression* e) { obj_ = e; }
+ void set_key(Expression* e) { key_ = e; }
+
static int num_ids() { return parent_num_ids() + 1; }
BailoutId LoadId() const { return BailoutId(local_id(0)); }
@@ -1852,6 +1916,8 @@ class Call final : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
+ void set_expression(Expression* e) { expression_ = e; }
+
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) override;
@@ -1909,6 +1975,11 @@ class Call final : public Expression {
bit_field_ = IsUninitializedField::update(bit_field_, b);
}
+ bool is_tail() const { return IsTailField::decode(bit_field_); }
+ void MarkTail() override {
+ bit_field_ = IsTailField::update(bit_field_, true);
+ }
+
enum CallType {
POSSIBLY_EVAL_CALL,
GLOBAL_CALL,
@@ -1954,6 +2025,7 @@ class Call final : public Expression {
Handle<JSFunction> target_;
Handle<AllocationSite> allocation_site_;
class IsUninitializedField : public BitField8<bool, 0, 1> {};
+ class IsTailField : public BitField8<bool, 1, 1> {};
uint8_t bit_field_;
};
@@ -1965,6 +2037,8 @@ class CallNew final : public Expression {
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
+ void set_expression(Expression* e) { expression_ = e; }
+
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) override {
@@ -2074,6 +2148,7 @@ class UnaryOperation final : public Expression {
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
@@ -2104,12 +2179,25 @@ class BinaryOperation final : public Expression {
Token::Value op() const { return static_cast<Token::Value>(op_); }
Expression* left() const { return left_; }
+ void set_left(Expression* e) { left_ = e; }
Expression* right() const { return right_; }
+ void set_right(Expression* e) { right_ = e; }
Handle<AllocationSite> allocation_site() const { return allocation_site_; }
void set_allocation_site(Handle<AllocationSite> allocation_site) {
allocation_site_ = allocation_site;
}
+ void MarkTail() override {
+ switch (op()) {
+ case Token::COMMA:
+ case Token::AND:
+ case Token::OR:
+ right_->MarkTail();
+ default:
+ break;
+ }
+ }
+
// The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
static int num_ids() { return parent_num_ids() + 2; }
@@ -2168,6 +2256,7 @@ class CountOperation final : public Expression {
}
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
bool IsMonomorphic() override { return receiver_types_.length() == 1; }
SmallMapList* GetReceiverTypes() override { return &receiver_types_; }
@@ -2237,6 +2326,9 @@ class CompareOperation final : public Expression {
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+ void set_left(Expression* e) { left_ = e; }
+ void set_right(Expression* e) { right_ = e; }
+
// Type feedback information.
static int num_ids() { return parent_num_ids() + 1; }
TypeFeedbackId CompareOperationFeedbackId() const {
@@ -2278,6 +2370,7 @@ class Spread final : public Expression {
DECLARE_NODE_TYPE(Spread)
Expression* expression() const { return expression_; }
+ void set_expression(Expression* e) { expression_ = e; }
static int num_ids() { return parent_num_ids(); }
@@ -2301,6 +2394,15 @@ class Conditional final : public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
+ void set_condition(Expression* e) { condition_ = e; }
+ void set_then_expression(Expression* e) { then_expression_ = e; }
+ void set_else_expression(Expression* e) { else_expression_ = e; }
+
+ void MarkTail() override {
+ then_expression_->MarkTail();
+ else_expression_->MarkTail();
+ }
+
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ThenId() const { return BailoutId(local_id(0)); }
BailoutId ElseId() const { return BailoutId(local_id(1)); }
@@ -2334,6 +2436,10 @@ class Assignment final : public Expression {
Token::Value op() const { return TokenField::decode(bit_field_); }
Expression* target() const { return target_; }
Expression* value() const { return value_; }
+
+ void set_target(Expression* e) { target_ = e; }
+ void set_value(Expression* e) { value_ = e; }
+
BinaryOperation* binary_operation() const { return binary_operation_; }
// This check relies on the definition order of token in token.h.
@@ -2381,9 +2487,12 @@ class Assignment final : public Expression {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
class IsUninitializedField : public BitField16<bool, 0, 1> {};
- class KeyTypeField : public BitField16<IcCheckType, 1, 1> {};
- class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 3> {};
- class TokenField : public BitField16<Token::Value, 5, 8> {};
+ class KeyTypeField
+ : public BitField16<IcCheckType, IsUninitializedField::kNext, 1> {};
+ class StoreModeField
+ : public BitField16<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
+ class TokenField : public BitField16<Token::Value, StoreModeField::kNext, 8> {
+ };
// Starts with 16-bit field, which should get packed together with
// Expression's trailing 16-bit field.
@@ -2396,6 +2505,38 @@ class Assignment final : public Expression {
};
+class RewritableAssignmentExpression : public Expression {
+ public:
+ DECLARE_NODE_TYPE(RewritableAssignmentExpression)
+
+ Expression* expression() { return expr_; }
+ bool is_rewritten() const { return is_rewritten_; }
+
+ void set_expression(Expression* e) { expr_ = e; }
+
+ void Rewrite(Expression* new_expression) {
+ DCHECK(!is_rewritten());
+ DCHECK_NOT_NULL(new_expression);
+ expr_ = new_expression;
+ is_rewritten_ = true;
+ }
+
+ static int num_ids() { return parent_num_ids(); }
+
+ protected:
+ RewritableAssignmentExpression(Zone* zone, Expression* expression)
+ : Expression(zone, expression->position()),
+ is_rewritten_(false),
+ expr_(expression) {}
+
+ private:
+ int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ bool is_rewritten_;
+ Expression* expr_;
+};
+
+
class Yield final : public Expression {
public:
DECLARE_NODE_TYPE(Yield)
@@ -2411,6 +2552,9 @@ class Yield final : public Expression {
Expression* expression() const { return expression_; }
Kind yield_kind() const { return yield_kind_; }
+ void set_generator_object(Expression* e) { generator_object_ = e; }
+ void set_expression(Expression* e) { expression_ = e; }
+
// Type feedback information.
bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
@@ -2454,6 +2598,7 @@ class Throw final : public Expression {
DECLARE_NODE_TYPE(Throw)
Expression* exception() const { return exception_; }
+ void set_exception(Expression* e) { exception_ = e; }
protected:
Throw(Zone* zone, Expression* exception, int pos)
@@ -2467,35 +2612,23 @@ class Throw final : public Expression {
class FunctionLiteral final : public Expression {
public:
enum FunctionType {
- ANONYMOUS_EXPRESSION,
- NAMED_EXPRESSION,
- DECLARATION
+ kAnonymousExpression,
+ kNamedExpression,
+ kDeclaration,
+ kGlobalOrEval
};
- enum ParameterFlag {
- kNoDuplicateParameters = 0,
- kHasDuplicateParameters = 1
- };
-
- enum IsFunctionFlag {
- kGlobalOrEval,
- kIsFunction
- };
+ enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
- enum ShouldBeUsedOnceHint { kShouldBeUsedOnce, kDontKnowIfShouldBeUsedOnce };
-
- enum ArityRestriction {
- NORMAL_ARITY,
- GETTER_ARITY,
- SETTER_ARITY
- };
+ enum ArityRestriction { kNormalArity, kGetterArity, kSetterArity };
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return raw_name_->string(); }
- const AstRawString* raw_name() const { return raw_name_; }
+ const AstString* raw_name() const { return raw_name_; }
+ void set_raw_name(const AstString* name) { raw_name_ = name; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
@@ -2550,14 +2683,14 @@ class FunctionLiteral final : public Expression {
inferred_name_ = Handle<String>();
}
- bool pretenure() { return Pretenure::decode(bitfield_); }
- void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
+ bool pretenure() const { return Pretenure::decode(bitfield_); }
+ void set_pretenure() { bitfield_ = Pretenure::update(bitfield_, true); }
- bool has_duplicate_parameters() {
+ bool has_duplicate_parameters() const {
return HasDuplicateParameters::decode(bitfield_);
}
- bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
+ bool is_function() const { return IsFunction::decode(bitfield_); }
// This is used as a heuristic on when to eagerly compile a function
// literal. We consider the following constructs as hints that the
@@ -2565,19 +2698,19 @@ class FunctionLiteral final : public Expression {
// - (function() { ... })();
// - var x = function() { ... }();
bool should_eager_compile() const {
- return EagerCompileHintBit::decode(bitfield_) == kShouldEagerCompile;
+ return ShouldEagerCompile::decode(bitfield_);
}
void set_should_eager_compile() {
- bitfield_ = EagerCompileHintBit::update(bitfield_, kShouldEagerCompile);
+ bitfield_ = ShouldEagerCompile::update(bitfield_, true);
}
// A hint that we expect this function to be called (exactly) once,
// i.e. we suspect it's an initialization function.
bool should_be_used_once_hint() const {
- return ShouldBeUsedOnceHintBit::decode(bitfield_) == kShouldBeUsedOnce;
+ return ShouldBeUsedOnceHint::decode(bitfield_);
}
void set_should_be_used_once_hint() {
- bitfield_ = ShouldBeUsedOnceHintBit::update(bitfield_, kShouldBeUsedOnce);
+ bitfield_ = ShouldBeUsedOnceHint::update(bitfield_, true);
}
FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
@@ -2597,13 +2730,12 @@ class FunctionLiteral final : public Expression {
}
protected:
- FunctionLiteral(Zone* zone, const AstRawString* name,
+ FunctionLiteral(Zone* zone, const AstString* name,
AstValueFactory* ast_value_factory, Scope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
int expected_property_count, int parameter_count,
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
- IsFunctionFlag is_function,
EagerCompileHint eager_compile_hint, FunctionKind kind,
int position)
: Expression(zone, position),
@@ -2617,20 +2749,33 @@ class FunctionLiteral final : public Expression {
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
- bitfield_ = IsExpression::encode(function_type != DECLARATION) |
- IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
- Pretenure::encode(false) |
- HasDuplicateParameters::encode(has_duplicate_parameters) |
- IsFunction::encode(is_function) |
- EagerCompileHintBit::encode(eager_compile_hint) |
- FunctionKindBits::encode(kind) |
- ShouldBeUsedOnceHintBit::encode(kDontKnowIfShouldBeUsedOnce);
+ bitfield_ =
+ IsExpression::encode(function_type != kDeclaration) |
+ IsAnonymous::encode(function_type == kAnonymousExpression) |
+ Pretenure::encode(false) |
+ HasDuplicateParameters::encode(has_duplicate_parameters ==
+ kHasDuplicateParameters) |
+ IsFunction::encode(function_type != kGlobalOrEval) |
+ ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
+ FunctionKindBits::encode(kind) | ShouldBeUsedOnceHint::encode(false);
DCHECK(IsValidFunctionKind(kind));
}
private:
- const AstRawString* raw_name_;
- Handle<String> name_;
+ class IsExpression : public BitField16<bool, 0, 1> {};
+ class IsAnonymous : public BitField16<bool, 1, 1> {};
+ class Pretenure : public BitField16<bool, 2, 1> {};
+ class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
+ class IsFunction : public BitField16<bool, 4, 1> {};
+ class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
+ class FunctionKindBits : public BitField16<FunctionKind, 6, 8> {};
+ class ShouldBeUsedOnceHint : public BitField16<bool, 15, 1> {};
+
+ // Start with 16-bit field, which should get packed together
+ // with Expression's trailing 16-bit field.
+ uint16_t bitfield_;
+
+ const AstString* raw_name_;
Scope* scope_;
ZoneList<Statement*>* body_;
const AstString* raw_inferred_name_;
@@ -2642,17 +2787,6 @@ class FunctionLiteral final : public Expression {
int expected_property_count_;
int parameter_count_;
int function_token_position_;
-
- unsigned bitfield_;
- class IsExpression : public BitField<bool, 0, 1> {};
- class IsAnonymous : public BitField<bool, 1, 1> {};
- class Pretenure : public BitField<bool, 2, 1> {};
- class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
- class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
- class EagerCompileHintBit : public BitField<EagerCompileHint, 5, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 6, 8> {};
- class ShouldBeUsedOnceHintBit : public BitField<ShouldBeUsedOnceHint, 15, 1> {
- };
};
@@ -2664,10 +2798,17 @@ class ClassLiteral final : public Expression {
Handle<String> name() const { return raw_name_->string(); }
const AstRawString* raw_name() const { return raw_name_; }
+ void set_raw_name(const AstRawString* name) {
+ DCHECK_NULL(raw_name_);
+ raw_name_ = name;
+ }
+
Scope* scope() const { return scope_; }
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
+ void set_extends(Expression* e) { extends_ = e; }
FunctionLiteral* constructor() const { return constructor_; }
+ void set_constructor(FunctionLiteral* f) { constructor_ = f; }
ZoneList<Property*>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
@@ -2690,7 +2831,7 @@ class ClassLiteral final : public Expression {
FeedbackVectorSlotCache* cache) override;
bool NeedsProxySlot() const {
- return FLAG_vector_stores && class_variable_proxy() != nullptr &&
+ return class_variable_proxy() != nullptr &&
class_variable_proxy()->var()->IsUnallocated();
}
@@ -2758,7 +2899,9 @@ class SuperPropertyReference final : public Expression {
DECLARE_NODE_TYPE(SuperPropertyReference)
VariableProxy* this_var() const { return this_var_; }
+ void set_this_var(VariableProxy* v) { this_var_ = v; }
Expression* home_object() const { return home_object_; }
+ void set_home_object(Expression* e) { home_object_ = e; }
protected:
SuperPropertyReference(Zone* zone, VariableProxy* this_var,
@@ -2779,8 +2922,11 @@ class SuperCallReference final : public Expression {
DECLARE_NODE_TYPE(SuperCallReference)
VariableProxy* this_var() const { return this_var_; }
+ void set_this_var(VariableProxy* v) { this_var_ = v; }
VariableProxy* new_target_var() const { return new_target_var_; }
+ void set_new_target_var(VariableProxy* v) { new_target_var_ = v; }
VariableProxy* this_function_var() const { return this_function_var_; }
+ void set_this_function_var(VariableProxy* v) { this_function_var_ = v; }
protected:
SuperCallReference(Zone* zone, VariableProxy* this_var,
@@ -2817,352 +2963,6 @@ class EmptyParentheses final : public Expression {
// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-class RegExpVisitor BASE_EMBEDDED {
- public:
- virtual ~RegExpVisitor() { }
-#define MAKE_CASE(Name) \
- virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
-#undef MAKE_CASE
-};
-
-
-class RegExpTree : public ZoneObject {
- public:
- static const int kInfinity = kMaxInt;
- virtual ~RegExpTree() {}
- virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
- virtual RegExpNode* ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) = 0;
- virtual bool IsTextElement() { return false; }
- virtual bool IsAnchoredAtStart() { return false; }
- virtual bool IsAnchoredAtEnd() { return false; }
- virtual int min_match() = 0;
- virtual int max_match() = 0;
- // Returns the interval of registers used for captures within this
- // expression.
- virtual Interval CaptureRegisters() { return Interval::Empty(); }
- virtual void AppendToText(RegExpText* text, Zone* zone);
- std::ostream& Print(std::ostream& os, Zone* zone); // NOLINT
-#define MAKE_ASTYPE(Name) \
- virtual RegExp##Name* As##Name(); \
- virtual bool Is##Name();
- FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
-#undef MAKE_ASTYPE
-};
-
-
-class RegExpDisjunction final : public RegExpTree {
- public:
- explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpDisjunction* AsDisjunction() override;
- Interval CaptureRegisters() override;
- bool IsDisjunction() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- int min_match() override { return min_match_; }
- int max_match() override { return max_match_; }
- ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
- private:
- bool SortConsecutiveAtoms(RegExpCompiler* compiler);
- void RationalizeConsecutiveAtoms(RegExpCompiler* compiler);
- void FixSingleCharacterDisjunctions(RegExpCompiler* compiler);
- ZoneList<RegExpTree*>* alternatives_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAlternative final : public RegExpTree {
- public:
- explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpAlternative* AsAlternative() override;
- Interval CaptureRegisters() override;
- bool IsAlternative() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- int min_match() override { return min_match_; }
- int max_match() override { return max_match_; }
- ZoneList<RegExpTree*>* nodes() { return nodes_; }
- private:
- ZoneList<RegExpTree*>* nodes_;
- int min_match_;
- int max_match_;
-};
-
-
-class RegExpAssertion final : public RegExpTree {
- public:
- enum AssertionType {
- START_OF_LINE,
- START_OF_INPUT,
- END_OF_LINE,
- END_OF_INPUT,
- BOUNDARY,
- NON_BOUNDARY
- };
- explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpAssertion* AsAssertion() override;
- bool IsAssertion() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- int min_match() override { return 0; }
- int max_match() override { return 0; }
- AssertionType assertion_type() { return assertion_type_; }
- private:
- AssertionType assertion_type_;
-};
-
-
-class CharacterSet final BASE_EMBEDDED {
- public:
- explicit CharacterSet(uc16 standard_set_type)
- : ranges_(NULL),
- standard_set_type_(standard_set_type) {}
- explicit CharacterSet(ZoneList<CharacterRange>* ranges)
- : ranges_(ranges),
- standard_set_type_(0) {}
- ZoneList<CharacterRange>* ranges(Zone* zone);
- uc16 standard_set_type() { return standard_set_type_; }
- void set_standard_set_type(uc16 special_set_type) {
- standard_set_type_ = special_set_type;
- }
- bool is_standard() { return standard_set_type_ != 0; }
- void Canonicalize();
- private:
- ZoneList<CharacterRange>* ranges_;
- // If non-zero, the value represents a standard set (e.g., all whitespace
- // characters) without having to expand the ranges.
- uc16 standard_set_type_;
-};
-
-
-class RegExpCharacterClass final : public RegExpTree {
- public:
- RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
- : set_(ranges),
- is_negated_(is_negated) { }
- explicit RegExpCharacterClass(uc16 type)
- : set_(type),
- is_negated_(false) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpCharacterClass* AsCharacterClass() override;
- bool IsCharacterClass() override;
- bool IsTextElement() override { return true; }
- int min_match() override { return 1; }
- int max_match() override { return 1; }
- void AppendToText(RegExpText* text, Zone* zone) override;
- CharacterSet character_set() { return set_; }
- // TODO(lrn): Remove need for complex version if is_standard that
- // recognizes a mangled standard set and just do { return set_.is_special(); }
- bool is_standard(Zone* zone);
- // Returns a value representing the standard character set if is_standard()
- // returns true.
- // Currently used values are:
- // s : unicode whitespace
- // S : unicode non-whitespace
- // w : ASCII word character (digit, letter, underscore)
- // W : non-ASCII word character
- // d : ASCII digit
- // D : non-ASCII digit
- // . : non-unicode non-newline
- // * : All characters
- uc16 standard_type() { return set_.standard_set_type(); }
- ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
- bool is_negated() { return is_negated_; }
-
- private:
- CharacterSet set_;
- bool is_negated_;
-};
-
-
-class RegExpAtom final : public RegExpTree {
- public:
- explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpAtom* AsAtom() override;
- bool IsAtom() override;
- bool IsTextElement() override { return true; }
- int min_match() override { return data_.length(); }
- int max_match() override { return data_.length(); }
- void AppendToText(RegExpText* text, Zone* zone) override;
- Vector<const uc16> data() { return data_; }
- int length() { return data_.length(); }
- private:
- Vector<const uc16> data_;
-};
-
-
-class RegExpText final : public RegExpTree {
- public:
- explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpText* AsText() override;
- bool IsText() override;
- bool IsTextElement() override { return true; }
- int min_match() override { return length_; }
- int max_match() override { return length_; }
- void AppendToText(RegExpText* text, Zone* zone) override;
- void AddElement(TextElement elm, Zone* zone) {
- elements_.Add(elm, zone);
- length_ += elm.length();
- }
- ZoneList<TextElement>* elements() { return &elements_; }
- private:
- ZoneList<TextElement> elements_;
- int length_;
-};
-
-
-class RegExpQuantifier final : public RegExpTree {
- public:
- enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
- RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
- : body_(body),
- min_(min),
- max_(max),
- min_match_(min * body->min_match()),
- quantifier_type_(type) {
- if (max > 0 && body->max_match() > kInfinity / max) {
- max_match_ = kInfinity;
- } else {
- max_match_ = max * body->max_match();
- }
- }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- static RegExpNode* ToNode(int min,
- int max,
- bool is_greedy,
- RegExpTree* body,
- RegExpCompiler* compiler,
- RegExpNode* on_success,
- bool not_at_start = false);
- RegExpQuantifier* AsQuantifier() override;
- Interval CaptureRegisters() override;
- bool IsQuantifier() override;
- int min_match() override { return min_match_; }
- int max_match() override { return max_match_; }
- int min() { return min_; }
- int max() { return max_; }
- bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
- bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
- bool is_greedy() { return quantifier_type_ == GREEDY; }
- RegExpTree* body() { return body_; }
-
- private:
- RegExpTree* body_;
- int min_;
- int max_;
- int min_match_;
- int max_match_;
- QuantifierType quantifier_type_;
-};
-
-
-class RegExpCapture final : public RegExpTree {
- public:
- explicit RegExpCapture(RegExpTree* body, int index)
- : body_(body), index_(index) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- static RegExpNode* ToNode(RegExpTree* body,
- int index,
- RegExpCompiler* compiler,
- RegExpNode* on_success);
- RegExpCapture* AsCapture() override;
- bool IsAnchoredAtStart() override;
- bool IsAnchoredAtEnd() override;
- Interval CaptureRegisters() override;
- bool IsCapture() override;
- int min_match() override { return body_->min_match(); }
- int max_match() override { return body_->max_match(); }
- RegExpTree* body() { return body_; }
- int index() { return index_; }
- static int StartRegister(int index) { return index * 2; }
- static int EndRegister(int index) { return index * 2 + 1; }
-
- private:
- RegExpTree* body_;
- int index_;
-};
-
-
-class RegExpLookahead final : public RegExpTree {
- public:
- RegExpLookahead(RegExpTree* body,
- bool is_positive,
- int capture_count,
- int capture_from)
- : body_(body),
- is_positive_(is_positive),
- capture_count_(capture_count),
- capture_from_(capture_from) { }
-
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpLookahead* AsLookahead() override;
- Interval CaptureRegisters() override;
- bool IsLookahead() override;
- bool IsAnchoredAtStart() override;
- int min_match() override { return 0; }
- int max_match() override { return 0; }
- RegExpTree* body() { return body_; }
- bool is_positive() { return is_positive_; }
- int capture_count() { return capture_count_; }
- int capture_from() { return capture_from_; }
-
- private:
- RegExpTree* body_;
- bool is_positive_;
- int capture_count_;
- int capture_from_;
-};
-
-
-class RegExpBackReference final : public RegExpTree {
- public:
- explicit RegExpBackReference(RegExpCapture* capture)
- : capture_(capture) { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpBackReference* AsBackReference() override;
- bool IsBackReference() override;
- int min_match() override { return 0; }
- int max_match() override { return capture_->max_match(); }
- int index() { return capture_->index(); }
- RegExpCapture* capture() { return capture_; }
- private:
- RegExpCapture* capture_;
-};
-
-
-class RegExpEmpty final : public RegExpTree {
- public:
- RegExpEmpty() { }
- void* Accept(RegExpVisitor* visitor, void* data) override;
- RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
- RegExpEmpty* AsEmpty() override;
- bool IsEmpty() override;
- int min_match() override { return 0; }
- int max_match() override { return 0; }
-};
-
-
-// ----------------------------------------------------------------------------
// Basic visitor
// - leaf node visitors are abstract.
@@ -3186,7 +2986,6 @@ class AstVisitor BASE_EMBEDDED {
#undef DEF_VISIT
};
-
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
void Visit(AstNode* node) final { \
@@ -3220,6 +3019,69 @@ class AstVisitor BASE_EMBEDDED {
uintptr_t stack_limit_; \
bool stack_overflow_
+#define DEFINE_AST_REWRITER_SUBCLASS_MEMBERS() \
+ public: \
+ AstNode* Rewrite(AstNode* node) { \
+ DCHECK_NULL(replacement_); \
+ DCHECK_NOT_NULL(node); \
+ Visit(node); \
+ if (HasStackOverflow()) return node; \
+ if (replacement_ == nullptr) return node; \
+ AstNode* result = replacement_; \
+ replacement_ = nullptr; \
+ return result; \
+ } \
+ \
+ private: \
+ void InitializeAstRewriter(Isolate* isolate) { \
+ InitializeAstVisitor(isolate); \
+ replacement_ = nullptr; \
+ } \
+ \
+ void InitializeAstRewriter(uintptr_t stack_limit) { \
+ InitializeAstVisitor(stack_limit); \
+ replacement_ = nullptr; \
+ } \
+ \
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); \
+ \
+ protected: \
+ AstNode* replacement_
+
+// Generic macro for rewriting things; `GET` is the expression to be
+// rewritten; `SET` is a command that should do the rewriting, i.e.
+// something sensible with the variable called `replacement`.
+#define AST_REWRITE(Type, GET, SET) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ DCHECK_NULL(replacement_); \
+ Visit(GET); \
+ if (HasStackOverflow()) return; \
+ if (replacement_ == nullptr) break; \
+ Type* replacement = reinterpret_cast<Type*>(replacement_); \
+ do { \
+ SET; \
+ } while (false); \
+ replacement_ = nullptr; \
+ } while (false)
+
+// Macro for rewriting object properties; it assumes that `object` has
+// `property` with a public getter and setter.
+#define AST_REWRITE_PROPERTY(Type, object, property) \
+ do { \
+ auto _obj = (object); \
+ AST_REWRITE(Type, _obj->property(), _obj->set_##property(replacement)); \
+ } while (false)
+
+// Macro for rewriting list elements; it assumes that `list` has methods
+// `at` and `Set`.
+#define AST_REWRITE_LIST_ELEMENT(Type, list, index) \
+ do { \
+ auto _list = (list); \
+ auto _index = (index); \
+ AST_REWRITE(Type, _list->at(_index), _list->Set(_index, replacement)); \
+ } while (false)
+
// ----------------------------------------------------------------------------
// AstNode factory
@@ -3351,8 +3213,8 @@ class AstNodeFactory final BASE_EMBEDDED {
SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
Statement* statement, Scope* scope) {
- return new (local_zone_)
- SloppyBlockFunctionStatement(local_zone_, statement, scope);
+ return new (parser_zone_)
+ SloppyBlockFunctionStatement(parser_zone_, statement, scope);
}
CaseClause* NewCaseClause(
@@ -3428,11 +3290,8 @@ class AstNodeFactory final BASE_EMBEDDED {
ast_value_factory_, key, value, is_static, is_computed_name);
}
- RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern,
- const AstRawString* flags,
- int literal_index,
- bool is_strong,
- int pos) {
+ RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
+ int literal_index, bool is_strong, int pos) {
return new (local_zone_) RegExpLiteral(local_zone_, pattern, flags,
literal_index, is_strong, pos);
}
@@ -3542,6 +3401,14 @@ class AstNodeFactory final BASE_EMBEDDED {
local_zone_, condition, then_expression, else_expression, position);
}
+ RewritableAssignmentExpression* NewRewritableAssignmentExpression(
+ Expression* expression) {
+ DCHECK_NOT_NULL(expression);
+ DCHECK(expression->IsAssignment());
+ return new (local_zone_)
+ RewritableAssignmentExpression(local_zone_, expression);
+ }
+
Assignment* NewAssignment(Token::Value op,
Expression* target,
Expression* value,
@@ -3571,19 +3438,18 @@ class AstNodeFactory final BASE_EMBEDDED {
}
FunctionLiteral* NewFunctionLiteral(
- const AstRawString* name, AstValueFactory* ast_value_factory,
- Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int parameter_count,
+ const AstRawString* name, Scope* scope, ZoneList<Statement*>* body,
+ int materialized_literal_count, int expected_property_count,
+ int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
int position) {
return new (parser_zone_) FunctionLiteral(
- parser_zone_, name, ast_value_factory, scope, body,
+ parser_zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
- function_type, has_duplicate_parameters, is_function,
- eager_compile_hint, kind, position);
+ function_type, has_duplicate_parameters, eager_compile_hint, kind,
+ position);
}
ClassLiteral* NewClassLiteral(const AstRawString* name, Scope* scope,
@@ -3666,4 +3532,4 @@ class AstNodeFactory final BASE_EMBEDDED {
} // namespace internal
} // namespace v8
-#endif // V8_AST_H_
+#endif // V8_AST_AST_H_
diff --git a/deps/v8/src/modules.cc b/deps/v8/src/ast/modules.cc
index f72693cd66..225cd8d62c 100644
--- a/deps/v8/src/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/modules.h"
+#include "src/ast/modules.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/modules.h b/deps/v8/src/ast/modules.h
index f1dbd2516a..e3c66dce94 100644
--- a/deps/v8/src/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MODULES_H_
-#define V8_MODULES_H_
+#ifndef V8_AST_MODULES_H_
+#define V8_AST_MODULES_H_
#include "src/zone.h"
@@ -118,4 +118,4 @@ class ModuleDescriptor : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_MODULES_H_
+#endif // V8_AST_MODULES_H_
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index f50d5904d0..1f6b8c31de 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -2,24 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/prettyprinter.h"
+#include "src/ast/prettyprinter.h"
#include <stdarg.h>
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/scopes.h"
#include "src/base/platform/platform.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
-CallPrinter::CallPrinter(Isolate* isolate) {
+CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin) {
output_ = NULL;
size_ = 0;
pos_ = 0;
position_ = 0;
found_ = false;
done_ = false;
+ is_builtin_ = is_builtin;
InitializeAstVisitor(isolate);
}
@@ -192,8 +193,9 @@ void CallPrinter::VisitForInStatement(ForInStatement* node) {
void CallPrinter::VisitForOfStatement(ForOfStatement* node) {
Find(node->each());
- Find(node->iterable());
+ Find(node->assign_iterator());
Find(node->body());
+ Find(node->next_result());
}
@@ -239,15 +241,19 @@ void CallPrinter::VisitConditional(Conditional* node) {
void CallPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->value(), true);
+ PrintLiteral(*node->value(), true);
}
void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
- PrintLiteral(node->pattern(), false);
+ PrintLiteral(*node->pattern(), false);
Print("/");
- PrintLiteral(node->flags(), false);
+ if (node->flags() & RegExp::kGlobal) Print("g");
+ if (node->flags() & RegExp::kIgnoreCase) Print("i");
+ if (node->flags() & RegExp::kMultiline) Print("m");
+ if (node->flags() & RegExp::kUnicode) Print("u");
+ if (node->flags() & RegExp::kSticky) Print("y");
}
@@ -269,7 +275,12 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void CallPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteral(node->name(), false);
+ if (is_builtin_) {
+ // Variable names of builtins are meaningless due to minification.
+ Print("(var)");
+ } else {
+ PrintLiteral(*node->name(), false);
+ }
}
@@ -291,7 +302,7 @@ void CallPrinter::VisitProperty(Property* node) {
if (literal != NULL && literal->value()->IsInternalizedString()) {
Find(node->obj(), true);
Print(".");
- PrintLiteral(literal->value(), false);
+ PrintLiteral(*literal->value(), false);
} else {
Find(node->obj(), true);
Print("[");
@@ -303,7 +314,15 @@ void CallPrinter::VisitProperty(Property* node) {
void CallPrinter::VisitCall(Call* node) {
bool was_found = !found_ && node->position() == position_;
- if (was_found) found_ = true;
+ if (was_found) {
+ // Bail out if the error is caused by a direct call to a variable in builtin
+ // code. The variable name is meaningless due to minification.
+ if (is_builtin_ && node->expression()->IsVariableProxy()) {
+ done_ = true;
+ return;
+ }
+ found_ = true;
+ }
Find(node->expression(), true);
if (!was_found) Print("(...)");
FindArguments(node->arguments());
@@ -313,7 +332,15 @@ void CallPrinter::VisitCall(Call* node) {
void CallPrinter::VisitCallNew(CallNew* node) {
bool was_found = !found_ && node->position() == position_;
- if (was_found) found_ = true;
+ if (was_found) {
+ // Bail out if the error is caused by a direct call to a variable in builtin
+ // code. The variable name is meaningless due to minification.
+ if (is_builtin_ && node->expression()->IsVariableProxy()) {
+ done_ = true;
+ return;
+ }
+ found_ = true;
+ }
Find(node->expression(), was_found);
FindArguments(node->arguments());
if (was_found) done_ = true;
@@ -380,7 +407,15 @@ void CallPrinter::VisitThisFunction(ThisFunction* node) {}
void CallPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {}
-void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {}
+void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {
+ Print("super");
+}
+
+
+void CallPrinter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Find(node->expression());
+}
void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
@@ -399,14 +434,11 @@ void CallPrinter::FindArguments(ZoneList<Expression*>* arguments) {
}
-void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
- Object* object = *value;
+void CallPrinter::PrintLiteral(Object* value, bool quote) {
+ Object* object = value;
if (object->IsString()) {
- String* string = String::cast(object);
if (quote) Print("\"");
- for (int i = 0; i < string->length(); i++) {
- Print("%c", string->Get(i));
- }
+ Print("%s", String::cast(object)->ToCString().get());
if (quote) Print("\"");
} else if (object->IsNull()) {
Print("null");
@@ -418,12 +450,15 @@ void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
Print("undefined");
} else if (object->IsNumber()) {
Print("%g", object->Number());
+ } else if (object->IsSymbol()) {
+ // Symbols can only occur as literals if they were inserted by the parser.
+ PrintLiteral(Symbol::cast(object)->name(), false);
}
}
void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
- PrintLiteral(value->string(), quote);
+ PrintLiteral(*value->string(), quote);
}
@@ -729,7 +764,11 @@ void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print(" RegExp(");
PrintLiteral(node->pattern(), false);
Print(",");
- PrintLiteral(node->flags(), false);
+ if (node->flags() & RegExp::kGlobal) Print("g");
+ if (node->flags() & RegExp::kIgnoreCase) Print("i");
+ if (node->flags() & RegExp::kMultiline) Print("m");
+ if (node->flags() & RegExp::kUnicode) Print("u");
+ if (node->flags() & RegExp::kSticky) Print("y");
Print(") ");
}
@@ -890,6 +929,12 @@ void PrettyPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
+void PrettyPrinter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
const char* PrettyPrinter::Print(AstNode* node) {
Init();
Visit(node);
@@ -1461,7 +1506,16 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
PrintIndented(buf.start());
PrintLiteralIndented("PATTERN", node->pattern(), false);
- PrintLiteralIndented("FLAGS", node->flags(), false);
+ int i = 0;
+ if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
+ if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
+ if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
+ if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
+ if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
+ buf[i] = '\0';
+ PrintIndented("FLAGS ");
+ Print(buf.start());
+ Print("\n");
}
@@ -1632,6 +1686,12 @@ void AstPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
+void AstPrinter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
#endif // DEBUG
} // namespace internal
diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 0793d33e74..7e4dcdc804 100644
--- a/deps/v8/src/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PRETTYPRINTER_H_
-#define V8_PRETTYPRINTER_H_
+#ifndef V8_AST_PRETTYPRINTER_H_
+#define V8_AST_PRETTYPRINTER_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
namespace v8 {
namespace internal {
class CallPrinter : public AstVisitor {
public:
- explicit CallPrinter(Isolate* isolate);
+ explicit CallPrinter(Isolate* isolate, bool is_builtin);
virtual ~CallPrinter();
// The following routine prints the node with position |position| into a
@@ -37,11 +37,12 @@ class CallPrinter : public AstVisitor {
int position_; // position of ast node to print
bool found_;
bool done_;
+ bool is_builtin_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:
- void PrintLiteral(Handle<Object> value, bool quote);
+ void PrintLiteral(Object* value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
void FindStatements(ZoneList<Statement*>* statements);
void FindArguments(ZoneList<Expression*>* arguments);
@@ -136,4 +137,4 @@ class AstPrinter: public PrettyPrinter {
} // namespace internal
} // namespace v8
-#endif // V8_PRETTYPRINTER_H_
+#endif // V8_AST_PRETTYPRINTER_H_
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/ast/scopeinfo.cc
index c061b8fceb..668879fe51 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/ast/scopeinfo.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/scopeinfo.h"
+#include "src/ast/scopeinfo.h"
#include <stdlib.h>
+#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -49,6 +49,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
receiver_info = NONE;
}
+ bool has_new_target = scope->new_target_var() != nullptr;
+
// Determine use and location of the function variable if it is present.
VariableAllocationInfo function_name_info;
VariableMode function_variable_mode;
@@ -90,6 +92,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
LanguageModeField::encode(scope->language_mode()) |
DeclarationScopeField::encode(scope->is_declaration_scope()) |
ReceiverVariableField::encode(receiver_info) |
+ HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(scope->asm_module()) |
@@ -374,6 +377,9 @@ bool ScopeInfo::HasAllocatedReceiver() {
}
+bool ScopeInfo::HasNewTarget() { return HasNewTargetField::decode(Flags()); }
+
+
bool ScopeInfo::HasFunctionName() {
if (length() > 0) {
return NONE != FunctionVariableField::decode(Flags());
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/ast/scopeinfo.h
index 2afc667c30..489a672ed8 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/ast/scopeinfo.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SCOPEINFO_H_
-#define V8_SCOPEINFO_H_
+#ifndef V8_AST_SCOPEINFO_H_
+#define V8_AST_SCOPEINFO_H_
#include "src/allocation.h"
-#include "src/modules.h"
-#include "src/variables.h"
+#include "src/ast/modules.h"
+#include "src/ast/variables.h"
namespace v8 {
namespace internal {
@@ -172,4 +172,4 @@ class ModuleInfo: public FixedArray {
} // namespace internal
} // namespace v8
-#endif // V8_SCOPEINFO_H_
+#endif // V8_AST_SCOPEINFO_H_
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/ast/scopes.cc
index 6a6b8ad45c..c2b05b7c04 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/scopes.h"
+#include "src/ast/scopes.h"
#include "src/accessors.h"
+#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/scopeinfo.h"
+#include "src/parsing/parser.h" // for ParseInfo
namespace v8 {
namespace internal {
@@ -202,8 +202,6 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
num_stack_slots_ = 0;
num_heap_slots_ = 0;
num_global_slots_ = 0;
- num_modules_ = 0;
- module_var_ = NULL;
arity_ = 0;
has_simple_parameters_ = true;
rest_parameter_ = NULL;
@@ -308,14 +306,10 @@ bool Scope::Analyze(ParseInfo* info) {
}
#ifdef DEBUG
- bool native = info->isolate()->bootstrapper()->IsActive();
- if (!info->shared_info().is_null()) {
- Object* script = info->shared_info()->script();
- native = script->IsScript() &&
- Script::cast(script)->type() == Script::TYPE_NATIVE;
+ if (info->script_is_native() ? FLAG_print_builtin_scopes
+ : FLAG_print_scopes) {
+ scope->Print();
}
-
- if (native ? FLAG_print_builtin_scopes : FLAG_print_scopes) scope->Print();
#endif
info->set_scope(scope);
@@ -581,11 +575,24 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
TEMPORARY,
Variable::NORMAL,
kCreatedInitialized);
- scope->temps_.Add(var, zone());
+ scope->AddTemporary(var);
return var;
}
+bool Scope::RemoveTemporary(Variable* var) {
+ // Most likely (always?) any temporary variable we want to remove
+ // was just added before, so we search backwards.
+ for (int i = temps_.length(); i-- > 0;) {
+ if (temps_[i] == var) {
+ temps_.Remove(i);
+ return true;
+ }
+ }
+ return false;
+}
+
+
void Scope::AddDeclaration(Declaration* declaration) {
decls_.Add(declaration, zone());
}
@@ -610,7 +617,11 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
Declaration* decl = decls_[i];
- if (decl->mode() != VAR && !is_block_scope()) continue;
+ // We don't create a separate scope to hold the function name of a function
+ // expression, so we have to make sure not to consider it when checking for
+ // conflicts (since it's conceptually "outside" the declaration scope).
+ if (is_function_scope() && decl == function()) continue;
+ if (IsLexicalVariableMode(decl->mode()) && !is_block_scope()) continue;
const AstRawString* name = decl->proxy()->raw_name();
// Iterate through all scopes until and including the declaration scope.
@@ -620,11 +631,11 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
// captured in Parser::Declare. The only conflicts we still need to check
// are lexical vs VAR, or any declarations within a declaration block scope
// vs lexical declarations in its surrounding (function) scope.
- if (decl->mode() != VAR) current = current->outer_scope_;
+ if (IsLexicalVariableMode(decl->mode())) current = current->outer_scope_;
do {
// There is a conflict if there exists a non-VAR binding.
Variable* other_var = current->variables_.Lookup(name);
- if (other_var != NULL && other_var->mode() != VAR) {
+ if (other_var != NULL && IsLexicalVariableMode(other_var->mode())) {
return decl;
}
previous = current;
@@ -714,16 +725,10 @@ bool Scope::AllocateVariables(ParseInfo* info, AstNodeFactory* factory) {
}
PropagateScopeInfo(outer_scope_calls_sloppy_eval);
- // 2) Allocate module instances.
- if (FLAG_harmony_modules && is_script_scope()) {
- DCHECK(num_modules_ == 0);
- AllocateModules();
- }
-
- // 3) Resolve variables.
+ // 2) Resolve variables.
if (!ResolveVariablesRecursively(info, factory)) return false;
- // 4) Allocate variables.
+ // 3) Allocate variables.
AllocateVariablesRecursively(info->isolate());
return true;
@@ -852,6 +857,24 @@ void Scope::GetNestedScopeChain(Isolate* isolate,
}
+void Scope::CollectNonLocals(HashMap* non_locals) {
+ // Collect non-local variables referenced in the scope.
+ // TODO(yangguo): store non-local variables explicitly if we can no longer
+ // rely on unresolved_ to find them.
+ for (int i = 0; i < unresolved_.length(); i++) {
+ VariableProxy* proxy = unresolved_[i];
+ if (proxy->is_resolved() && proxy->var()->IsStackAllocated()) continue;
+ Handle<String> name = proxy->name();
+ void* key = reinterpret_cast<void*>(name.location());
+ HashMap::Entry* entry = non_locals->LookupOrInsert(key, name->Hash());
+ entry->value = key;
+ }
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ inner_scopes_[i]->CollectNonLocals(non_locals);
+ }
+}
+
+
void Scope::ReportMessage(int start_position, int end_position,
MessageTemplate::Template message,
const AstRawString* arg) {
@@ -1157,6 +1180,28 @@ bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
// Otherwise, try to resolve the variable.
BindingKind binding_kind;
Variable* var = LookupRecursive(proxy, &binding_kind, factory);
+
+#ifdef DEBUG
+ if (info->script_is_native()) {
+ // To avoid polluting the global object in native scripts
+ // - Variables must not be allocated to the global scope.
+ CHECK_NOT_NULL(outer_scope());
+ // - Variables must be bound locally or unallocated.
+ if (BOUND != binding_kind) {
+ // The following variable name may be minified. If so, disable
+ // minification in js2c.py for better output.
+ Handle<String> name = proxy->raw_name()->string();
+ V8_Fatal(__FILE__, __LINE__, "Unbound variable: '%s' in native script.",
+ name->ToCString().get());
+ }
+ VariableLocation location = var->location();
+ CHECK(location == VariableLocation::LOCAL ||
+ location == VariableLocation::CONTEXT ||
+ location == VariableLocation::PARAMETER ||
+ location == VariableLocation::UNALLOCATED);
+ }
+#endif
+
switch (binding_kind) {
case BOUND:
// We found a variable binding.
@@ -1632,23 +1677,6 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
}
-void Scope::AllocateModules() {
- DCHECK(is_script_scope());
- DCHECK(!already_resolved());
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* scope = inner_scopes_.at(i);
- if (scope->is_module_scope()) {
- DCHECK(!scope->already_resolved());
- DCHECK(scope->module_descriptor_->IsFrozen());
- DCHECK_NULL(scope->module_var_);
- scope->module_var_ =
- NewTemporary(ast_value_factory_->dot_module_string());
- ++num_modules_;
- }
- }
-}
-
-
int Scope::StackLocalCount() const {
return num_stack_slots() -
(function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/ast/scopes.h
index d115097803..6c261f63c3 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SCOPES_H_
-#define V8_SCOPES_H_
+#ifndef V8_AST_SCOPES_H_
+#define V8_AST_SCOPES_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/hashmap.h"
#include "src/pending-compilation-error-handler.h"
#include "src/zone.h"
@@ -208,6 +209,15 @@ class Scope: public ZoneObject {
// names.
Variable* NewTemporary(const AstRawString* name);
+ // Remove a temporary variable. This is for adjusting the scope of
+ // temporaries used when desugaring parameter initializers.
+ bool RemoveTemporary(Variable* var);
+
+ // Adds a temporary variable in this scope's TemporaryScope. This is for
+ // adjusting the scope of temporaries used when desugaring parameter
+ // initializers.
+ void AddTemporary(Variable* var) { temps_.Add(var, zone()); }
+
// Adds the specific declaration node to the list of declarations in
// this scope. The declarations are processed as part of entering
// the scope; see codegen.cc:ProcessDeclarations.
@@ -427,9 +437,7 @@ class Scope: public ZoneObject {
return rest_parameter_;
}
- bool has_rest_parameter() const {
- return rest_index_ >= 0;
- }
+ bool has_rest_parameter() const { return rest_index_ >= 0; }
bool has_simple_parameters() const {
return has_simple_parameters_;
@@ -510,12 +518,6 @@ class Scope: public ZoneObject {
int ContextLocalCount() const;
int ContextGlobalCount() const;
- // For script scopes, the number of module literals (including nested ones).
- int num_modules() const { return num_modules_; }
-
- // For module scopes, the host scope's internal variable binding this module.
- Variable* module_var() const { return module_var_; }
-
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -562,6 +564,8 @@ class Scope: public ZoneObject {
void GetNestedScopeChain(Isolate* isolate, List<Handle<ScopeInfo> >* chain,
int statement_position);
+ void CollectNonLocals(HashMap* non_locals);
+
// ---------------------------------------------------------------------------
// Strict mode support.
bool IsDeclared(const AstRawString* name) {
@@ -692,12 +696,6 @@ class Scope: public ZoneObject {
int num_heap_slots_;
int num_global_slots_;
- // The number of modules (including nested ones).
- int num_modules_;
-
- // For module scopes, the host scope's temporary variable binding this module.
- Variable* module_var_;
-
// Info about the parameter list of a function.
int arity_;
bool has_simple_parameters_;
@@ -794,7 +792,6 @@ class Scope: public ZoneObject {
void AllocateVariablesRecursively(Isolate* isolate);
void AllocateParameter(Variable* var, int index);
void AllocateReceiver();
- void AllocateModules();
// Resolve and fill in the allocation information for all variables
// in this scopes. Must be called *after* all scopes have been
@@ -849,4 +846,4 @@ class Scope: public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_SCOPES_H_
+#endif // V8_AST_SCOPES_H_
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/ast/variables.cc
index b8bcbd03d1..8e00782386 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/variables.h"
+#include "src/ast/variables.h"
-#include "src/ast.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/variables.h b/deps/v8/src/ast/variables.h
index a9cd5dcfec..ca5d1cdd40 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VARIABLES_H_
-#define V8_VARIABLES_H_
+#ifndef V8_AST_VARIABLES_H_
+#define V8_AST_VARIABLES_H_
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
#include "src/zone.h"
namespace v8 {
@@ -37,6 +37,10 @@ class Variable: public ZoneObject {
// scope is only used to follow the context chain length.
Scope* scope() const { return scope_; }
+ // This is for adjusting the scope of temporaries used when desugaring
+ // parameter initializers.
+ void set_scope(Scope* scope) { scope_ = scope; }
+
Handle<String> name() const { return name_->string(); }
const AstRawString* raw_name() const { return name_; }
VariableMode mode() const { return mode_; }
@@ -211,4 +215,4 @@ class ClassVariable : public Variable {
} // namespace internal
} // namespace v8
-#endif // V8_VARIABLES_H_
+#endif // V8_AST_VARIABLES_H_
diff --git a/deps/v8/src/atomic-utils.h b/deps/v8/src/atomic-utils.h
index 2aa78f8b5e..34e1cb0269 100644
--- a/deps/v8/src/atomic-utils.h
+++ b/deps/v8/src/atomic-utils.h
@@ -19,9 +19,10 @@ class AtomicNumber {
AtomicNumber() : value_(0) {}
explicit AtomicNumber(T initial) : value_(initial) {}
- V8_INLINE void Increment(T increment) {
- base::Barrier_AtomicIncrement(&value_,
- static_cast<base::AtomicWord>(increment));
+ // Returns the newly set value.
+ V8_INLINE T Increment(T increment) {
+ return static_cast<T>(base::Barrier_AtomicIncrement(
+ &value_, static_cast<base::AtomicWord>(increment)));
}
V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index c224303e05..0f290fb7f0 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -9,7 +9,7 @@
#include "src/base/platform/semaphore.h"
#include "src/base/smart-pointers.h"
#include "src/compiler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index c09e429cd8..83898d12bf 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -146,11 +146,14 @@ namespace internal {
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmiAndNotABoundFunction, \
+ "Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotADate, "Operand is not a date") \
+ V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
@@ -174,6 +177,7 @@ namespace internal {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+ V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kSloppyFunctionExpectsJSReceiverReceiver, \
"Sloppy function expects JSReceiver as receiver.") \
@@ -250,6 +254,7 @@ namespace internal {
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
+ V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWeShouldNotHaveAnEmptyLexicalContext, \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index 097b914399..8422ec7b60 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -29,6 +29,22 @@
],
},
}],
+ ['OS=="win" and component=="shared_library"', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/icui18n.dll',
+ '<(PRODUCT_DIR)/icuuc.dll',
+ '<(PRODUCT_DIR)/v8.dll',
+ ],
+ },
+ }],
+ ['OS=="mac" and asan==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/libclang_rt.asan_osx_dynamic.dylib',
+ ],
+ },
+ }],
['tsan==1', {
'variables': {
'files': [
@@ -56,4 +72,4 @@
'variables': {},
}],
],
-} \ No newline at end of file
+}
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index f5710dfb5c..4ba3c47ad9 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -212,6 +212,26 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
}
+// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed summation resulted in an overflow.
+inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+ uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
+ *val = bit_cast<int64_t>(res);
+ return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
+}
+
+
+// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
+// |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed subtraction resulted in an overflow.
+inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+ uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
+ *val = bit_cast<int64_t>(res);
+ return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
+}
+
+
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 5f66d21fcf..9637f657f9 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -55,6 +55,13 @@
#else
#define V8_HOST_ARCH_32_BIT 1
#endif
+#elif defined(__s390__) || defined(__s390x__)
+#define V8_HOST_ARCH_S390 1
+#if defined(__s390x__)
+#define V8_HOST_ARCH_64_BIT 1
+#else
+#define V8_HOST_ARCH_32_BIT 1
+#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
@@ -78,7 +85,7 @@
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
!V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_S390
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -121,6 +128,12 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
+#elif V8_TARGET_ARCH_S390
+#if V8_TARGET_ARCH_S390X
+#define V8_TARGET_ARCH_64_BIT 1
+#else
+#define V8_TARGET_ARCH_32_BIT 1
+#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_ARCH_32_BIT 1
#else
@@ -179,10 +192,23 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_BE
#define V8_TARGET_BIG_ENDIAN 1
+#elif V8_TARGET_ARCH_S390
+#if V8_TARGET_ARCH_S390_LE_SIM
+#define V8_TARGET_LITTLE_ENDIAN 1
+#else
+#define V8_TARGET_BIG_ENDIAN 1
+#endif
#else
#error Unknown target architecture endianness
#endif
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) || \
+ defined(V8_TARGET_ARCH_X87)
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
+#else
+#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
+#endif
+
// Number of bits to represent the page size for paged spaces. The value of 20
// gives 1Mb bytes per page.
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 467ecf67c9..6bdb69319d 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_FLAGS_H_
#define V8_BASE_FLAGS_H_
+#include <cstddef>
+
#include "src/base/compiler-specific.h"
namespace v8 {
@@ -30,6 +32,13 @@ class Flags final {
: mask_(static_cast<S>(flag)) {}
explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
+ bool operator==(flag_type flag) const {
+ return mask_ == static_cast<S>(flag);
+ }
+ bool operator!=(flag_type flag) const {
+ return mask_ != static_cast<S>(flag);
+ }
+
Flags& operator&=(const Flags& flags) {
mask_ &= flags.mask_;
return *this;
@@ -60,6 +69,8 @@ class Flags final {
operator mask_type() const { return mask_; }
bool operator!() const { return !mask_; }
+ friend size_t hash_value(const Flags& flags) { return flags.mask_; }
+
private:
mask_type mask_;
};
@@ -97,13 +108,17 @@ class Flags final {
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
return Type(lhs) ^ rhs; \
- } inline Type operator^(Type::flag_type lhs, const Type& rhs) \
+ } inline Type \
+ operator^(Type::flag_type lhs, const Type& rhs) \
ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
return rhs ^ lhs; \
- } inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
- ALLOW_UNUSED_TYPE; \
- inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
+ } inline void \
+ operator^(Type::flag_type lhs, Type::mask_type rhs) ALLOW_UNUSED_TYPE; \
+ inline void operator^(Type::flag_type lhs, Type::mask_type rhs) { \
+ } inline Type \
+ operator~(Type::flag_type val)ALLOW_UNUSED_TYPE; \
+ inline Type operator~(Type::flag_type val) { return ~Type(val); }
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index f47b0b9d55..10cab4b2bf 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -102,66 +102,6 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif // V8_OS_NACL
-// The COMPILE_ASSERT macro can be used to verify that a compile time
-// expression is true. For example, you could use it to verify the
-// size of a static array:
-//
-// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
-// content_type_names_incorrect_size);
-//
-// or to make sure a struct is smaller than a certain size:
-//
-// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
-//
-// The second argument to the macro is the name of the variable. If
-// the expression is false, most compilers will issue a warning/error
-// containing the name of the variable.
-#if V8_HAS_CXX11_STATIC_ASSERT
-
-// Under C++11, just use static_assert.
-#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
-
-#else
-
-template <bool>
-struct CompileAssert {};
-
-#define COMPILE_ASSERT(expr, msg) \
- typedef CompileAssert<static_cast<bool>(expr)> \
- msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED_TYPE
-
-// Implementation details of COMPILE_ASSERT:
-//
-// - COMPILE_ASSERT works by defining an array type that has -1
-// elements (and thus is invalid) when the expression is false.
-//
-// - The simpler definition
-//
-// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
-//
-// does not work, as gcc supports variable-length arrays whose sizes
-// are determined at run-time (this is gcc's extension and not part
-// of the C++ standard). As a result, gcc fails to reject the
-// following code with the simple definition:
-//
-// int foo;
-// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
-// // not a compile-time constant.
-//
-// - By using the type CompileAssert<static_cast<bool>(expr)>, we ensure that
-// expr is a compile-time constant. (Template arguments must be
-// determined at compile-time.)
-//
-// - The array size is (static_cast<bool>(expr) ? 1 : -1), instead of simply
-//
-// ((expr) ? 1 : -1).
-//
-// This is to avoid running into a bug in MS VC 7.1, which
-// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
-
-#endif
-
-
// bit_cast<Dest,Source> is a template function that implements the
// equivalent of "*reinterpret_cast<Dest*>(&source)". We need this in
// very low-level functions like the protobuf library and fast math
@@ -217,8 +157,8 @@ struct CompileAssert {};
// is likely to surprise you.
template <class Dest, class Source>
V8_INLINE Dest bit_cast(Source const& source) {
- COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
-
+ static_assert(sizeof(Dest) == sizeof(Source),
+ "source and dest must be same size");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
@@ -278,32 +218,8 @@ V8_INLINE Dest bit_cast(Source const& source) {
#endif
-// Use C++11 static_assert if possible, which gives error
-// messages that are easier to understand on first sight.
-#if V8_HAS_CXX11_STATIC_ASSERT
+// TODO(all) Replace all uses of this macro with static_assert, remove macro.
#define STATIC_ASSERT(test) static_assert(test, #test)
-#else
-// This is inspired by the static assertion facility in boost. This
-// is pretty magical. If it causes you trouble on a platform you may
-// find a fix in the boost code.
-template <bool> class StaticAssertion;
-template <> class StaticAssertion<true> { };
-// This macro joins two tokens. If one of the tokens is a macro the
-// helper call causes it to be resolved before joining.
-#define SEMI_STATIC_JOIN(a, b) SEMI_STATIC_JOIN_HELPER(a, b)
-#define SEMI_STATIC_JOIN_HELPER(a, b) a##b
-// Causes an error during compilation of the condition is not
-// statically known to be true. It is formulated as a typedef so that
-// it can be used wherever a typedef can be used. Beware that this
-// actually causes each use to introduce a new defined type with a
-// name depending on the source line.
-template <int> class StaticAssertionHelper { };
-#define STATIC_ASSERT(test) \
- typedef StaticAssertionHelper< \
- sizeof(StaticAssertion<static_cast<bool>((test))>)> \
- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED_TYPE
-
-#endif
// The USE(x) template is used to silence C++ compiler warnings
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index 4cf06a9047..ff428402b6 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -97,15 +97,14 @@ int RandomNumberGenerator::NextInt(int max) {
double RandomNumberGenerator::NextDouble() {
- return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
- static_cast<double>(static_cast<int64_t>(1) << 53);
+ XorShift128(&state0_, &state1_);
+ return ToDouble(state0_, state1_);
}
int64_t RandomNumberGenerator::NextInt64() {
- uint64_t lo = bit_cast<unsigned>(Next(32));
- uint64_t hi = bit_cast<unsigned>(Next(32));
- return lo | (hi << 32);
+ XorShift128(&state0_, &state1_);
+ return bit_cast<int64_t>(state0_ + state1_);
}
@@ -119,21 +118,26 @@ void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
int RandomNumberGenerator::Next(int bits) {
DCHECK_LT(0, bits);
DCHECK_GE(32, bits);
- // Do unsigned multiplication, which has the intended modulo semantics, while
- // signed multiplication would expose undefined behavior.
- uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
- // Assigning a uint64_t to an int64_t is implementation defined, but this
- // should be OK. Use a static_cast to explicitly state that we know what we're
- // doing. (Famous last words...)
- int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
- seed_ = seed;
- return static_cast<int>(seed >> (48 - bits));
+ XorShift128(&state0_, &state1_);
+ return static_cast<int>((state0_ + state1_) >> (64 - bits));
}
void RandomNumberGenerator::SetSeed(int64_t seed) {
+ if (seed == 0) seed = 1;
initial_seed_ = seed;
- seed_ = (seed ^ kMultiplier) & kMask;
+ state0_ = MurmurHash3(bit_cast<uint64_t>(seed));
+ state1_ = MurmurHash3(state0_);
+}
+
+
+uint64_t RandomNumberGenerator::MurmurHash3(uint64_t h) {
+ h ^= h >> 33;
+ h *= V8_UINT64_C(0xFF51AFD7ED558CCD);
+ h ^= h >> 33;
+ h *= V8_UINT64_C(0xC4CEB9FE1A85EC53);
+ h ^= h >> 33;
+ return h;
}
} // namespace base
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 10f2789c7d..cd3e6bfdc8 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -12,10 +12,16 @@ namespace base {
// -----------------------------------------------------------------------------
// RandomNumberGenerator
-//
-// This class is used to generate a stream of pseudorandom numbers. The class
-// uses a 48-bit seed, which is modified using a linear congruential formula.
-// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.)
+
+// This class is used to generate a stream of pseudo-random numbers. The class
+// uses a 64-bit seed, which is passed through MurmurHash3 to create two 64-bit
+// state values. This pair of state values is then used in xorshift128+.
+// The resulting stream of pseudo-random numbers has a period length of 2^128-1.
+// See Marsaglia: http://www.jstatsoft.org/v08/i14/paper
+// And Vigna: http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
+// NOTE: Any changes to the algorithm must be tested against TestU01.
+// Please find instructions for this in the internal repository.
+
// If two instances of RandomNumberGenerator are created with the same seed, and
// the same sequence of method calls is made for each, they will generate and
// return identical sequences of numbers.
@@ -83,6 +89,27 @@ class RandomNumberGenerator final {
int64_t initial_seed() const { return initial_seed_; }
+ // Static and exposed for external use.
+ static inline double ToDouble(uint64_t state0, uint64_t state1) {
+ // Exponent for double values for [1.0 .. 2.0)
+ static const uint64_t kExponentBits = V8_UINT64_C(0x3FF0000000000000);
+ static const uint64_t kMantissaMask = V8_UINT64_C(0x000FFFFFFFFFFFFF);
+ uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
+ return bit_cast<double>(random) - 1;
+ }
+
+ // Static and exposed for external use.
+ static inline void XorShift128(uint64_t* state0, uint64_t* state1) {
+ uint64_t s1 = *state0;
+ uint64_t s0 = *state1;
+ *state0 = s0;
+ s1 ^= s1 << 23;
+ s1 ^= s1 >> 17;
+ s1 ^= s0;
+ s1 ^= s0 >> 26;
+ *state1 = s1;
+ }
+
private:
static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
static const int64_t kAddend = 0xb;
@@ -90,8 +117,11 @@ class RandomNumberGenerator final {
int Next(int bits) WARN_UNUSED_RESULT;
+ static uint64_t MurmurHash3(uint64_t);
+
int64_t initial_seed_;
- int64_t seed_;
+ uint64_t state0_;
+ uint64_t state1_;
};
} // namespace base
diff --git a/deps/v8/src/bit-vector.cc b/deps/v8/src/bit-vector.cc
index cdd00f89c4..0fbb01811a 100644
--- a/deps/v8/src/bit-vector.cc
+++ b/deps/v8/src/bit-vector.cc
@@ -5,7 +5,6 @@
#include "src/bit-vector.h"
#include "src/base/bits.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index c609e578d4..f68a12ab14 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -16,11 +16,7 @@
#include "src/isolate-inl.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
-#include "third_party/fdlibm/fdlibm.h"
-
-#if defined(V8_WASM)
#include "src/wasm/wasm-js.h"
-#endif
namespace v8 {
namespace internal {
@@ -59,7 +55,6 @@ template Handle<String> Bootstrapper::SourceLookup<ExperimentalNatives>(
template Handle<String> Bootstrapper::SourceLookup<ExperimentalExtraNatives>(
int index);
template Handle<String> Bootstrapper::SourceLookup<ExtraNatives>(int index);
-template Handle<String> Bootstrapper::SourceLookup<CodeStubNatives>(int index);
void Bootstrapper::Initialize(bool create_heap_objects) {
@@ -130,7 +125,6 @@ void Bootstrapper::TearDown() {
DeleteNativeSources(ExtraNatives::GetSourceCache(isolate_->heap()));
DeleteNativeSources(
ExperimentalExtraNatives::GetSourceCache(isolate_->heap()));
- DeleteNativeSources(CodeStubNatives::GetSourceCache(isolate_->heap()));
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -185,13 +179,11 @@ class Genesis BASE_EMBEDDED {
// Similarly, we want to use the global that has been created by the templates
// passed through the API. The global from the snapshot is detached from the
// other objects in the snapshot.
- void HookUpGlobalObject(Handle<JSGlobalObject> global_object,
- Handle<FixedArray> outdated_contexts);
+ void HookUpGlobalObject(Handle<JSGlobalObject> global_object);
// The native context has a ScriptContextTable that store declarative bindings
// made in script scopes. Add a "this" binding to that table pointing to the
// global proxy.
void InstallGlobalThisBinding();
- void HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts);
// New context initialization. Used for creating a context from scratch.
void InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
@@ -206,8 +198,11 @@ class Genesis BASE_EMBEDDED {
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
+ DECLARE_FEATURE_INITIALIZATION(promise_extra, "")
#undef DECLARE_FEATURE_INITIALIZATION
+ Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
+ const char* name);
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
@@ -222,6 +217,7 @@ class Genesis BASE_EMBEDDED {
void InstallBuiltinFunctionIds();
void InstallExperimentalBuiltinFunctionIds();
void InitializeNormalizedMapCaches();
+ void InstallJSProxyMaps();
enum ExtensionTraversalState {
UNVISITED, VISITED, INSTALLED
@@ -271,8 +267,7 @@ class Genesis BASE_EMBEDDED {
FUNCTION_WITH_WRITEABLE_PROTOTYPE,
FUNCTION_WITH_READONLY_PROTOTYPE,
// Without prototype.
- FUNCTION_WITHOUT_PROTOTYPE,
- BOUND_FUNCTION
+ FUNCTION_WITHOUT_PROTOTYPE
};
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
@@ -340,26 +335,6 @@ Handle<Context> Bootstrapper::CreateEnvironment(
}
-bool Bootstrapper::CreateCodeStubContext(Isolate* isolate) {
- HandleScope scope(isolate);
- SaveContext save_context(isolate);
- BootstrapperActive active(this);
-
- v8::ExtensionConfiguration no_extensions;
- Handle<Context> native_context = CreateEnvironment(
- MaybeHandle<JSGlobalProxy>(), v8::Local<v8::ObjectTemplate>(),
- &no_extensions, THIN_CONTEXT);
- isolate->heap()->SetRootCodeStubContext(*native_context);
- isolate->set_context(*native_context);
- Handle<JSObject> code_stub_exports =
- isolate->factory()->NewJSObject(isolate->object_function());
- JSObject::NormalizeProperties(code_stub_exports, CLEAR_INOBJECT_PROPERTIES, 2,
- "container to export to extra natives");
- isolate->heap()->SetRootCodeStubExportsObject(*code_stub_exports);
- return InstallCodeStubNatives(isolate);
-}
-
-
static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
// object.__proto__ = proto;
Handle<Map> old_map = Handle<Map>(object->map());
@@ -386,31 +361,55 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
namespace {
+void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
+ Handle<JSFunction> function, Handle<String> function_name,
+ PropertyAttributes attributes = DONT_ENUM) {
+ JSObject::AddProperty(target, property_name, function, attributes);
+ if (target->IsJSGlobalObject()) {
+ function->shared()->set_instance_class_name(*function_name);
+ }
+ function->shared()->set_native(true);
+}
+
+
+static void InstallFunction(Handle<JSObject> target,
+ Handle<JSFunction> function, Handle<Name> name,
+ PropertyAttributes attributes = DONT_ENUM) {
+ Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
+ InstallFunction(target, name, function, name_string, attributes);
+}
+
+
+static Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
+ InstanceType type, int instance_size,
+ MaybeHandle<JSObject> maybe_prototype,
+ Builtins::Name call,
+ bool strict_function_map = false) {
+ Factory* factory = isolate->factory();
+ Handle<Code> call_code(isolate->builtins()->builtin(call));
+ Handle<JSObject> prototype;
+ static const bool kReadOnlyPrototype = false;
+ static const bool kInstallConstructor = false;
+ return maybe_prototype.ToHandle(&prototype)
+ ? factory->NewFunction(name, call_code, prototype, type,
+ instance_size, kReadOnlyPrototype,
+ kInstallConstructor, strict_function_map)
+ : factory->NewFunctionWithoutPrototype(name, call_code,
+ strict_function_map);
+}
+
+
Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
InstanceType type, int instance_size,
MaybeHandle<JSObject> maybe_prototype,
Builtins::Name call,
PropertyAttributes attributes,
bool strict_function_map = false) {
- Isolate* isolate = target->GetIsolate();
- Factory* factory = isolate->factory();
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
- Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
- Handle<JSObject> prototype;
- static const bool kReadOnlyPrototype = false;
- static const bool kInstallConstructor = false;
Handle<JSFunction> function =
- maybe_prototype.ToHandle(&prototype)
- ? factory->NewFunction(name_string, call_code, prototype, type,
- instance_size, kReadOnlyPrototype,
- kInstallConstructor, strict_function_map)
- : factory->NewFunctionWithoutPrototype(name_string, call_code,
- strict_function_map);
- JSObject::AddProperty(target, name, function, attributes);
- if (target->IsJSGlobalObject()) {
- function->shared()->set_instance_class_name(*name_string);
- }
- function->shared()->set_native(true);
+ CreateFunction(target->GetIsolate(), name_string, type, instance_size,
+ maybe_prototype, call, strict_function_map);
+ InstallFunction(target, name, function, name_string, attributes);
return function;
}
@@ -484,7 +483,7 @@ void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
map->set_is_callable();
return map;
}
@@ -557,7 +556,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
}
// Allocate the empty function as the prototype for function - ES6 19.2.3
- Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
+ Handle<Code> code(isolate->builtins()->EmptyFunction());
Handle<JSFunction> empty_function =
factory->NewFunctionWithoutPrototype(factory->empty_string(), code);
@@ -597,7 +596,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode) {
- int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
+ int size = IsFunctionModeWithPrototype(function_mode) ? 3 : 2;
Map::EnsureDescriptorSlack(map, size);
PropertyAttributes rw_attribs =
@@ -607,35 +606,22 @@ void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
PropertyAttributes roc_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- if (function_mode == BOUND_FUNCTION) {
- { // Add length.
- Handle<String> length_string = isolate()->factory()->length_string();
- DataDescriptor d(length_string, 0, roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
- { // Add name.
- Handle<String> name_string = isolate()->factory()->name_string();
- DataDescriptor d(name_string, 1, roc_attribs, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
- } else {
- DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
- function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
- function_mode == FUNCTION_WITHOUT_PROTOTYPE);
- { // Add length.
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, roc_attribs);
- map->AppendDescriptor(&d);
- }
- { // Add name.
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- roc_attribs);
- map->AppendDescriptor(&d);
- }
+ DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+ function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
+ function_mode == FUNCTION_WITHOUT_PROTOTYPE);
+ { // Add length.
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+ { // Add name.
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ roc_attribs);
+ map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
// Add prototype.
@@ -682,11 +668,13 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
Handle<Code> code(isolate()->builtins()->builtin(builtin_name));
Handle<JSFunction> function =
factory()->NewFunctionWithoutPrototype(name, code);
- function->set_map(native_context()->sloppy_function_map());
function->shared()->DontAdaptArguments();
// %ThrowTypeError% must not have a name property.
- JSReceiver::DeleteProperty(function, factory()->name_string()).Assert();
+ if (JSReceiver::DeleteProperty(function, factory()->name_string())
+ .IsNothing()) {
+ DCHECK(false);
+ }
// length needs to be non configurable.
Handle<Object> value(Smi::FromInt(function->shared()->length()), isolate());
@@ -695,8 +683,10 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
.Assert();
- if (JSObject::PreventExtensions(function, Object::THROW_ON_ERROR).IsNothing())
+ if (JSObject::PreventExtensions(function, Object::THROW_ON_ERROR)
+ .IsNothing()) {
DCHECK(false);
+ }
return function;
}
@@ -725,7 +715,7 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
FunctionMode function_mode, Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrictFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
map->set_is_callable();
Map::SetPrototype(map, empty_function);
return map;
@@ -736,7 +726,7 @@ Handle<Map> Genesis::CreateStrongFunctionMap(
Handle<JSFunction> empty_function, bool is_constructor) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrongFunctionInstanceDescriptor(map);
- map->set_is_constructor(is_constructor);
+ if (is_constructor) map->set_is_constructor();
Map::SetPrototype(map, empty_function);
map->set_is_callable();
map->set_is_extensible(is_constructor);
@@ -763,21 +753,6 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// This map is installed in MakeFunctionInstancePrototypeWritable.
strict_function_map_writable_prototype_ =
CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
-
- // Special map for non-constructor bound functions.
- // TODO(bmeurer): Bound functions should not be represented as JSFunctions.
- Handle<Map> bound_function_without_constructor_map =
- CreateStrictFunctionMap(BOUND_FUNCTION, empty);
- native_context()->set_bound_function_without_constructor_map(
- *bound_function_without_constructor_map);
-
- // Special map for constructor bound functions.
- // TODO(bmeurer): Bound functions should not be represented as JSFunctions.
- Handle<Map> bound_function_with_constructor_map =
- Map::Copy(bound_function_without_constructor_map, "IsConstructor");
- bound_function_with_constructor_map->set_is_constructor(true);
- native_context()->set_bound_function_with_constructor_map(
- *bound_function_with_constructor_map);
}
@@ -920,23 +895,6 @@ void Genesis::InstallGlobalThisBinding() {
}
-void Genesis::HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts) {
- // One of these contexts should be the one that declares the global "this"
- // binding.
- for (int i = 0; i < outdated_contexts->length(); ++i) {
- Context* context = Context::cast(outdated_contexts->get(i));
- if (context->IsScriptContext()) {
- ScopeInfo* scope_info = context->scope_info();
- int slot = scope_info->ReceiverContextSlotIndex();
- if (slot >= 0) {
- DCHECK_EQ(slot, Context::MIN_CONTEXT_SLOTS);
- context->set(slot, native_context()->global_proxy());
- }
- }
- }
-}
-
-
Handle<JSGlobalObject> Genesis::CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy) {
@@ -973,8 +931,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
if (js_global_object_template.is_null()) {
Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
- Builtins::kIllegal));
+ Handle<Code> code = isolate()->builtins()->Illegal();
Handle<JSObject> prototype =
factory()->NewFunctionPrototype(isolate()->object_function());
js_global_object_function = factory()->NewFunction(
@@ -1004,8 +961,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
Handle<JSFunction> global_proxy_function;
if (global_proxy_template.IsEmpty()) {
Handle<String> name = Handle<String>(heap()->empty_string());
- Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
- Builtins::kIllegal));
+ Handle<Code> code = isolate()->builtins()->Illegal();
global_proxy_function = factory()->NewFunction(
name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
} else {
@@ -1044,27 +1000,65 @@ void Genesis::HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
}
-void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object,
- Handle<FixedArray> outdated_contexts) {
+void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object) {
Handle<JSGlobalObject> global_object_from_snapshot(
JSGlobalObject::cast(native_context()->extension()));
native_context()->set_extension(*global_object);
native_context()->set_security_token(*global_object);
- // Replace outdated global objects in deserialized contexts.
- for (int i = 0; i < outdated_contexts->length(); ++i) {
- Context* context = Context::cast(outdated_contexts->get(i));
- // Assert that there is only one native context.
- DCHECK(!context->IsNativeContext() || context == *native_context());
- DCHECK_EQ(context->global_object(), *global_object_from_snapshot);
- context->set_global_object(*global_object);
- }
-
TransferNamedProperties(global_object_from_snapshot, global_object);
TransferIndexedProperties(global_object_from_snapshot, global_object);
}
+static Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
+ Handle<String> name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Handle<JSFunction> fun =
+ CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), call);
+ if (adapt) {
+ fun->shared()->set_internal_formal_parameter_count(len);
+ } else {
+ fun->shared()->DontAdaptArguments();
+ }
+ fun->shared()->set_length(len);
+ return fun;
+}
+
+
+static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ Handle<String> name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Handle<JSFunction> fun =
+ SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
+ InstallFunction(base, fun, name, DONT_ENUM);
+ return fun;
+}
+
+
+static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ const char* name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Factory* const factory = base->GetIsolate()->factory();
+ return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
+ len, adapt);
+}
+
+
+static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
+ Handle<JSFunction> function,
+ int context_index) {
+ Handle<Smi> index(Smi::FromInt(context_index), isolate);
+ JSObject::AddProperty(
+ function, isolate->factory()->native_context_index_symbol(), index, NONE);
+ isolate->native_context()->set(context_index, *function);
+}
+
+
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
@@ -1076,7 +1070,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_previous(NULL);
// Set extension and global object.
native_context()->set_extension(*global_object);
- native_context()->set_global_object(*global_object);
// Security setup: Set the security token of the native context to the global
// object. This makes the security check between two different contexts fail
// by default even in case of global object reinitialization.
@@ -1090,18 +1083,71 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_script_context_table(*script_context_table);
InstallGlobalThisBinding();
- Handle<String> object_name = factory->Object_string();
- JSObject::AddProperty(
- global_object, object_name, isolate->object_function(), DONT_ENUM);
+ { // --- O b j e c t ---
+ Handle<String> object_name = factory->Object_string();
+ Handle<JSFunction> object_function = isolate->object_function();
+ JSObject::AddProperty(global_object, object_name, object_function,
+ DONT_ENUM);
+ SimpleInstallFunction(object_function, factory->assign_string(),
+ Builtins::kObjectAssign, 2, false);
+ SimpleInstallFunction(object_function, factory->create_string(),
+ Builtins::kObjectCreate, 2, false);
+ Handle<JSFunction> object_freeze = SimpleInstallFunction(
+ object_function, "freeze", Builtins::kObjectFreeze, 1, false);
+ native_context()->set_object_freeze(*object_freeze);
+ Handle<JSFunction> object_is_extensible =
+ SimpleInstallFunction(object_function, "isExtensible",
+ Builtins::kObjectIsExtensible, 1, false);
+ native_context()->set_object_is_extensible(*object_is_extensible);
+ Handle<JSFunction> object_is_frozen = SimpleInstallFunction(
+ object_function, "isFrozen", Builtins::kObjectIsFrozen, 1, false);
+ native_context()->set_object_is_frozen(*object_is_frozen);
+ Handle<JSFunction> object_is_sealed = SimpleInstallFunction(
+ object_function, "isSealed", Builtins::kObjectIsSealed, 1, false);
+ native_context()->set_object_is_sealed(*object_is_sealed);
+ Handle<JSFunction> object_keys = SimpleInstallFunction(
+ object_function, "keys", Builtins::kObjectKeys, 1, false);
+ native_context()->set_object_keys(*object_keys);
+ SimpleInstallFunction(object_function, "preventExtensions",
+ Builtins::kObjectPreventExtensions, 1, false);
+ SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
+ false);
+ }
Handle<JSObject> global(native_context()->global_object());
- // Install global Function object
- Handle<JSFunction> function_function =
- InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal);
- function_function->initial_map()->set_is_callable();
- function_function->initial_map()->set_is_constructor(true);
+ { // --- F u n c t i o n ---
+ Handle<JSFunction> prototype = empty_function;
+ Handle<JSFunction> function_fun =
+ InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
+ prototype, Builtins::kFunctionConstructor);
+ function_fun->set_prototype_or_initial_map(
+ *sloppy_function_map_writable_prototype_);
+ function_fun->shared()->DontAdaptArguments();
+ function_fun->shared()->set_construct_stub(
+ *isolate->builtins()->FunctionConstructor());
+ function_fun->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate, function_fun,
+ Context::FUNCTION_FUNCTION_INDEX);
+
+ // Setup the methods on the %FunctionPrototype%.
+ SimpleInstallFunction(prototype, factory->apply_string(),
+ Builtins::kFunctionPrototypeApply, 2, false);
+ SimpleInstallFunction(prototype, factory->bind_string(),
+ Builtins::kFunctionPrototypeBind, 1, false);
+ SimpleInstallFunction(prototype, factory->call_string(),
+ Builtins::kFunctionPrototypeCall, 1, false);
+ SimpleInstallFunction(prototype, factory->toString_string(),
+ Builtins::kFunctionPrototypeToString, 0, false);
+
+ // Install the "constructor" property on the %FunctionPrototype%.
+ JSObject::AddProperty(prototype, factory->constructor_string(),
+ function_fun, DONT_ENUM);
+
+ sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
+ strict_function_map_writable_prototype_->SetConstructor(*function_fun);
+ native_context()->strong_function_map()->SetConstructor(*function_fun);
+ }
{ // --- A r r a y ---
Handle<JSFunction> array_function =
@@ -1134,11 +1180,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
initial_map->AppendDescriptor(&d);
}
- // array_function is used internally. JS code creating array object should
- // search for the 'Array' property on the global object and use that one
- // as the constructor. 'Array' property on a global object can be
- // overwritten by JS code.
- native_context()->set_array_function(*array_function);
+ InstallWithIntrinsicDefaultProto(isolate, array_function,
+ Context::ARRAY_FUNCTION_INDEX);
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
@@ -1150,14 +1193,23 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::Copy(initial_map, "SetInstancePrototype");
initial_strong_map->set_is_strong();
CacheInitialJSArrayMaps(native_context(), initial_strong_map);
+
+ Handle<JSFunction> is_arraylike = SimpleInstallFunction(
+ array_function, isolate->factory()->InternalizeUtf8String("isArray"),
+ Builtins::kArrayIsArray, 1, true);
+ native_context()->set_is_arraylike(*is_arraylike);
}
{ // --- N u m b e r ---
- Handle<JSFunction> number_fun =
- InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
- native_context()->set_number_function(*number_fun);
+ Handle<JSFunction> number_fun = InstallFunction(
+ global, "Number", JS_VALUE_TYPE, JSValue::kSize,
+ isolate->initial_object_prototype(), Builtins::kNumberConstructor);
+ number_fun->shared()->DontAdaptArguments();
+ number_fun->shared()->set_construct_stub(
+ *isolate->builtins()->NumberConstructor_ConstructStub());
+ number_fun->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate, number_fun,
+ Context::NUMBER_FUNCTION_INDEX);
}
{ // --- B o o l e a n ---
@@ -1165,18 +1217,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal);
- native_context()->set_boolean_function(*boolean_fun);
+ InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
+ Context::BOOLEAN_FUNCTION_INDEX);
}
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
- string_fun->shared()->set_construct_stub(isolate->builtins()->builtin(
- Builtins::kStringConstructor_ConstructStub));
+ string_fun->shared()->set_construct_stub(
+ *isolate->builtins()->StringConstructor_ConstructStub());
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
- native_context()->set_string_function(*string_fun);
+ InstallWithIntrinsicDefaultProto(isolate, string_fun,
+ Context::STRING_FUNCTION_INDEX);
Handle<Map> string_map =
Handle<Map>(native_context()->string_function()->initial_map());
@@ -1199,19 +1253,143 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> symbol_fun = InstallFunction(
global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kSymbolConstructor);
- symbol_fun->shared()->set_construct_stub(isolate->builtins()->builtin(
- Builtins::kSymbolConstructor_ConstructStub));
- symbol_fun->shared()->set_internal_formal_parameter_count(1);
+ symbol_fun->shared()->set_construct_stub(
+ *isolate->builtins()->SymbolConstructor_ConstructStub());
symbol_fun->shared()->set_length(1);
+ symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
}
{ // --- D a t e ---
// Builtin functions for Date.prototype.
- InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- }
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Handle<JSFunction> date_fun =
+ InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize, prototype,
+ Builtins::kDateConstructor);
+ InstallWithIntrinsicDefaultProto(isolate, date_fun,
+ Context::DATE_FUNCTION_INDEX);
+ date_fun->shared()->set_construct_stub(
+ *isolate->builtins()->DateConstructor_ConstructStub());
+ date_fun->shared()->set_length(7);
+ date_fun->shared()->DontAdaptArguments();
+
+ // Install the Date.now, Date.parse and Date.UTC functions.
+ SimpleInstallFunction(date_fun, "now", Builtins::kDateNow, 0, false);
+ SimpleInstallFunction(date_fun, "parse", Builtins::kDateParse, 1, false);
+ SimpleInstallFunction(date_fun, "UTC", Builtins::kDateUTC, 7, false);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), date_fun,
+ DONT_ENUM);
+
+ // Install the Date.prototype methods.
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kDatePrototypeToString, 0, false);
+ SimpleInstallFunction(prototype, "toDateString",
+ Builtins::kDatePrototypeToDateString, 0, false);
+ SimpleInstallFunction(prototype, "toTimeString",
+ Builtins::kDatePrototypeToTimeString, 0, false);
+ SimpleInstallFunction(prototype, "toGMTString",
+ Builtins::kDatePrototypeToUTCString, 0, false);
+ SimpleInstallFunction(prototype, "toISOString",
+ Builtins::kDatePrototypeToISOString, 0, false);
+ SimpleInstallFunction(prototype, "toUTCString",
+ Builtins::kDatePrototypeToUTCString, 0, false);
+ SimpleInstallFunction(prototype, "getDate", Builtins::kDatePrototypeGetDate,
+ 0, true);
+ SimpleInstallFunction(prototype, "setDate", Builtins::kDatePrototypeSetDate,
+ 1, false);
+ SimpleInstallFunction(prototype, "getDay", Builtins::kDatePrototypeGetDay,
+ 0, true);
+ SimpleInstallFunction(prototype, "getFullYear",
+ Builtins::kDatePrototypeGetFullYear, 0, true);
+ SimpleInstallFunction(prototype, "setFullYear",
+ Builtins::kDatePrototypeSetFullYear, 3, false);
+ SimpleInstallFunction(prototype, "getHours",
+ Builtins::kDatePrototypeGetHours, 0, true);
+ SimpleInstallFunction(prototype, "setHours",
+ Builtins::kDatePrototypeSetHours, 4, false);
+ SimpleInstallFunction(prototype, "getMilliseconds",
+ Builtins::kDatePrototypeGetMilliseconds, 0, true);
+ SimpleInstallFunction(prototype, "setMilliseconds",
+ Builtins::kDatePrototypeSetMilliseconds, 1, false);
+ SimpleInstallFunction(prototype, "getMinutes",
+ Builtins::kDatePrototypeGetMinutes, 0, true);
+ SimpleInstallFunction(prototype, "setMinutes",
+ Builtins::kDatePrototypeSetMinutes, 3, false);
+ SimpleInstallFunction(prototype, "getMonth",
+ Builtins::kDatePrototypeGetMonth, 0, true);
+ SimpleInstallFunction(prototype, "setMonth",
+ Builtins::kDatePrototypeSetMonth, 2, false);
+ SimpleInstallFunction(prototype, "getSeconds",
+ Builtins::kDatePrototypeGetSeconds, 0, true);
+ SimpleInstallFunction(prototype, "setSeconds",
+ Builtins::kDatePrototypeSetSeconds, 2, false);
+ SimpleInstallFunction(prototype, "getTime", Builtins::kDatePrototypeGetTime,
+ 0, true);
+ SimpleInstallFunction(prototype, "setTime", Builtins::kDatePrototypeSetTime,
+ 1, false);
+ SimpleInstallFunction(prototype, "getTimezoneOffset",
+ Builtins::kDatePrototypeGetTimezoneOffset, 0, true);
+ SimpleInstallFunction(prototype, "getUTCDate",
+ Builtins::kDatePrototypeGetUTCDate, 0, true);
+ SimpleInstallFunction(prototype, "setUTCDate",
+ Builtins::kDatePrototypeSetUTCDate, 1, false);
+ SimpleInstallFunction(prototype, "getUTCDay",
+ Builtins::kDatePrototypeGetUTCDay, 0, true);
+ SimpleInstallFunction(prototype, "getUTCFullYear",
+ Builtins::kDatePrototypeGetUTCFullYear, 0, true);
+ SimpleInstallFunction(prototype, "setUTCFullYear",
+ Builtins::kDatePrototypeSetUTCFullYear, 3, false);
+ SimpleInstallFunction(prototype, "getUTCHours",
+ Builtins::kDatePrototypeGetUTCHours, 0, true);
+ SimpleInstallFunction(prototype, "setUTCHours",
+ Builtins::kDatePrototypeSetUTCHours, 4, false);
+ SimpleInstallFunction(prototype, "getUTCMilliseconds",
+ Builtins::kDatePrototypeGetUTCMilliseconds, 0, true);
+ SimpleInstallFunction(prototype, "setUTCMilliseconds",
+ Builtins::kDatePrototypeSetUTCMilliseconds, 1, false);
+ SimpleInstallFunction(prototype, "getUTCMinutes",
+ Builtins::kDatePrototypeGetUTCMinutes, 0, true);
+ SimpleInstallFunction(prototype, "setUTCMinutes",
+ Builtins::kDatePrototypeSetUTCMinutes, 3, false);
+ SimpleInstallFunction(prototype, "getUTCMonth",
+ Builtins::kDatePrototypeGetUTCMonth, 0, true);
+ SimpleInstallFunction(prototype, "setUTCMonth",
+ Builtins::kDatePrototypeSetUTCMonth, 2, false);
+ SimpleInstallFunction(prototype, "getUTCSeconds",
+ Builtins::kDatePrototypeGetUTCSeconds, 0, true);
+ SimpleInstallFunction(prototype, "setUTCSeconds",
+ Builtins::kDatePrototypeSetUTCSeconds, 2, false);
+ SimpleInstallFunction(prototype, "valueOf", Builtins::kDatePrototypeValueOf,
+ 0, false);
+ SimpleInstallFunction(prototype, "getYear", Builtins::kDatePrototypeGetYear,
+ 0, true);
+ SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
+ 1, false);
+
+ // Install i18n fallback functions.
+ SimpleInstallFunction(prototype, "toLocaleString",
+ Builtins::kDatePrototypeToString, 0, false);
+ SimpleInstallFunction(prototype, "toLocaleDateString",
+ Builtins::kDatePrototypeToDateString, 0, false);
+ SimpleInstallFunction(prototype, "toLocaleTimeString",
+ Builtins::kDatePrototypeToTimeString, 0, false);
+
+ // Install the @@toPrimitive function.
+ Handle<JSFunction> to_primitive = InstallFunction(
+ prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kDatePrototypeToPrimitive,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Set the expected parameters for @@toPrimitive to 1; required by builtin.
+ to_primitive->shared()->set_internal_formal_parameter_count(1);
+ // Set the length for the function to satisfy ECMA-262.
+ to_primitive->shared()->set_length(1);
+ }
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
@@ -1219,7 +1397,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal);
- native_context()->set_regexp_function(*regexp_fun);
+ InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
+ Context::REGEXP_FUNCTION_INDEX);
+ regexp_fun->shared()->set_construct_stub(
+ *isolate->builtins()->JSBuiltinsConstructStub());
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
@@ -1243,6 +1424,62 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
num_fields * kPointerSize);
}
+ { // -- E r r o r
+ Handle<JSFunction> error_fun = InstallFunction(
+ global, "Error", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, error_fun,
+ Context::ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- E v a l E r r o r
+ Handle<JSFunction> eval_error_fun = InstallFunction(
+ global, "EvalError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, eval_error_fun,
+ Context::EVAL_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- R a n g e E r r o r
+ Handle<JSFunction> range_error_fun = InstallFunction(
+ global, "RangeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, range_error_fun,
+ Context::RANGE_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- R e f e r e n c e E r r o r
+ Handle<JSFunction> reference_error_fun = InstallFunction(
+ global, "ReferenceError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, reference_error_fun,
+ Context::REFERENCE_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- S y n t a x E r r o r
+ Handle<JSFunction> syntax_error_fun = InstallFunction(
+ global, "SyntaxError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, syntax_error_fun,
+ Context::SYNTAX_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- T y p e E r r o r
+ Handle<JSFunction> type_error_fun = InstallFunction(
+ global, "TypeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, type_error_fun,
+ Context::TYPE_ERROR_FUNCTION_INDEX);
+ }
+
+ { // -- U R I E r r o r
+ Handle<JSFunction> uri_error_fun = InstallFunction(
+ global, "URIError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, uri_error_fun,
+ Context::URI_ERROR_FUNCTION_INDEX);
+ }
+
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
@@ -1274,20 +1511,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
- InstallFunction(
- global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithInternalFields,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
- native_context()->set_array_buffer_fun(*array_buffer_fun);
+ InstallArrayBuffer(global, "ArrayBuffer");
+ InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
+ Context::ARRAY_BUFFER_FUN_INDEX);
}
{ // -- T y p e d A r r a y s
-#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- Handle<JSFunction> fun; \
- InstallTypedArray(#Type "Array", TYPE##_ELEMENTS, &fun); \
- native_context()->set_##type##_array_fun(*fun); \
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ Handle<JSFunction> fun; \
+ InstallTypedArray(#Type "Array", TYPE##_ELEMENTS, &fun); \
+ InstallWithIntrinsicDefaultProto(isolate, fun, \
+ Context::TYPE##_ARRAY_FUN_INDEX); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
@@ -1298,21 +1533,26 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSDataView::kSizeWithInternalFields,
isolate->initial_object_prototype(),
Builtins::kIllegal);
- native_context()->set_data_view_fun(*data_view_fun);
+ InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
+ Context::DATA_VIEW_FUN_INDEX);
+ data_view_fun->shared()->set_construct_stub(
+ *isolate->builtins()->JSBuiltinsConstructStub());
}
{ // -- M a p
Handle<JSFunction> js_map_fun = InstallFunction(
global, "Map", JS_MAP_TYPE, JSMap::kSize,
isolate->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_js_map_fun(*js_map_fun);
+ InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
+ Context::JS_MAP_FUN_INDEX);
}
{ // -- S e t
Handle<JSFunction> js_set_fun = InstallFunction(
global, "Set", JS_SET_TYPE, JSSet::kSize,
isolate->initial_object_prototype(), Builtins::kIllegal);
- native_context()->set_js_set_fun(*js_set_fun);
+ InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
+ Context::JS_SET_FUN_INDEX);
}
{ // -- I t e r a t o r R e s u l t
@@ -1337,25 +1577,63 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_iterator_result_map(*map);
}
- // -- W e a k M a p
- InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- // -- W e a k S e t
- InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ { // -- W e a k M a p
+ Handle<JSFunction> js_weak_map_fun = InstallFunction(
+ global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, js_weak_map_fun,
+ Context::JS_WEAK_MAP_FUN_INDEX);
+ }
+
+ { // -- W e a k S e t
+ Handle<JSFunction> js_weak_set_fun = InstallFunction(
+ global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
+ isolate->initial_object_prototype(), Builtins::kIllegal);
+ InstallWithIntrinsicDefaultProto(isolate, js_weak_set_fun,
+ Context::JS_WEAK_SET_FUN_INDEX);
+ }
+
+ { // --- B o u n d F u n c t i o n
+ Handle<Map> map =
+ factory->NewMap(JS_BOUND_FUNCTION_TYPE, JSBoundFunction::kSize);
+ map->set_is_callable();
+ Map::SetPrototype(map, empty_function);
+
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ Map::EnsureDescriptorSlack(map, 2);
+
+ { // length
+ DataDescriptor d(factory->length_string(), JSBoundFunction::kLengthIndex,
+ roc_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // name
+ DataDescriptor d(factory->name_string(), JSBoundFunction::kNameIndex,
+ roc_attribs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ map->SetInObjectProperties(2);
+ native_context()->set_bound_function_without_constructor_map(*map);
+
+ map = Map::Copy(map, "IsConstructor");
+ map->set_is_constructor();
+ native_context()->set_bound_function_with_constructor_map(*map);
+ }
{ // --- sloppy arguments map
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
Handle<String> arguments_string = factory->Arguments_string();
- Handle<Code> code(isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<Code> code = isolate->builtins()->Illegal();
Handle<JSFunction> function = factory->NewFunctionWithoutPrototype(
arguments_string, code);
function->shared()->set_instance_class_name(*arguments_string);
- Handle<Map> map =
- factory->NewMap(JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize);
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -1414,8 +1692,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
caller->set_setter(*poison);
// Create the map. Allocate one in-object field for length.
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
- Heap::kStrictArgumentsObjectSize);
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE, Heap::kStrictArgumentsObjectSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 3);
@@ -1454,8 +1732,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- context extension
// Create a function for the context extension objects.
- Handle<Code> code = Handle<Code>(
- isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<Code> code = isolate->builtins()->Illegal();
Handle<JSFunction> context_extension_fun = factory->NewFunction(
factory->empty_string(), code, JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize);
@@ -1469,9 +1746,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
// Set up the call-as-function delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsFunction));
+ Handle<Code> code = isolate->builtins()->HandleApiCallAsFunction();
Handle<JSFunction> delegate = factory->NewFunction(
factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
native_context()->set_call_as_function_delegate(*delegate);
@@ -1480,15 +1755,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
// Set up the call-as-constructor delegate.
- Handle<Code> code =
- Handle<Code>(isolate->builtins()->builtin(
- Builtins::kHandleApiCallAsConstructor));
+ Handle<Code> code = isolate->builtins()->HandleApiCallAsConstructor();
Handle<JSFunction> delegate = factory->NewFunction(
factory->empty_string(), code, JS_OBJECT_TYPE, JSObject::kHeaderSize);
native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
-}
+} // NOLINT(readability/fn_size)
void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
@@ -1514,6 +1787,7 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
+ FEATURE_INITIALIZE_GLOBAL(promise_extra, "")
#undef FEATURE_INITIALIZE_GLOBAL
}
@@ -1577,20 +1851,6 @@ bool Bootstrapper::CompileExperimentalExtraBuiltin(Isolate* isolate,
}
-bool Bootstrapper::CompileCodeStubBuiltin(Isolate* isolate, int index) {
- HandleScope scope(isolate);
- Vector<const char> name = CodeStubNatives::GetScriptName(index);
- Handle<String> source_code =
- isolate->bootstrapper()->SourceLookup<CodeStubNatives>(index);
- Handle<JSObject> global(isolate->global_object());
- Handle<JSObject> exports(isolate->heap()->code_stub_exports_object());
- Handle<Object> args[] = {global, exports};
- bool result =
- CompileNative(isolate, name, source_code, arraysize(args), args);
- return result;
-}
-
-
bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> source, int argc,
Handle<Object> argv[]) {
@@ -1616,10 +1876,9 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
DCHECK(context->IsNativeContext());
- Handle<Context> runtime_context(context->runtime_context());
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(function_info,
- runtime_context);
+ context);
Handle<Object> receiver = isolate->factory()->undefined_value();
// For non-extension scripts, run script to get the function wrapper.
@@ -1719,24 +1978,6 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
}
-template <typename Data>
-Handle<JSTypedArray> CreateTypedArray(Isolate* isolate, ExternalArrayType type,
- size_t num_elements, Data** data) {
- size_t byte_length = num_elements * sizeof(**data);
- Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
- bool is_external = (*data != nullptr);
- if (!is_external) {
- *data = reinterpret_cast<Data*>(
- isolate->array_buffer_allocator()->Allocate(byte_length));
- }
- JSArrayBuffer::Setup(buffer, isolate, is_external, *data, byte_length,
- SharedFlag::kNotShared);
- return isolate->factory()->NewJSTypedArray(type, buffer, 0, num_elements,
- TENURED);
-}
-
-
void Genesis::ConfigureUtilsObject(ContextType context_type) {
switch (context_type) {
// We still need the utils object to find debug functions.
@@ -1764,12 +2005,6 @@ void Genesis::ConfigureUtilsObject(ContextType context_type) {
// The utils object can be removed for cases that reach this point.
native_context()->set_natives_utils_object(heap()->undefined_value());
-
-#ifdef DEBUG
- JSGlobalObject* dummy = native_context()->runtime_context()->global_object();
- DCHECK_EQ(0, dummy->elements()->length());
- DCHECK_EQ(0, GlobalDictionary::cast(dummy->properties())->NumberOfElements());
-#endif
}
@@ -1795,11 +2030,8 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> apply = InstallFunction(
container, "reflect_apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kReflectApply);
- apply->shared()->set_internal_formal_parameter_count(3);
+ apply->shared()->DontAdaptArguments();
apply->shared()->set_length(3);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
- apply->shared()->set_feedback_vector(*feedback_vector);
native_context->set_reflect_apply(*apply);
}
@@ -1807,14 +2039,20 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> construct = InstallFunction(
container, "reflect_construct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
- construct->shared()->set_internal_formal_parameter_count(3);
+ construct->shared()->DontAdaptArguments();
construct->shared()->set_length(2);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
- construct->shared()->set_feedback_vector(*feedback_vector);
native_context->set_reflect_construct(*construct);
}
+ {
+ Handle<JSFunction> to_string = InstallFunction(
+ container, "object_to_string", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kObjectProtoToString);
+ to_string->shared()->DontAdaptArguments();
+ to_string->shared()->set_length(0);
+ native_context->set_object_to_string(*to_string);
+ }
+
Handle<JSObject> iterator_prototype;
{
@@ -1836,12 +2074,26 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
generator_function_prototype, NONE);
static const bool kUseStrictFunctionMap = true;
- Handle<JSFunction> generator_function_function =
- InstallFunction(container, "GeneratorFunction", JS_FUNCTION_TYPE,
- JSFunction::kSize, generator_function_prototype,
- Builtins::kIllegal, kUseStrictFunctionMap);
- generator_function_function->initial_map()->set_is_callable();
- generator_function_function->initial_map()->set_is_constructor(true);
+ Handle<JSFunction> generator_function_function = InstallFunction(
+ container, "GeneratorFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
+ generator_function_prototype, Builtins::kGeneratorFunctionConstructor,
+ kUseStrictFunctionMap);
+ generator_function_function->set_prototype_or_initial_map(
+ native_context->sloppy_generator_function_map());
+ generator_function_function->shared()->DontAdaptArguments();
+ generator_function_function->shared()->set_construct_stub(
+ *isolate->builtins()->GeneratorFunctionConstructor());
+ generator_function_function->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(
+ isolate, generator_function_function,
+ Context::GENERATOR_FUNCTION_FUNCTION_INDEX);
+
+ native_context->sloppy_generator_function_map()->SetConstructor(
+ *generator_function_function);
+ native_context->strict_generator_function_map()->SetConstructor(
+ *generator_function_function);
+ native_context->strong_generator_function_map()->SetConstructor(
+ *generator_function_function);
}
{ // -- S e t I t e r a t o r
@@ -2028,6 +2280,7 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
INITIALIZE_FLAG(FLAG_harmony_tostring)
INITIALIZE_FLAG(FLAG_harmony_tolength)
+ INITIALIZE_FLAG(FLAG_harmony_species)
#undef INITIALIZE_FLAG
}
@@ -2037,34 +2290,21 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_bind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_completion)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tolength)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-
-
-static void SimpleInstallFunction(Handle<JSObject>& base, const char* name,
- Builtins::Name call, int len, bool adapt) {
- Handle<JSFunction> fun =
- InstallFunction(base, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), call);
- if (adapt) {
- fun->shared()->set_internal_formal_parameter_count(len);
- } else {
- fun->shared()->DontAdaptArguments();
- }
- fun->shared()->set_length(len);
-}
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
@@ -2109,35 +2349,50 @@ void Genesis::InitializeGlobal_harmony_regexp_subclass() {
void Genesis::InitializeGlobal_harmony_reflect() {
+ Factory* factory = isolate()->factory();
+
+ // We currently use some of the Reflect functions internally, even when
+ // the --harmony-reflect flag is not given.
+
+ Handle<JSFunction> define_property =
+ SimpleCreateFunction(isolate(), factory->defineProperty_string(),
+ Builtins::kReflectDefineProperty, 3, true);
+ native_context()->set_reflect_define_property(*define_property);
+
+ Handle<JSFunction> delete_property =
+ SimpleCreateFunction(isolate(), factory->deleteProperty_string(),
+ Builtins::kReflectDeleteProperty, 2, true);
+ native_context()->set_reflect_delete_property(*delete_property);
+
if (!FLAG_harmony_reflect) return;
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context()->global_object()));
- Handle<String> reflect_string =
- factory()->NewStringFromStaticChars("Reflect");
+ Handle<String> reflect_string = factory->NewStringFromStaticChars("Reflect");
Handle<JSObject> reflect =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
+ factory->NewJSObject(isolate()->object_function(), TENURED);
JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
- SimpleInstallFunction(reflect, "defineProperty",
- Builtins::kReflectDefineProperty, 3, true);
- SimpleInstallFunction(reflect, "deleteProperty",
- Builtins::kReflectDeleteProperty, 2, true);
- SimpleInstallFunction(reflect, "get",
- Builtins::kReflectGet, 3, false);
- SimpleInstallFunction(reflect, "getOwnPropertyDescriptor",
+ InstallFunction(reflect, define_property, factory->defineProperty_string());
+ InstallFunction(reflect, delete_property, factory->deleteProperty_string());
+
+ SimpleInstallFunction(reflect, factory->get_string(),
+ Builtins::kReflectGet, 2, false);
+ SimpleInstallFunction(reflect, factory->getOwnPropertyDescriptor_string(),
Builtins::kReflectGetOwnPropertyDescriptor, 2, true);
- SimpleInstallFunction(reflect, "getPrototypeOf",
+ SimpleInstallFunction(reflect, factory->getPrototypeOf_string(),
Builtins::kReflectGetPrototypeOf, 1, true);
- SimpleInstallFunction(reflect, "has",
+ SimpleInstallFunction(reflect, factory->has_string(),
Builtins::kReflectHas, 2, true);
- SimpleInstallFunction(reflect, "isExtensible",
+ SimpleInstallFunction(reflect, factory->isExtensible_string(),
Builtins::kReflectIsExtensible, 1, true);
- SimpleInstallFunction(reflect, "preventExtensions",
+ SimpleInstallFunction(reflect, factory->ownKeys_string(),
+ Builtins::kReflectOwnKeys, 1, true);
+ SimpleInstallFunction(reflect, factory->preventExtensions_string(),
Builtins::kReflectPreventExtensions, 1, true);
- SimpleInstallFunction(reflect, "set",
+ SimpleInstallFunction(reflect, factory->set_string(),
Builtins::kReflectSet, 3, false);
- SimpleInstallFunction(reflect, "setPrototypeOf",
+ SimpleInstallFunction(reflect, factory->setPrototypeOf_string(),
Builtins::kReflectSetPrototypeOf, 2, true);
}
@@ -2145,13 +2400,9 @@ void Genesis::InitializeGlobal_harmony_reflect() {
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context()->global_object()));
-
- Handle<JSFunction> shared_array_buffer_fun = InstallFunction(
- global, "SharedArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSizeWithInternalFields,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
+ Handle<JSGlobalObject> global(native_context()->global_object());
+ Handle<JSFunction> shared_array_buffer_fun =
+ InstallArrayBuffer(global, "SharedArrayBuffer");
native_context()->set_shared_array_buffer_fun(*shared_array_buffer_fun);
}
@@ -2187,6 +2438,100 @@ void Genesis::InitializeGlobal_harmony_simd() {
}
+void Genesis::InstallJSProxyMaps() {
+ // Allocate the different maps for all Proxy types.
+ // Next to the default proxy, we need maps indicating callable and
+ // constructable proxies.
+
+ Handle<Map> proxy_function_map =
+ Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
+ proxy_function_map->set_is_constructor();
+ native_context()->set_proxy_function_map(*proxy_function_map);
+
+ Handle<Map> proxy_map =
+ factory()->NewMap(JS_PROXY_TYPE, JSProxy::kSize, FAST_ELEMENTS);
+ proxy_map->set_dictionary_map(true);
+ native_context()->set_proxy_map(*proxy_map);
+
+ Handle<Map> proxy_callable_map = Map::Copy(proxy_map, "callable Proxy");
+ proxy_callable_map->set_is_callable();
+ native_context()->set_proxy_callable_map(*proxy_callable_map);
+ proxy_callable_map->SetConstructor(native_context()->function_function());
+
+ Handle<Map> proxy_constructor_map =
+ Map::Copy(proxy_callable_map, "constructor Proxy");
+ proxy_constructor_map->set_is_constructor();
+ native_context()->set_proxy_constructor_map(*proxy_constructor_map);
+}
+
+
+void Genesis::InitializeGlobal_harmony_proxies() {
+ if (!FLAG_harmony_proxies) return;
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context()->global_object()));
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ InstallJSProxyMaps();
+
+ // Create the Proxy object.
+ Handle<String> name = factory->Proxy_string();
+ Handle<Code> code(isolate->builtins()->ProxyConstructor());
+
+ Handle<JSFunction> proxy_function = factory->NewFunction(
+ isolate->proxy_function_map(), factory->Proxy_string(), code);
+
+ JSFunction::SetInitialMap(proxy_function,
+ Handle<Map>(native_context()->proxy_map(), isolate),
+ factory->null_value());
+
+ proxy_function->shared()->set_construct_stub(
+ *isolate->builtins()->ProxyConstructor_ConstructStub());
+ proxy_function->shared()->set_internal_formal_parameter_count(2);
+ proxy_function->shared()->set_length(2);
+
+ native_context()->set_proxy_function(*proxy_function);
+ InstallFunction(global, name, proxy_function, factory->Object_string());
+}
+
+
+Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
+ const char* name) {
+ // Setup the {prototype} with the given {name} for @@toStringTag.
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::AddProperty(prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked(name),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Allocate the constructor with the given {prototype}.
+ Handle<JSFunction> array_buffer_fun =
+ InstallFunction(target, name, JS_ARRAY_BUFFER_TYPE,
+ JSArrayBuffer::kSizeWithInternalFields, prototype,
+ Builtins::kArrayBufferConstructor);
+ array_buffer_fun->shared()->set_construct_stub(
+ *isolate()->builtins()->ArrayBufferConstructor_ConstructStub());
+ array_buffer_fun->shared()->DontAdaptArguments();
+ array_buffer_fun->shared()->set_length(1);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory()->constructor_string(),
+ array_buffer_fun, DONT_ENUM);
+
+ SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
+ Builtins::kArrayBufferIsView, 1, true);
+
+ return array_buffer_fun;
+}
+
+
+void Genesis::InitializeGlobal_harmony_species() {
+ if (!FLAG_harmony_species) return;
+ InstallPublicSymbol(factory(), native_context(), "species",
+ factory()->species_symbol());
+}
+
+
Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind) {
@@ -2233,31 +2578,6 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
bool Genesis::InstallNatives(ContextType context_type) {
HandleScope scope(isolate());
- // Create a bridge function that has context in the native context.
- Handle<JSFunction> bridge = factory()->NewFunction(factory()->empty_string());
- DCHECK(bridge->context() == *isolate()->native_context());
-
- // Allocate the runtime context.
- {
- Handle<Context> context =
- factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
- native_context()->set_runtime_context(*context);
- Handle<Code> code = isolate()->builtins()->Illegal();
- Handle<JSFunction> global_fun =
- factory()->NewFunction(factory()->empty_string(), code,
- JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
- global_fun->initial_map()->set_dictionary_map(true);
- global_fun->initial_map()->set_prototype(heap()->null_value());
- Handle<JSGlobalObject> dummy_global =
- Handle<JSGlobalObject>::cast(factory()->NewJSGlobalObject(global_fun));
- dummy_global->set_native_context(*native_context());
- dummy_global->set_global_proxy(native_context()->global_proxy());
- context->set_global_object(*dummy_global);
- // Something went wrong if we actually need to write into the dummy global.
- dummy_global->set_properties(*GlobalDictionary::New(isolate(), 0));
- dummy_global->set_elements(heap()->empty_fixed_array());
- }
-
// Set up the utils object as shared container between native scripts.
Handle<JSObject> utils = factory()->NewJSObject(isolate()->object_function());
JSObject::NormalizeProperties(utils, CLEAR_INOBJECT_PROPERTIES, 16,
@@ -2338,27 +2658,12 @@ bool Genesis::InstallNatives(ContextType context_type) {
native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
- // Install Date.prototype[@@toPrimitive].
+ // Install Global.eval.
{
- Handle<String> key = factory()->Date_string();
- Handle<JSFunction> date = Handle<JSFunction>::cast(
- Object::GetProperty(handle(native_context()->global_object()), key)
- .ToHandleChecked());
- Handle<JSObject> proto =
- Handle<JSObject>(JSObject::cast(date->instance_prototype()));
-
- // Install the @@toPrimitive function.
- Handle<JSFunction> to_primitive =
- InstallFunction(proto, factory()->to_primitive_symbol(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, MaybeHandle<JSObject>(),
- Builtins::kDateToPrimitive,
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
-
- // Set the expected parameters for @@toPrimitive to 1; required by builtin.
- to_primitive->shared()->set_internal_formal_parameter_count(1);
-
- // Set the length for the function to satisfy ECMA-262.
- to_primitive->shared()->set_length(1);
+ Handle<JSFunction> eval = SimpleInstallFunction(
+ handle(native_context()->global_object()), factory()->eval_string(),
+ Builtins::kGlobalEval, 1, false);
+ native_context()->set_global_eval_fun(*eval);
}
// Install Array.prototype.concat
@@ -2395,38 +2700,19 @@ bool Genesis::InstallNatives(ContextType context_type) {
// Set the lengths for the functions to satisfy ECMA-262.
concat->shared()->set_length(1);
}
- // Install Function.prototype.call and apply.
- {
- Handle<String> key = factory()->Function_string();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(Object::GetProperty(
- handle(native_context()->global_object()), key).ToHandleChecked());
- Handle<JSObject> proto =
- Handle<JSObject>(JSObject::cast(function->instance_prototype()));
-
- // Install the call and the apply functions.
- Handle<JSFunction> call =
- InstallFunction(proto, "call", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kFunctionCall);
- Handle<JSFunction> apply =
- InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kFunctionApply);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
- apply->shared()->set_feedback_vector(*feedback_vector);
-
- // Make sure that Function.prototype.call appears to be compiled.
- // The code will never be called, but inline caching for call will
- // only work if it appears to be compiled.
- call->shared()->DontAdaptArguments();
- DCHECK(call->is_compiled());
-
- // Set the expected parameters for apply to 2; required by builtin.
- apply->shared()->set_internal_formal_parameter_count(2);
- // Set the lengths for the functions to satisfy ECMA-262.
- call->shared()->set_length(1);
- apply->shared()->set_length(2);
+ // Set up the Promise constructor.
+ {
+ Handle<String> key = factory()->Promise_string();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(
+ Object::GetProperty(handle(native_context()->global_object()), key)
+ .ToHandleChecked());
+ JSFunction::EnsureHasInitialMap(function);
+ function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
+ function->shared()->set_construct_stub(
+ *isolate()->builtins()->JSBuiltinsConstructStub());
+ InstallWithIntrinsicDefaultProto(isolate(), function,
+ Context::PROMISE_FUNCTION_INDEX);
}
InstallBuiltinFunctionIds();
@@ -2526,8 +2812,6 @@ bool Genesis::InstallNatives(ContextType context_type) {
bool Genesis::InstallExperimentalNatives() {
- static const char* harmony_array_includes_natives[] = {
- "native harmony-array-includes.js", nullptr};
static const char* harmony_proxies_natives[] = {"native proxy.js", nullptr};
static const char* harmony_modules_natives[] = {nullptr};
static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
@@ -2536,12 +2820,15 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_sloppy_natives[] = {nullptr};
static const char* harmony_sloppy_function_natives[] = {nullptr};
static const char* harmony_sloppy_let_natives[] = {nullptr};
- static const char* harmony_unicode_regexps_natives[] = {nullptr};
- static const char* harmony_rest_parameters_natives[] = {nullptr};
+ static const char* harmony_species_natives[] = {"native harmony-species.js",
+ nullptr};
+ static const char* harmony_unicode_regexps_natives[] = {
+ "native harmony-unicode-regexps.js", nullptr};
static const char* harmony_default_parameters_natives[] = {nullptr};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
nullptr};
- static const char* harmony_destructuring_natives[] = {nullptr};
+ static const char* harmony_destructuring_bind_natives[] = {nullptr};
+ static const char* harmony_destructuring_assignment_natives[] = {nullptr};
static const char* harmony_object_observe_natives[] = {
"native harmony-object-observe.js", nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
@@ -2553,6 +2840,10 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_completion_natives[] = {nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
static const char* harmony_regexp_subclass_natives[] = {nullptr};
+ static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
+ static const char* harmony_function_name_natives[] = {nullptr};
+ static const char* promise_extra_natives[] = {"native promise-extra.js",
+ nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2571,6 +2862,7 @@ bool Genesis::InstallExperimentalNatives() {
HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
+ INSTALL_EXPERIMENTAL_NATIVES(promise_extra, "");
#undef INSTALL_EXPERIMENTAL_NATIVES
}
@@ -2616,16 +2908,6 @@ bool Genesis::InstallDebuggerNatives() {
}
-bool Bootstrapper::InstallCodeStubNatives(Isolate* isolate) {
- for (int i = CodeStubNatives::GetDebuggerCount();
- i < CodeStubNatives::GetBuiltinsCount(); i++) {
- if (!CompileCodeStubBuiltin(isolate, i)) return false;
- }
-
- return true;
-}
-
-
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
@@ -2710,8 +2992,7 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context->global_object()));
- Handle<JSObject> Error = Handle<JSObject>::cast(
- Object::GetProperty(isolate, global, "Error").ToHandleChecked());
+ Handle<JSObject> Error = isolate->error_function();
Handle<String> name =
factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("stackTraceLimit"));
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
@@ -2736,9 +3017,9 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
}
-#if defined(V8_WASM)
- WasmJs::Install(isolate, global);
-#endif
+ if (FLAG_expose_wasm) {
+ WasmJs::Install(isolate, global);
+ }
return true;
}
@@ -3123,10 +3404,8 @@ Genesis::Genesis(Isolate* isolate,
// We can only de-serialize a context if the isolate was initialized from
// a snapshot. Otherwise we have to build the context from scratch.
// Also create a context from scratch to expose natives, if required by flag.
- Handle<FixedArray> outdated_contexts;
if (!isolate->initialized_from_snapshot() ||
- !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
- &outdated_contexts)
+ !Snapshot::NewContextFromSnapshot(isolate, global_proxy)
.ToHandle(&native_context_)) {
native_context_ = Handle<Context>();
}
@@ -3148,8 +3427,7 @@ Genesis::Genesis(Isolate* isolate,
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
- HookUpGlobalObject(global_object, outdated_contexts);
- HookUpGlobalThisBinding(outdated_contexts);
+ HookUpGlobalObject(global_object);
if (!ConfigureGlobalObjects(global_proxy_template)) return;
} else {
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 2baa8ff61a..44f0f1b2a5 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -82,8 +82,6 @@ class Bootstrapper final {
v8::ExtensionConfiguration* extensions,
ContextType context_type = FULL_CONTEXT);
- bool CreateCodeStubContext(Isolate* isolate);
-
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
@@ -116,8 +114,6 @@ class Bootstrapper final {
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
- static bool CompileCodeStubBuiltin(Isolate* isolate, int index);
- static bool InstallCodeStubNatives(Isolate* isolate);
static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
static void ExportExperimentalFromRuntime(Isolate* isolate,
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index e4ceec99be..77df498a07 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -9,6 +9,7 @@
#include "src/arguments.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
+#include "src/dateparser-inl.h"
#include "src/elements.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
@@ -19,6 +20,7 @@
#include "src/profiler/cpu-profiler.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
+#include "src/string-builder.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -31,7 +33,10 @@ template <BuiltinExtraArguments extra_args>
class BuiltinArguments : public Arguments {
public:
BuiltinArguments(int length, Object** arguments)
- : Arguments(length, arguments) { }
+ : Arguments(length, arguments) {
+ // Check we have at least the receiver.
+ DCHECK_LE(1, this->length());
+ }
Object*& operator[] (int index) {
DCHECK(index < length());
@@ -43,51 +48,75 @@ class BuiltinArguments : public Arguments {
return Arguments::at<S>(index);
}
+ Handle<Object> atOrUndefined(Isolate* isolate, int index) {
+ if (index >= length()) {
+ return isolate->factory()->undefined_value();
+ }
+ return at<Object>(index);
+ }
+
Handle<Object> receiver() {
return Arguments::at<Object>(0);
}
- Handle<JSFunction> called_function() {
- STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
- return Arguments::at<JSFunction>(Arguments::length() - 1);
- }
+ Handle<JSFunction> target();
+ Handle<HeapObject> new_target();
// Gets the total number of arguments including the receiver (but
// excluding extra arguments).
- int length() const {
- STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
- return Arguments::length();
- }
-
-#ifdef DEBUG
- void Verify() {
- // Check we have at least the receiver.
- DCHECK(Arguments::length() >= 1);
- }
-#endif
+ int length() const;
};
-// Specialize BuiltinArguments for the called function extra argument.
+// Specialize BuiltinArguments for the extra arguments.
template <>
-int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
+int BuiltinArguments<BuiltinExtraArguments::kNone>::length() const {
+ return Arguments::length();
+}
+
+template <>
+int BuiltinArguments<BuiltinExtraArguments::kTarget>::length() const {
return Arguments::length() - 1;
}
-#ifdef DEBUG
template <>
-void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
- // Check we have at least the receiver and the called function.
- DCHECK(Arguments::length() >= 2);
- // Make sure cast to JSFunction succeeds.
- called_function();
+Handle<JSFunction> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
+ return Arguments::at<JSFunction>(Arguments::length() - 1);
+}
+
+template <>
+int BuiltinArguments<BuiltinExtraArguments::kNewTarget>::length() const {
+ return Arguments::length() - 1;
+}
+
+template <>
+Handle<HeapObject>
+BuiltinArguments<BuiltinExtraArguments::kNewTarget>::new_target() {
+ return Arguments::at<HeapObject>(Arguments::length() - 1);
+}
+
+template <>
+int BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::length()
+ const {
+ return Arguments::length() - 2;
}
-#endif
+template <>
+Handle<JSFunction>
+BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::target() {
+ return Arguments::at<JSFunction>(Arguments::length() - 2);
+}
+
+template <>
+Handle<HeapObject>
+BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::new_target() {
+ return Arguments::at<HeapObject>(Arguments::length() - 1);
+}
-#define DEF_ARG_TYPE(name, spec) \
- typedef BuiltinArguments<spec> name##ArgumentsType;
+
+#define DEF_ARG_TYPE(name, spec) \
+ typedef BuiltinArguments<BuiltinExtraArguments::spec> name##ArgumentsType;
BUILTIN_LIST_C(DEF_ARG_TYPE)
#undef DEF_ARG_TYPE
@@ -105,65 +134,30 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
// In the body of the builtin function the arguments can be accessed
// through the BuiltinArguments object args.
-#ifdef DEBUG
-
#define BUILTIN(name) \
MUST_USE_RESULT static Object* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate); \
MUST_USE_RESULT static Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
- args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
MUST_USE_RESULT static Object* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
-#else // For release mode.
-
-#define BUILTIN(name) \
- static Object* Builtin_impl##name( \
- name##ArgumentsType args, Isolate* isolate); \
- static Object* Builtin_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- name##ArgumentsType args(args_length, args_object); \
- return Builtin_impl##name(args, isolate); \
- } \
- static Object* Builtin_impl##name( \
- name##ArgumentsType args, Isolate* isolate)
-#endif
-
-#ifdef DEBUG
-inline bool CalledAsConstructor(Isolate* isolate) {
- // Calculate the result using a full stack frame iterator and check
- // that the state of the stack is as we assume it to be in the
- // code below.
- StackFrameIterator it(isolate);
- DCHECK(it.frame()->is_exit());
- it.Advance();
- StackFrame* frame = it.frame();
- bool reference_result = frame->is_construct();
- Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
- // Because we know fp points to an exit frame we can use the relevant
- // part of ExitFrame::ComputeCallerState directly.
- const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
- Address caller_fp = Memory::Address_at(fp + kCallerOffset);
- // This inlines the part of StackFrame::ComputeType that grabs the
- // type of the current frame. Note that StackFrame::ComputeType
- // has been specialized for each architecture so if any one of them
- // changes this code has to be changed as well.
- const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
- const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
- Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
- bool result = (marker == kConstructMarker);
- DCHECK_EQ(result, reference_result);
- return result;
-}
-#endif
+// ----------------------------------------------------------------------------
-// ----------------------------------------------------------------------------
+#define CHECK_RECEIVER(Type, name, method) \
+ if (!args.receiver()->Is##Type()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \
+ isolate->factory()->NewStringFromAsciiChecked(method), \
+ args.receiver())); \
+ } \
+ Handle<Type> name = Handle<Type>::cast(args.receiver())
inline bool ClampedToInteger(Object* object, int* out) {
@@ -306,7 +300,7 @@ inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
MUST_USE_RESULT static Object* CallJsIntrinsic(
Isolate* isolate, Handle<JSFunction> function,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
+ BuiltinArguments<BuiltinExtraArguments::kNone> args) {
HandleScope handleScope(isolate);
int argc = args.length() - 1;
ScopedVector<Handle<Object> > argv(argc);
@@ -464,6 +458,14 @@ BUILTIN(ArraySlice) {
int relative_end = 0;
bool is_sloppy_arguments = false;
+ // TODO(littledan): Look up @@species only once, not once here and
+ // again in the JS builtin. Pass the species out?
+ Handle<Object> species;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+ if (*species != isolate->context()->native_context()->array_function()) {
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
if (receiver->IsJSArray()) {
DisallowHeapAllocation no_gc;
JSArray* array = JSArray::cast(*receiver);
@@ -549,6 +551,14 @@ BUILTIN(ArraySplice) {
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
+ // TODO(littledan): Look up @@species only once, not once here and
+ // again in the JS builtin. Pass the species out?
+ Handle<Object> species;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+ if (*species != isolate->context()->native_context()->array_function()) {
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+ }
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
@@ -955,7 +965,7 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
-bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
+bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
uint32_t length, ArrayConcatVisitor* visitor) {
for (uint32_t i = 0; i < length; ++i) {
HandleScope loop_scope(isolate);
@@ -975,7 +985,7 @@ bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
/**
- * A helper function that visits elements of a JSObject in numerical
+ * A helper function that visits "array" elements of a JSReceiver in numerical
* order.
*
* The visitor argument called for each existing element in the array
@@ -984,7 +994,7 @@ bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
* length.
* Returns false if any access threw an exception, otherwise true.
*/
-bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
+bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
ArrayConcatVisitor* visitor) {
uint32_t length = 0;
@@ -1010,15 +1020,16 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
// use the slow case.
return IterateElementsSlow(isolate, receiver, length, visitor);
}
+ Handle<JSObject> array = Handle<JSObject>::cast(receiver);
- switch (receiver->GetElementsKind()) {
+ switch (array->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
@@ -1027,14 +1038,14 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
if (!element_value->IsTheHole()) {
visitor->visit(j, element_value);
} else {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+ Maybe<bool> maybe = JSReceiver::HasElement(array, j);
if (!maybe.IsJust()) return false;
if (maybe.FromJust()) {
- // Call GetElement on receiver, not its prototype, or getters won't
+ // Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Object::GetElement(isolate, receiver, j), false);
+ isolate, element_value, Object::GetElement(isolate, array, j),
+ false);
visitor->visit(j, element_value);
}
}
@@ -1047,12 +1058,12 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
if (length == 0) break;
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
- if (receiver->elements()->IsFixedArray()) {
- DCHECK(receiver->elements()->length() == 0);
+ if (array->elements()->IsFixedArray()) {
+ DCHECK(array->elements()->length() == 0);
break;
}
Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(receiver->elements()));
+ FixedDoubleArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
DCHECK(fast_length <= elements->length());
for (int j = 0; j < fast_length; j++) {
@@ -1063,15 +1074,15 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
isolate->factory()->NewNumber(double_value);
visitor->visit(j, element_value);
} else {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+ Maybe<bool> maybe = JSReceiver::HasElement(array, j);
if (!maybe.IsJust()) return false;
if (maybe.FromJust()) {
- // Call GetElement on receiver, not its prototype, or getters won't
+ // Call GetElement on array, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Object::GetElement(isolate, receiver, j), false);
+ isolate, element_value, Object::GetElement(isolate, array, j),
+ false);
visitor->visit(j, element_value);
}
}
@@ -1081,17 +1092,17 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
case DICTIONARY_ELEMENTS: {
// CollectElementIndices() can't be called when there's a JSProxy
// on the prototype chain.
- for (PrototypeIterator iter(isolate, receiver); !iter.IsAtEnd();
+ for (PrototypeIterator iter(isolate, array); !iter.IsAtEnd();
iter.Advance()) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return IterateElementsSlow(isolate, receiver, length, visitor);
+ return IterateElementsSlow(isolate, array, length, visitor);
}
}
- Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
+ Handle<SeededNumberDictionary> dict(array->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
// Collect all indices in the object and the prototypes less
// than length. This might introduce duplicates in the indices list.
- CollectElementIndices(receiver, length, &indices);
+ CollectElementIndices(array, length, &indices);
indices.Sort(&compareUInt32);
int j = 0;
int n = indices.length();
@@ -1100,8 +1111,7 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
uint32_t index = indices[j];
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, receiver, index),
- false);
+ isolate, element, Object::GetElement(isolate, array, index), false);
visitor->visit(index, element);
// Skip to next different index (i.e., omit duplicates).
do {
@@ -1112,7 +1122,7 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
}
case UINT8_CLAMPED_ELEMENTS: {
Handle<FixedUint8ClampedArray> pixels(
- FixedUint8ClampedArray::cast(receiver->elements()));
+ FixedUint8ClampedArray::cast(array->elements()));
for (uint32_t j = 0; j < length; j++) {
Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
visitor->visit(j, e);
@@ -1120,43 +1130,43 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
break;
}
case INT8_ELEMENTS: {
- IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, receiver, true,
+ IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, array, true,
true, visitor);
break;
}
case UINT8_ELEMENTS: {
- IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, receiver,
- true, true, visitor);
+ IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, array, true,
+ true, visitor);
break;
}
case INT16_ELEMENTS: {
- IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, receiver,
- true, true, visitor);
+ IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, array, true,
+ true, visitor);
break;
}
case UINT16_ELEMENTS: {
IterateTypedArrayElements<FixedUint16Array, uint16_t>(
- isolate, receiver, true, true, visitor);
+ isolate, array, true, true, visitor);
break;
}
case INT32_ELEMENTS: {
- IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, receiver,
- true, false, visitor);
+ IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, array, true,
+ false, visitor);
break;
}
case UINT32_ELEMENTS: {
IterateTypedArrayElements<FixedUint32Array, uint32_t>(
- isolate, receiver, true, false, visitor);
+ isolate, array, true, false, visitor);
break;
}
case FLOAT32_ELEMENTS: {
- IterateTypedArrayElements<FixedFloat32Array, float>(
- isolate, receiver, false, false, visitor);
+ IterateTypedArrayElements<FixedFloat32Array, float>(isolate, array, false,
+ false, visitor);
break;
}
case FLOAT64_ELEMENTS: {
IterateTypedArrayElements<FixedFloat64Array, double>(
- isolate, receiver, false, false, visitor);
+ isolate, array, false, false, visitor);
break;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -1165,8 +1175,7 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
HandleScope loop_scope(isolate);
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, receiver, index),
- false);
+ isolate, element, Object::GetElement(isolate, array, index), false);
visitor->visit(index, element);
}
break;
@@ -1178,37 +1187,29 @@ bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
+ DCHECK(isolate->IsFastArrayConstructorPrototypeChainIntact());
if (!FLAG_harmony_concat_spreadable) return false;
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
- Maybe<bool> maybe =
- JSReceiver::HasProperty(Handle<JSReceiver>::cast(obj), key);
- if (!maybe.IsJust()) return false;
- return maybe.FromJust();
+ Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
+ return maybe.FromMaybe(false);
}
-bool IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
+static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
HandleScope handle_scope(isolate);
- if (!obj->IsSpecObject()) return false;
+ if (!obj->IsJSReceiver()) return Just(false);
if (FLAG_harmony_concat_spreadable) {
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
Handle<Object> value;
MaybeHandle<Object> maybeValue =
i::Runtime::GetObjectProperty(isolate, obj, key);
- if (maybeValue.ToHandle(&value) && !value->IsUndefined()) {
- return value->BooleanValue();
- }
+ if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+ if (!value->IsUndefined()) return Just(value->BooleanValue());
}
- return obj->IsJSArray();
+ return Object::IsArray(obj);
}
-/**
- * Array::concat implementation.
- * See ECMAScript 262, 15.4.4.4.
- * TODO(581): Fix non-compliance for very large concatenations and update to
- * following the ECMAScript 5 specification.
- */
Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
int argument_count = args->length();
@@ -1364,10 +1365,10 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
for (int i = 0; i < argument_count; i++) {
Handle<Object> obj((*args)[i], isolate);
- bool spreadable = IsConcatSpreadable(isolate, obj);
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- if (spreadable) {
- Handle<JSObject> object = Handle<JSObject>::cast(obj);
+ Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
+ MAYBE_RETURN(spreadable, isolate->heap()->exception());
+ if (spreadable.FromJust()) {
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(obj);
if (!IterateElements(isolate, object, &visitor)) {
return isolate->heap()->exception();
}
@@ -1427,6 +1428,7 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
} // namespace
+// ES6 22.1.3.1 Array.prototype.concat
BUILTIN(ArrayConcat) {
HandleScope scope(isolate);
@@ -1449,6 +1451,266 @@ BUILTIN(ArrayConcat) {
}
+// ES6 22.1.2.2 Array.isArray
+BUILTIN(ArrayIsArray) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> object = args.at<Object>(1);
+ Maybe<bool> result = Object::IsArray(object);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 19.1.2.1 Object.assign
+BUILTIN(ObjectAssign) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+
+ // 1. Let to be ? ToObject(target).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
+ Execution::ToObject(isolate, target));
+ Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
+ // 2. If only one argument was passed, return to.
+ if (args.length() == 2) return *to;
+ // 3. Let sources be the List of argument values starting with the
+ // second argument.
+ // 4. For each element nextSource of sources, in ascending index order,
+ for (int i = 2; i < args.length(); ++i) {
+ Handle<Object> next_source = args.at<Object>(i);
+ // 4a. If nextSource is undefined or null, let keys be an empty List.
+ if (next_source->IsUndefined() || next_source->IsNull()) continue;
+ // 4b. Else,
+ // 4b i. Let from be ToObject(nextSource).
+ Handle<JSReceiver> from =
+ Object::ToObject(isolate, next_source).ToHandleChecked();
+ // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(from, JSReceiver::OWN_ONLY,
+ ALL_PROPERTIES, KEEP_NUMBERS));
+ // 4c. Repeat for each element nextKey of keys in List order,
+ for (int j = 0; j < keys->length(); ++j) {
+ Handle<Object> next_key(keys->get(j), isolate);
+ // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+ if (found.IsNothing()) return isolate->heap()->exception();
+ // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+ if (found.FromJust() && desc.enumerable()) {
+ // 4c ii 1. Let propValue be ? Get(from, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prop_value,
+ Runtime::GetObjectProperty(isolate, from, next_key, STRICT));
+ // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+ Handle<Object> status;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
+ prop_value, STRICT));
+ }
+ }
+ }
+ // 5. Return to.
+ return *to;
+}
+
+
+// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+BUILTIN(ObjectCreate) {
+ HandleScope scope(isolate);
+ Handle<Object> prototype = args.atOrUndefined(isolate, 1);
+ if (!prototype->IsNull() && !prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+ }
+
+ // Generate the map with the specified {prototype} based on the Object
+ // function's initial map from the current native context.
+ // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+ // slack tracking for Object.create.
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype, FAST_PROTOTYPE);
+ }
+
+ // Actually allocate the object.
+ Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+
+ // Define the properties if properties was specified and is not undefined.
+ Handle<Object> properties = args.atOrUndefined(isolate, 2);
+ if (!properties->IsUndefined()) {
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSReceiver::DefineProperties(isolate, object, properties));
+ }
+
+ return *object;
+}
+
+
+// ES6 section 19.1.2.5 Object.freeze ( O )
+BUILTIN(ObjectFreeze) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+ FROZEN, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+
+// ES6 section 19.1.2.11 Object.isExtensible ( O )
+BUILTIN(ObjectIsExtensible) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+ : Just(false);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 19.1.2.12 Object.isFrozen ( O )
+BUILTIN(ObjectIsFrozen) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result = object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ Handle<JSReceiver>::cast(object), FROZEN)
+ : Just(true);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 19.1.2.13 Object.isSealed ( O )
+BUILTIN(ObjectIsSealed) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result = object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ Handle<JSReceiver>::cast(object), SEALED)
+ : Just(true);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 19.1.2.14 Object.keys ( O )
+BUILTIN(ObjectKeys) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Execution::ToObject(isolate, object));
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(receiver, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS,
+ CONVERT_TO_STRING));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+
+// ES6 section 19.1.2.15 Object.preventExtensions ( O )
+BUILTIN(ObjectPreventExtensions) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+
+// ES6 section 19.1.2.17 Object.seal ( O )
+BUILTIN(ObjectSeal) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+ SEALED, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+
+namespace {
+
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context) {
+ DCHECK(context->allow_code_gen_from_strings()->IsFalse());
+ // Check with callback if set.
+ AllowCodeGenerationFromStringsCallback callback =
+ isolate->allow_code_gen_callback();
+ if (callback == NULL) {
+ // No callback set and code generation disallowed.
+ return false;
+ } else {
+ // Callback set. Let it decide if code generation is allowed.
+ VMState<EXTERNAL> state(isolate);
+ return callback(v8::Utils::ToLocal(context));
+ }
+}
+
+
+MaybeHandle<JSFunction> CompileString(Handle<Context> context,
+ Handle<String> source,
+ ParseRestriction restriction) {
+ Isolate* const isolate = context->GetIsolate();
+ Handle<Context> native_context(context->native_context(), isolate);
+
+ // Check if native context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+ !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ Handle<Object> error_message =
+ native_context->ErrorMessageForCodeGenerationFromStrings();
+ THROW_NEW_ERROR(isolate, NewEvalError(MessageTemplate::kCodeGenFromStrings,
+ error_message),
+ JSFunction);
+ }
+
+ // Compile source string in the native context.
+ Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared(),
+ isolate);
+ return Compiler::GetFunctionFromEval(source, outer_info, native_context,
+ SLOPPY, restriction,
+ RelocInfo::kNoPosition);
+}
+
+} // namespace
+
+
+// ES6 section 18.2.1 eval (x)
+BUILTIN(GlobalEval) {
+ HandleScope scope(isolate);
+ Handle<Object> x = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target();
+ Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+ if (!x->IsString()) return *x;
+ Handle<JSFunction> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function,
+ CompileString(handle(target->native_context(), isolate),
+ Handle<String>::cast(x), NO_PARSE_RESTRICTION));
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
+ return *result;
+}
+
+
// ES6 section 26.1.3 Reflect.defineProperty
BUILTIN(ReflectDefineProperty) {
HandleScope scope(isolate);
@@ -1473,12 +1735,11 @@ BUILTIN(ReflectDefineProperty) {
return isolate->heap()->exception();
}
- bool result =
+ Maybe<bool> result =
JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
name, &desc, Object::DONT_THROW);
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- // TODO(neis): Make DefineOwnProperty return Maybe<bool>.
- return *isolate->factory()->ToBoolean(result);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
}
@@ -1500,21 +1761,18 @@ BUILTIN(ReflectDeleteProperty) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
Object::ToName(isolate, key));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver>::cast(target), name));
-
- return *result;
+ Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
+ Handle<JSReceiver>::cast(target), name, SLOPPY);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
}
// ES6 section 26.1.6 Reflect.get
BUILTIN(ReflectGet) {
HandleScope scope(isolate);
- Handle<Object> undef = isolate->factory()->undefined_value();
- Handle<Object> target = args.length() > 1 ? args.at<Object>(1) : undef;
- Handle<Object> key = args.length() > 2 ? args.at<Object>(2) : undef;
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> key = args.atOrUndefined(isolate, 2);
Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
if (!target->IsJSReceiver()) {
@@ -1556,10 +1814,10 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
Object::ToName(isolate, key));
PropertyDescriptor desc;
- bool found = JSReceiver::GetOwnPropertyDescriptor(
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
isolate, Handle<JSReceiver>::cast(target), name, &desc);
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- if (!found) return isolate->heap()->undefined_value();
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
return *desc.ToObject(isolate);
}
@@ -1576,8 +1834,10 @@ BUILTIN(ReflectGetPrototypeOf) {
isolate->factory()->NewStringFromAsciiChecked(
"Reflect.getPrototypeOf")));
}
-
- return *Object::GetPrototype(isolate, target);
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
+ Object::GetPrototype(isolate, target));
+ return *prototype;
}
@@ -1619,20 +1879,32 @@ BUILTIN(ReflectIsExtensible) {
"Reflect.isExtensible")));
}
- // TODO(neis): For now, we ignore proxies. Once proxies are fully
- // implemented, do something like the following:
- /*
- Maybe<bool> maybe = JSReceiver::IsExtensible(
- Handle<JSReceiver>::cast(target));
- if (!maybe.IsJust()) return isolate->heap()->exception();
- return *isolate->factory()->ToBoolean(maybe.FromJust());
- */
+ Maybe<bool> result =
+ JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+
+// ES6 section 26.1.11 Reflect.ownKeys
+BUILTIN(ReflectOwnKeys) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
- if (target->IsJSObject()) {
- return *isolate->factory()->ToBoolean(
- JSObject::IsExtensible(Handle<JSObject>::cast(target)));
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.ownKeys")));
}
- return *isolate->factory()->false_value();
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(Handle<JSReceiver>::cast(target),
+ JSReceiver::OWN_ONLY, ALL_PROPERTIES,
+ CONVERT_TO_STRING));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -1651,18 +1923,17 @@ BUILTIN(ReflectPreventExtensions) {
Maybe<bool> result = JSReceiver::PreventExtensions(
Handle<JSReceiver>::cast(target), Object::DONT_THROW);
- return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
- : isolate->heap()->exception();
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
}
// ES6 section 26.1.13 Reflect.set
BUILTIN(ReflectSet) {
HandleScope scope(isolate);
- Handle<Object> undef = isolate->factory()->undefined_value();
- Handle<Object> target = args.length() > 1 ? args.at<Object>(1) : undef;
- Handle<Object> key = args.length() > 2 ? args.at<Object>(2) : undef;
- Handle<Object> value = args.length() > 3 ? args.at<Object>(3) : undef;
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> key = args.atOrUndefined(isolate, 2);
+ Handle<Object> value = args.atOrUndefined(isolate, 3);
Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
if (!target->IsJSReceiver()) {
@@ -1711,18 +1982,875 @@ BUILTIN(ReflectSetPrototypeOf) {
}
-// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
-BUILTIN(DateToPrimitive) {
+// -----------------------------------------------------------------------------
+// ES6 section 20.3 Date Objects
+
+
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+
+// 20.3.1.2 Day Number and Time within Day
+const double kMsPerDay = 86400000.0;
+
+
+// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time) {
+ if (std::isfinite(day) && std::isfinite(time)) {
+ return time + day * kMsPerDay;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date) {
+ if ((kMinYear <= year && year <= kMaxYear) &&
+ (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+ int y = FastD2I(year);
+ int m = FastD2I(month);
+ y += m / 12;
+ m %= 12;
+ if (m < 0) {
+ m += 12;
+ y -= 1;
+ }
+ DCHECK_LE(0, m);
+ DCHECK_LT(m, 12);
+
+ // kYearDelta is an arbitrary number such that:
+ // a) kYearDelta = -1 (mod 400)
+ // b) year + kYearDelta > 0 for years in the range defined by
 + // ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be an overflow for 32-bit integers in the following
+ // operations.
+ static const int kYearDelta = 399999;
+ static const int kBaseDay =
+ 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+ (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
+ int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+ (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+ kBaseDay;
+ if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+ static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ day_from_year += kDayFromMonth[m];
+ } else {
+ static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+ day_from_year += kDayFromMonth[m];
+ }
+ return static_cast<double>(day_from_year - 1) + date;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms) {
+ if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+ std::isfinite(ms)) {
+ double const h = DoubleToInteger(hour);
+ double const m = DoubleToInteger(min);
+ double const s = DoubleToInteger(sec);
+ double const milli = DoubleToInteger(ms);
+ return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+
+// ES6 section 20.3.1.15 TimeClip (time)
+double TimeClip(double time) {
+ if (-DateCache::kMaxTimeInMs <= time && time <= DateCache::kMaxTimeInMs) {
+ return DoubleToInteger(time) + 0.0;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+
+const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
+ "Thu", "Fri", "Sat"};
+const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+
+
+// ES6 section 20.3.1.16 Date Time String Format
+double ParseDateTimeString(Handle<String> str) {
+ Isolate* const isolate = str->GetIsolate();
+ str = String::Flatten(str);
+ // TODO(bmeurer): Change DateParser to not use the FixedArray.
+ Handle<FixedArray> tmp =
+ isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent str_content = str->GetFlatContent();
+ bool result;
+ if (str_content.IsOneByte()) {
+ result = DateParser::Parse(str_content.ToOneByteVector(), *tmp,
+ isolate->unicode_cache());
+ } else {
+ result = DateParser::Parse(str_content.ToUC16Vector(), *tmp,
+ isolate->unicode_cache());
+ }
+ if (!result) return std::numeric_limits<double>::quiet_NaN();
+ double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
+ tmp->get(2)->Number());
+ double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
+ tmp->get(5)->Number(), tmp->get(6)->Number());
+ double date = MakeDate(day, time);
+ if (tmp->get(7)->IsNull()) {
+ if (!std::isnan(date)) {
+ date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
+ }
+ } else {
+ date -= tmp->get(7)->Number() * 1000.0;
+ }
+ return date;
+}
+
+
+enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
+
+
+// ES6 section 20.3.4.41.1 ToDateString(tv)
+void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
+ ToDateStringMode mode = kDateAndTime) {
+ if (std::isnan(time_val)) {
+ SNPrintF(str, "Invalid Date");
+ return;
+ }
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = date_cache->ToLocal(time_ms);
+ int year, month, day, weekday, hour, min, sec, ms;
+ date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
+ &min, &sec, &ms);
+ int timezone_offset = -date_cache->TimezoneOffset(time_ms);
+ int timezone_hour = std::abs(timezone_offset) / 60;
+ int timezone_min = std::abs(timezone_offset) % 60;
+ const char* local_timezone = date_cache->LocalTimezone(time_ms);
+ switch (mode) {
+ case kDateOnly:
+ SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
+ kShortMonths[month], day, year);
+ return;
+ case kTimeOnly:
+ SNPrintF(str, "%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
+ (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
+ local_timezone);
+ return;
+ case kDateAndTime:
+ SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
+ min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
+ timezone_min, local_timezone);
+ return;
+ }
+ UNREACHABLE();
+}
+
+
+Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ Isolate* const isolate = date->GetIsolate();
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+} // namespace
+
+
+// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
+BUILTIN(DateConstructor) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- if (!args.receiver()->IsJSReceiver()) {
+ double const time_val = JSDate::CurrentTimeValue(isolate);
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(time_val, str, isolate->date_cache());
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
+BUILTIN(DateConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ int const argc = args.length() - 1;
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ double time_val;
+ if (argc == 0) {
+ time_val = JSDate::CurrentTimeValue(isolate);
+ } else if (argc == 1) {
+ Handle<Object> value = args.at<Object>(1);
+ if (value->IsJSDate()) {
+ time_val = Handle<JSDate>::cast(value)->value()->Number();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToPrimitive(value));
+ if (value->IsString()) {
+ time_val = ParseDateTimeString(Handle<String>::cast(value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(value));
+ time_val = value->Number();
+ }
+ }
+ } else {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at<Object>(1)));
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at<Object>(2)));
+ double year = year_object->Number();
+ double month = month_object->Number();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at<Object>(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ms = ms_object->Number();
+ }
+ }
+ }
+ }
+ }
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ time_val = MakeDate(day, time);
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
+ }
+ Handle<JSDate> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSDate::New(target, new_target, time_val));
+ return *result;
+}
+
+
+// ES6 section 20.3.3.1 Date.now ( )
+BUILTIN(DateNow) {
+ HandleScope scope(isolate);
+ return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
+}
+
+
+// ES6 section 20.3.3.2 Date.parse ( string )
+BUILTIN(DateParse) {
+ HandleScope scope(isolate);
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, string,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+ return *isolate->factory()->NewNumber(ParseDateTimeString(string));
+}
+
+
+// ES6 section 20.3.3.4 Date.UTC (year,month,date,hours,minutes,seconds,ms)
+BUILTIN(DateUTC) {
+ HandleScope scope(isolate);
+ int const argc = args.length() - 1;
+ double year = std::numeric_limits<double>::quiet_NaN();
+ double month = std::numeric_limits<double>::quiet_NaN();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 1) {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at<Object>(1)));
+ year = year_object->Number();
+ if (argc >= 2) {
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at<Object>(2)));
+ month = month_object->Number();
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object,
+ Object::ToNumber(args.at<Object>(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ms = ms_object->Number();
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ return *isolate->factory()->NewNumber(TimeClip(MakeDate(day, time)));
+}
+
+
+// ES6 section 20.3.4.20 Date.prototype.setDate ( date )
+BUILTIN(DatePrototypeSetDate) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setDate");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ time_val = MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.21 Date.prototype.setFullYear (year, month, date)
+BUILTIN(DatePrototypeSetFullYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setFullYear");
+ int const argc = args.length() - 1;
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double y = year->Number(), m = 0.0, dt = 1.0;
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ if (argc >= 2) {
+ Handle<Object> month = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ m = month->Number();
+ if (argc >= 3) {
+ Handle<Object> date = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ }
+ double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.22 Date.prototype.setHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetHours) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setHours");
+ int const argc = args.length() - 1;
+ Handle<Object> hour = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ double h = hour->Number();
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> min = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ m = min->Number();
+ if (argc >= 3) {
+ Handle<Object> sec = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 4) {
+ Handle<Object> ms = args.at<Object>(4);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.23 Date.prototype.setMilliseconds(ms)
+BUILTIN(DatePrototypeSetMilliseconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMilliseconds");
+ Handle<Object> ms = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ int m = (time_within_day / (60 * 1000)) % 60;
+ int s = (time_within_day / 1000) % 60;
+ time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.24 Date.prototype.setMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetMinutes) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMinutes");
+ int const argc = args.length() - 1;
+ Handle<Object> min = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = min->Number();
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> sec = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 3) {
+ Handle<Object> ms = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
+BUILTIN(DatePrototypeSetMonth) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
+ int const argc = args.length() - 1;
+ Handle<Object> month = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, unused, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+ double m = month->Number();
+ double dt = day;
+ if (argc >= 2) {
+ Handle<Object> date = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetSeconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setSeconds");
+ int const argc = args.length() - 1;
+ Handle<Object> sec = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = sec->Number();
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> ms = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// ES6 section 20.3.4.27 Date.prototype.setTime ( time )
+BUILTIN(DatePrototypeSetTime) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setTime");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ return *JSDate::SetValue(date, TimeClip(value->Number()));
+}
+
+
+// ES6 section 20.3.4.28 Date.prototype.setUTCDate ( date )
+BUILTIN(DatePrototypeSetUTCDate) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCDate");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ if (std::isnan(date->value()->Number())) return date->value();
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int const days = isolate->date_cache()->DaysFromTime(time_ms);
+ int const time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ double const time_val =
+ MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.29 Date.prototype.setUTCFullYear (year, month, date)
+BUILTIN(DatePrototypeSetUTCFullYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCFullYear");
+ int const argc = args.length() - 1;
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double y = year->Number(), m = 0.0, dt = 1.0;
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int const days = isolate->date_cache()->DaysFromTime(time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ if (argc >= 2) {
+ Handle<Object> month = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ m = month->Number();
+ if (argc >= 3) {
+ Handle<Object> date = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ }
+ double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.30 Date.prototype.setUTCHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetUTCHours) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCHours");
+ int const argc = args.length() - 1;
+ Handle<Object> hour = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ double h = hour->Number();
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> min = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ m = min->Number();
+ if (argc >= 3) {
+ Handle<Object> sec = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 4) {
+ Handle<Object> ms = args.at<Object>(4);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.31 Date.prototype.setUTCMilliseconds(ms)
+BUILTIN(DatePrototypeSetUTCMilliseconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMilliseconds");
+ Handle<Object> ms = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ int m = (time_within_day / (60 * 1000)) % 60;
+ int s = (time_within_day / 1000) % 60;
+ time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.32 Date.prototype.setUTCMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetUTCMinutes) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMinutes");
+ int const argc = args.length() - 1;
+ Handle<Object> min = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = min->Number();
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> sec = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 3) {
+ Handle<Object> ms = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.31 Date.prototype.setUTCMonth ( month, date )
+BUILTIN(DatePrototypeSetUTCMonth) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
+ int const argc = args.length() - 1;
+ Handle<Object> month = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int days = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, unused, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+ double m = month->Number();
+ double dt = day;
+ if (argc >= 2) {
+ Handle<Object> date = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetUTCSeconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCSeconds");
+ int const argc = args.length() - 1;
+ Handle<Object> sec = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = sec->Number();
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> ms = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+
+// ES6 section 20.3.4.35 Date.prototype.toDateString ( )
+BUILTIN(DatePrototypeToDateString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(date->value()->Number(), str, isolate->date_cache(), kDateOnly);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.36 Date.prototype.toISOString ( )
+BUILTIN(DatePrototypeToISOString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toISOString");
+ double const time_val = date->value()->Number();
+ if (std::isnan(time_val)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->NewStringFromAsciiChecked(
- "Date.prototype [ @@toPrimitive ]"),
- args.receiver()));
+ isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
+ }
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int year, month, day, weekday, hour, min, sec, ms;
+ isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+ &hour, &min, &sec, &ms);
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ if (year >= 0 && year <= 9999) {
+ SNPrintF(str, "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
+ hour, min, sec, ms);
+ } else if (year < 0) {
+ SNPrintF(str, "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year, month + 1, day,
+ hour, min, sec, ms);
+ } else {
+ SNPrintF(str, "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
+ hour, min, sec, ms);
+ }
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.41 Date.prototype.toString ( )
+BUILTIN(DatePrototypeToString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(date->value()->Number(), str, isolate->date_cache());
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.42 Date.prototype.toTimeString ( )
+BUILTIN(DatePrototypeToTimeString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ ToDateString(date->value()->Number(), str, isolate->date_cache(), kTimeOnly);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.43 Date.prototype.toUTCString ( )
+BUILTIN(DatePrototypeToUTCString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
+ double const time_val = date->value()->Number();
+ if (std::isnan(time_val)) {
+ return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
}
- Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+ char buffer[128];
+ Vector<char> str(buffer, arraysize(buffer));
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int year, month, day, weekday, hour, min, sec, ms;
+ isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+ &hour, &min, &sec, &ms);
+ SNPrintF(str, "%s, %02d %s %4d %02d:%02d:%02d GMT", kShortWeekDays[weekday],
+ day, kShortMonths[month], year, hour, min, sec);
+ return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+}
+
+
+// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
+BUILTIN(DatePrototypeValueOf) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
+ return date->value();
+}
+
+
+// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
+BUILTIN(DatePrototypeToPrimitive) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
Handle<Object> hint = args.at<Object>(1);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
@@ -1731,12 +2859,375 @@ BUILTIN(DateToPrimitive) {
}
+// ES6 section B.2.4.1 Date.prototype.getYear ( )
+BUILTIN(DatePrototypeGetYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.getYear");
+ double time_val = date->value()->Number();
+ if (std::isnan(time_val)) return date->value();
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ return Smi::FromInt(year - 1900);
+}
+
+
+// ES6 section B.2.4.2 Date.prototype.setYear ( year )
+BUILTIN(DatePrototypeSetYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setYear");
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double m = 0.0, dt = 1.0, y = year->Number();
+ if (0.0 <= y && y <= 99.0) {
+ y = 1900.0 + DoubleToInteger(y);
+ }
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return SetLocalDateValue(date, time_val);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDay);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kYear);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kHour);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMinute);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMonth);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kSecond);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
+}
+
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
+}
+
+
+namespace {
+
+// ES6 section 19.2.1.1.1 CreateDynamicFunction
+MaybeHandle<JSFunction> CreateDynamicFunction(
+ Isolate* isolate,
+ BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget> args,
+ const char* token) {
+ // Compute number of arguments, ignoring the receiver.
+ DCHECK_LE(1, args.length());
+ int const argc = args.length() - 1;
+
+ // Build the source string.
+ Handle<String> source;
+ {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCharacter('(');
+ builder.AppendCString(token);
+ builder.AppendCharacter('(');
+ bool parenthesis_in_arg_string = false;
+ if (argc > 1) {
+ for (int i = 1; i < argc; ++i) {
+ if (i > 1) builder.AppendCharacter(',');
+ Handle<String> param;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, param, Object::ToString(isolate, args.at<Object>(i)),
+ JSFunction);
+ param = String::Flatten(param);
+ builder.AppendString(param);
+ // If the formal parameters string include ) - an illegal
+ // character - it may make the combined function expression
+ // compile. We avoid this problem by checking for this early on.
+ DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
+ String::FlatContent param_content = param->GetFlatContent();
+ for (int i = 0, length = param->length(); i < length; ++i) {
+ if (param_content.Get(i) == ')') {
+ parenthesis_in_arg_string = true;
+ break;
+ }
+ }
+ }
+ // If the formal parameters include an unbalanced block comment, the
+ // function must be rejected. Since JavaScript does not allow nested
+ // comments we can include a trailing block comment to catch this.
+ builder.AppendCString("\n/**/");
+ }
+ builder.AppendCString(") {\n");
+ if (argc > 0) {
+ Handle<String> body;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
+ JSFunction);
+ builder.AppendString(body);
+ }
+ builder.AppendCString("\n})");
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), JSFunction);
+
+ // The SyntaxError must be thrown after all the (observable) ToString
+ // conversions are done.
+ if (parenthesis_in_arg_string) {
+ THROW_NEW_ERROR(isolate,
+ NewSyntaxError(MessageTemplate::kParenthesisInArgString),
+ JSFunction);
+ }
+ }
+
+ // Compile the string in the constructor and not a helper so that errors to
+ // come from here.
+ Handle<JSFunction> target = args.target();
+ Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+ Handle<JSFunction> function;
+ {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, function,
+ CompileString(handle(target->native_context(), isolate), source,
+ ONLY_SINGLE_FUNCTION_LITERAL),
+ JSFunction);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, target_global_proxy, 0, nullptr),
+ JSFunction);
+ function = Handle<JSFunction>::cast(result);
+ function->shared()->set_name_should_print_as_anonymous(true);
+ }
+
+ // If new.target is equal to target then the function created
+ // is already correctly setup and nothing else should be done
+ // here. But if new.target is not equal to target then we are
+ // have a Function builtin subclassing case and therefore the
+ // function has wrong initial map. To fix that we create a new
+ // function object with correct initial map.
+ Handle<Object> unchecked_new_target = args.new_target();
+ if (!unchecked_new_target->IsUndefined() &&
+ !unchecked_new_target.is_identical_to(target)) {
+ Handle<JSReceiver> new_target =
+ Handle<JSReceiver>::cast(unchecked_new_target);
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, target, new_target), JSFunction);
+
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+ Handle<Map> map = Map::AsLanguageMode(
+ initial_map, shared_info->language_mode(), shared_info->kind());
+
+ Handle<Context> context(function->context(), isolate);
+ function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ map, shared_info, context, NOT_TENURED);
+ }
+ return function;
+}
+
+} // namespace
+
+
+// ES6 section 19.2.1.1 Function ( p1, p2, ... , pn, body )
+BUILTIN(FunctionConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, CreateDynamicFunction(isolate, args, "function"));
+ return *result;
+}
+
+
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+BUILTIN(FunctionPrototypeBind) {
+ HandleScope scope(isolate);
+ DCHECK_LE(1, args.length());
+ if (!args.receiver()->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kFunctionBind));
+ }
+
+ // Allocate the bound function with the given {this_arg} and {args}.
+ Handle<JSReceiver> target = args.at<JSReceiver>(0);
+ Handle<Object> this_arg = isolate->factory()->undefined_value();
+ ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
+ if (args.length() > 1) {
+ this_arg = args.at<Object>(1);
+ for (int i = 2; i < args.length(); ++i) {
+ argv[i - 2] = args.at<Object>(i);
+ }
+ }
+ Handle<JSBoundFunction> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function,
+ isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
+
+ // TODO(bmeurer): Optimize the rest for the common cases where {target} is
+ // a function with some initial map or even a bound function.
+ // Setup the "length" property based on the "length" of the {target}.
+ Handle<Object> length(Smi::FromInt(0), isolate);
+ Maybe<bool> target_has_length =
+ JSReceiver::HasOwnProperty(target, isolate->factory()->length_string());
+ if (!target_has_length.IsJust()) {
+ return isolate->heap()->exception();
+ } else if (target_has_length.FromJust()) {
+ Handle<Object> target_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, target_length,
+ JSReceiver::GetProperty(target, isolate->factory()->length_string()));
+ if (target_length->IsNumber()) {
+ length = isolate->factory()->NewNumber(std::max(
+ 0.0, DoubleToInteger(target_length->Number()) - argv.length()));
+ }
+ }
+ function->set_length(*length);
+
+ // Setup the "name" property based on the "name" of the {target}.
+ Handle<Object> target_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, target_name,
+ JSReceiver::GetProperty(target, isolate->factory()->name_string()));
+ Handle<String> name;
+ if (!target_name->IsString()) {
+ name = isolate->factory()->bound__string();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name, Name::ToFunctionName(Handle<String>::cast(target_name)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name, isolate->factory()->NewConsString(
+ isolate->factory()->bound__string(), name));
+ }
+ function->set_name(*name);
+ return *function;
+}
+
+
+// ES6 section 19.2.3.5 Function.prototype.toString ( )
+BUILTIN(FunctionPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (receiver->IsJSBoundFunction()) {
+ return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
+ } else if (receiver->IsJSFunction()) {
+ return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
+ }
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Function.prototype.toString")));
+}
+
+
+// ES6 section 25.2.1.1 GeneratorFunction (p1, p2, ... , pn, body)
+BUILTIN(GeneratorFunctionConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, CreateDynamicFunction(isolate, args, "function*"));
+ return *result;
+}
+
+
// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
BUILTIN(SymbolConstructor) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
Handle<Symbol> result = isolate->factory()->NewSymbol();
- Handle<Object> description = args.at<Object>(1);
+ Handle<Object> description = args.atOrUndefined(isolate, 1);
if (!description->IsUndefined()) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
Object::ToString(isolate, description));
@@ -1755,6 +3246,100 @@ BUILTIN(SymbolConstructor_ConstructStub) {
}
+// ES6 19.1.3.6 Object.prototype.toString
+BUILTIN(ObjectProtoToString) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSObject::ObjectProtoToString(isolate, object));
+ return *result;
+}
+
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
+BUILTIN(ArrayBufferConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target();
+ DCHECK(*target == target->native_context()->array_buffer_fun() ||
+ *target == target->native_context()->shared_array_buffer_fun());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->name(), isolate)));
+}
+
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
+BUILTIN(ArrayBufferConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> length = args.atOrUndefined(isolate, 1);
+ DCHECK(*target == target->native_context()->array_buffer_fun() ||
+ *target == target->native_context()->shared_array_buffer_fun());
+ Handle<Object> number_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+ Object::ToInteger(isolate, length));
+ if (number_length->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, target, new_target));
+ size_t byte_length;
+ if (!TryNumberToSize(isolate, *number_length, &byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ Handle<JSArrayBuffer> result = Handle<JSArrayBuffer>::cast(
+ isolate->factory()->NewJSObjectFromMap(initial_map));
+ SharedFlag shared_flag =
+ (*target == target->native_context()->array_buffer_fun())
+ ? SharedFlag::kNotShared
+ : SharedFlag::kShared;
+ if (!JSArrayBuffer::SetupAllocatingData(result, isolate, byte_length, true,
+ shared_flag)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
+ }
+ return *result;
+}
+
+
+// ES6 section 24.1.3.1 ArrayBuffer.isView ( arg )
+BUILTIN(ArrayBufferIsView) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ Object* arg = args[1];
+ return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
+}
+
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
+BUILTIN(ProxyConstructor) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked("Proxy")));
+}
+
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
+BUILTIN(ProxyConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ DCHECK(isolate->proxy_function()->IsConstructor());
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> handler = args.atOrUndefined(isolate, 2);
+ Handle<JSProxy> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSProxy::New(isolate, target, handler));
+ return *result;
+}
+
+
// -----------------------------------------------------------------------------
// Throwers for restricted function properties and strict arguments object
// properties
@@ -1778,11 +3363,14 @@ BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
//
+namespace {
+
template <bool is_construct>
-MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
- Isolate* isolate, BuiltinArguments<NEEDS_CALLED_FUNCTION>& args) {
+MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
+ Isolate* isolate, BuiltinArguments<BuiltinExtraArguments::kTarget> args) {
HandleScope scope(isolate);
- Handle<JSFunction> function = args.called_function();
+ Handle<JSFunction> function = args.target();
+ DCHECK(args.receiver()->IsJSReceiver());
// TODO(ishell): turn this back to a DCHECK.
CHECK(function->shared()->IsApiFunction());
@@ -1796,11 +3384,8 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
Object);
}
- DCHECK(!args[0]->IsNull());
- if (args[0]->IsUndefined()) args[0] = function->global_proxy();
-
if (!is_construct && !fun_data->accept_any_receiver()) {
- Handle<Object> receiver(&args[0]);
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
@@ -1857,10 +3442,11 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
return scope.CloseAndEscape(args.receiver());
}
+} // namespace
+
BUILTIN(HandleApiCall) {
HandleScope scope(isolate);
- DCHECK(!CalledAsConstructor(isolate));
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
HandleApiCallHelper<false>(isolate, args));
@@ -1870,7 +3456,6 @@ BUILTIN(HandleApiCall) {
BUILTIN(HandleApiCallConstruct) {
HandleScope scope(isolate);
- DCHECK(CalledAsConstructor(isolate));
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
HandleApiCallHelper<true>(isolate, args));
@@ -1908,11 +3493,12 @@ Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
namespace {
-class RelocatableArguments : public BuiltinArguments<NEEDS_CALLED_FUNCTION>,
- public Relocatable {
+class RelocatableArguments
+ : public BuiltinArguments<BuiltinExtraArguments::kTarget>,
+ public Relocatable {
public:
RelocatableArguments(Isolate* isolate, int length, Object** arguments)
- : BuiltinArguments<NEEDS_CALLED_FUNCTION>(length, arguments),
+ : BuiltinArguments<BuiltinExtraArguments::kTarget>(length, arguments),
Relocatable(isolate) {}
virtual inline void IterateInstance(ObjectVisitor* v) {
@@ -1962,12 +3548,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<JSFunction> function,
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
- Isolate* isolate,
- bool is_construct_call,
- BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
- // Non-functions are never called as constructors. Even if this is an object
- // called as a constructor the delegate call is not a construct call.
- DCHECK(!CalledAsConstructor(isolate));
+ Isolate* isolate, bool is_construct_call,
+ BuiltinArguments<BuiltinExtraArguments::kNone> args) {
Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
@@ -2159,11 +3741,6 @@ static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
}
-static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
- DebugCodegen::GeneratePlainReturnLiveEdit(masm);
-}
-
-
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
DebugCodegen::GenerateFrameDropperLiveEdit(masm);
}
@@ -2224,36 +3801,34 @@ void Builtins::InitBuiltinFunctionTable() {
functions[builtin_count].s_name = NULL;
functions[builtin_count].name = builtin_count;
functions[builtin_count].flags = static_cast<Code::Flags>(0);
- functions[builtin_count].extra_args = NO_EXTRA_ARGUMENTS;
-
-#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
- functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
- functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
- functions->s_name = #aname; \
- functions->name = c_##aname; \
- functions->flags = Code::ComputeFlags(Code::BUILTIN); \
- functions->extra_args = aextra_args; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeFlags(Code::kind, \
- state, \
- extra); \
- functions->extra_args = NO_EXTRA_ARGUMENTS; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_H(aname, kind) \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeHandlerFlags(Code::kind); \
- functions->extra_args = NO_EXTRA_ARGUMENTS; \
- ++functions;
+ functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
+
+#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
+ functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
+ functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
+ functions->s_name = #aname; \
+ functions->name = c_##aname; \
+ functions->flags = Code::ComputeFlags(Code::BUILTIN); \
+ functions->extra_args = BuiltinExtraArguments::aextra_args; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
+ functions->extra_args = BuiltinExtraArguments::kNone; \
+ ++functions;
+
+#define DEF_FUNCTION_PTR_H(aname, kind) \
+ functions->generator = FUNCTION_ADDR(Generate_##aname); \
+ functions->c_code = NULL; \
+ functions->s_name = #aname; \
+ functions->name = k##aname; \
+ functions->flags = Code::ComputeHandlerFlags(Code::kind); \
+ functions->extra_args = BuiltinExtraArguments::kNone; \
+ ++functions;
BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
@@ -2291,7 +3866,8 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
- MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
+ MacroAssembler masm(isolate, u.buffer, sizeof u.buffer,
+ CodeObjectRequired::kYes);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
@@ -2357,12 +3933,12 @@ const char* Builtins::Lookup(byte* pc) {
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+ masm->TailCallRuntime(Runtime::kInterrupt);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+ masm->TailCallRuntime(Runtime::kStackGuard);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index c1159a8d52..a707a94752 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -5,17 +5,24 @@
#ifndef V8_BUILTINS_H_
#define V8_BUILTINS_H_
+#include "src/base/flags.h"
#include "src/handles.h"
namespace v8 {
namespace internal {
// Specifies extra arguments required by a C++ builtin.
-enum BuiltinExtraArguments {
- NO_EXTRA_ARGUMENTS = 0,
- NEEDS_CALLED_FUNCTION = 1
+enum class BuiltinExtraArguments : uint8_t {
+ kNone = 0u,
+ kTarget = 1u << 0,
+ kNewTarget = 1u << 1,
+ kTargetAndNewTarget = kTarget | kNewTarget
};
+inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
+ return static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs);
+}
+
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
@@ -44,125 +51,215 @@ enum BuiltinExtraArguments {
// Define list of builtins implemented in C++.
-#define BUILTIN_LIST_C(V) \
- V(Illegal, NO_EXTRA_ARGUMENTS) \
- \
- V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
- \
- V(ArrayPush, NO_EXTRA_ARGUMENTS) \
- V(ArrayPop, NO_EXTRA_ARGUMENTS) \
- V(ArrayShift, NO_EXTRA_ARGUMENTS) \
- V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
- V(ArraySlice, NO_EXTRA_ARGUMENTS) \
- V(ArraySplice, NO_EXTRA_ARGUMENTS) \
- V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
- \
- V(DateToPrimitive, NO_EXTRA_ARGUMENTS) \
- \
- V(ReflectDefineProperty, NO_EXTRA_ARGUMENTS) \
- V(ReflectDeleteProperty, NO_EXTRA_ARGUMENTS) \
- V(ReflectGet, NO_EXTRA_ARGUMENTS) \
- V(ReflectGetOwnPropertyDescriptor, NO_EXTRA_ARGUMENTS) \
- V(ReflectGetPrototypeOf, NO_EXTRA_ARGUMENTS) \
- V(ReflectHas, NO_EXTRA_ARGUMENTS) \
- V(ReflectIsExtensible, NO_EXTRA_ARGUMENTS) \
- V(ReflectPreventExtensions, NO_EXTRA_ARGUMENTS) \
- V(ReflectSet, NO_EXTRA_ARGUMENTS) \
- V(ReflectSetPrototypeOf, NO_EXTRA_ARGUMENTS) \
- \
- V(SymbolConstructor, NO_EXTRA_ARGUMENTS) \
- V(SymbolConstructor_ConstructStub, NO_EXTRA_ARGUMENTS) \
- \
- V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
- V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
- V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
- \
- V(RestrictedFunctionPropertiesThrower, NO_EXTRA_ARGUMENTS) \
- V(RestrictedStrictArgumentsPropertiesThrower, NO_EXTRA_ARGUMENTS)
+#define BUILTIN_LIST_C(V) \
+ V(Illegal, kNone) \
+ \
+ V(EmptyFunction, kNone) \
+ \
+ V(ArrayConcat, kNone) \
+ V(ArrayIsArray, kNone) \
+ V(ArrayPop, kNone) \
+ V(ArrayPush, kNone) \
+ V(ArrayShift, kNone) \
+ V(ArraySlice, kNone) \
+ V(ArraySplice, kNone) \
+ V(ArrayUnshift, kNone) \
+ \
+ V(ArrayBufferConstructor, kTarget) \
+ V(ArrayBufferConstructor_ConstructStub, kTargetAndNewTarget) \
+ V(ArrayBufferIsView, kNone) \
+ \
+ V(DateConstructor, kNone) \
+ V(DateConstructor_ConstructStub, kTargetAndNewTarget) \
+ V(DateNow, kNone) \
+ V(DateParse, kNone) \
+ V(DateUTC, kNone) \
+ V(DatePrototypeSetDate, kNone) \
+ V(DatePrototypeSetFullYear, kNone) \
+ V(DatePrototypeSetHours, kNone) \
+ V(DatePrototypeSetMilliseconds, kNone) \
+ V(DatePrototypeSetMinutes, kNone) \
+ V(DatePrototypeSetMonth, kNone) \
+ V(DatePrototypeSetSeconds, kNone) \
+ V(DatePrototypeSetTime, kNone) \
+ V(DatePrototypeSetUTCDate, kNone) \
+ V(DatePrototypeSetUTCFullYear, kNone) \
+ V(DatePrototypeSetUTCHours, kNone) \
+ V(DatePrototypeSetUTCMilliseconds, kNone) \
+ V(DatePrototypeSetUTCMinutes, kNone) \
+ V(DatePrototypeSetUTCMonth, kNone) \
+ V(DatePrototypeSetUTCSeconds, kNone) \
+ V(DatePrototypeToDateString, kNone) \
+ V(DatePrototypeToISOString, kNone) \
+ V(DatePrototypeToPrimitive, kNone) \
+ V(DatePrototypeToUTCString, kNone) \
+ V(DatePrototypeToString, kNone) \
+ V(DatePrototypeToTimeString, kNone) \
+ V(DatePrototypeValueOf, kNone) \
+ V(DatePrototypeGetYear, kNone) \
+ V(DatePrototypeSetYear, kNone) \
+ \
+ V(FunctionConstructor, kTargetAndNewTarget) \
+ V(FunctionPrototypeBind, kNone) \
+ V(FunctionPrototypeToString, kNone) \
+ \
+ V(GeneratorFunctionConstructor, kTargetAndNewTarget) \
+ \
+ V(GlobalEval, kTarget) \
+ \
+ V(ObjectAssign, kNone) \
+ V(ObjectCreate, kNone) \
+ V(ObjectFreeze, kNone) \
+ V(ObjectIsExtensible, kNone) \
+ V(ObjectIsFrozen, kNone) \
+ V(ObjectIsSealed, kNone) \
+ V(ObjectKeys, kNone) \
+ V(ObjectPreventExtensions, kNone) \
+ V(ObjectSeal, kNone) \
+ V(ObjectProtoToString, kNone) \
+ \
+ V(ProxyConstructor, kNone) \
+ V(ProxyConstructor_ConstructStub, kTarget) \
+ \
+ V(ReflectDefineProperty, kNone) \
+ V(ReflectDeleteProperty, kNone) \
+ V(ReflectGet, kNone) \
+ V(ReflectGetOwnPropertyDescriptor, kNone) \
+ V(ReflectGetPrototypeOf, kNone) \
+ V(ReflectHas, kNone) \
+ V(ReflectIsExtensible, kNone) \
+ V(ReflectOwnKeys, kNone) \
+ V(ReflectPreventExtensions, kNone) \
+ V(ReflectSet, kNone) \
+ V(ReflectSetPrototypeOf, kNone) \
+ \
+ V(SymbolConstructor, kNone) \
+ V(SymbolConstructor_ConstructStub, kTarget) \
+ \
+ V(HandleApiCall, kTarget) \
+ V(HandleApiCallConstruct, kTarget) \
+ V(HandleApiCallAsFunction, kNone) \
+ V(HandleApiCallAsConstructor, kNone) \
+ \
+ V(RestrictedFunctionPropertiesThrower, kNone) \
+ V(RestrictedStrictArgumentsPropertiesThrower, kNone)
// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
- V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
- LoadICState::kStrongModeState) \
- \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+#define BUILTIN_LIST_A(V) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ConstructBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(Apply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(HandleFastApiCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSBuiltinsConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterNotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterNotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterNotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
+ V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
+ LoadICState::kStrongModeState) \
+ \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kNoExtraICState) \
+ V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(DatePrototypeGetDate, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetDay, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetHours, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetTime, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetTimezoneOffset, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCDate, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCDay, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCHours, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
@@ -181,7 +278,6 @@ enum BuiltinExtraArguments {
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState)
@@ -280,12 +376,13 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
+ static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_InOptimizationQueue(MacroAssembler* masm);
static void Generate_CompileOptimized(MacroAssembler* masm);
static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
- static void Generate_JSConstructStubForDerived(MacroAssembler* masm);
+ static void Generate_JSBuiltinsConstructStub(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
@@ -296,6 +393,8 @@ class Builtins {
static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
+ static void Generate_Apply(MacroAssembler* masm);
+
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
static void Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode);
@@ -310,6 +409,8 @@ class Builtins {
static void Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
Generate_CallFunction(masm, ConvertReceiverMode::kAny);
}
+ // ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList)
+ static void Generate_CallBoundFunction(MacroAssembler* masm);
// ES6 section 7.3.12 Call(F, V, [argumentsList])
static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
static void Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
@@ -324,19 +425,68 @@ class Builtins {
// ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
static void Generate_ConstructFunction(MacroAssembler* masm);
+ // ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget)
+ static void Generate_ConstructBoundFunction(MacroAssembler* masm);
// ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget)
static void Generate_ConstructProxy(MacroAssembler* masm);
// ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget])
static void Generate_Construct(MacroAssembler* masm);
- static void Generate_FunctionCall(MacroAssembler* masm);
- static void Generate_FunctionApply(MacroAssembler* masm);
+ static void Generate_HandleFastApiCall(MacroAssembler* masm);
+
+ static void Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index);
+ // ES6 section 20.3.4.2 Date.prototype.getDate ( )
+ static void Generate_DatePrototypeGetDate(MacroAssembler* masm);
+ // ES6 section 20.3.4.3 Date.prototype.getDay ( )
+ static void Generate_DatePrototypeGetDay(MacroAssembler* masm);
+ // ES6 section 20.3.4.4 Date.prototype.getFullYear ( )
+ static void Generate_DatePrototypeGetFullYear(MacroAssembler* masm);
+ // ES6 section 20.3.4.5 Date.prototype.getHours ( )
+ static void Generate_DatePrototypeGetHours(MacroAssembler* masm);
+ // ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( )
+ static void Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm);
+ // ES6 section 20.3.4.7 Date.prototype.getMinutes ( )
+ static void Generate_DatePrototypeGetMinutes(MacroAssembler* masm);
+ // ES6 section 20.3.4.8 Date.prototype.getMonth ( )
+ static void Generate_DatePrototypeGetMonth(MacroAssembler* masm);
+ // ES6 section 20.3.4.9 Date.prototype.getSeconds ( )
+ static void Generate_DatePrototypeGetSeconds(MacroAssembler* masm);
+ // ES6 section 20.3.4.10 Date.prototype.getTime ( )
+ static void Generate_DatePrototypeGetTime(MacroAssembler* masm);
+ // ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( )
+ static void Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm);
+ // ES6 section 20.3.4.12 Date.prototype.getUTCDate ( )
+ static void Generate_DatePrototypeGetUTCDate(MacroAssembler* masm);
+ // ES6 section 20.3.4.13 Date.prototype.getUTCDay ( )
+ static void Generate_DatePrototypeGetUTCDay(MacroAssembler* masm);
+ // ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( )
+ static void Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm);
+ // ES6 section 20.3.4.15 Date.prototype.getUTCHours ( )
+ static void Generate_DatePrototypeGetUTCHours(MacroAssembler* masm);
+ // ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( )
+ static void Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm);
+ // ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( )
+ static void Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm);
+ // ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( )
+ static void Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm);
+ // ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
+ static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
+
+ static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
+ static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
+
static void Generate_ReflectApply(MacroAssembler* masm);
static void Generate_ReflectConstruct(MacroAssembler* masm);
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
+ // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
+ static void Generate_NumberConstructor(MacroAssembler* masm);
+ // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
+ static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
+
static void Generate_StringConstructor(MacroAssembler* masm);
static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
@@ -348,6 +498,9 @@ class Builtins {
static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm);
static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
+ static void Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm);
+ static void Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm);
+ static void Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm);
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index 5927c22cde..d231bb799d 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -11,18 +11,113 @@ namespace v8 {
namespace internal {
-Cancelable::Cancelable(Isolate* isolate)
- : isolate_(isolate), is_cancelled_(false) {
- isolate->RegisterCancelableTask(this);
+Cancelable::Cancelable(CancelableTaskManager* parent)
+ : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
+ id_ = parent->Register(this);
+ CHECK(id_ != 0);
}
Cancelable::~Cancelable() {
- if (!is_cancelled_) {
- isolate_->RemoveCancelableTask(this);
+ // The following check is needed to avoid calling an already terminated
+ // manager object. This happens when the manager cancels all pending tasks
+ // in {CancelAndWait} only before destroying the manager object.
+ if (TryRun() || IsRunning()) {
+ parent_->RemoveFinishedTask(id_);
}
}
+static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
+
+
+CancelableTaskManager::CancelableTaskManager()
+ : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
+
+
+uint32_t CancelableTaskManager::Register(Cancelable* task) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ uint32_t id = ++task_id_counter_;
+ // The loop below is just used when task_id_counter_ overflows.
+ while ((id == 0) || (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id),
+ id) != nullptr)) {
+ ++id;
+ }
+ HashMap::Entry* entry =
+ cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
+ entry->value = task;
+ return id;
+}
+
+
+void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ USE(removed);
+ DCHECK(removed != nullptr);
+ cancelable_tasks_barrier_.NotifyOne();
+}
+
+
+bool CancelableTaskManager::TryAbort(uint32_t id) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ HashMap::Entry* entry =
+ cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id);
+ if (entry != nullptr) {
+ Cancelable* value = reinterpret_cast<Cancelable*>(entry->value);
+ if (value->Cancel()) {
+ // Cannot call RemoveFinishedTask here because of recursive locking.
+ void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ USE(removed);
+ DCHECK(removed != nullptr);
+ cancelable_tasks_barrier_.NotifyOne();
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void CancelableTaskManager::CancelAndWait() {
+ // Clean up all cancelable fore- and background tasks. Tasks are canceled on
+ // the way if possible, i.e., if they have not started yet. After each round
+ // of canceling we wait for the background tasks that have already been
+ // started.
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ // HashMap does not support removing while iterating, hence keep a set of
+ // entries that are to be removed.
+ std::set<uint32_t> to_remove;
+
+ // Cancelable tasks could potentially register new tasks, requiring a loop
+ // here.
+ while (cancelable_tasks_.occupancy() > 0) {
+ for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
+ p = cancelable_tasks_.Next(p)) {
+ if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
+ to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
+ }
+ }
+ // Remove tasks that were successfully canceled.
+ for (auto id : to_remove) {
+ cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ }
+ to_remove.clear();
+
+ // Finally, wait for already running background tasks.
+ if (cancelable_tasks_.occupancy() > 0) {
+ cancelable_tasks_barrier_.Wait(&mutex_);
+ }
+ }
+}
+
+
+CancelableTask::CancelableTask(Isolate* isolate)
+ : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+
+
+CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
+ : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index bae5b580cd..a8387fcd95 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -6,26 +6,114 @@
#define V8_CANCELABLE_TASK_H_
#include "include/v8-platform.h"
+#include "src/atomic-utils.h"
#include "src/base/macros.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/hashmap.h"
namespace v8 {
namespace internal {
+class Cancelable;
class Isolate;
+// Keeps track of cancelable tasks. It is possible to register and remove tasks
+// from any fore- and background task/thread.
+class CancelableTaskManager {
+ public:
+ CancelableTaskManager();
+
+  // Registers a new cancelable {task}. Returns the unique {id} of the task,
+  // which can be used to try to abort the task by calling {TryAbort}.
+ uint32_t Register(Cancelable* task);
+
+ // Try to abort running a task identified by {id}. The possible outcomes are:
+ // (1) The task is already finished running and thus has been removed from
+ // the manager.
+ // (2) The task is currently running and cannot be canceled anymore.
+ // (3) The task is not yet running (or finished) so it is canceled and
+ // removed.
+ //
+ // Returns {false} for (1) and (2), and {true} for (3).
+ bool TryAbort(uint32_t id);
+
+ // Cancels all remaining registered tasks and waits for tasks that are
+ // already running.
+ void CancelAndWait();
+
+ private:
+  // Only called by the {Cancelable} destructor. The task has finished
+  // executing but still needs to be removed.
+ void RemoveFinishedTask(uint32_t id);
+
+ // To mitigate the ABA problem, the api refers to tasks through an id.
+ uint32_t task_id_counter_;
+
+ // A set of cancelable tasks that are currently registered.
+ HashMap cancelable_tasks_;
+
+ // Mutex and condition variable enabling concurrent register and removing, as
+ // well as waiting for background tasks on {CancelAndWait}.
+ base::ConditionVariable cancelable_tasks_barrier_;
+ base::Mutex mutex_;
+
+ friend class Cancelable;
+
+ DISALLOW_COPY_AND_ASSIGN(CancelableTaskManager);
+};
+
+
class Cancelable {
public:
- explicit Cancelable(Isolate* isolate);
+ explicit Cancelable(CancelableTaskManager* parent);
virtual ~Cancelable();
- virtual void Cancel() { is_cancelled_ = true; }
+ // Never invoke after handing over the task to the platform! The reason is
+ // that {Cancelable} is used in combination with {v8::Task} and handed to
+ // a platform. This step transfers ownership to the platform, which destroys
+ // the task after running it. Since the exact time is not known, we cannot
+ // access the object after handing it to a platform.
+ uint32_t id() { return id_; }
protected:
- Isolate* isolate_;
- bool is_cancelled_;
+ bool TryRun() { return status_.TrySetValue(kWaiting, kRunning); }
+ bool IsRunning() { return status_.Value() == kRunning; }
+ intptr_t CancelAttempts() { return cancel_counter_.Value(); }
private:
+ // Identifies the state a cancelable task is in:
+ // |kWaiting|: The task is scheduled and waiting to be executed. {TryRun} will
+ // succeed.
+ // |kCanceled|: The task has been canceled. {TryRun} will fail.
+ // |kRunning|: The task is currently running and cannot be canceled anymore.
+ enum Status {
+ kWaiting,
+ kCanceled,
+ kRunning,
+ };
+
+ // Use {CancelableTaskManager} to abort a task that has not yet been
+ // executed.
+ bool Cancel() {
+ if (status_.TrySetValue(kWaiting, kCanceled)) {
+ return true;
+ }
+ cancel_counter_.Increment(1);
+ return false;
+ }
+
+ CancelableTaskManager* parent_;
+ AtomicValue<Status> status_;
+ uint32_t id_;
+
+  // The counter is incremented for each failed attempt to cancel the task.
+  // It can be used by the task itself as an indication of how often external
+  // entities tried to abort it.
+ AtomicNumber<intptr_t> cancel_counter_;
+
+ friend class CancelableTaskManager;
+
DISALLOW_COPY_AND_ASSIGN(Cancelable);
};
@@ -33,18 +121,21 @@ class Cancelable {
// Multiple inheritance can be used because Task is a pure interface.
class CancelableTask : public Cancelable, public Task {
public:
- explicit CancelableTask(Isolate* isolate) : Cancelable(isolate) {}
+ explicit CancelableTask(Isolate* isolate);
// Task overrides.
void Run() final {
- if (!is_cancelled_) {
+ if (TryRun()) {
RunInternal();
}
}
virtual void RunInternal() = 0;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(CancelableTask);
};
@@ -52,18 +143,21 @@ class CancelableTask : public Cancelable, public Task {
// Multiple inheritance can be used because IdleTask is a pure interface.
class CancelableIdleTask : public Cancelable, public IdleTask {
public:
- explicit CancelableIdleTask(Isolate* isolate) : Cancelable(isolate) {}
+ explicit CancelableIdleTask(Isolate* isolate);
// IdleTask overrides.
void Run(double deadline_in_seconds) final {
- if (!is_cancelled_) {
+ if (TryRun()) {
RunInternal(deadline_in_seconds);
}
}
virtual void RunInternal(double deadline_in_seconds) = 0;
+ Isolate* isolate() { return isolate_; }
+
private:
+ Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(CancelableIdleTask);
};
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index ad6890bf22..6d31a5f530 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -78,8 +78,7 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
return Callable(
StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- FLAG_vector_stores ? VectorStoreICTrampolineDescriptor(isolate)
- : StoreDescriptor(isolate));
+ VectorStoreICTrampolineDescriptor(isolate));
}
@@ -87,10 +86,9 @@ Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
Callable CodeFactory::StoreICInOptimizedCode(
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state) {
- CallInterfaceDescriptor descriptor =
- FLAG_vector_stores && initialization_state != MEGAMORPHIC
- ? VectorStoreICDescriptor(isolate)
- : StoreDescriptor(isolate);
+ CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
+ ? VectorStoreICDescriptor(isolate)
+ : StoreDescriptor(isolate);
return Callable(StoreIC::initialize_stub_in_optimized_code(
isolate, language_mode, initialization_state),
descriptor);
@@ -102,8 +100,7 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
return Callable(
KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- FLAG_vector_stores ? VectorStoreICTrampolineDescriptor(isolate)
- : StoreDescriptor(isolate));
+ VectorStoreICTrampolineDescriptor(isolate));
}
@@ -111,10 +108,9 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
Callable CodeFactory::KeyedStoreICInOptimizedCode(
Isolate* isolate, LanguageMode language_mode,
InlineCacheState initialization_state) {
- CallInterfaceDescriptor descriptor =
- FLAG_vector_stores && initialization_state != MEGAMORPHIC
- ? VectorStoreICDescriptor(isolate)
- : StoreDescriptor(isolate);
+ CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
+ ? VectorStoreICDescriptor(isolate)
+ : StoreDescriptor(isolate);
return Callable(KeyedStoreIC::initialize_stub_in_optimized_code(
isolate, language_mode, initialization_state),
descriptor);
@@ -152,11 +148,9 @@ Callable CodeFactory::InstanceOf(Isolate* isolate) {
// static
-Callable CodeFactory::ToBoolean(Isolate* isolate,
- ToBooleanStub::ResultMode mode,
- ToBooleanStub::Types types) {
- ToBooleanStub stub(isolate, mode, types);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::ToBoolean(Isolate* isolate) {
+ Handle<Code> code = ToBooleanStub::GetUninitialized(isolate);
+ return Callable(code, ToBooleanDescriptor(isolate));
}
@@ -203,6 +197,13 @@ Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
// static
+Callable CodeFactory::RegExpExec(Isolate* isolate) {
+ RegExpExecStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
@@ -218,6 +219,13 @@ Callable CodeFactory::StringCompare(Isolate* isolate) {
// static
+Callable CodeFactory::SubString(Isolate* isolate) {
+ SubStringStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::Typeof(Isolate* isolate) {
TypeofStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -225,6 +233,13 @@ Callable CodeFactory::Typeof(Isolate* isolate) {
// static
+Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
+ FastCloneRegExpStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::FastCloneShallowArray(Isolate* isolate) {
// TODO(mstarzinger): Thread through AllocationSiteMode at some point.
FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
@@ -267,6 +282,13 @@ Callable CodeFactory::ArgumentsAccess(Isolate* isolate,
// static
+Callable CodeFactory::RestArgumentsAccess(Isolate* isolate) {
+ RestParamAccessStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
AllocateHeapNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -309,6 +331,20 @@ Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
// static
+Callable CodeFactory::Construct(Isolate* isolate) {
+ return Callable(isolate->builtins()->Construct(),
+ ConstructTrampolineDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::ConstructFunction(Isolate* isolate) {
+ return Callable(isolate->builtins()->ConstructFunction(),
+ ConstructTrampolineDescriptor(isolate));
+}
+
+
+// static
Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate) {
return Callable(isolate->builtins()->InterpreterPushArgsAndCall(),
InterpreterPushArgsAndCallDescriptor(isolate));
@@ -323,11 +359,10 @@ Callable CodeFactory::InterpreterPushArgsAndConstruct(Isolate* isolate) {
// static
-Callable CodeFactory::InterpreterCEntry(Isolate* isolate) {
- // TODO(rmcilroy): Deal with runtime functions that return two values.
+Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to
// save fpregs too.
- CEntryStub stub(isolate, 1, kDontSaveFPRegs, kArgvInRegister);
+ CEntryStub stub(isolate, result_size, kDontSaveFPRegs, kArgvInRegister);
return Callable(stub.GetCode(), InterpreterCEntryDescriptor(isolate));
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 4775efeb89..2126790359 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -66,9 +66,7 @@ class CodeFactory final {
// code-stubs.h.
static Callable InstanceOf(Isolate* isolate);
- static Callable ToBoolean(
- Isolate* isolate, ToBooleanStub::ResultMode mode,
- ToBooleanStub::Types types = ToBooleanStub::Types());
+ static Callable ToBoolean(Isolate* isolate);
static Callable ToNumber(Isolate* isolate);
static Callable ToString(Isolate* isolate);
@@ -77,13 +75,16 @@ class CodeFactory final {
static Callable NumberToString(Isolate* isolate);
static Callable RegExpConstructResult(Isolate* isolate);
+ static Callable RegExpExec(Isolate* isolate);
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
static Callable StringCompare(Isolate* isolate);
+ static Callable SubString(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
+ static Callable FastCloneRegExp(Isolate* isolate);
static Callable FastCloneShallowArray(Isolate* isolate);
static Callable FastCloneShallowObject(Isolate* isolate, int length);
@@ -93,6 +94,7 @@ class CodeFactory final {
static Callable ArgumentsAccess(Isolate* isolate, bool is_unmapped_arguments,
bool has_duplicate_parameters);
+ static Callable RestArgumentsAccess(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable AllocateMutableHeapNumber(Isolate* isolate);
@@ -103,10 +105,12 @@ class CodeFactory final {
ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable CallFunction(
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ static Callable Construct(Isolate* isolate);
+ static Callable ConstructFunction(Isolate* isolate);
static Callable InterpreterPushArgsAndCall(Isolate* isolate);
static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
- static Callable InterpreterCEntry(Isolate* isolate);
+ static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
};
} // namespace internal
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index b2d07d9c9b..2fab578b9b 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -250,7 +250,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate(), NULL, 256);
+ MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
{
// Update the static counter each time a new code stub is generated.
@@ -432,17 +432,78 @@ Handle<Code> TypeofStub::GenerateCode() { return DoGenerateCode(this); }
template <>
+HValue* CodeStubGraphBuilder<FastCloneRegExpStub>::BuildCodeStub() {
+ HValue* closure = GetParameter(0);
+ HValue* literal_index = GetParameter(1);
+
+  // This stub is very performance sensitive; the generated code must be tuned
+  // so that it doesn't build an eager frame.
+ info()->MarkMustNotHaveEagerFrame();
+
+ HValue* literals_array = Add<HLoadNamedField>(
+ closure, nullptr, HObjectAccess::ForLiteralsPointer());
+ HInstruction* boilerplate = Add<HLoadKeyed>(
+ literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
+ NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
+
+ IfBuilder if_notundefined(this);
+ if_notundefined.IfNot<HCompareObjectEqAndBranch>(
+ boilerplate, graph()->GetConstantUndefined());
+ if_notundefined.Then();
+ {
+ int result_size =
+ JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ HValue* result =
+ Add<HAllocate>(Add<HConstant>(result_size), HType::JSObject(),
+ NOT_TENURED, JS_REGEXP_TYPE);
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForMap(),
+ Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap()));
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForPropertiesPointer(),
+ Add<HLoadNamedField>(boilerplate, nullptr,
+ HObjectAccess::ForPropertiesPointer()));
+ Add<HStoreNamedField>(
+ result, HObjectAccess::ForElementsPointer(),
+ Add<HLoadNamedField>(boilerplate, nullptr,
+ HObjectAccess::ForElementsPointer()));
+ for (int offset = JSObject::kHeaderSize; offset < result_size;
+ offset += kPointerSize) {
+ HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(offset);
+ Add<HStoreNamedField>(result, access,
+ Add<HLoadNamedField>(boilerplate, nullptr, access));
+ }
+ Push(result);
+ }
+ if_notundefined.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
+ if_notundefined.End();
+
+ return Pop();
+}
+
+
+Handle<Code> FastCloneRegExpStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
+ HValue* closure = GetParameter(0);
+ HValue* literal_index = GetParameter(1);
// This stub is very performance sensitive, the generated code must be tuned
// so that it doesn't build and eager frame.
info()->MarkMustNotHaveEagerFrame();
+ HValue* literals_array = Add<HLoadNamedField>(
+ closure, nullptr, HObjectAccess::ForLiteralsPointer());
+
HInstruction* allocation_site = Add<HLoadKeyed>(
- GetParameter(0), GetParameter(1), nullptr, nullptr, FAST_ELEMENTS,
+ literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
@@ -503,9 +564,14 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* undefined = graph()->GetConstantUndefined();
+ HValue* closure = GetParameter(0);
+ HValue* literal_index = GetParameter(1);
+
+ HValue* literals_array = Add<HLoadNamedField>(
+ closure, nullptr, HObjectAccess::ForLiteralsPointer());
HInstruction* allocation_site = Add<HLoadKeyed>(
- GetParameter(0), GetParameter(1), nullptr, nullptr, FAST_ELEMENTS,
+ literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
@@ -1077,20 +1143,6 @@ Handle<Code> StoreTransitionStub::GenerateCode() {
template <>
-HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() {
- HValue* string = BuildLoadNamedField(GetParameter(0),
- FieldIndex::ForInObjectOffset(JSValue::kValueOffset));
- return BuildLoadNamedField(string,
- FieldIndex::ForInObjectOffset(String::kLengthOffset));
-}
-
-
-Handle<Code> StringLengthStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(StoreDescriptor::kReceiverIndex),
@@ -1393,9 +1445,9 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
HValue* left = GetParameter(BinaryOpICStub::kLeft);
HValue* right = GetParameter(BinaryOpICStub::kRight);
- Type* left_type = state.GetLeftType(zone());
- Type* right_type = state.GetRightType(zone());
- Type* result_type = state.GetResultType(zone());
+ Type* left_type = state.GetLeftType();
+ Type* right_type = state.GetRightType();
+ Type* result_type = state.GetResultType();
DCHECK(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
(state.HasSideEffects() || !result_type->Is(Type::None())));
@@ -1474,9 +1526,9 @@ HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
HValue* left = GetParameter(BinaryOpWithAllocationSiteStub::kLeft);
HValue* right = GetParameter(BinaryOpWithAllocationSiteStub::kRight);
- Type* left_type = state.GetLeftType(zone());
- Type* right_type = state.GetRightType(zone());
- Type* result_type = state.GetResultType(zone());
+ Type* left_type = state.GetLeftType();
+ Type* right_type = state.GetRightType();
+ Type* result_type = state.GetResultType();
HAllocationMode allocation_mode(allocation_site);
return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
@@ -1655,31 +1707,13 @@ Handle<Code> StringAddStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
- HValue* true_value = NULL;
- HValue* false_value = NULL;
-
- switch (stub->mode()) {
- case ToBooleanStub::RESULT_AS_SMI:
- true_value = graph()->GetConstant1();
- false_value = graph()->GetConstant0();
- break;
- case ToBooleanStub::RESULT_AS_ODDBALL:
- true_value = graph()->GetConstantTrue();
- false_value = graph()->GetConstantFalse();
- break;
- case ToBooleanStub::RESULT_AS_INVERSE_ODDBALL:
- true_value = graph()->GetConstantFalse();
- false_value = graph()->GetConstantTrue();
- break;
- }
-
IfBuilder if_true(this);
if_true.If<HBranch>(GetParameter(0), stub->types());
if_true.Then();
- if_true.Return(true_value);
+ if_true.Return(graph()->GetConstantTrue());
if_true.Else();
if_true.End();
- return false_value;
+ return graph()->GetConstantFalse();
}
@@ -1835,19 +1869,29 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
HValue* context_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kContextOffset);
+ context_slot = Add<HLoadNamedField>(context_slot, nullptr,
+ HObjectAccess::ForWeakCellValue());
HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
HValue* code_object = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
+ code_object = Add<HLoadNamedField>(code_object, nullptr,
+ HObjectAccess::ForWeakCellValue());
builder->If<HCompareObjectEqAndBranch>(native_context,
context_slot);
builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
builder->And();
builder->IfNot<HCompareObjectEqAndBranch>(code_object,
- graph()->GetConstantUndefined());
+ graph()->GetConstant0());
builder->Then();
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
+ literals = Add<HLoadNamedField>(literals, nullptr,
+ HObjectAccess::ForWeakCellValue());
+ IfBuilder maybe_deopt(this);
+ maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
+ maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
+ maybe_deopt.End();
BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
@@ -1971,8 +2015,10 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* shared_code =
Add<HLoadNamedField>(optimized_map, nullptr,
HObjectAccess::ForOptimizedCodeMapSharedCode());
+ shared_code = Add<HLoadNamedField>(shared_code, nullptr,
+ HObjectAccess::ForWeakCellValue());
shared_code_check.IfNot<HCompareObjectEqAndBranch>(
- shared_code, graph()->GetConstantUndefined());
+ shared_code, graph()->GetConstant0());
shared_code_check.Then();
{
// Store the context-independent optimized code.
@@ -2071,16 +2117,15 @@ HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
context());
Add<HStoreNamedField>(function_context,
HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX),
- graph()->GetConstant0());
+ graph()->GetConstantHole());
- // Copy the global object from the previous context.
- HValue* global_object = Add<HLoadNamedField>(
+ // Copy the native context from the previous context.
+ HValue* native_context = Add<HLoadNamedField>(
context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForContextSlot(
- Context::GLOBAL_OBJECT_INDEX),
- global_object);
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
+ Add<HStoreNamedField>(function_context, HObjectAccess::ForContextSlot(
+ Context::NATIVE_CONTEXT_INDEX),
+ native_context);
// Initialize the rest of the slots to undefined.
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; ++i) {
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 61df12781b..1754288b6e 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -7,12 +7,13 @@
#include <sstream>
#include "src/bootstrapper.h"
+#include "src/compiler/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -108,7 +109,7 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
Factory* factory = isolate()->factory();
// Generate the new code.
- MacroAssembler masm(isolate(), NULL, 256);
+ MacroAssembler masm(isolate(), NULL, 256, CodeObjectRequired::kYes);
{
// Update the static counter each time a new code stub is generated.
@@ -341,11 +342,6 @@ void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
}
-void StringAddTFStub::PrintBaseName(std::ostream& os) const { // NOLINT
- os << "StringAddTFStub_" << flags() << "_" << pretenure_flag();
-}
-
-
InlineCacheState CompareICStub::GetICState() const {
CompareICState::State state = Max(left(), right());
switch (state) {
@@ -357,8 +353,8 @@ InlineCacheState CompareICStub::GetICState() const {
case CompareICState::INTERNALIZED_STRING:
case CompareICState::STRING:
case CompareICState::UNIQUE_NAME:
- case CompareICState::OBJECT:
- case CompareICState::KNOWN_OBJECT:
+ case CompareICState::RECEIVER:
+ case CompareICState::KNOWN_RECEIVER:
return MONOMORPHIC;
case CompareICState::GENERIC:
return ::v8::internal::GENERIC;
@@ -435,12 +431,12 @@ void CompareICStub::Generate(MacroAssembler* masm) {
case CompareICState::UNIQUE_NAME:
GenerateUniqueNames(masm);
break;
- case CompareICState::OBJECT:
- GenerateObjects(masm);
+ case CompareICState::RECEIVER:
+ GenerateReceivers(masm);
break;
- case CompareICState::KNOWN_OBJECT:
+ case CompareICState::KNOWN_RECEIVER:
DCHECK(*known_map_ != NULL);
- GenerateKnownObjects(masm);
+ GenerateKnownReceivers(masm);
break;
case CompareICState::GENERIC:
GenerateGeneric(masm);
@@ -473,38 +469,25 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) {
}
-namespace {
-
-Handle<JSFunction> GetFunction(Isolate* isolate, const char* name) {
- v8::ExtensionConfiguration no_extensions;
- MaybeHandle<Object> fun = Object::GetProperty(
- isolate, isolate->factory()->code_stub_exports_object(), name);
- Handle<JSFunction> function = Handle<JSFunction>::cast(fun.ToHandleChecked());
- DCHECK(!function->IsUndefined() &&
- "JavaScript implementation of stub not found");
- return function;
-}
-} // namespace
-
-
Handle<Code> TurboFanCodeStub::GenerateCode() {
- // Get the outer ("stub generator") function.
const char* name = CodeStub::MajorName(MajorKey());
- Handle<JSFunction> outer = GetFunction(isolate(), name);
- DCHECK_EQ(2, outer->shared()->length());
+ Zone zone;
+ CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
+ compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
+ GetCodeKind(), name);
+ GenerateAssembly(&assembler);
+ return assembler.GenerateCode();
+}
- // Invoke the outer function to get the stub itself.
- Factory* factory = isolate()->factory();
- Handle<Object> call_conv = factory->InternalizeUtf8String(name);
- Handle<Object> minor_key = factory->NewNumber(MinorKey());
- Handle<Object> args[] = {call_conv, minor_key};
- MaybeHandle<Object> result =
- Execution::Call(isolate(), outer, factory->undefined_value(), 2, args);
- Handle<JSFunction> inner = Handle<JSFunction>::cast(result.ToHandleChecked());
- // Just to make sure nobody calls this...
- inner->set_code(isolate()->builtins()->builtin(Builtins::kIllegal));
- return Compiler::GetStubCode(inner, this).ToHandleChecked();
+void StringLengthStub::GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const {
+ compiler::Node* value = assembler->Parameter(0);
+ compiler::Node* string =
+ assembler->LoadObjectField(value, JSValue::kValueOffset);
+ compiler::Node* result =
+ assembler->LoadObjectField(string, String::kLengthOffset);
+ assembler->Return(result);
}
@@ -640,8 +623,7 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
return LoadWithVectorDescriptor(isolate());
} else {
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return FLAG_vector_stores ? VectorStoreICDescriptor(isolate())
- : StoreDescriptor(isolate());
+ return VectorStoreICDescriptor(isolate());
}
}
@@ -667,19 +649,13 @@ void ToObjectStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor()
const {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor(isolate());
- }
- return StoreTransitionDescriptor(isolate());
+ return VectorStoreTransitionDescriptor(isolate());
}
CallInterfaceDescriptor
ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor(isolate());
- }
- return StoreTransitionDescriptor(isolate());
+ return VectorStoreTransitionDescriptor(isolate());
}
@@ -701,6 +677,13 @@ void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
}
+void FastCloneRegExpStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ FastCloneRegExpDescriptor call_descriptor(isolate());
+ descriptor->Initialize(
+ Runtime::FunctionForId(Runtime::kCreateRegExpLiteral)->entry);
+}
+
+
void FastCloneShallowArrayStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
FastCloneShallowArrayDescriptor call_descriptor(isolate());
@@ -855,6 +838,9 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
+void RestParamAccessStub::Generate(MacroAssembler* masm) { GenerateNew(masm); }
+
+
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type()) {
case READ_ELEMENT:
@@ -893,9 +879,8 @@ void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
}
-void CallConstructStub::PrintName(std::ostream& os) const { // NOLINT
- os << "CallConstructStub";
- if (RecordCallTarget()) os << "_Recording";
+void RestParamAccessStub::PrintName(std::ostream& os) const { // NOLINT
+ os << "RestParamAccessStub_";
}
@@ -975,7 +960,7 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
} else if (object->IsSmi()) {
Add(SMI);
return Smi::cast(*object)->value() != 0;
- } else if (object->IsSpecObject()) {
+ } else if (object->IsJSReceiver()) {
Add(SPEC_OBJECT);
return !object->IsUndetectableObject();
} else if (object->IsString()) {
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index d69e9263e1..21e21356bb 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/codegen.h"
+#include "src/compiler/code-stub-assembler.h"
#include "src/globals.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
@@ -43,8 +44,8 @@ namespace internal {
V(MathPow) \
V(ProfileEntryHook) \
V(RecordWrite) \
+ V(RestParamAccess) \
V(RegExpExec) \
- V(StoreArrayLiteralElement) \
V(StoreBufferOverflow) \
V(StoreElement) \
V(StringCompare) \
@@ -71,6 +72,7 @@ namespace internal {
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(ElementsTransitionAndStore) \
+ V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(FastNewClosure) \
@@ -96,10 +98,7 @@ namespace internal {
V(KeyedLoadIC) \
V(LoadIC) \
/* TurboFanCodeStubs */ \
- V(StringLengthTF) \
- V(StringAddTF) \
- /* TurboFanICs */ \
- V(MathFloor) \
+ V(StringLength) \
/* IC Handler stubs */ \
V(ArrayBufferViewLoadField) \
V(LoadConstant) \
@@ -109,8 +108,7 @@ namespace internal {
V(KeyedStoreSloppyArguments) \
V(StoreField) \
V(StoreGlobal) \
- V(StoreTransition) \
- V(StringLength)
+ V(StoreTransition)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -349,19 +347,6 @@ class CodeStub BASE_EMBEDDED {
}; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_IC(NAME, SUPER, DESC) \
- public: \
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- if (GetCallMode() == CALL_FROM_OPTIMIZED_CODE) { \
- return DESC##CallFromOptimizedCodeDescriptor(isolate()); \
- } else { \
- return DESC##CallFromUnoptimizedCodeDescriptor(isolate()); \
- } \
- }; \
- \
- protected: \
- DEFINE_CODE_STUB(NAME, SUPER)
-
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
public: \
Handle<Code> GenerateCode() override; \
@@ -550,35 +535,11 @@ class TurboFanCodeStub : public CodeStub {
protected:
explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
- private:
- DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
-};
-
-
-class TurboFanIC : public TurboFanCodeStub {
- public:
- enum CallMode { CALL_FROM_UNOPTIMIZED_CODE, CALL_FROM_OPTIMIZED_CODE };
-
- protected:
- explicit TurboFanIC(Isolate* isolate, CallMode mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = CallModeBits::encode(mode);
- }
-
- CallMode GetCallMode() const { return CallModeBits::decode(minor_key_); }
-
- void set_sub_minor_key(uint32_t key) {
- minor_key_ = SubMinorKeyBits::update(minor_key_, key);
- }
-
- uint32_t sub_minor_key() const { return SubMinorKeyBits::decode(minor_key_); }
-
- static const int kSubMinorKeyBits = kStubMinorKeyBits - 1;
+ virtual void GenerateAssembly(
+ compiler::CodeStubAssembler* assembler) const = 0;
private:
- class CallModeBits : public BitField<CallMode, 0, 1> {};
- class SubMinorKeyBits : public BitField<int, 1, kSubMinorKeyBits> {};
- DEFINE_CODE_STUB_BASE(TurboFanIC, TurboFanCodeStub);
+ DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
};
@@ -649,25 +610,18 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
-class MathFloorStub : public TurboFanIC {
- public:
- explicit MathFloorStub(Isolate* isolate, TurboFanIC::CallMode mode)
- : TurboFanIC(isolate, mode) {}
- Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- DEFINE_TURBOFAN_IC(MathFloor, TurboFanIC, MathRoundVariant);
-};
-
-
-class StringLengthTFStub : public TurboFanCodeStub {
+class StringLengthStub : public TurboFanCodeStub {
public:
- explicit StringLengthTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+ explicit StringLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
InlineCacheState GetICState() const override { return MONOMORPHIC; }
ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+ void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_CODE_STUB(StringLengthTF, TurboFanCodeStub);
+ DEFINE_CODE_STUB(StringLength, TurboFanCodeStub);
};
@@ -690,34 +644,6 @@ enum StringAddFlags {
std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
-class StringAddTFStub : public TurboFanCodeStub {
- public:
- StringAddTFStub(Isolate* isolate, StringAddFlags flags,
- PretenureFlag pretenure_flag)
- : TurboFanCodeStub(isolate) {
- minor_key_ = StringAddFlagsBits::encode(flags) |
- PretenureFlagBits::encode(pretenure_flag);
- }
-
- StringAddFlags flags() const {
- return StringAddFlagsBits::decode(MinorKey());
- }
-
- PretenureFlag pretenure_flag() const {
- return PretenureFlagBits::decode(MinorKey());
- }
-
- private:
- class StringAddFlagsBits : public BitField<StringAddFlags, 0, 3> {};
- class PretenureFlagBits : public BitField<PretenureFlag, 3, 1> {};
-
- void PrintBaseName(std::ostream& os) const override; // NOLINT
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
- DEFINE_CODE_STUB(StringAddTF, TurboFanCodeStub);
-};
-
-
class NumberToStringStub final : public HydrogenCodeStub {
public:
explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -794,6 +720,16 @@ class FastNewContextStub final : public HydrogenCodeStub {
};
+class FastCloneRegExpStub final : public HydrogenCodeStub {
+ public:
+ explicit FastCloneRegExpStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneRegExp);
+ DEFINE_HYDROGEN_CODE_STUB(FastCloneRegExp, HydrogenCodeStub);
+};
+
+
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
FastCloneShallowArrayStub(Isolate* isolate,
@@ -983,6 +919,7 @@ class CallICStub: public PlatformCodeStub {
protected:
int arg_count() const { return state().argc(); }
+ ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
CallICState state() const {
return CallICState(static_cast<ExtraICState>(minor_key_));
@@ -1169,18 +1106,6 @@ class LoadConstantStub : public HandlerStub {
};
-class StringLengthStub: public HandlerStub {
- public:
- explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {}
-
- protected:
- Code::Kind kind() const override { return Code::LOAD_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
-
- DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
-};
-
-
class StoreFieldStub : public HandlerStub {
public:
StoreFieldStub(Isolate* isolate, FieldIndex index,
@@ -1232,20 +1157,15 @@ class StoreTransitionHelper {
}
static Register SlotRegister() {
- DCHECK(FLAG_vector_stores);
return VectorStoreTransitionDescriptor::SlotRegister();
}
static Register VectorRegister() {
- DCHECK(FLAG_vector_stores);
return VectorStoreTransitionDescriptor::VectorRegister();
}
static Register MapRegister() {
- if (FLAG_vector_stores) {
- return VectorStoreTransitionDescriptor::MapRegister();
- }
- return StoreTransitionDescriptor::MapRegister();
+ return VectorStoreTransitionDescriptor::MapRegister();
}
static int ReceiverIndex() {
@@ -1263,7 +1183,6 @@ class StoreTransitionHelper {
}
static int VectorIndex() {
- DCHECK(FLAG_vector_stores);
if (HasVirtualSlotArg()) {
return VectorStoreTransitionDescriptor::kVirtualSlotVectorIndex;
}
@@ -1272,7 +1191,6 @@ class StoreTransitionHelper {
// Some platforms don't have a slot arg.
static bool HasVirtualSlotArg() {
- if (!FLAG_vector_stores) return false;
return SlotRegister().is(no_reg);
}
};
@@ -1703,9 +1621,9 @@ class CompareICStub : public PlatformCodeStub {
void GenerateInternalizedStrings(MacroAssembler* masm);
void GenerateStrings(MacroAssembler* masm);
void GenerateUniqueNames(MacroAssembler* masm);
- void GenerateObjects(MacroAssembler* masm);
+ void GenerateReceivers(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
- void GenerateKnownObjects(MacroAssembler* masm);
+ void GenerateKnownReceivers(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op() == Token::EQ_STRICT; }
@@ -1714,7 +1632,7 @@ class CompareICStub : public PlatformCodeStub {
void AddToSpecialCache(Handle<Code> new_object) override;
bool FindCodeInSpecialCache(Code** code_out) override;
bool UseSpecialCache() override {
- return state() == CompareICState::KNOWN_OBJECT;
+ return state() == CompareICState::KNOWN_RECEIVER;
}
class OpBits : public BitField<int, 0, 3> {};
@@ -1934,6 +1852,20 @@ class ArgumentsAccessStub: public PlatformCodeStub {
};
+class RestParamAccessStub : public PlatformCodeStub {
+ public:
+ explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ private:
+ void GenerateNew(MacroAssembler* masm);
+
+ void PrintName(std::ostream& os) const override; // NOLINT
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(RestParamAccess);
+ DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
+};
+
+
class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
@@ -1958,31 +1890,10 @@ class RegExpConstructResultStub final : public HydrogenCodeStub {
};
-class CallConstructStub: public PlatformCodeStub {
+// TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
+class CallConstructStub final : public PlatformCodeStub {
public:
- CallConstructStub(Isolate* isolate, CallConstructorFlags flags)
- : PlatformCodeStub(isolate) {
- minor_key_ = FlagBits::encode(flags);
- }
-
- void FinishCode(Handle<Code> code) override {
- code->set_has_function_cache(RecordCallTarget());
- }
-
- private:
- CallConstructorFlags flags() const { return FlagBits::decode(minor_key_); }
-
- bool RecordCallTarget() const {
- return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
- }
-
- bool IsSuperConstructorCall() const {
- return (flags() & SUPER_CONSTRUCTOR_CALL) != 0;
- }
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- class FlagBits : public BitField<CallConstructorFlags, 0, 2> {};
+ explicit CallConstructStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
@@ -2457,6 +2368,7 @@ class ScriptContextFieldStub : public HandlerStub {
const ScriptContextTable::LookupResult* lookup_result)
: HandlerStub(isolate) {
DCHECK(Accepted(lookup_result));
+ STATIC_ASSERT(kContextIndexBits + kSlotIndexBits <= kSubMinorKeyBits);
set_sub_minor_key(ContextIndexBits::encode(lookup_result->context_index) |
SlotIndexBits::encode(lookup_result->slot_index));
}
@@ -2473,7 +2385,7 @@ class ScriptContextFieldStub : public HandlerStub {
}
private:
- static const int kContextIndexBits = 13;
+ static const int kContextIndexBits = 9;
static const int kSlotIndexBits = 13;
class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
class SlotIndexBits
@@ -2566,10 +2478,7 @@ class StoreFastElementStub : public HydrogenCodeStub {
}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (FLAG_vector_stores) {
- return VectorStoreICDescriptor(isolate());
- }
- return StoreDescriptor(isolate());
+ return VectorStoreICDescriptor(isolate());
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
@@ -2820,10 +2729,7 @@ class StoreElementStub : public PlatformCodeStub {
}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (FLAG_vector_stores) {
- return VectorStoreICDescriptor(isolate());
- }
- return StoreDescriptor(isolate());
+ return VectorStoreICDescriptor(isolate());
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
@@ -2854,12 +2760,6 @@ class ToBooleanStub: public HydrogenCodeStub {
NUMBER_OF_TYPES
};
- enum ResultMode {
- RESULT_AS_SMI, // For Smi(1) on truthy value, Smi(0) otherwise.
- RESULT_AS_ODDBALL, // For {true} on truthy value, {false} otherwise.
- RESULT_AS_INVERSE_ODDBALL // For {false} on truthy value, {true} otherwise.
- };
-
// At most 16 different types can be distinguished, because the Code object
// only has room for two bytes to hold a set of these types. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 16);
@@ -2879,21 +2779,13 @@ class ToBooleanStub: public HydrogenCodeStub {
static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
};
- ToBooleanStub(Isolate* isolate, ResultMode mode, Types types = Types())
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(types.ToIntegral()) |
- ResultModeBits::encode(mode));
- }
-
ToBooleanStub(Isolate* isolate, ExtraICState state)
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)) |
- ResultModeBits::encode(RESULT_AS_SMI));
+ set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
}
bool UpdateStatus(Handle<Object> object);
Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
- ResultMode mode() const { return ResultModeBits::decode(sub_minor_key()); }
Code::Kind GetCodeKind() const override { return Code::TO_BOOLEAN_IC; }
void PrintState(std::ostream& os) const override; // NOLINT
@@ -2917,11 +2809,9 @@ class ToBooleanStub: public HydrogenCodeStub {
private:
ToBooleanStub(Isolate* isolate, InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {
- set_sub_minor_key(ResultModeBits::encode(RESULT_AS_SMI));
}
class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
- class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
@@ -2961,16 +2851,6 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
};
-class StoreArrayLiteralElementStub : public PlatformCodeStub {
- public:
- explicit StoreArrayLiteralElementStub(Isolate* isolate)
- : PlatformCodeStub(isolate) { }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreArrayLiteralElement);
- DEFINE_PLATFORM_CODE_STUB(StoreArrayLiteralElement, PlatformCodeStub);
-};
-
-
class StubFailureTrampolineStub : public PlatformCodeStub {
public:
StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode)
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 1e806d2ae5..a57cbb3a5e 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -7,37 +7,19 @@
#if defined(V8_OS_AIX)
#include <fenv.h> // NOLINT(build/c++11)
#endif
+#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
-#include "src/parser.h"
-#include "src/prettyprinter.h"
+#include "src/parsing/parser.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/rewriter.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
-#if defined(_WIN64)
-typedef double (*ModuloFunction)(double, double);
-static ModuloFunction modulo_function = NULL;
-// Defined in codegen-x64.cc.
-ModuloFunction CreateModuloFunction();
-
-void init_modulo_function() {
- modulo_function = CreateModuloFunction();
-}
-
-
-double modulo(double x, double y) {
- // Note: here we rely on dependent reads being ordered. This is true
- // on all architectures we currently support.
- return (*modulo_function)(x, y);
-}
-#elif defined(_WIN32)
-
+#if defined(V8_OS_WIN)
double modulo(double x, double y) {
// Workaround MS fmod bugs. ECMA-262 says:
// dividend is finite and divisor is an infinity => result equals dividend
@@ -61,31 +43,29 @@ double modulo(double x, double y) {
return std::fmod(x, y);
#endif
}
-#endif // defined(_WIN64)
-
-
-#define UNARY_MATH_FUNCTION(name, generator) \
-static UnaryMathFunction fast_##name##_function = NULL; \
-void init_fast_##name##_function() { \
- fast_##name##_function = generator; \
-} \
-double fast_##name(double x) { \
- return (*fast_##name##_function)(x); \
-}
+#endif // defined(V8_OS_WIN)
+
+
+#define UNARY_MATH_FUNCTION(name, generator) \
+ static UnaryMathFunctionWithIsolate fast_##name##_function = nullptr; \
+ double std_##name(double x, Isolate* isolate) { return std::name(x); } \
+ void init_fast_##name##_function(Isolate* isolate) { \
+ if (FLAG_fast_math) fast_##name##_function = generator(isolate); \
+ if (!fast_##name##_function) fast_##name##_function = std_##name; \
+ } \
+ void lazily_initialize_fast_##name(Isolate* isolate) { \
+ if (!fast_##name##_function) init_fast_##name##_function(isolate); \
+ } \
+ double fast_##name(double x, Isolate* isolate) { \
+ return (*fast_##name##_function)(x, isolate); \
+ }
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
+UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
+UNARY_MATH_FUNCTION(exp, CreateExpFunction)
#undef UNARY_MATH_FUNCTION
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
#define __ ACCESS_MASM(masm_)
#ifdef DEBUG
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 7019d3d106..512cbfc40a 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -89,22 +89,19 @@ class CodeGenerator {
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
-typedef double (*UnaryMathFunction)(double x);
+typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
-UnaryMathFunction CreateExpFunction();
-UnaryMathFunction CreateSqrtFunction();
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate);
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
double modulo(double x, double y);
// Custom implementation of math functions.
-double fast_exp(double input);
-double fast_sqrt(double input);
-#ifdef _WIN64
-void init_modulo_function();
-#endif
-void lazily_initialize_fast_exp();
-void init_fast_sqrt_function();
+double fast_exp(double input, Isolate* isolate);
+double fast_sqrt(double input, Isolate* isolate);
+void lazily_initialize_fast_exp(Isolate* isolate);
+void lazily_initialize_fast_sqrt(Isolate* isolate);
class ElementsTransitionGenerator : public AllStatic {
@@ -145,7 +142,7 @@ static const int kNumberDictionaryProbes = 4;
class CodeAgingHelper {
public:
- CodeAgingHelper();
+ explicit CodeAgingHelper(Isolate* isolate);
uint32_t young_sequence_length() const { return young_sequence_.length(); }
bool IsYoung(byte* candidate) const {
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index c9c194f19f..96b3859e9a 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -124,6 +124,20 @@ void CompilationDependencies::AssumeMapStable(Handle<Map> map) {
}
+void CompilationDependencies::AssumePrototypeMapsStable(
+ Handle<Map> map, MaybeHandle<JSReceiver> prototype) {
+ for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
+ Handle<JSReceiver> const current =
+ PrototypeIterator::GetCurrent<JSReceiver>(i);
+ AssumeMapStable(handle(current->map()));
+ Handle<JSReceiver> last;
+ if (prototype.ToHandle(&last) && last.is_identical_to(current)) {
+ break;
+ }
+ }
+}
+
+
void CompilationDependencies::AssumeTransitionStable(
Handle<AllocationSite> site) {
// Do nothing if the object doesn't have any useful element transitions left.
@@ -135,5 +149,6 @@ void CompilationDependencies::AssumeTransitionStable(
Insert(DependentCode::kAllocationSiteTransitionChangedGroup, site);
}
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index ca09ef5e11..a40eb74801 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -32,6 +32,9 @@ class CompilationDependencies {
Insert(DependentCode::kFieldTypeGroup, map);
}
void AssumeMapStable(Handle<Map> map);
+ void AssumePrototypeMapsStable(
+ Handle<Map> map,
+ MaybeHandle<JSReceiver> prototype = MaybeHandle<JSReceiver>());
void AssumeMapNotDeprecated(Handle<Map> map);
void AssumePropertyCell(Handle<PropertyCell> cell) {
Insert(DependentCode::kPropertyCellChangedGroup, cell);
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index d55bf33bab..307b3b0e42 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -6,7 +6,10 @@
#include <algorithm>
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/prettyprinter.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
@@ -23,14 +26,11 @@
#include "src/isolate-inl.h"
#include "src/log-inl.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/prettyprinter.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/rewriter.h"
#include "src/runtime-profiler.h"
-#include "src/scanner-character-streams.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
#include "src/snapshot/serialize.h"
#include "src/vm-state-inl.h"
@@ -178,7 +178,6 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
parameter_count_(0),
optimization_id_(-1),
osr_expr_stack_height_(0),
- function_type_(nullptr),
debug_name_(debug_name) {
// Parameter count is number of stack parameters.
if (code_stub_ != NULL) {
@@ -205,14 +204,6 @@ CompilationInfo::~CompilationInfo() {
}
-void CompilationInfo::SetStub(CodeStub* code_stub) {
- SetMode(STUB);
- code_stub_ = code_stub;
- debug_name_ = CodeStub::MajorName(code_stub->MajorKey());
- set_output_code_kind(code_stub->GetCodeKind());
-}
-
-
int CompilationInfo::num_parameters() const {
return has_scope() ? scope()->num_parameters() : parameter_count_;
}
@@ -420,12 +411,29 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
DCHECK(info()->shared_info()->has_deoptimization_support());
DCHECK(!info()->is_first_compile());
- // Check the enabling conditions for TurboFan.
+ bool optimization_disabled = info()->shared_info()->optimization_disabled();
bool dont_crankshaft = info()->shared_info()->dont_crankshaft();
- if (((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
- (dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0) ||
- info()->closure()->PassesFilter(FLAG_turbo_filter)) &&
- (FLAG_turbo_osr || !info()->is_osr())) {
+
+ // Check the enabling conditions for Turbofan.
+ // 1. "use asm" code.
+ bool is_turbofanable_asm = FLAG_turbo_asm &&
+ info()->shared_info()->asm_function() &&
+ !optimization_disabled;
+
+ // 2. Fallback for features unsupported by Crankshaft.
+ bool is_unsupported_by_crankshaft_but_turbofanable =
+ dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+ !optimization_disabled;
+
+ // 3. Explicitly enabled by the command-line filter.
+ bool passes_turbo_filter = info()->closure()->PassesFilter(FLAG_turbo_filter);
+
+ // If this is OSR request, OSR must be enabled by Turbofan.
+ bool passes_osr_test = FLAG_turbo_osr || !info()->is_osr();
+
+ if ((is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
+ passes_turbo_filter) &&
+ passes_osr_test) {
// Use TurboFan for the compilation.
if (FLAG_trace_opt) {
OFStream os(stdout);
@@ -553,6 +561,61 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
}
+namespace {
+
+void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
+ Handle<Code> code) {
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ Heap* heap = isolate->heap();
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep = DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+}
+
+
+void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+ // TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
+ Isolate* const isolate = code->GetIsolate();
+ DCHECK(code->is_optimized_code());
+ std::vector<Handle<Map>> maps;
+ std::vector<Handle<HeapObject>> objects;
+ {
+ DisallowHeapAllocation no_gc;
+ int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
+ objects.push_back(handle(it.rinfo()->target_cell(), isolate));
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ code->IsWeakObjectInOptimizedCode(
+ it.rinfo()->target_object())) {
+ Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
+ isolate);
+ if (object->IsMap()) {
+ maps.push_back(Handle<Map>::cast(object));
+ } else {
+ objects.push_back(object);
+ }
+ }
+ }
+ }
+ for (Handle<Map> map : maps) {
+ if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
+ isolate->heap()->AddRetainedMap(map);
+ }
+ Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
+ }
+ for (Handle<HeapObject> object : objects) {
+ AddWeakObjectToCodeDependency(isolate, object, code);
+ }
+ code->set_can_have_weak_objects(true);
+}
+
+} // namespace
+
+
OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
DCHECK(last_status() == SUCCEEDED);
// TODO(turbofan): Currently everything is done in the first phase.
@@ -561,6 +624,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
if (info()->is_deoptimization_enabled()) {
info()->parse_info()->context()->native_context()->AddOptimizedCode(
*info()->code());
+ RegisterWeakObjectsInOptimizedCode(info()->code());
}
RecordOptimizationStats();
return last_status();
@@ -585,6 +649,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
}
return SetLastStatus(BAILED_OUT);
}
+ RegisterWeakObjectsInOptimizedCode(optimized_code);
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
@@ -706,8 +771,6 @@ static bool CompileUnoptimizedCode(CompilationInfo* info) {
// TODO(rmcilroy): Remove this temporary work-around when ignition supports
// catch and eval.
static bool IgnitionShouldFallbackToFullCodeGen(Scope* scope) {
- if (!FLAG_ignition_fallback_on_eval_and_catch) return false;
-
if (scope->is_eval_scope() || scope->is_catch_scope() ||
scope->calls_eval()) {
return true;
@@ -719,21 +782,46 @@ static bool IgnitionShouldFallbackToFullCodeGen(Scope* scope) {
}
-static bool GenerateBytecode(CompilationInfo* info) {
- DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- bool success = false;
- if (Compiler::Analyze(info->parse_info())) {
- if (IgnitionShouldFallbackToFullCodeGen(info->scope())) {
- success = FullCodeGenerator::MakeCode(info);
- } else {
- success = interpreter::Interpreter::MakeBytecode(info);
- }
+static bool UseIgnition(CompilationInfo* info) {
+ // Cannot use Ignition when the {function_data} is already used.
+ if (info->has_shared_info() && info->shared_info()->HasBuiltinFunctionId()) {
+ return false;
+ }
+
+ // Checks whether the scope chain is supported.
+ if (FLAG_ignition_fallback_on_eval_and_catch &&
+ IgnitionShouldFallbackToFullCodeGen(info->scope())) {
+ return false;
+ }
+
+ // Checks whether top level functions should be passed by the filter.
+ if (info->closure().is_null()) {
+ Vector<const char> filter = CStrVector(FLAG_ignition_filter);
+ return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
}
- if (!success) {
+
+ // Finally respect the filter.
+ return info->closure()->PassesFilter(FLAG_ignition_filter);
+}
+
+
+static bool GenerateBaselineCode(CompilationInfo* info) {
+ if (FLAG_ignition && UseIgnition(info)) {
+ return interpreter::Interpreter::MakeBytecode(info);
+ } else {
+ return FullCodeGenerator::MakeCode(info);
+ }
+}
+
+
+static bool CompileBaselineCode(CompilationInfo* info) {
+ DCHECK(AllowCompilation::IsAllowed(info->isolate()));
+ if (!Compiler::Analyze(info->parse_info()) || !GenerateBaselineCode(info)) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
}
- return success;
+ return true;
}
@@ -746,19 +834,13 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
FunctionLiteral* lit = info->literal();
- shared->set_language_mode(lit->language_mode());
+ DCHECK_EQ(shared->language_mode(), lit->language_mode());
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
MaybeDisableOptimization(shared, lit->dont_optimize_reason());
- if (FLAG_ignition && !shared->HasBuiltinFunctionId() &&
- info->closure()->PassesFilter(FLAG_ignition_filter)) {
- // Compile bytecode for the interpreter.
- if (!GenerateBytecode(info)) return MaybeHandle<Code>();
- } else {
- // Compile unoptimized code.
- if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
-
- CHECK_EQ(Code::FUNCTION, info->code()->kind());
+ // Compile either unoptimized code or bytecode for the interpreter.
+ if (!CompileBaselineCode(info)) return MaybeHandle<Code>();
+ if (info->code()->kind() == Code::FUNCTION) { // Only for full code.
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
}
@@ -807,11 +889,8 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
- // Do not cache bound functions.
- Handle<JSFunction> function = info->closure();
- if (function->shared()->bound()) return;
-
// Cache optimized context-specific code.
+ Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<LiteralsArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
@@ -998,23 +1077,6 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
}
-MaybeHandle<Code> Compiler::GetStubCode(Handle<JSFunction> function,
- CodeStub* stub) {
- // Build a "hybrid" CompilationInfo for a JSFunction/CodeStub pair.
- Zone zone;
- ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
- info.SetFunctionType(stub->GetCallInterfaceDescriptor().GetFunctionType());
- info.MarkAsFunctionContextSpecializing();
- info.MarkAsDeoptimizationEnabled();
- info.SetStub(stub);
-
- // Run a "mini pipeline", extracted from compiler.cc.
- if (!ParseAndAnalyze(&parse_info)) return MaybeHandle<Code>();
- return compiler::Pipeline(&info).GenerateCode();
-}
-
-
bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
@@ -1162,6 +1224,7 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
// Get rid of old list of shared function infos.
info.MarkAsFirstCompile();
+ info.MarkAsDebug();
info.parse_info()->set_global();
if (!Parser::ParseStatic(info.parse_info())) return;
@@ -1176,13 +1239,6 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
}
-// Checks whether top level functions should be passed by {raw_filter}.
-static bool TopLevelFunctionPassesFilter(const char* raw_filter) {
- Vector<const char> filter = CStrVector(raw_filter);
- return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
-}
-
-
static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
@@ -1246,14 +1302,8 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
HistogramTimerScope timer(rate);
// Compile the code.
- if (FLAG_ignition && TopLevelFunctionPassesFilter(FLAG_ignition_filter)) {
- if (!GenerateBytecode(info)) {
- return Handle<SharedFunctionInfo>::null();
- }
- } else {
- if (!CompileUnoptimizedCode(info)) {
- return Handle<SharedFunctionInfo>::null();
- }
+ if (!CompileBaselineCode(info)) {
+ return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
@@ -1598,9 +1648,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// called.
info.EnsureFeedbackVector();
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
- } else if (Renumber(info.parse_info()) &&
- FullCodeGenerator::MakeCode(&info)) {
- // MakeCode will ensure that the feedback vector is present and
+ } else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
+ // Code generation will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
scope_info = ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
@@ -1618,6 +1667,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
isolate->factory()->NewSharedFunctionInfo(
literal->name(), literal->materialized_literal_count(),
literal->kind(), info.code(), scope_info, info.feedback_vector());
+ if (info.has_bytecode_array()) {
+ DCHECK(result->function_data()->IsUndefined());
+ result->set_function_data(*info.bytecode_array());
+ }
SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
SharedFunctionInfo::SetScript(result, script);
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index d831ac5fd8..9b439397c3 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
#include "src/signature.h"
@@ -268,9 +268,12 @@ class CompilationInfo {
bool is_first_compile() const { return GetFlag(kFirstCompile); }
- bool IsCodePreAgingActive() const {
+ bool GeneratePreagedPrologue() const {
+ // Generate a pre-aged prologue if we are optimizing for size, which
+ // will make code flushing more aggressive. Only apply to Code::FUNCTION,
+ // since StaticMarkingVisitor::IsFlushable only flushes proper functions.
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
- !is_debug();
+ !is_debug() && output_code_kind_ == Code::FUNCTION;
}
void EnsureFeedbackVector();
@@ -288,13 +291,18 @@ class CompilationInfo {
(FLAG_trap_on_stub_deopt && IsStub());
}
- bool has_global_object() const {
- return !closure().is_null() &&
- (closure()->context()->global_object() != NULL);
+ bool has_native_context() const {
+ return !closure().is_null() && (closure()->native_context() != nullptr);
}
+ Context* native_context() const {
+ return has_native_context() ? closure()->native_context() : nullptr;
+ }
+
+ bool has_global_object() const { return has_native_context(); }
+
JSGlobalObject* global_object() const {
- return has_global_object() ? closure()->context()->global_object() : NULL;
+ return has_global_object() ? native_context()->global_object() : nullptr;
}
// Accessors for the different compilation modes.
@@ -309,13 +317,6 @@ class CompilationInfo {
set_output_code_kind(Code::OPTIMIZED_FUNCTION);
}
- void SetFunctionType(Type::FunctionType* function_type) {
- function_type_ = function_type;
- }
- Type::FunctionType* function_type() const { return function_type_; }
-
- void SetStub(CodeStub* code_stub);
-
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return GetFlag(kDeoptimizationSupport);
@@ -520,8 +521,6 @@ class CompilationInfo {
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
- Type::FunctionType* function_type_;
-
const char* debug_name_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -646,8 +645,6 @@ class Compiler : public AllStatic {
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
Handle<JSFunction> function);
- MUST_USE_RESULT static MaybeHandle<Code> GetStubCode(
- Handle<JSFunction> function, CodeStub* stub);
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileDebugCode(Handle<JSFunction> function);
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index ac0be79225..ebd2789151 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -17,7 +17,8 @@ namespace compiler {
// static
FieldAccess AccessBuilder::ForMap() {
FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
- MaybeHandle<Name>(), Type::Any(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
return access;
}
@@ -26,7 +27,7 @@ FieldAccess AccessBuilder::ForMap() {
FieldAccess AccessBuilder::ForHeapNumberValue() {
FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
MaybeHandle<Name>(), TypeCache().Get().kFloat64,
- kMachFloat64};
+ MachineType::Float64()};
return access;
}
@@ -34,7 +35,8 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
return access;
}
@@ -42,7 +44,18 @@ FieldAccess AccessBuilder::ForJSObjectProperties() {
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
+ int index) {
+ int const offset = map->GetInObjectPropertyOffset(index);
+ FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged()};
return access;
}
@@ -50,7 +63,8 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachAnyTagged};
+ MaybeHandle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
return access;
}
@@ -58,7 +72,22 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
- Handle<Name>(), Type::Any(), kMachAnyTagged};
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase, JSArray::kLengthOffset, Handle<Name>(),
+ type_cache.kJSArrayLengthType,
+ MachineType::AnyTagged()};
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ access.type = type_cache.kFixedDoubleArrayLengthType;
+ } else if (IsFastElementsKind(elements_kind)) {
+ access.type = type_cache.kFixedArrayLengthType;
+ }
return access;
}
@@ -66,25 +95,80 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
- MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
+ MaybeHandle<Name>(), Type::UntaggedPointer(),
+ MachineType::Pointer()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
+ MaybeHandle<Name>(), TypeCache::Get().kInt8,
+ MachineType::Int8()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
+ FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(), Type::TaggedPointer(),
+ MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- FieldAccess access = {kTaggedBase,
- JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(), Type::Number(), kMachAnyTagged};
+ FieldAccess access = {
+ kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), Type::Number(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSIteratorResultDone() {
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSIteratorResultValue() {
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
+ MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSRegExpFlags() {
+ FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
+ MaybeHandle<Name>(), Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSRegExpSource() {
+ FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
+ MaybeHandle<Name>(), Type::Tagged(),
+ MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForFixedArrayLength() {
- FieldAccess access = {kTaggedBase, FixedArray::kLengthOffset,
- MaybeHandle<Name>(),
- TypeCache::Get().kFixedArrayLengthType, kMachAnyTagged};
+ FieldAccess access = {
+ kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
+ TypeCache::Get().kFixedArrayLengthType, MachineType::AnyTagged()};
return access;
}
@@ -92,16 +176,25 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
- Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ Handle<Name>(), Type::TaggedPointer(),
+ MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- FieldAccess access = {kTaggedBase,
- DescriptorArray::kEnumCacheBridgeCacheOffset,
- Handle<Name>(), Type::TaggedPointer(), kMachAnyTagged};
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForMapBitField() {
+ FieldAccess access = {kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
+ TypeCache::Get().kUint8, MachineType::Uint8()};
return access;
}
@@ -109,7 +202,7 @@ FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
// static
FieldAccess AccessBuilder::ForMapBitField3() {
FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- TypeCache::Get().kInt32, kMachInt32};
+ TypeCache::Get().kInt32, MachineType::Int32()};
return access;
}
@@ -117,7 +210,7 @@ FieldAccess AccessBuilder::ForMapBitField3() {
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
- Type::TaggedPointer(), kMachAnyTagged};
+ Type::TaggedPointer(), MachineType::AnyTagged()};
return access;
}
@@ -125,7 +218,7 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- TypeCache::Get().kUint8, kMachUint8};
+ TypeCache::Get().kUint8, MachineType::Uint8()};
return access;
}
@@ -133,7 +226,7 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
// static
FieldAccess AccessBuilder::ForMapPrototype() {
FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
- Type::TaggedPointer(), kMachAnyTagged};
+ Type::TaggedPointer(), MachineType::AnyTagged()};
return access;
}
@@ -141,7 +234,8 @@ FieldAccess AccessBuilder::ForMapPrototype() {
// static
FieldAccess AccessBuilder::ForStringLength() {
FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
- TypeCache::Get().kStringLengthType, kMachAnyTagged};
+ TypeCache::Get().kStringLengthType,
+ MachineType::AnyTagged()};
return access;
}
@@ -149,7 +243,8 @@ FieldAccess AccessBuilder::ForStringLength() {
// static
FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
- Handle<Name>(), Type::Receiver(), kMachAnyTagged};
+ Handle<Name>(), Type::Receiver(),
+ MachineType::AnyTagged()};
return access;
}
@@ -157,7 +252,8 @@ FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
// static
FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
- Handle<Name>(), Type::Internal(), kMachAnyTagged};
+ Handle<Name>(), Type::Internal(),
+ MachineType::AnyTagged()};
return access;
}
@@ -165,7 +261,7 @@ FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
// static
FieldAccess AccessBuilder::ForValue() {
FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
- Type::Any(), kMachAnyTagged};
+ Type::Any(), MachineType::AnyTagged()};
return access;
}
@@ -175,7 +271,7 @@ FieldAccess AccessBuilder::ForArgumentsLength() {
int offset =
JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
return access;
}
@@ -185,7 +281,7 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
int offset =
JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
return access;
}
@@ -194,7 +290,7 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
return access;
}
@@ -205,7 +301,7 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
return access;
}
@@ -219,7 +315,7 @@ FieldAccess AccessBuilder::ForPropertyCellValue() {
// static
FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
- type, kMachAnyTagged};
+ type, MachineType::AnyTagged()};
return access;
}
@@ -227,15 +323,23 @@ FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
// static
FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
- Handle<Name>(), Type::Any(), kMachAnyTagged};
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
return access;
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- kMachAnyTagged};
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
+ MachineType::AnyTagged()};
+ return access;
+}
+
+
+// static
+ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
+ ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
+ TypeCache::Get().kFloat64, MachineType::Float64()};
return access;
}
@@ -248,68 +352,48 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
switch (type) {
case kExternalInt8Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- kMachInt8};
+ MachineType::Int8()};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- kMachUint8};
+ MachineType::Uint8()};
return access;
}
case kExternalInt16Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- kMachInt16};
+ MachineType::Int16()};
return access;
}
case kExternalUint16Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- kMachUint16};
+ MachineType::Uint16()};
return access;
}
case kExternalInt32Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- kMachInt32};
+ MachineType::Int32()};
return access;
}
case kExternalUint32Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- kMachUint32};
+ MachineType::Uint32()};
return access;
}
case kExternalFloat32Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- kMachFloat32};
+ MachineType::Float32()};
return access;
}
case kExternalFloat64Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- kMachFloat64};
- return access;
- }
- }
- UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), kMachNone};
- return access;
-}
-
-
-// static
-ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
- switch (encoding) {
- case String::ONE_BYTE_ENCODING: {
- ElementAccess access = {kTaggedBase, SeqString::kHeaderSize,
- Type::Unsigned32(), kMachUint8};
- return access;
- }
- case String::TWO_BYTE_ENCODING: {
- ElementAccess access = {kTaggedBase, SeqString::kHeaderSize,
- Type::Unsigned32(), kMachUint16};
+ MachineType::Float64()};
return access;
}
}
UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), kMachNone};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None()};
return access;
}
@@ -317,23 +401,7 @@ ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
// static
FieldAccess AccessBuilder::ForStatsCounter() {
FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
- TypeCache::Get().kInt32, kMachInt32};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForFrameCallerFramePtr() {
- FieldAccess access = {kUntaggedBase, StandardFrameConstants::kCallerFPOffset,
- MaybeHandle<Name>(), Type::Internal(), kMachPtr};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForFrameMarker() {
- FieldAccess access = {kUntaggedBase, StandardFrameConstants::kMarkerOffset,
- MaybeHandle<Name>(), Type::Tagged(), kMachAnyTagged};
+ TypeCache::Get().kInt32, MachineType::Int32()};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 125cd5f79f..8375d37600 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -31,18 +31,42 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSObject::elements() field.
static FieldAccess ForJSObjectElements();
+ // Provides access to JSObject inobject property fields.
+ static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+
// Provides access to JSFunction::context() field.
static FieldAccess ForJSFunctionContext();
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
+ // Provides access to JSArray::length() field.
+ static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
+
// Provides access to JSArrayBuffer::backing_store() field.
static FieldAccess ForJSArrayBufferBackingStore();
+ // Provides access to JSArrayBuffer::bit_field() field.
+ static FieldAccess ForJSArrayBufferBitField();
+
+ // Provides access to JSArrayBufferView::buffer() field.
+ static FieldAccess ForJSArrayBufferViewBuffer();
+
// Provides access to JSDate fields.
static FieldAccess ForJSDateField(JSDate::FieldIndex index);
+ // Provides access to JSIteratorResult::done() field.
+ static FieldAccess ForJSIteratorResultDone();
+
+ // Provides access to JSIteratorResult::value() field.
+ static FieldAccess ForJSIteratorResultValue();
+
+ // Provides access to JSRegExp::flags() field.
+ static FieldAccess ForJSRegExpFlags();
+
+ // Provides access to JSRegExp::source() field.
+ static FieldAccess ForJSRegExpSource();
+
// Provides access to FixedArray::length() field.
static FieldAccess ForFixedArrayLength();
@@ -52,6 +76,9 @@ class AccessBuilder final : public AllStatic {
// Provides access to DescriptorArray::enum_cache_bridge_cache() field.
static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
+ // Provides access to Map::bit_field() byte.
+ static FieldAccess ForMapBitField();
+
// Provides access to Map::bit_field3() field.
static FieldAccess ForMapBitField3();
@@ -96,28 +123,19 @@ class AccessBuilder final : public AllStatic {
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
+ // Provides access to FixedDoubleArray elements.
+ static ElementAccess ForFixedDoubleArrayElement();
+
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
- // Provides access to the characters of sequential strings.
- static ElementAccess ForSeqStringChar(String::Encoding encoding);
-
// ===========================================================================
// Access to global per-isolate variables (based on external reference).
// Provides access to the backing store of a StatsCounter.
static FieldAccess ForStatsCounter();
- // ===========================================================================
- // Access to activation records on the stack (based on frame pointer).
-
- // Provides access to the next frame pointer in a stack frame.
- static FieldAccess ForFrameCallerFramePtr();
-
- // Provides access to the marker in a stack frame.
- static FieldAccess ForFrameMarker();
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 218e21af0c..612170e5b1 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -19,11 +19,13 @@ namespace compiler {
namespace {
bool CanInlineElementAccess(Handle<Map> map) {
- // TODO(bmeurer): IsJSObjectMap
- // TODO(bmeurer): !map->has_dictionary_elements()
- // TODO(bmeurer): !map->has_sloppy_arguments_elements()
- return map->IsJSArrayMap() && map->has_fast_elements() &&
- !map->has_indexed_interceptor() && !map->is_access_check_needed();
+ if (!map->IsJSObjectMap()) return false;
+ if (map->is_access_check_needed()) return false;
+ if (map->has_indexed_interceptor()) return false;
+ ElementsKind const elements_kind = map->elements_kind();
+ if (IsFastElementsKind(elements_kind)) return true;
+ // TODO(bmeurer): Add support for other elements kind.
+ return false;
}
@@ -73,15 +75,24 @@ PropertyAccessInfo PropertyAccessInfo::DataConstant(
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
Type* receiver_type, FieldIndex field_index, Type* field_type,
- MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
- return PropertyAccessInfo(holder, transition_map, field_index, field_type,
- receiver_type);
+ FieldCheck field_check, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index, field_check,
+ field_type, receiver_type);
}
ElementAccessInfo::ElementAccessInfo() : receiver_type_(Type::None()) {}
+ElementAccessInfo::ElementAccessInfo(Type* receiver_type,
+ ElementsKind elements_kind,
+ MaybeHandle<JSObject> holder)
+ : elements_kind_(elements_kind),
+ holder_(holder),
+ receiver_type_(receiver_type) {}
+
+
PropertyAccessInfo::PropertyAccessInfo()
: kind_(kInvalid), receiver_type_(Type::None()), field_type_(Type::Any()) {}
@@ -106,13 +117,15 @@ PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map,
- FieldIndex field_index, Type* field_type,
+ FieldIndex field_index,
+ FieldCheck field_check, Type* field_type,
Type* receiver_type)
: kind_(kDataField),
receiver_type_(receiver_type),
transition_map_(transition_map),
holder_(holder),
field_index_(field_index),
+ field_check_(field_check),
field_type_(field_type) {}
@@ -122,7 +135,9 @@ AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
native_context_(native_context),
isolate_(native_context->GetIsolate()),
type_cache_(TypeCache::Get()),
- zone_(zone) {}
+ zone_(zone) {
+ DCHECK(native_context->IsNativeContext());
+}
bool AccessInfoFactory::ComputeElementAccessInfo(
@@ -130,9 +145,7 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
// Check if it is safe to inline element access for the {map}.
if (!CanInlineElementAccess(map)) return false;
- // TODO(bmeurer): Add support for holey elements.
- ElementsKind elements_kind = map->elements_kind();
- if (IsHoleyElementsKind(elements_kind)) return false;
+ ElementsKind const elements_kind = map->elements_kind();
// Certain (monomorphic) stores need a prototype chain check because shape
// changes could allow callbacks on elements in the chain that are not
@@ -143,6 +156,12 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
Handle<JSReceiver> prototype =
PrototypeIterator::GetCurrent<JSReceiver>(i);
if (!prototype->IsJSObject()) return false;
+ // TODO(bmeurer): We do not currently support unstable prototypes.
+ // We might want to revisit the way we handle certain keyed stores
+ // because this whole prototype chain check is essential a hack,
+ // and I'm not sure that it is correct at all with dictionaries in
+ // the prototype chain.
+ if (!prototype->map()->is_stable()) return false;
holder = Handle<JSObject>::cast(prototype);
}
}
@@ -156,15 +175,50 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
bool AccessInfoFactory::ComputeElementAccessInfos(
MapHandleList const& maps, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos) {
+ // Collect possible transition targets.
+ MapHandleList possible_transition_targets(maps.length());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
- ElementAccessInfo access_info;
- if (!ComputeElementAccessInfo(map, access_mode, &access_info)) {
- return false;
+ if (CanInlineElementAccess(map) &&
+ IsFastElementsKind(map->elements_kind()) &&
+ GetInitialFastElementsKind() != map->elements_kind()) {
+ possible_transition_targets.Add(map);
+ }
+ }
+ }
+
+ // Separate the actual receiver maps and the possible transition sources.
+ MapHandleList receiver_maps(maps.length());
+ MapTransitionList transitions(maps.length());
+ for (Handle<Map> map : maps) {
+ if (Map::TryUpdate(map).ToHandle(&map)) {
+ Handle<Map> transition_target =
+ Map::FindTransitionedMap(map, &possible_transition_targets);
+ if (transition_target.is_null()) {
+ receiver_maps.Add(map);
+ } else {
+ transitions.push_back(std::make_pair(map, transition_target));
}
- access_infos->push_back(access_info);
}
}
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ // Compute the element access information.
+ ElementAccessInfo access_info;
+ if (!ComputeElementAccessInfo(receiver_map, access_mode, &access_info)) {
+ return false;
+ }
+
+ // Collect the possible transitions for the {receiver_map}.
+ for (auto transition : transitions) {
+ if (transition.second.is_identical_to(receiver_map)) {
+ access_info.transitions().push_back(transition);
+ }
+ }
+
+ // Schedule the access information.
+ access_infos->push_back(access_info);
+ }
return true;
}
@@ -243,7 +297,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
DCHECK(field_type->Is(Type::TaggedPointer()));
}
*access_info = PropertyAccessInfo::DataField(
- Type::Class(receiver_map, zone()), field_index, field_type, holder);
+ Type::Class(receiver_map, zone()), field_index, field_type,
+ FieldCheck::kNone, holder);
return true;
} else {
// TODO(bmeurer): Add support for accessors.
@@ -347,6 +402,26 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
field_index, field_type);
return true;
}
+ // Check for special JSArrayBufferView field accessors.
+ if (Accessors::IsJSArrayBufferViewFieldAccessor(map, name, &offset)) {
+ FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
+ Type* field_type = Type::Tagged();
+ if (Name::Equals(factory()->byte_length_string(), name) ||
+ Name::Equals(factory()->byte_offset_string(), name)) {
+ // The JSArrayBufferView::byte_length and JSArrayBufferView::byte_offset
+ // properties are always numbers in the range [0, kMaxSafeInteger].
+ field_type = type_cache_.kPositiveSafeInteger;
+ } else if (map->IsJSTypedArrayMap()) {
+ DCHECK(Name::Equals(factory()->length_string(), name));
+ // The JSTypedArray::length property is always a number in the range
+ // [0, kMaxSafeInteger].
+ field_type = type_cache_.kPositiveSafeInteger;
+ }
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(map, zone()), field_index, field_type,
+ FieldCheck::kJSArrayBufferViewBufferNotNeutered);
+ return true;
+ }
return false;
}
@@ -397,9 +472,9 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
DCHECK(field_type->Is(Type::TaggedPointer()));
}
dependencies()->AssumeMapNotDeprecated(transition_map);
- *access_info =
- PropertyAccessInfo::DataField(Type::Class(map, zone()), field_index,
- field_type, holder, transition_map);
+ *access_info = PropertyAccessInfo::DataField(
+ Type::Class(map, zone()), field_index, field_type, FieldCheck::kNone,
+ holder, transition_map);
return true;
}
return false;
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 4f60552111..cae119140a 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -28,24 +28,38 @@ enum class AccessMode { kLoad, kStore };
std::ostream& operator<<(std::ostream&, AccessMode);
+// Mapping of transition source to transition target.
+typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
+
+
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
ElementAccessInfo();
ElementAccessInfo(Type* receiver_type, ElementsKind elements_kind,
- MaybeHandle<JSObject> holder)
- : elements_kind_(elements_kind),
- holder_(holder),
- receiver_type_(receiver_type) {}
+ MaybeHandle<JSObject> holder);
MaybeHandle<JSObject> holder() const { return holder_; }
ElementsKind elements_kind() const { return elements_kind_; }
Type* receiver_type() const { return receiver_type_; }
+ MapTransitionList& transitions() { return transitions_; }
+ MapTransitionList const& transitions() const { return transitions_; }
private:
ElementsKind elements_kind_;
MaybeHandle<JSObject> holder_;
Type* receiver_type_;
+ MapTransitionList transitions_;
+};
+
+
+// Additional checks that need to be perform for data field accesses.
+enum class FieldCheck : uint8_t {
+ // No additional checking needed.
+ kNone,
+ // Check that the [[ViewedArrayBuffer]] of {JSArrayBufferView}s
+ // was not neutered.
+ kJSArrayBufferViewBufferNotNeutered,
};
@@ -62,6 +76,7 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
Type* receiver_type, FieldIndex field_index, Type* field_type,
+ FieldCheck field_check = FieldCheck::kNone,
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
@@ -77,6 +92,7 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder() const { return holder_; }
MaybeHandle<Map> transition_map() const { return transition_map_; }
Handle<Object> constant() const { return constant_; }
+ FieldCheck field_check() const { return field_check_; }
FieldIndex field_index() const { return field_index_; }
Type* field_type() const { return field_type_; }
Type* receiver_type() const { return receiver_type_; }
@@ -87,7 +103,8 @@ class PropertyAccessInfo final {
Type* receiver_type);
PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
- Type* field_type, Type* receiver_type);
+ FieldCheck field_check, Type* field_type,
+ Type* receiver_type);
Kind kind_;
Type* receiver_type_;
@@ -95,6 +112,7 @@ class PropertyAccessInfo final {
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
FieldIndex field_index_;
+ FieldCheck field_check_;
Type* field_type_;
};
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 44d70dcd12..9b074b05cc 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -5,11 +5,11 @@
#include "src/compiler/code-generator.h"
#include "src/arm/macro-assembler-arm.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -147,10 +147,10 @@ class ArmOperandConverter final : public InstructionOperandConverter {
}
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -354,12 +354,29 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(sp, sp, Operand(sp_slot_delta * kPointerSize));
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ }
+ __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -382,10 +399,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -395,6 +414,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(ip);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -410,6 +430,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(ip);
RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -420,10 +441,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -434,8 +457,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -445,6 +473,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -460,13 +490,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -761,18 +794,33 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmVnegF64:
__ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintmF32:
+ __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVrintmF64:
__ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintpF32:
+ __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVrintpF64:
__ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintzF32:
+ __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVrintzF64:
__ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVrintaF64:
__ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVrintnF32:
+ __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArmVrintnF64:
+ __ vrintn(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVcvtF32F64: {
__ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -896,8 +944,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmPush:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1019,7 +1069,7 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
if (FLAG_enable_embedded_constant_pool) {
__ Push(lr, fp, pp);
// Adjust FP to point to saved FP.
@@ -1029,13 +1079,13 @@ void CodeGenerator::AssemblePrologue() {
__ mov(fp, sp);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1106,9 +1156,9 @@ void CodeGenerator::AssembleReturn() {
DwVfpRegister::from_code(last));
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ LeaveFrame(StackFrame::MANUAL);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -1124,7 +1174,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- ArmOperandConverter g(this, NULL);
+ ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1232,7 +1282,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- ArmOperandConverter g(this, NULL);
+ ArmOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 6852b69d43..401100be75 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -65,10 +65,15 @@ namespace compiler {
V(ArmVabsF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
+ V(ArmVrintmF32) \
V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
V(ArmVrintzF64) \
V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
new file mode 100644
index 0000000000..f36802ceb3
--- /dev/null
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -0,0 +1,129 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArmAdd:
+ case kArmAnd:
+ case kArmBic:
+ case kArmClz:
+ case kArmCmp:
+ case kArmCmn:
+ case kArmTst:
+ case kArmTeq:
+ case kArmOrr:
+ case kArmEor:
+ case kArmSub:
+ case kArmRsb:
+ case kArmMul:
+ case kArmMla:
+ case kArmMls:
+ case kArmSmmul:
+ case kArmSmmla:
+ case kArmUmull:
+ case kArmSdiv:
+ case kArmUdiv:
+ case kArmMov:
+ case kArmMvn:
+ case kArmBfc:
+ case kArmUbfx:
+ case kArmSxtb:
+ case kArmSxth:
+ case kArmSxtab:
+ case kArmSxtah:
+ case kArmUxtb:
+ case kArmUxth:
+ case kArmUxtab:
+ case kArmUxtah:
+ case kArmVcmpF32:
+ case kArmVaddF32:
+ case kArmVsubF32:
+ case kArmVmulF32:
+ case kArmVmlaF32:
+ case kArmVmlsF32:
+ case kArmVdivF32:
+ case kArmVabsF32:
+ case kArmVnegF32:
+ case kArmVsqrtF32:
+ case kArmVcmpF64:
+ case kArmVaddF64:
+ case kArmVsubF64:
+ case kArmVmulF64:
+ case kArmVmlaF64:
+ case kArmVmlsF64:
+ case kArmVdivF64:
+ case kArmVmodF64:
+ case kArmVabsF64:
+ case kArmVnegF64:
+ case kArmVsqrtF64:
+ case kArmVrintmF32:
+ case kArmVrintmF64:
+ case kArmVrintpF32:
+ case kArmVrintpF64:
+ case kArmVrintzF32:
+ case kArmVrintzF64:
+ case kArmVrintaF64:
+ case kArmVrintnF32:
+ case kArmVrintnF64:
+ case kArmVcvtF32F64:
+ case kArmVcvtF64F32:
+ case kArmVcvtF64S32:
+ case kArmVcvtF64U32:
+ case kArmVcvtS32F64:
+ case kArmVcvtU32F64:
+ case kArmVmovLowU32F64:
+ case kArmVmovLowF64U32:
+ case kArmVmovHighU32F64:
+ case kArmVmovHighF64U32:
+ case kArmVmovF64U32U32:
+ return kNoOpcodeFlags;
+
+ case kArmVldrF32:
+ case kArmVldrF64:
+ case kArmLdrb:
+ case kArmLdrsb:
+ case kArmLdrh:
+ case kArmLdrsh:
+ case kArmLdr:
+ return kIsLoadOperation;
+
+ case kArmVstrF32:
+ case kArmVstrF64:
+ case kArmStrb:
+ case kArmStrh:
+ case kArmStr:
+ case kArmPush:
+ case kArmPoke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 19c81262fd..f3deae7d75 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -303,32 +303,32 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kArmVldrF32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArmVldrF64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kArmLdrb : kArmLdrsb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kArmLdr;
break;
- default:
+ case MachineRepresentation::kNone: // Fall through.
+ case MachineRepresentation::kWord64:
UNREACHABLE();
return;
}
@@ -349,12 +349,12 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -383,26 +383,27 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kArmVstrF32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArmVstrF64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kArmStrb;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kArmStrh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kArmStr;
break;
- default:
+ case MachineRepresentation::kNone: // Fall through.
+ case MachineRepresentation::kWord64:
UNREACHABLE();
return;
}
@@ -419,30 +420,32 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -457,30 +460,33 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
ArmOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -1115,11 +1121,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kArmVrintmF32, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kArmVrintmF64, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kArmVrintpF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kArmVrintpF64, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kArmVrintzF32, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kArmVrintzF64, node);
}
@@ -1130,9 +1156,19 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kArmVrintnF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kArmVrintnF64, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
ArmOperandGenerator g(this);
// Prepare for C function call.
@@ -1143,18 +1179,19 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
- if (Node* input = (*arguments)[n]) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
- g.UseRegister(input));
+ g.UseRegister(input.node()));
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(*arguments)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input));
+ if (input.node() == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
}
}
}
@@ -1321,7 +1358,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
@@ -1555,9 +1592,15 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
if (CpuFeatures::IsSupported(ARMv8)) {
- flags |= MachineOperatorBuilder::kFloat64RoundDown |
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTiesAway;
+ MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 0915b7321d..d356195ecf 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -6,11 +6,11 @@
#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -33,6 +33,8 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputDoubleRegister(index);
}
+ size_t OutputCount() { return instr_->OutputCount(); }
+
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
@@ -203,10 +205,21 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
}
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
+ if (offset.from_frame_pointer()) {
+ int from_sp =
+ offset.offset() +
+ ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
+ kPointerSize);
+ // Convert FP-offsets to SP-offsets if it results in better code.
+ if (Assembler::IsImmLSUnscaled(from_sp) ||
+ Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
+ offset = FrameOffset::FromStackPointer(from_sp);
+ }
+ }
return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
offset.offset());
}
@@ -448,13 +461,26 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ Mov(jssp, fp);
- __ Pop(fp, lr);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ Drop(sp_slot_delta);
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Claim(-sp_slot_delta);
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -473,11 +499,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
+ frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -486,6 +514,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Jump(target);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -501,6 +530,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
+ frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
}
@@ -514,9 +544,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, temp);
__ Assert(eq, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(x10);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -530,6 +562,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// guarantee correct alignment of stack pointer.
UNREACHABLE();
break;
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -539,6 +574,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters, 0);
}
+ // CallCFunction only supports register arguments so we never need to call
+ // frame()->ClearOutgoingParameterSlots() here.
+ DCHECK(frame_access_state()->sp_delta() == 0);
break;
}
case kArchJmp:
@@ -551,12 +589,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchLookupSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -588,21 +629,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Bind(ool->exit());
break;
}
+ case kArm64Float32RoundDown:
+ __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float64RoundDown:
__ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32RoundUp:
+ __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundUp:
+ __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64RoundTiesAway:
__ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32RoundTruncate:
+ __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float64RoundTruncate:
__ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64RoundUp:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArm64Float32RoundTiesEven:
+ __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64RoundTiesEven:
+ __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Add:
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
__ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
+ }
break;
case kArm64Add32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
@@ -744,8 +805,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputOperand2_32(1));
break;
case kArm64Sub:
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
__ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
i.InputOperand2_64(1));
+ }
break;
case kArm64Sub32:
if (FlagsModeField::decode(opcode) != kFlags_none) {
@@ -819,18 +885,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64Claim: {
+ case kArm64ClaimForCallArguments: {
__ Claim(i.InputInt32(0));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
break;
}
case kArm64Poke: {
Operand operand(i.InputInt32(1) * kPointerSize);
- __ Poke(i.InputRegister(0), operand);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Poke(i.InputFloat64Register(0), operand);
+ } else {
+ __ Poke(i.InputRegister(0), operand);
+ }
break;
}
case kArm64PokePair: {
int slot = i.InputInt32(2) - 1;
- __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
+ slot * kPointerSize);
+ } else {
+ __ PokePair(i.InputRegister(1), i.InputRegister(0),
+ slot * kPointerSize);
+ }
break;
}
case kArm64Clz:
@@ -971,6 +1048,51 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32ToInt64:
+ __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
+ if (i.OutputCount() > 1) {
+ __ Mov(i.OutputRegister(1), 1);
+ Label done;
+ __ Cmp(i.OutputRegister(0), 1);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
+ __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
+ vc);
+ __ B(vc, &done);
+ __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
+ __ Cset(i.OutputRegister(1), eq);
+ __ Bind(&done);
+ }
+ break;
+ case kArm64Float64ToInt64:
+ __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
+ if (i.OutputCount() > 1) {
+ __ Mov(i.OutputRegister(1), 1);
+ Label done;
+ __ Cmp(i.OutputRegister(0), 1);
+ __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
+ __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
+ __ B(vc, &done);
+ __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
+ __ Cset(i.OutputRegister(1), eq);
+ __ Bind(&done);
+ }
+ break;
+ case kArm64Float32ToUint64:
+ __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
+ if (i.OutputCount() > 1) {
+ __ Fcmp(i.InputFloat32Register(0), -1.0);
+ __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
+ __ Cset(i.OutputRegister(1), ne);
+ }
+ break;
+ case kArm64Float64ToUint64:
+ __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
+ if (i.OutputCount() > 1) {
+ __ Fcmp(i.InputDoubleRegister(0), -1.0);
+ __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
+ __ Cset(i.OutputRegister(1), ne);
+ }
+ break;
case kArm64Int32ToFloat64:
__ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
@@ -983,6 +1105,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Uint64ToFloat32:
+ __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
+ break;
+ case kArm64Uint64ToFloat64:
+ __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
+ break;
case kArm64Float64ExtractLowWord32:
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
@@ -1211,29 +1339,31 @@ void CodeGenerator::AssembleDeoptimizerCall(
}
-// TODO(dcarney): increase stack slots in frame once before first use.
-static int AlignedStackSlots(int stack_slots) {
- if (stack_slots & 1) stack_slots++;
- return stack_slots;
-}
-
-
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ SetStackPointer(csp);
__ Push(lr, fp);
__ Mov(fp, csp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ SetStackPointer(jssp);
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
__ SetStackPointer(jssp);
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
+ if (descriptor->UseNativeStack()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
+ }
__ StubPrologue();
} else {
+ if (descriptor->UseNativeStack()) {
+ __ SetStackPointer(csp);
+ } else {
+ __ SetStackPointer(jssp);
+ }
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1251,13 +1381,15 @@ void CodeGenerator::AssemblePrologue() {
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_shrink_slots > 0) {
- Register sp = __ StackPointer();
- if (!sp.Is(csp)) {
- __ Sub(sp, sp, stack_shrink_slots * kPointerSize);
- }
- __ Sub(csp, csp, AlignedStackSlots(stack_shrink_slots) * kPointerSize);
+ // If frame()->needs_frame() is false, then
+ // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
+ if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
+ // The system stack pointer requires 16-byte alignment at function call
+ // boundaries.
+
+ stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
}
+ __ Claim(stack_shrink_slots);
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
@@ -1301,19 +1433,25 @@ void CodeGenerator::AssembleReturn() {
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Mov(csp, fp);
__ Pop(fp, lr);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ B(&return_label_);
return;
} else {
__ Bind(&return_label_);
- __ Mov(jssp, fp);
+ if (descriptor->UseNativeStack()) {
+ __ Mov(csp, fp);
+ } else {
+ __ Mov(jssp, fp);
+ }
__ Pop(fp, lr);
}
+ } else if (descriptor->UseNativeStack()) {
+ pop_count += (pop_count & 1);
}
__ Drop(pop_count);
__ Ret();
@@ -1322,7 +1460,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- Arm64OperandConverter g(this, NULL);
+ Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1419,7 +1557,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- Arm64OperandConverter g(this, NULL);
+ Arm64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index cd8b4c56b8..ef333480e3 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -76,7 +76,7 @@ namespace compiler {
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
- V(Arm64Claim) \
+ V(Arm64ClaimForCallArguments) \
V(Arm64Poke) \
V(Arm64PokePair) \
V(Arm64Float32Cmp) \
@@ -88,6 +88,7 @@ namespace compiler {
V(Arm64Float32Min) \
V(Arm64Float32Abs) \
V(Arm64Float32Sqrt) \
+ V(Arm64Float32RoundDown) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
@@ -100,17 +101,27 @@ namespace compiler {
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
V(Arm64Float64RoundTruncate) \
- V(Arm64Float64RoundUp) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
V(Arm64Int32ToFloat64) \
V(Arm64Int64ToFloat32) \
V(Arm64Int64ToFloat64) \
V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
V(Arm64Float64ExtractLowWord32) \
V(Arm64Float64ExtractHighWord32) \
V(Arm64Float64InsertLowWord32) \
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
new file mode 100644
index 0000000000..eb358dd8c4
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -0,0 +1,224 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArm64Add:
+ case kArm64Add32:
+ case kArm64And:
+ case kArm64And32:
+ case kArm64Bic:
+ case kArm64Bic32:
+ case kArm64Clz:
+ case kArm64Clz32:
+ case kArm64Cmp:
+ case kArm64Cmp32:
+ case kArm64Cmn:
+ case kArm64Cmn32:
+ case kArm64Tst:
+ case kArm64Tst32:
+ case kArm64Or:
+ case kArm64Or32:
+ case kArm64Orn:
+ case kArm64Orn32:
+ case kArm64Eor:
+ case kArm64Eor32:
+ case kArm64Eon:
+ case kArm64Eon32:
+ case kArm64Sub:
+ case kArm64Sub32:
+ case kArm64Mul:
+ case kArm64Mul32:
+ case kArm64Smull:
+ case kArm64Umull:
+ case kArm64Madd:
+ case kArm64Madd32:
+ case kArm64Msub:
+ case kArm64Msub32:
+ case kArm64Mneg:
+ case kArm64Mneg32:
+ case kArm64Idiv:
+ case kArm64Idiv32:
+ case kArm64Udiv:
+ case kArm64Udiv32:
+ case kArm64Imod:
+ case kArm64Imod32:
+ case kArm64Umod:
+ case kArm64Umod32:
+ case kArm64Not:
+ case kArm64Not32:
+ case kArm64Lsl:
+ case kArm64Lsl32:
+ case kArm64Lsr:
+ case kArm64Lsr32:
+ case kArm64Asr:
+ case kArm64Asr32:
+ case kArm64Ror:
+ case kArm64Ror32:
+ case kArm64Mov32:
+ case kArm64Sxtb32:
+ case kArm64Sxth32:
+ case kArm64Sxtw:
+ case kArm64Sbfx32:
+ case kArm64Ubfx:
+ case kArm64Ubfx32:
+ case kArm64Ubfiz32:
+ case kArm64Bfi:
+ case kArm64Float32Cmp:
+ case kArm64Float32Add:
+ case kArm64Float32Sub:
+ case kArm64Float32Mul:
+ case kArm64Float32Div:
+ case kArm64Float32Max:
+ case kArm64Float32Min:
+ case kArm64Float32Abs:
+ case kArm64Float32Sqrt:
+ case kArm64Float32RoundDown:
+ case kArm64Float64Cmp:
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ case kArm64Float64Mul:
+ case kArm64Float64Div:
+ case kArm64Float64Mod:
+ case kArm64Float64Max:
+ case kArm64Float64Min:
+ case kArm64Float64Abs:
+ case kArm64Float64Neg:
+ case kArm64Float64Sqrt:
+ case kArm64Float64RoundDown:
+ case kArm64Float64RoundTiesAway:
+ case kArm64Float64RoundTruncate:
+ case kArm64Float64RoundTiesEven:
+ case kArm64Float64RoundUp:
+ case kArm64Float32RoundTiesEven:
+ case kArm64Float32RoundTruncate:
+ case kArm64Float32RoundUp:
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Float32ToInt64:
+ case kArm64Float64ToInt64:
+ case kArm64Float32ToUint64:
+ case kArm64Float64ToUint64:
+ case kArm64Int32ToFloat64:
+ case kArm64Int64ToFloat32:
+ case kArm64Int64ToFloat64:
+ case kArm64Uint32ToFloat64:
+ case kArm64Uint64ToFloat32:
+ case kArm64Uint64ToFloat64:
+ case kArm64Float64ExtractLowWord32:
+ case kArm64Float64ExtractHighWord32:
+ case kArm64Float64InsertLowWord32:
+ case kArm64Float64InsertHighWord32:
+ case kArm64Float64MoveU64:
+ case kArm64U64MoveFloat64:
+ return kNoOpcodeFlags;
+
+ case kArm64TestAndBranch32:
+ case kArm64TestAndBranch:
+ case kArm64CompareAndBranch32:
+ return kIsBlockTerminator;
+
+ case kArm64LdrS:
+ case kArm64LdrD:
+ case kArm64Ldrb:
+ case kArm64Ldrsb:
+ case kArm64Ldrh:
+ case kArm64Ldrsh:
+ case kArm64LdrW:
+ case kArm64Ldr:
+ return kIsLoadOperation;
+
+ case kArm64ClaimForCallArguments:
+ case kArm64Poke:
+ case kArm64PokePair:
+ case kArm64StrS:
+ case kArm64StrD:
+ case kArm64Strb:
+ case kArm64Strh:
+ case kArm64StrW:
+ case kArm64Str:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // Basic latency modeling for arm64 instructions. They have been determined
+ // in an empirical way.
+ switch (instr->arch_opcode()) {
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Int32ToFloat64:
+ case kArm64Uint32ToFloat64:
+ return 3;
+
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ return 2;
+
+ case kArm64Float64Mul:
+ return 3;
+
+ case kArm64Float64Div:
+ return 6;
+
+ case kArm64Lsl:
+ case kArm64Lsl32:
+ case kArm64Lsr:
+ case kArm64Lsr32:
+ case kArm64Asr:
+ case kArm64Asr32:
+ case kArm64Ror:
+ case kArm64Ror32:
+ return 3;
+
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kArm64LdrS:
+ case kArm64LdrD:
+ case kArm64Ldrb:
+ case kArm64Ldrsb:
+ case kArm64Ldrh:
+ case kArm64Ldrsh:
+ case kArm64LdrW:
+ case kArm64Ldr:
+ return 5;
+
+ default:
+ return 1;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 6abad0aa92..1ec5ab4c41 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -338,41 +338,40 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
- switch (rep) {
- case kRepFloat32:
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kArm64LdrS;
immediate_mode = kLoadStoreImm32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArm64LdrD;
immediate_mode = kLoadStoreImm64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
immediate_mode = kLoadStoreImm8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
immediate_mode = kLoadStoreImm16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -392,13 +391,13 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
// TODO(arm64): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -427,36 +426,36 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kArm64StrS;
immediate_mode = kLoadStoreImm32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kArm64StrD;
immediate_mode = kLoadStoreImm64;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kArm64Strb;
immediate_mode = kLoadStoreImm8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kArm64Strh;
immediate_mode = kLoadStoreImm16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kArm64StrW;
immediate_mode = kLoadStoreImm32;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -472,33 +471,34 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -508,33 +508,35 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Arm64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -1237,6 +1239,74 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kArm64Float64ToUint64, output_count, outputs, 1, inputs);
+}
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
VisitRR(this, kArm64Sxtw, node);
}
@@ -1326,6 +1396,16 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kArm64Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Uint64ToFloat64, node);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kArm64Float64ExtractLowWord32, node);
}
@@ -1453,11 +1533,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kArm64Float32RoundDown, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kArm64Float64RoundDown, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kArm64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kArm64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kArm64Float32RoundTruncate, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kArm64Float64RoundTruncate, node);
}
@@ -1468,37 +1568,52 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kArm64Float32RoundTiesEven, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kArm64Float64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
Arm64OperandGenerator g(this);
// Push the arguments to the stack.
int aligned_push_count = static_cast<int>(arguments->size());
+
bool pushed_count_uneven = aligned_push_count & 1;
+ int claim_count = aligned_push_count;
+ if (pushed_count_uneven && descriptor->UseNativeStack()) {
+ // We can only claim for an even number of call arguments when we use the
+ // native stack.
+ claim_count++;
+ }
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
if (aligned_push_count > 0) {
// TODO(dcarney): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
- Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
+ Emit(kArm64ClaimForCallArguments, g.NoOutput(),
+ g.TempImmediate(claim_count));
}
+
// Move arguments to the stack.
- {
- int slot = aligned_push_count - 1;
- // Emit the uneven pushes.
- if (pushed_count_uneven) {
- Node* input = (*arguments)[slot];
- Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
- g.TempImmediate(slot));
- slot--;
- }
- // Now all pushes can be done in pairs.
- for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
- g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
- }
+ int slot = aligned_push_count - 1;
+ while (slot >= 0) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+ g.TempImmediate(slot));
+ slot--;
+ // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
+ // same type.
+ // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
+ // g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
+ // slot -= 2;
}
}
@@ -1717,12 +1832,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -1732,6 +1847,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32,
kArithmeticImm, &cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add,
+ kArithmeticImm, &cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub,
+ kArithmeticImm, &cont);
default:
break;
}
@@ -1919,6 +2042,28 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm, &cont);
+}
+
+
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
@@ -2033,11 +2178,17 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
return MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index a58587dfba..c70dfbf650 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -4,6 +4,7 @@
#include "src/compiler/ast-graph-builder.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
@@ -14,8 +15,8 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/compiler/type-hint-analyzer.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -93,11 +94,14 @@ class AstGraphBuilder::AstValueContext final : public AstContext {
// Context to evaluate expression for a condition value (and side effects).
class AstGraphBuilder::AstTestContext final : public AstContext {
public:
- explicit AstTestContext(AstGraphBuilder* owner)
- : AstContext(owner, Expression::kTest) {}
+ AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
+ : AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
~AstTestContext() final;
void ProduceValue(Node* value) final;
Node* ConsumeValue() final;
+
+ private:
+ TypeFeedbackId const feedback_id_;
};
@@ -215,7 +219,7 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
// One recorded control-flow command.
struct Entry {
Command command; // The command type being applied on this path.
- Statement* statement; // The target statement for the command or {NULL}.
+ Statement* statement; // The target statement for the command or {nullptr}.
Node* token; // A token identifying this particular path.
};
@@ -428,7 +432,8 @@ class AstGraphBuilder::FrameStateBeforeAndAfter {
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
+ JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
+ TypeHintAnalysis* type_hint_analysis)
: isolate_(info->isolate()),
local_zone_(local_zone),
info_(info),
@@ -444,6 +449,7 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
input_buffer_(nullptr),
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
+ type_hint_analysis_(type_hint_analysis),
state_values_cache_(jsgraph),
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
local_zone),
@@ -461,8 +467,7 @@ Node* AstGraphBuilder::GetFunctionClosureForContext() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
- // Pass a SMI sentinel and let the runtime look up the empty function.
- return jsgraph()->SmiConstant(0);
+ return BuildLoadNativeContextField(Context::CLOSURE_INDEX);
} else {
DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
@@ -472,8 +477,8 @@ Node* AstGraphBuilder::GetFunctionClosureForContext() {
Node* AstGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
- const Operator* op = common()->Parameter(
- Linkage::kJSFunctionCallClosureParamIndex, "%closure");
+ int index = Linkage::kJSCallClosureParamIndex;
+ const Operator* op = common()->Parameter(index, "%closure");
Node* node = NewNode(op, graph()->start());
function_closure_.set(node);
}
@@ -483,9 +488,9 @@ Node* AstGraphBuilder::GetFunctionClosure() {
Node* AstGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
- // Parameter (arity + 2) is special for the outer context of the function
- const Operator* op = common()->Parameter(
- info()->num_parameters_including_this() + 1, "%context");
+ int params = info()->num_parameters_including_this();
+ int index = Linkage::GetJSCallContextParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%context");
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
@@ -493,14 +498,26 @@ Node* AstGraphBuilder::GetFunctionContext() {
}
+Node* AstGraphBuilder::GetNewTarget() {
+ if (!new_target_.is_set()) {
+ int params = info()->num_parameters_including_this();
+ int index = Linkage::GetJSCallNewTargetParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%new.target");
+ Node* node = NewNode(op, graph()->start());
+ new_target_.set(node);
+ }
+ return new_target_.get();
+}
+
+
bool AstGraphBuilder::CreateGraph(bool stack_check) {
Scope* scope = info()->scope();
- DCHECK(graph() != NULL);
+ DCHECK_NOT_NULL(graph());
// Set up the basic structure of the graph. Outputs for {Start} are the formal
- // parameters (including the receiver) plus number of arguments, context and
- // closure.
- int actual_parameter_count = info()->num_parameters_including_this() + 3;
+ // parameters (including the receiver) plus new target, number of arguments,
+ // context and closure.
+ int actual_parameter_count = info()->num_parameters_including_this() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
// Initialize the top-level environment.
@@ -559,6 +576,11 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
+ // Build rest arguments array if it is used.
+ int rest_index;
+ Variable* rest_parameter = scope->rest_parameter(&rest_index);
+ BuildRestArgumentsArray(rest_parameter, rest_index);
+
// Build assignment to {.this_function} variable if it is used.
BuildThisFunctionVariable(scope->this_function_var());
@@ -807,7 +829,7 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
int offset, int count) {
bool should_update = false;
Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
- if (*state_values == NULL || (*state_values)->InputCount() != count) {
+ if (*state_values == nullptr || (*state_values)->InputCount() != count) {
should_update = true;
} else {
DCHECK(static_cast<size_t>(offset + count) <= values()->size());
@@ -913,11 +935,11 @@ void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
- environment()->Push(owner()->BuildToBoolean(value));
+ environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
}
-Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return nullptr; }
Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
@@ -945,14 +967,14 @@ void AstGraphBuilder::ControlScope::PerformCommand(Command command,
Node* value) {
Environment* env = environment()->CopyAsUnreachable();
ControlScope* current = this;
- while (current != NULL) {
+ while (current != nullptr) {
environment()->TrimStack(current->stack_height());
environment()->TrimContextChain(current->context_length());
if (current->Execute(command, target, value)) break;
current = current->outer_;
}
builder()->set_environment(env);
- DCHECK(current != NULL); // Always handled (unless stack is malformed).
+ DCHECK_NOT_NULL(current); // Always handled (unless stack is malformed).
}
@@ -977,7 +999,7 @@ void AstGraphBuilder::ControlScope::ThrowValue(Node* exception_value) {
void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
- if (expr == NULL) {
+ if (expr == nullptr) {
return environment()->Push(jsgraph()->NullConstant());
}
VisitForValue(expr);
@@ -985,7 +1007,7 @@ void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
void AstGraphBuilder::VisitForValueOrTheHole(Expression* expr) {
- if (expr == NULL) {
+ if (expr == nullptr) {
return environment()->Push(jsgraph()->TheHoleConstant());
}
VisitForValue(expr);
@@ -1020,7 +1042,7 @@ void AstGraphBuilder::VisitForEffect(Expression* expr) {
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this);
+ AstTestContext for_condition(this, expr->test_id());
if (!CheckStackOverflow()) {
expr->Accept(this);
} else {
@@ -1119,8 +1141,8 @@ void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
void AstGraphBuilder::VisitBlock(Block* stmt) {
BlockBuilder block(this);
ControlScopeForBreakable scope(this, stmt, &block);
- if (stmt->labels() != NULL) block.BeginBlock();
- if (stmt->scope() == NULL) {
+ if (stmt->labels() != nullptr) block.BeginBlock();
+ if (stmt->scope() == nullptr) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
} else {
@@ -1135,7 +1157,7 @@ void AstGraphBuilder::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
}
- if (stmt->labels() != NULL) block.EndBlock();
+ if (stmt->labels() != nullptr) block.EndBlock();
}
@@ -1188,8 +1210,9 @@ void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
VisitForValue(stmt->expression());
Node* value = environment()->Pop();
+ Node* object = BuildToObject(value, stmt->ToObjectId());
const Operator* op = javascript()->CreateWithContext();
- Node* context = NewNode(op, value, GetFunctionClosureForContext());
+ Node* context = NewNode(op, object, GetFunctionClosureForContext());
PrepareFrameState(context, stmt->EntryId());
VisitInScope(stmt->statement(), stmt->scope(), context);
}
@@ -1204,7 +1227,6 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForValue(stmt->tag());
- Node* tag = environment()->Top();
// Iterate over all cases and create nodes for label comparison.
for (int i = 0; i < clauses->length(); i++) {
@@ -1220,6 +1242,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
// value is still on the operand stack while the label is evaluated.
VisitForValue(clause->label());
Node* label = environment()->Pop();
+ Node* tag = environment()->Top();
const Operator* op = javascript()->StrictEqual();
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -1275,7 +1298,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
LoopBuilder for_loop(this);
VisitIfNotNull(stmt->init());
for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- if (stmt->cond() != NULL) {
+ if (stmt->cond() != nullptr) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
for_loop.BreakUnless(condition);
@@ -1555,18 +1578,18 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
// The prototype is ensured to exist by Runtime_DefineClass. No access check
// is needed here since the constructor is created by the class literal.
- Node* proto =
+ Node* prototype =
BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
// The class literal and the prototype are both expected on the operand stack
// during evaluation of the method values.
environment()->Push(literal);
- environment()->Push(proto);
+ environment()->Push(prototype);
// Create nodes to store method values into the literal.
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
- environment()->Push(property->is_static() ? literal : proto);
+ environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
VisitForValue(property->key());
Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
@@ -1619,11 +1642,11 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- environment()->Pop(); // proto
- environment()->Pop(); // literal
+ prototype = environment()->Pop();
+ literal = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
- literal = NewNode(op, literal, proto);
+ literal = NewNode(op, literal, prototype);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
@@ -1632,7 +1655,7 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
VectorSlotPair feedback = CreateVectorSlotPair(
expr->NeedsProxySlot() ? expr->ProxySlot()
: FeedbackVectorSlot::Invalid());
- BuildVariableAssignment(var, literal, Token::INIT_CONST, feedback,
+ BuildVariableAssignment(var, literal, Token::INIT, feedback,
BailoutId::None(), states);
}
ast_context()->ProduceValue(literal);
@@ -1684,14 +1707,9 @@ void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to materialize a regular expression literal.
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* pattern = jsgraph()->Constant(expr->pattern());
- Node* flags = jsgraph()->Constant(expr->flags());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ expr->pattern(), expr->flags(), expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(literal);
}
@@ -1701,13 +1719,10 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* constants = jsgraph()->Constant(expr->constant_properties());
- const Operator* op =
- javascript()->CreateLiteralObject(expr->ComputeFlags(true));
- Node* literal = NewNode(op, literals_array, literal_index, constants);
+ const Operator* op = javascript()->CreateLiteralObject(
+ expr->constant_properties(), expr->ComputeFlags(true),
+ expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1738,6 +1753,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForValue(property->value());
FrameStateBeforeAndAfter states(this, property->value()->id());
Node* value = environment()->Pop();
+ Node* literal = environment()->Top();
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
@@ -1750,7 +1766,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->key());
VisitForValue(property->value());
Node* value = environment()->Pop();
@@ -1768,7 +1784,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
@@ -1777,7 +1793,8 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
Node* set_prototype = NewNode(op, receiver, value);
// SetPrototype should not lazy deopt on an object literal.
- PrepareFrameState(set_prototype, BailoutId::None());
+ PrepareFrameState(set_prototype,
+ expr->GetIdForPropertySet(property_index));
break;
}
case ObjectLiteral::Property::GETTER:
@@ -1795,6 +1812,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create nodes to define accessors, using only a single call to the runtime
// for each pair of corresponding getters and setters.
+ literal = environment()->Top(); // Reload from operand stack.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
VisitForValue(it->first);
@@ -1824,21 +1842,21 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
Node* call = NewNode(op, receiver, value);
- PrepareFrameState(call, BailoutId::None());
+ PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
continue;
}
- environment()->Push(literal); // Duplicate receiver.
+ environment()->Push(environment()->Top()); // Duplicate receiver.
VisitForValue(property->key());
Node* name = BuildToName(environment()->Pop(),
- expr->GetIdForProperty(property_index));
+ expr->GetIdForPropertyName(property_index));
environment()->Push(name);
VisitForValue(property->value());
Node* value = environment()->Pop();
@@ -1879,6 +1897,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
// Transform literals that contain functions to fast properties.
+ literal = environment()->Top(); // Reload from operand stack.
if (expr->has_function()) {
const Operator* op =
javascript()->CallRuntime(Runtime::kToFastProperties, 1);
@@ -1904,20 +1923,16 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* closure = GetFunctionClosure();
// Create node to deep-copy the literal boilerplate.
- Node* literals_array =
- BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* literal_index = jsgraph()->Constant(expr->literal_index());
- Node* constants = jsgraph()->Constant(expr->constant_elements());
- const Operator* op =
- javascript()->CreateLiteralArray(expr->ComputeFlags(true));
- Node* literal = NewNode(op, literals_array, literal_index, constants);
+ const Operator* op = javascript()->CreateLiteralArray(
+ expr->constant_elements(), expr->ComputeFlags(true),
+ expr->literal_index());
+ Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
- // The array and the literal index are both expected on the operand stack
- // during computation of the element values.
+ // The array is expected on the operand stack during computation of the
+ // element values.
environment()->Push(literal);
- environment()->Push(literal_index);
// Create nodes to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
@@ -1933,6 +1948,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
+ Node* literal = environment()->Top();
Node* store = BuildKeyedStore(literal, index, value, pair);
states.AddToNode(store, expr->GetIdForElement(array_index),
OutputFrameStateCombine::Ignore());
@@ -1944,10 +1960,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// above. The second part is the part after the first spread expression
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
- environment()->Pop(); // Array literal index.
for (; array_index < expr->values()->length(); array_index++) {
Expression* subexpr = expr->values()->at(array_index);
- Node* array = environment()->Pop();
Node* result;
if (subexpr->IsSpread()) {
@@ -1955,6 +1969,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
FrameStateBeforeAndAfter states(this,
subexpr->AsSpread()->expression()->id());
Node* iterable = environment()->Pop();
+ Node* array = environment()->Pop();
Node* function = BuildLoadNativeContextField(
Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
result = NewNode(javascript()->CallFunction(3, language_mode()), function,
@@ -1963,6 +1978,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
VisitForValue(subexpr);
Node* value = environment()->Pop();
+ Node* array = environment()->Pop();
const Operator* op =
javascript()->CallRuntime(Runtime::kAppendElement, 2);
result = NewNode(op, array, value);
@@ -2096,7 +2112,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
- Node* old_value = NULL;
+ Node* old_value = nullptr;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
@@ -2162,7 +2178,9 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
FrameStateBeforeAndAfter states(this, expr->value()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- value = BuildBinaryOp(left, right, expr->binary_op());
+ value =
+ BuildBinaryOp(left, right, expr->binary_op(),
+ expr->binary_operation()->BinaryOperationFeedbackId());
states.AddToNode(value, expr->binary_operation()->id(),
OutputFrameStateCombine::Push());
}
@@ -2447,8 +2465,8 @@ void AstGraphBuilder::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Resolve callee and receiver for a potential direct eval call. This block
- // will mutate the callee and receiver values pushed onto the environment.
+ // Resolve callee for a potential direct eval call. This block will mutate the
+ // callee value pushed onto the environment.
if (possibly_eval && args->length() > 0) {
int arg_count = args->length();
@@ -2489,27 +2507,27 @@ void AstGraphBuilder::VisitCallSuper(Call* expr) {
SuperCallReference* super = expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super);
- // Prepare the callee to the super call. The super constructor is stored as
- // the prototype of the constructor we are currently executing.
+ // Prepare the callee to the super call.
VisitForValue(super->this_function_var());
Node* this_function = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kGetPrototype, 1);
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
Node* super_function = NewNode(op, this_function);
- // TODO(mstarzinger): This probably needs a proper bailout id.
- PrepareFrameState(super_function, BailoutId::None());
environment()->Push(super_function);
// Evaluate all arguments to the super call.
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Original constructor is loaded from the {new.target} variable.
+ // The new target is loaded from the {new.target} variable.
VisitForValue(super->new_target_var());
// Create node to perform the super call.
- const Operator* call = javascript()->CallConstruct(args->length() + 2);
+ const Operator* call =
+ javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
+ FrameStateBeforeAndAfter states(this, super->new_target_var()->id());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
@@ -2521,13 +2539,20 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Original constructor is the same as the callee.
+ // The baseline compiler doesn't push the new.target, so we need to record
+ // the frame state before the push.
+ FrameStateBeforeAndAfter states(
+ this, args->is_empty() ? expr->expression()->id() : args->last()->id());
+
+ // The new target is the same as the callee.
environment()->Push(environment()->Peek(args->length()));
// Create node to perform the construct call.
- const Operator* call = javascript()->CallConstruct(args->length() + 2);
+ VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
+ const Operator* call =
+ javascript()->CallConstruct(args->length() + 2, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
@@ -2615,7 +2640,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
// Evaluate LHS expression and get old value.
- Node* old_value = NULL;
+ Node* old_value = nullptr;
int stack_depth = -1;
switch (assign_type) {
case VARIABLE: {
@@ -2712,9 +2737,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Create node to perform +1/-1 operation.
Node* value;
{
+ // TODO(bmeurer): Cleanup this feedback/bailout mess!
FrameStateBeforeAndAfter states(this, BailoutId::None());
- value =
- BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+ value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
+ expr->binary_op(), TypeFeedbackId::None());
// This should never deoptimize outside strong mode because otherwise we
// have converted to number before.
states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
@@ -2797,7 +2823,8 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->op());
+ Node* value = BuildBinaryOp(left, right, expr->op(),
+ expr->BinaryOperationFeedbackId());
states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2839,7 +2866,7 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
op = javascript()->HasProperty();
break;
default:
- op = NULL;
+ op = nullptr;
UNREACHABLE();
}
VisitForValue(expr->left());
@@ -2911,7 +2938,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
- if (stmt == NULL) return;
+ if (stmt == nullptr) return;
Visit(stmt);
}
@@ -2991,8 +3018,9 @@ void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
VisitForValue(expr->expression());
Node* operand = environment()->Pop();
- // TODO(mstarzinger): Possible optimization when we are in effect context.
- Node* value = NewNode(javascript()->UnaryNot(), operand);
+ Node* input = BuildToBoolean(operand, expr->expression()->test_id());
+ Node* value = NewNode(common()->Select(MachineRepresentation::kTagged), input,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
ast_context()->ProduceValue(value);
}
@@ -3009,7 +3037,7 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
IfBuilder compare_if(this);
VisitForValue(expr->left());
Node* condition = environment()->Top();
- compare_if.If(BuildToBoolean(condition));
+ compare_if.If(BuildToBoolean(condition, expr->left()->test_id()));
compare_if.Then();
if (is_logical_and) {
environment()->Pop();
@@ -3044,6 +3072,12 @@ VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
}
+void AstGraphBuilder::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ Visit(node->expression());
+}
+
+
namespace {
// Limit of context chain length to which inline check is possible.
@@ -3172,7 +3206,7 @@ Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
- if (arguments == NULL) return NULL;
+ if (arguments == nullptr) return nullptr;
// Allocate and initialize a new arguments object.
CreateArgumentsParameters::Type type =
@@ -3193,6 +3227,25 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
}
+Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
+ if (rest == nullptr) return nullptr;
+
+ // Allocate and initialize a new arguments object.
+ CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
+ const Operator* op = javascript()->CreateArguments(type, index);
+ Node* object = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(object, BailoutId::None());
+
+ // Assign the object to the {rest} variable. This should never lazy
+ // deopt, so it is fine to send invalid bailout id.
+ DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+ FrameStateBeforeAndAfter states(this, BailoutId::None());
+ BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
+ BailoutId::None(), states);
+ return object;
+}
+
+
Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
if (this_function_var == nullptr) return nullptr;
@@ -3202,7 +3255,7 @@ Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
// Assign the object to the {.this_function} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(this_function_var, this_function, Token::INIT_CONST,
+ BuildVariableAssignment(this_function_var, this_function, Token::INIT,
VectorSlotPair(), BailoutId::None(), states);
return this_function;
}
@@ -3211,17 +3264,14 @@ Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
if (new_target_var == nullptr) return nullptr;
- // Retrieve the original constructor in case we are called as a constructor.
- const Operator* op =
- javascript()->CallRuntime(Runtime::kGetOriginalConstructor, 0);
- Node* object = NewNode(op);
- PrepareFrameState(object, BailoutId::None());
+ // Retrieve the new target we were called with.
+ Node* object = GetNewTarget();
// Assign the object to the {new.target} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(new_target_var, object, Token::INIT_CONST,
- VectorSlotPair(), BailoutId::None(), states);
+ BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
+ BailoutId::None(), states);
return object;
}
@@ -3230,8 +3280,9 @@ Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
Node* not_hole) {
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
- return NewNode(common()->Select(kMachAnyTagged, BranchHint::kFalse), check,
- for_hole, not_hole);
+ return NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, for_hole, not_hole);
}
@@ -3361,7 +3412,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
@@ -3396,7 +3447,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
@@ -3418,13 +3469,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
// Local var, const, or let variable.
- if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ if (mode == CONST_LEGACY && op == Token::INIT) {
// Perform an initialization check for legacy const variables.
Node* current = environment()->Lookup(variable);
if (current->op() != the_hole->op()) {
value = BuildHoleCheckSilent(current, value, current);
}
- } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3432,13 +3483,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op == Token::INIT_LET) {
+ } else if (mode == LET && op == Token::INIT) {
// No initialization check needed because scoping guarantees it. Note
// that we still perform a lookup to keep the variable live, because
// baseline code might contain debug code that inspects the variable.
Node* current = environment()->Lookup(variable);
CHECK_NOT_NULL(current);
- } else if (mode == LET && op != Token::INIT_LET) {
+ } else if (mode == LET && op != Token::INIT) {
// Perform an initialization check for let declared variables.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
@@ -3446,7 +3497,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
} else if (current->opcode() == IrOpcode::kPhi) {
BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op == Token::INIT_CONST) {
+ } else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
// Note that the {this} variable is the only const variable being able
// to trigger bind operations outside the TDZ, via {super} calls.
@@ -3454,7 +3505,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (current->op() != the_hole->op() && variable->is_this()) {
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op != Token::INIT_CONST) {
+ } else if (mode == CONST && op != Token::INIT) {
// Assignment to const is exception in all modes.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
@@ -3469,13 +3520,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+ if (mode == CONST_LEGACY && op == Token::INIT) {
// Perform an initialization check for legacy const variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckSilent(current, value, current);
- } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3483,13 +3534,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op != Token::INIT_LET) {
+ } else if (mode == LET && op != Token::INIT) {
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op, current_context());
value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
- } else if (mode == CONST && op == Token::INIT_CONST) {
+ } else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
// Note that the {this} variable is the only const variable being able
// to trigger bind operations outside the TDZ, via {super} calls.
@@ -3499,7 +3550,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
- } else if (mode == CONST && op != Token::INIT_CONST) {
+ } else if (mode == CONST && op != Token::INIT) {
// Assignment to const is exception in all modes.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
@@ -3524,7 +3575,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
}
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
@@ -3626,29 +3677,28 @@ Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
- return NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
}
Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
- return graph()->NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ object,
jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
graph()->start(), graph()->start());
}
Node* AstGraphBuilder::BuildLoadGlobalObject() {
- const Operator* load_op =
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
- return NewNode(load_op, GetFunctionContext());
+ return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
}
Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
- Node* global = BuildLoadGlobalObject();
- Node* native_context =
- BuildLoadObjectField(global, JSGlobalObject::kNativeContextOffset);
+ const Operator* op =
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
+ Node* native_context = NewNode(op, current_context());
return NewNode(javascript()->LoadContext(0, index, true), native_context);
}
@@ -3666,9 +3716,14 @@ Node* AstGraphBuilder::BuildLoadFeedbackVector() {
}
-Node* AstGraphBuilder::BuildToBoolean(Node* input) {
+Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
if (Node* node = TryFastToBoolean(input)) return node;
- return NewNode(javascript()->ToBoolean(), input);
+ ToBooleanHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
+ hints = ToBooleanHint::kAny;
+ }
+ return NewNode(javascript()->ToBoolean(hints), input);
}
@@ -3773,45 +3828,51 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
}
-Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
+ TypeFeedbackId feedback_id) {
const Operator* js_op;
+ BinaryOperationHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetBinaryOperationHints(feedback_id, &hints)) {
+ hints = BinaryOperationHints::Any();
+ }
switch (op) {
case Token::BIT_OR:
- js_op = javascript()->BitwiseOr(language_mode());
+ js_op = javascript()->BitwiseOr(language_mode(), hints);
break;
case Token::BIT_AND:
- js_op = javascript()->BitwiseAnd(language_mode());
+ js_op = javascript()->BitwiseAnd(language_mode(), hints);
break;
case Token::BIT_XOR:
- js_op = javascript()->BitwiseXor(language_mode());
+ js_op = javascript()->BitwiseXor(language_mode(), hints);
break;
case Token::SHL:
- js_op = javascript()->ShiftLeft(language_mode());
+ js_op = javascript()->ShiftLeft(language_mode(), hints);
break;
case Token::SAR:
- js_op = javascript()->ShiftRight(language_mode());
+ js_op = javascript()->ShiftRight(language_mode(), hints);
break;
case Token::SHR:
- js_op = javascript()->ShiftRightLogical(language_mode());
+ js_op = javascript()->ShiftRightLogical(language_mode(), hints);
break;
case Token::ADD:
- js_op = javascript()->Add(language_mode());
+ js_op = javascript()->Add(language_mode(), hints);
break;
case Token::SUB:
- js_op = javascript()->Subtract(language_mode());
+ js_op = javascript()->Subtract(language_mode(), hints);
break;
case Token::MUL:
- js_op = javascript()->Multiply(language_mode());
+ js_op = javascript()->Multiply(language_mode(), hints);
break;
case Token::DIV:
- js_op = javascript()->Divide(language_mode());
+ js_op = javascript()->Divide(language_mode(), hints);
break;
case Token::MOD:
- js_op = javascript()->Modulus(language_mode());
+ js_op = javascript()->Modulus(language_mode(), hints);
break;
default:
UNREACHABLE();
- js_op = NULL;
+ js_op = nullptr;
}
return NewNode(js_op, left, right);
}
@@ -3850,8 +3911,8 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
Node* load = NewNode(
javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
current_context());
- Node* check =
- NewNode(javascript()->CallRuntime(Runtime::kInlineIsSmi, 1), load);
+ Node* check = NewNode(javascript()->StrictEqual(), load,
+ jsgraph()->TheHoleConstant());
fast_block.BreakUnless(check, BranchHint::kTrue);
}
@@ -3892,8 +3953,8 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
Node* load = NewNode(
javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
current_context());
- Node* check =
- NewNode(javascript()->CallRuntime(Runtime::kInlineIsSmi, 1), load);
+ Node* check = NewNode(javascript()->StrictEqual(), load,
+ jsgraph()->TheHoleConstant());
fast_block.BreakUnless(check, BranchHint::kTrue);
}
@@ -3939,7 +4000,6 @@ Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSUnaryNot:
case IrOpcode::kJSToBoolean:
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
@@ -3995,7 +4055,7 @@ void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
- if (loop_assignment_analysis_ == NULL) return NULL;
+ if (loop_assignment_analysis_ == nullptr) return nullptr;
return loop_assignment_analysis_->GetVariablesAssignedInLoop(stmt);
}
@@ -4022,7 +4082,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK(op->ControlInputCount() < 2);
DCHECK(op->EffectInputCount() < 2);
- Node* result = NULL;
+ Node* result = nullptr;
if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
@@ -4229,7 +4289,7 @@ void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned,
Node* AstGraphBuilder::NewPhi(int count, Node* input, Node* control) {
- const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kTagged, count);
Node** buffer = EnsureInputBufferSize(count + 1);
MemsetPointer(buffer, input, count);
buffer[count] = control;
@@ -4291,7 +4351,8 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
NodeProperties::GetControlInput(value) == control) {
// Phi already exists, add input.
value->InsertInput(graph_zone(), inputs - 1, other);
- NodeProperties::ChangeOp(value, common()->Phi(kMachAnyTagged, inputs));
+ NodeProperties::ChangeOp(
+ value, common()->Phi(MachineRepresentation::kTagged, inputs));
} else if (value != other) {
// Phi does not exist yet, introduce one.
value = NewPhi(inputs, value, control);
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index f5b662224a..3b6302d3dd 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
#define V8_COMPILER_AST_GRAPH_BUILDER_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
@@ -13,15 +13,20 @@
namespace v8 {
namespace internal {
+// Forward declarations.
class BitVector;
+
namespace compiler {
+// Forward declarations.
class ControlBuilder;
class Graph;
class LoopAssignmentAnalysis;
class LoopBuilder;
class Node;
+class TypeHintAnalysis;
+
// The AstGraphBuilder produces a high-level IR graph, based on an
// underlying AST. The produced graph can either be compiled into a
@@ -30,7 +35,8 @@ class Node;
class AstGraphBuilder : public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- LoopAssignmentAnalysis* loop_assignment = NULL);
+ LoopAssignmentAnalysis* loop_assignment = nullptr,
+ TypeHintAnalysis* type_hint_analysis = nullptr);
// Creates a graph by visiting the entire AST.
bool CreateGraph(bool stack_check = true);
@@ -87,6 +93,7 @@ class AstGraphBuilder : public AstVisitor {
// Nodes representing values in the activation record.
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
+ SetOncePointer<Node> new_target_;
// Tracks how many try-blocks are currently entered.
int try_catch_nesting_level_;
@@ -105,6 +112,9 @@ class AstGraphBuilder : public AstVisitor {
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
+ // Result of type hint analysis performed before graph creation.
+ TypeHintAnalysis* type_hint_analysis_;
+
// Cache for StateValues nodes for frame states.
StateValuesCache state_values_cache_;
@@ -147,16 +157,19 @@ class AstGraphBuilder : public AstVisitor {
// Create the main graph body by visiting the AST.
void CreateGraphBody(bool stack_check);
- // Get or create the node that represents the outer function closure.
+ // Get or create the node that represents the incoming function closure.
Node* GetFunctionClosureForContext();
Node* GetFunctionClosure();
- // Get or create the node that represents the outer function context.
+ // Get or create the node that represents the incoming function context.
Node* GetFunctionContext();
+ // Get or create the node that represents the incoming new target value.
+ Node* GetNewTarget();
+
// Node creation helpers.
Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
}
Node* NewNode(const Operator* op, Node* n1) {
@@ -251,6 +264,9 @@ class AstGraphBuilder : public AstVisitor {
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
+ // Builder to create an array of rest parameters if used
+ Node* BuildRestArgumentsArray(Variable* rest, int index);
+
// Builder that assigns to the {.this_function} internal variable if needed.
Node* BuildThisFunctionVariable(Variable* this_function_var);
@@ -308,7 +324,7 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildLoadImmutableObjectField(Node* object, int offset);
// Builders for automatic type conversion.
- Node* BuildToBoolean(Node* input);
+ Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
Node* BuildToName(Node* input, BailoutId bailout_id);
Node* BuildToObject(Node* input, BailoutId bailout_id);
@@ -340,7 +356,8 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildThrow(Node* exception_value);
// Builders for binary operations.
- Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op,
+ TypeFeedbackId feedback_id);
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
@@ -348,7 +365,7 @@ class AstGraphBuilder : public AstVisitor {
// ===========================================================================
// The following build methods have the same contract as the above ones, but
- // they can also return {NULL} to indicate that no fragment was built. Note
+ // they can also return {nullptr} to indicate that no fragment was built. Note
// that these are optimizations, disabling any of them should still produce
// correct graphs.
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 040999aa05..2074c944e6 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -4,7 +4,7 @@
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -22,7 +22,7 @@ LoopAssignmentAnalysis* ALAA::Analyze() {
LoopAssignmentAnalysis* a = new (zone_) LoopAssignmentAnalysis(zone_);
result_ = a;
VisitStatements(info()->literal()->body());
- result_ = NULL;
+ result_ = nullptr;
return a;
}
@@ -126,6 +126,7 @@ void ALAA::VisitClassLiteral(ClassLiteral* e) {
VisitIfNotNull(e->constructor());
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
}
}
@@ -141,6 +142,7 @@ void ALAA::VisitConditional(Conditional* e) {
void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
ZoneList<ObjectLiteralProperty*>* properties = e->properties();
for (int i = 0; i < properties->length(); i++) {
+ Visit(properties->at(i)->key());
Visit(properties->at(i)->value());
}
}
@@ -286,6 +288,12 @@ void ALAA::VisitCountOperation(CountOperation* e) {
}
+void ALAA::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void ALAA::AnalyzeAssignment(Variable* var) {
if (!loop_stack_.empty() && var->IsStackAllocated()) {
loop_stack_.back()->Add(GetVariableIndex(info()->scope(), var));
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
index d7b390009d..169691135a 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
#define V8_COMPILER_AST_LOOP_ASSIGNMENT_ANALYZER_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/bit-vector.h"
#include "src/zone-containers.h"
@@ -26,7 +26,7 @@ class LoopAssignmentAnalysis : public ZoneObject {
if (list_[i].first == loop) return list_[i].second;
}
UNREACHABLE(); // should never ask for loops that aren't here!
- return NULL;
+ return nullptr;
}
int GetAssignmentCountForTesting(Scope* scope, Variable* var);
@@ -63,7 +63,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
void Exit(IterationStatement* loop);
void VisitIfNotNull(AstNode* node) {
- if (node != NULL) Visit(node);
+ if (node != nullptr) Visit(node);
}
void AnalyzeAssignment(Variable* var);
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 23170e701b..a966a5b262 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -81,12 +81,13 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
// Construct increment operation.
Node* base = graph->NewNode(
PointerConstant(&common, data->GetCounterAddress(block_number)));
- Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero,
+ Node* load = graph->NewNode(machine.Load(MachineType::Uint32()), base, zero,
graph->start(), graph->start());
Node* inc = graph->NewNode(machine.Int32Add(), load, one);
- Node* store = graph->NewNode(
- machine.Store(StoreRepresentation(kMachUint32, kNoWriteBarrier)), base,
- zero, inc, graph->start(), graph->start());
+ Node* store =
+ graph->NewNode(machine.Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ base, zero, inc, graph->start(), graph->start());
// Insert the new nodes.
static const int kArraySize = 6;
Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
diff --git a/deps/v8/src/compiler/binary-operator-reducer.cc b/deps/v8/src/compiler/binary-operator-reducer.cc
deleted file mode 100644
index 43d26d8884..0000000000
--- a/deps/v8/src/compiler/binary-operator-reducer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/binary-operator-reducer.h"
-
-#include <algorithm>
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-#include "src/types-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BinaryOperatorReducer::BinaryOperatorReducer(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine)
- : AdvancedReducer(editor),
- graph_(graph),
- common_(common),
- machine_(machine),
- dead_(graph->NewNode(common->Dead())) {}
-
-
-Reduction BinaryOperatorReducer::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kFloat64Mul:
- return ReduceFloat52Mul(node);
- case IrOpcode::kFloat64Div:
- return ReduceFloat52Div(node);
- default:
- break;
- }
- return NoChange();
-}
-
-
-Reduction BinaryOperatorReducer::ReduceFloat52Mul(Node* node) {
- if (!machine()->Is64()) return NoChange();
-
- Float64BinopMatcher m(node);
- if (!m.left().IsChangeInt32ToFloat64() ||
- !m.right().IsChangeInt32ToFloat64()) {
- return NoChange();
- }
-
- Type* type = NodeProperties::GetType(node);
- Type::RangeType* range = type->GetRange();
-
- // JavaScript has 52 bit precision in multiplication
- if (range == nullptr || range->Min() < 0.0 ||
- range->Max() > 0xFFFFFFFFFFFFFULL) {
- return NoChange();
- }
-
- Node* mul = graph()->NewNode(machine()->Int64Mul(), m.left().InputAt(0),
- m.right().InputAt(0));
- Revisit(mul);
-
- Type* range_type = Type::Range(range->Min(), range->Max(), graph()->zone());
-
- // TODO(indutny): Is Type::Number() a proper thing here? It looks like
- // every other place is using Type:Internal() for int64 values.
- // Should we off-load range propagation to Typer?
- NodeProperties::SetType(
- mul, Type::Intersect(range_type, Type::Number(), graph()->zone()));
-
- Node* out = graph()->NewNode(machine()->RoundInt64ToFloat64(), mul);
- return Replace(out);
-}
-
-
-Reduction BinaryOperatorReducer::ReduceFloat52Div(Node* node) {
- if (!machine()->Is64()) return NoChange();
-
- Float64BinopMatcher m(node);
- if (!m.left().IsRoundInt64ToFloat64()) return NoChange();
-
- // Right value should be positive...
- if (!m.right().HasValue() || m.right().Value() <= 0) return NoChange();
-
- // ...integer...
- int64_t value = static_cast<int64_t>(m.right().Value());
- if (value != static_cast<int64_t>(m.right().Value())) return NoChange();
-
- // ...and should be a power of two.
- if (!base::bits::IsPowerOfTwo64(value)) return NoChange();
-
- Node* left = m.left().InputAt(0);
- Type::RangeType* range = NodeProperties::GetType(left)->GetRange();
-
- // The result should fit into 32bit word
- int64_t min = static_cast<int64_t>(range->Min()) / value;
- int64_t max = static_cast<int64_t>(range->Max()) / value;
- if (min < 0 || max > 0xFFFFFFFLL) {
- return NoChange();
- }
-
- int64_t shift = WhichPowerOf2_64(static_cast<int64_t>(m.right().Value()));
-
- // Replace division with 64bit right shift
- Node* shr =
- graph()->NewNode(machine()->Word64Shr(), left,
- graph()->NewNode(common()->Int64Constant(shift)));
- Revisit(shr);
-
- Node* out = graph()->NewNode(machine()->RoundInt64ToFloat64(), shr);
- return Replace(out);
-}
-
-
-Reduction BinaryOperatorReducer::Change(Node* node, Operator const* op,
- Node* a) {
- node->ReplaceInput(0, a);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, op);
- return Changed(node);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/binary-operator-reducer.h b/deps/v8/src/compiler/binary-operator-reducer.h
deleted file mode 100644
index fd0d381c30..0000000000
--- a/deps/v8/src/compiler/binary-operator-reducer.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BINARY_OPERATOR_REDUCER_H_
-#define V8_COMPILER_BINARY_OPERATOR_REDUCER_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class Graph;
-class MachineOperatorBuilder;
-class Operator;
-
-
-// Performs strength reduction on nodes that have common operators.
-class BinaryOperatorReducer final : public AdvancedReducer {
- public:
- BinaryOperatorReducer(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine);
- ~BinaryOperatorReducer() final {}
-
- Reduction Reduce(Node* node) final;
-
- private:
- Reduction ReduceFloat52Mul(Node* node);
- Reduction ReduceFloat52Div(Node* node);
-
- Reduction Change(Node* node, Operator const* op, Node* a);
-
- Graph* graph() const { return graph_; }
- CommonOperatorBuilder* common() const { return common_; }
- MachineOperatorBuilder* machine() const { return machine_; }
- Node* dead() const { return dead_; }
-
- Graph* const graph_;
- CommonOperatorBuilder* const common_;
- MachineOperatorBuilder* const machine_;
- Node* const dead_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_BINARY_OPERATOR_REDUCER_H_
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.cc b/deps/v8/src/compiler/bytecode-branch-analysis.cc
new file mode 100644
index 0000000000..27699a1b9a
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-branch-analysis.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-branch-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The class contains all of the sites that contain
+// branches to a particular target (bytecode offset).
+class BytecodeBranchInfo final : public ZoneObject {
+ public:
+ explicit BytecodeBranchInfo(Zone* zone)
+ : back_edge_offsets_(zone), fore_edge_offsets_(zone) {}
+
+ void AddBranch(int source_offset, int target_offset);
+
+ // The offsets of bytecodes that refer to this bytecode as
+ // a back-edge predecessor.
+ const ZoneVector<int>* back_edge_offsets() { return &back_edge_offsets_; }
+
+ // The offsets of bytecodes that refer to this bytecode as
+ // a forwards-edge predecessor.
+ const ZoneVector<int>* fore_edge_offsets() { return &fore_edge_offsets_; }
+
+ private:
+ ZoneVector<int> back_edge_offsets_;
+ ZoneVector<int> fore_edge_offsets_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeBranchInfo);
+};
+
+
+void BytecodeBranchInfo::AddBranch(int source_offset, int target_offset) {
+ if (source_offset < target_offset) {
+ fore_edge_offsets_.push_back(source_offset);
+ } else {
+ back_edge_offsets_.push_back(source_offset);
+ }
+}
+
+
+BytecodeBranchAnalysis::BytecodeBranchAnalysis(
+ Handle<BytecodeArray> bytecode_array, Zone* zone)
+ : branch_infos_(zone),
+ bytecode_array_(bytecode_array),
+ reachable_(bytecode_array->length(), zone),
+ zone_(zone) {}
+
+
+void BytecodeBranchAnalysis::Analyze() {
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ bool reachable = true;
+ while (!iterator.done()) {
+ interpreter::Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+ // All bytecode basic blocks are generated to be forward reachable
+ // and may also be backward reachable. Hence if there's a forward
+ // branch targetting here the code becomes reachable.
+ reachable = reachable || forward_branches_target(current_offset);
+ if (reachable) {
+ reachable_.Add(current_offset);
+ if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
+ // Only the branch is recorded, the forward path falls through
+ // and is handled as normal bytecode data flow.
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+ // Unless the branch targets the next bytecode it's not
+ // reachable. If it targets the next bytecode the check at the
+ // start of the loop will set the reachable flag.
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ reachable = false;
+ } else if (interpreter::Bytecodes::IsJumpOrReturn(bytecode)) {
+ DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
+ reachable = false;
+ }
+ }
+ iterator.Advance();
+ }
+}
+
+
+const ZoneVector<int>* BytecodeBranchAnalysis::BackwardBranchesTargetting(
+ int offset) const {
+ auto iterator = branch_infos_.find(offset);
+ if (branch_infos_.end() != iterator) {
+ return iterator->second->back_edge_offsets();
+ } else {
+ return nullptr;
+ }
+}
+
+
+const ZoneVector<int>* BytecodeBranchAnalysis::ForwardBranchesTargetting(
+ int offset) const {
+ auto iterator = branch_infos_.find(offset);
+ if (branch_infos_.end() != iterator) {
+ return iterator->second->fore_edge_offsets();
+ } else {
+ return nullptr;
+ }
+}
+
+
+void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
+ BytecodeBranchInfo* branch_info = nullptr;
+ auto iterator = branch_infos_.find(target_offset);
+ if (branch_infos_.end() == iterator) {
+ branch_info = new (zone()) BytecodeBranchInfo(zone());
+ branch_infos_.insert(std::make_pair(target_offset, branch_info));
+ } else {
+ branch_info = iterator->second;
+ }
+ branch_info->AddBranch(source_offset, target_offset);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.h b/deps/v8/src/compiler/bytecode-branch-analysis.h
new file mode 100644
index 0000000000..0ef33b640c
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-branch-analysis.h
@@ -0,0 +1,79 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
+
+#include "src/bit-vector.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class BytecodeBranchInfo;
+
+// A class for identifying the branch targets and their branch sites
+// within a bytecode array and also identifying which bytecodes are
+// reachable. This information can be used to construct the local
+// control flow logic for high-level IR graphs built from bytecode.
+//
+// NB This class relies on the only backwards branches in bytecode
+// being jumps back to loop headers.
+class BytecodeBranchAnalysis BASE_EMBEDDED {
+ public:
+ BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
+
+ // Analyze the bytecodes to find the branch sites and their
+ // targets. No other methods in this class return valid information
+ // until this has been called.
+ void Analyze();
+
+ // Offsets of bytecodes having a backward branch to the bytecode at |offset|.
+ const ZoneVector<int>* BackwardBranchesTargetting(int offset) const;
+
+ // Offsets of bytecodes having a forward branch to the bytecode at |offset|.
+ const ZoneVector<int>* ForwardBranchesTargetting(int offset) const;
+
+ // Returns true if the bytecode at |offset| is reachable.
+ bool is_reachable(int offset) const { return reachable_.Contains(offset); }
+
+ // Returns true if there are any forward branches to the bytecode at
+ // |offset|.
+ bool forward_branches_target(int offset) const {
+ const ZoneVector<int>* sites = ForwardBranchesTargetting(offset);
+ return sites != nullptr && sites->size() > 0;
+ }
+
+ // Returns true if there are any backward branches to the bytecode
+ // at |offset|.
+ bool backward_branches_target(int offset) const {
+ const ZoneVector<int>* sites = BackwardBranchesTargetting(offset);
+ return sites != nullptr && sites->size() > 0;
+ }
+
+ private:
+ void AddBranch(int origin_offset, int target_offset);
+
+ Zone* zone() const { return zone_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ ZoneMap<int, BytecodeBranchInfo*> branch_infos_;
+ Handle<BytecodeArray> bytecode_array_;
+ BitVector reachable_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index e113833dc1..cf0b6ab438 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -4,16 +4,78 @@
#include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
-#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Helper for generating frame states for before and after a bytecode.
+class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
+ public:
+ FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder,
+ const interpreter::BytecodeArrayIterator& iterator)
+ : builder_(builder),
+ id_after_(BailoutId::None()),
+ added_to_node_(false),
+ output_poke_offset_(0),
+ output_poke_count_(0) {
+ BailoutId id_before(iterator.current_offset());
+ frame_state_before_ = builder_->environment()->Checkpoint(
+ id_before, OutputFrameStateCombine::Ignore());
+ id_after_ = BailoutId(id_before.ToInt() + iterator.current_bytecode_size());
+ }
+
+ ~FrameStateBeforeAndAfter() {
+ DCHECK(added_to_node_);
+ DCHECK(builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
+ output_poke_count_));
+ }
+
+ private:
+ friend class Environment;
+
+ void AddToNode(Node* node, OutputFrameStateCombine combine) {
+ DCHECK(!added_to_node_);
+ int count = OperatorProperties::GetFrameStateInputCount(node->op());
+ DCHECK_LE(count, 2);
+ if (count >= 1) {
+ // Add the frame state for after the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ Node* frame_state_after =
+ builder_->environment()->Checkpoint(id_after_, combine);
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
+ }
+
+ if (count >= 2) {
+ // Add the frame state for before the operation.
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 1)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+ }
+
+ if (!combine.IsOutputIgnored()) {
+ output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
+ output_poke_count_ = node->op()->ValueOutputCount();
+ }
+ added_to_node_ = true;
+ }
+
+ BytecodeGraphBuilder* builder_;
+ Node* frame_state_before_;
+ BailoutId id_after_;
+
+ bool added_to_node_;
+ int output_poke_offset_;
+ int output_poke_count_;
+};
+
+
// Issues:
-// - Need to deal with FrameState / FrameStateBeforeAndAfter / StateValue.
// - Scopes - intimately tied to AST. Need to eval what is needed.
// - Need to resolve closure parameter treatment.
BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
@@ -27,10 +89,13 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
context_(context),
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
- values_(builder->local_zone()) {
+ values_(builder->local_zone()),
+ parameters_state_values_(nullptr),
+ registers_state_values_(nullptr),
+ accumulator_state_values_(nullptr) {
// The layout of values_ is:
//
- // [receiver] [parameters] [registers]
+ // [receiver] [parameters] [registers] [accumulator]
//
// parameter[0] is the receiver (this), parameters 1..N are the
// parameters supplied to the method (arg0..argN-1). The accumulator
@@ -50,7 +115,26 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
values()->insert(values()->end(), register_count, undefined_constant);
// Accumulator
- accumulator_ = undefined_constant;
+ accumulator_base_ = static_cast<int>(values()->size());
+ values()->push_back(undefined_constant);
+}
+
+
+BytecodeGraphBuilder::Environment::Environment(
+ const BytecodeGraphBuilder::Environment* other)
+ : builder_(other->builder_),
+ register_count_(other->register_count_),
+ parameter_count_(other->parameter_count_),
+ context_(other->context_),
+ control_dependency_(other->control_dependency_),
+ effect_dependency_(other->effect_dependency_),
+ values_(other->zone()),
+ parameters_state_values_(nullptr),
+ registers_state_values_(nullptr),
+ accumulator_state_values_(nullptr),
+ register_base_(other->register_base_),
+ accumulator_base_(other->accumulator_base_) {
+ values_ = other->values_;
}
@@ -64,27 +148,75 @@ int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
}
-void BytecodeGraphBuilder::Environment::BindRegister(
- interpreter::Register the_register, Node* node) {
- int values_index = RegisterToValuesIndex(the_register);
- values()->at(values_index) = node;
+Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
+ return values()->at(accumulator_base_);
}
Node* BytecodeGraphBuilder::Environment::LookupRegister(
interpreter::Register the_register) const {
+ if (the_register.is_function_context()) {
+ return builder()->GetFunctionContext();
+ } else if (the_register.is_function_closure()) {
+ return builder()->GetFunctionClosure();
+ } else if (the_register.is_new_target()) {
+ return builder()->GetNewTarget();
+ } else {
+ int values_index = RegisterToValuesIndex(the_register);
+ return values()->at(values_index);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::ExchangeRegisters(
+ interpreter::Register reg0, interpreter::Register reg1) {
+ int reg0_index = RegisterToValuesIndex(reg0);
+ int reg1_index = RegisterToValuesIndex(reg1);
+ Node* saved_reg0_value = values()->at(reg0_index);
+ values()->at(reg0_index) = values()->at(reg1_index);
+ values()->at(reg1_index) = saved_reg0_value;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindAccumulator(
+ Node* node, FrameStateBeforeAndAfter* states) {
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(0));
+ }
+ values()->at(accumulator_base_) = node;
+}
+
+
+void BytecodeGraphBuilder::Environment::BindRegister(
+ interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states) {
int values_index = RegisterToValuesIndex(the_register);
- return values()->at(values_index);
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
+ values_index));
+ }
+ values()->at(values_index) = node;
}
-void BytecodeGraphBuilder::Environment::BindAccumulator(Node* node) {
- accumulator_ = node;
+void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
+ interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states) {
+ int values_index = RegisterToValuesIndex(first_reg);
+ if (states) {
+ states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
+ values_index));
+ }
+ for (int i = 0; i < node->op()->ValueOutputCount(); i++) {
+ values()->at(values_index + i) =
+ builder()->NewNode(common()->Projection(i), node);
+ }
}
-Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
- return accumulator_;
+void BytecodeGraphBuilder::Environment::RecordAfterState(
+ Node* node, FrameStateBeforeAndAfter* states) {
+ states->AddToNode(node, OutputFrameStateCombine::Ignore());
}
@@ -98,24 +230,188 @@ void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
}
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForLoop() {
+ PrepareForLoop();
+ return new (zone()) Environment(this);
+}
+
+
+BytecodeGraphBuilder::Environment*
+BytecodeGraphBuilder::Environment::CopyForConditional() const {
+ return new (zone()) Environment(this);
+}
+
+
+void BytecodeGraphBuilder::Environment::Merge(
+ BytecodeGraphBuilder::Environment* other) {
+ // Nothing to do if the other environment is dead.
+ if (other->IsMarkedAsUnreachable()) {
+ return;
+ }
+
+ // Create a merge of the control dependencies of both environments and update
+ // the current environment's control dependency accordingly.
+ Node* control = builder()->MergeControl(GetControlDependency(),
+ other->GetControlDependency());
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect dependencies of both environments and update
+ // the current environment's effect dependency accordingly.
+ Node* effect = builder()->MergeEffect(GetEffectDependency(),
+ other->GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Introduce Phi nodes for values that have differing input at merge points,
+ // potentially extending an existing Phi node if possible.
+ context_ = builder()->MergeValue(context_, other->context_, control);
+ for (size_t i = 0; i < values_.size(); i++) {
+ values_[i] = builder()->MergeValue(values_[i], other->values_[i], control);
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+ // Create a control node for the loop header.
+ Node* control = builder()->NewLoop();
+
+ // Create a Phi for external effects.
+ Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
+ UpdateEffectDependency(effect);
+
+ // Assume everything in the loop is updated.
+ context_ = builder()->NewPhi(1, context_, control);
+ int size = static_cast<int>(values()->size());
+ for (int i = 0; i < size; i++) {
+ values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+ }
+
+ // Connect to the loop end.
+ Node* terminate = builder()->graph()->NewNode(
+ builder()->common()->Terminate(), effect, control);
+ builder()->exit_controls_.push_back(terminate);
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
+ Node** state_values, int offset, int count) {
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return false;
+ }
+ if (*state_values == nullptr) {
+ return true;
+ }
+ DCHECK_EQ((*state_values)->InputCount(), count);
+ DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ for (int i = 0; i < count; i++) {
+ if ((*state_values)->InputAt(i) != env_values[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+ int offset,
+ int count) {
+ if (StateValuesRequireUpdate(state_values, offset, count)) {
+ const Operator* op = common()->StateValues(count);
+ (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+ }
+}
+
+
+Node* BytecodeGraphBuilder::Environment::Checkpoint(
+ BailoutId bailout_id, OutputFrameStateCombine combine) {
+ if (!builder()->info()->is_deoptimization_enabled()) {
+ return builder()->jsgraph()->EmptyFrameState();
+ }
+
+ // TODO(rmcilroy): Consider using StateValuesCache for some state values.
+ UpdateStateValues(&parameters_state_values_, 0, parameter_count());
+ UpdateStateValues(&registers_state_values_, register_base(),
+ register_count());
+ UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+
+ const Operator* op = common()->FrameState(
+ bailout_id, combine, builder()->frame_state_function_info());
+ Node* result = graph()->NewNode(
+ op, parameters_state_values_, registers_state_values_,
+ accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
+ builder()->graph()->start());
+
+ return result;
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
+ Node** state_values, int offset, int count, int output_poke_start,
+ int output_poke_end) {
+ DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
+ for (int i = 0; i < count; i++, offset++) {
+ if (offset < output_poke_start || offset >= output_poke_end) {
+ if ((*state_values)->InputAt(i) != values()->at(offset)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+
+bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
+ int output_poke_offset, int output_poke_count) {
+ // Poke offset is relative to the top of the stack (i.e., the accumulator).
+ int output_poke_start = accumulator_base() - output_poke_offset;
+ int output_poke_end = output_poke_start + output_poke_count;
+ return StateValuesAreUpToDate(&parameters_state_values_, 0, parameter_count(),
+ output_poke_start, output_poke_end) &&
+ StateValuesAreUpToDate(&registers_state_values_, register_base(),
+ register_count(), output_poke_start,
+ output_poke_end) &&
+ StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
+ 1, output_poke_start, output_poke_end);
+}
+
+
BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
CompilationInfo* compilation_info,
JSGraph* jsgraph)
: local_zone_(local_zone),
info_(compilation_info),
jsgraph_(jsgraph),
+ bytecode_array_(handle(info()->shared_info()->bytecode_array())),
+ frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kInterpretedFunction,
+ bytecode_array()->parameter_count(),
+ bytecode_array()->register_count(), info()->shared_info(),
+ CALL_MAINTAINS_NATIVE_CONTEXT)),
+ merge_environments_(local_zone),
+ loop_header_environments_(local_zone),
input_buffer_size_(0),
input_buffer_(nullptr),
- exit_controls_(local_zone) {
- bytecode_array_ = handle(info()->shared_info()->bytecode_array());
+ exit_controls_(local_zone) {}
+
+
+Node* BytecodeGraphBuilder::GetNewTarget() {
+ if (!new_target_.is_set()) {
+ int params = bytecode_array()->parameter_count();
+ int index = Linkage::GetJSCallNewTargetParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%new.target");
+ Node* node = NewNode(op, graph()->start());
+ new_target_.set(node);
+ }
+ return new_target_.get();
}
Node* BytecodeGraphBuilder::GetFunctionContext() {
if (!function_context_.is_set()) {
- // Parameter (arity + 1) is special for the outer context of the function
- const Operator* op =
- common()->Parameter(bytecode_array()->parameter_count(), "%context");
+ int params = bytecode_array()->parameter_count();
+ int index = Linkage::GetJSCallContextParamIndex(params);
+ const Operator* op = common()->Parameter(index, "%context");
Node* node = NewNode(op, graph()->start());
function_context_.set(node);
}
@@ -123,13 +419,72 @@ Node* BytecodeGraphBuilder::GetFunctionContext() {
}
+Node* BytecodeGraphBuilder::GetFunctionClosure() {
+ if (!function_closure_.is_set()) {
+ int index = Linkage::kJSCallClosureParamIndex;
+ const Operator* op = common()->Parameter(index, "%closure");
+ Node* node = NewNode(op, graph()->start());
+ function_closure_.set(node);
+ }
+ return function_closure_.get();
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
+ return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadImmutableObjectField(Node* object,
+ int offset) {
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
+ object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
+ graph()->start(), graph()->start());
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
+ const Operator* op =
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
+ Node* native_context = NewNode(op, environment()->Context());
+ return NewNode(javascript()->LoadContext(0, index, true), native_context);
+}
+
+
+Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
+ if (!feedback_vector_.is_set()) {
+ Node* closure = GetFunctionClosure();
+ Node* shared = BuildLoadImmutableObjectField(
+ closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector = BuildLoadImmutableObjectField(
+ shared, SharedFunctionInfo::kFeedbackVectorOffset);
+ feedback_vector_.set(vector);
+ }
+ return feedback_vector_.get();
+}
+
+
+VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
+ Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
+ FeedbackVectorSlot slot;
+ if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+ slot = feedback_vector->ToSlot(slot_id);
+ }
+ return VectorSlotPair(feedback_vector, slot);
+}
+
+
bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
// Set up the basic structure of the graph. Outputs for {Start} are
// the formal parameters (including the receiver) plus context and
// closure.
- // The additional count items are for the context and closure.
- int actual_parameter_count = bytecode_array()->parameter_count() + 2;
+ // Set up the basic structure of the graph. Outputs for {Start} are the formal
+ // parameters (including the receiver) plus new target, number of arguments,
+ // context and closure.
+ int actual_parameter_count = bytecode_array()->parameter_count() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
Environment env(this, bytecode_array()->register_count(),
@@ -137,13 +492,7 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
GetFunctionContext());
set_environment(&env);
- // Build function context only if there are context allocated variables.
- if (info()->num_heap_slots() > 0) {
- UNIMPLEMENTED(); // TODO(oth): Write ast-graph-builder equivalent.
- } else {
- // Simply use the outer function context in building the graph.
- CreateGraphBody(stack_check);
- }
+ CreateGraphBody(stack_check);
// Finish the basic structure of the graph.
DCHECK_NE(0u, exit_controls_.size());
@@ -159,23 +508,41 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
// TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
// object setup, this function variable if used, tracing hooks.
+
+ if (stack_check) {
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareEntryFrameState(node);
+ }
+
VisitBytecodes();
}
void BytecodeGraphBuilder::VisitBytecodes() {
+ BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
+ analysis.Analyze();
+ set_branch_analysis(&analysis);
interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ set_bytecode_iterator(&iterator);
while (!iterator.done()) {
- switch (iterator.current_bytecode()) {
+ int current_offset = iterator.current_offset();
+ if (analysis.is_reachable(current_offset)) {
+ MergeEnvironmentsOfForwardBranches(current_offset);
+ BuildLoopHeaderForBackwardBranches(current_offset);
+
+ switch (iterator.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(iterator); \
break;
- BYTECODE_LIST(BYTECODE_CASE)
+ BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CODE
+ }
}
iterator.Advance();
}
+ set_branch_analysis(nullptr);
+ set_bytecode_iterator(nullptr);
}
@@ -256,547 +623,1093 @@ void BytecodeGraphBuilder::VisitStar(
}
+void BytecodeGraphBuilder::VisitMov(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->BindRegister(iterator.GetRegisterOperand(1), value);
+}
+
+
+void BytecodeGraphBuilder::VisitExchange(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
+ iterator.GetRegisterOperand(1));
+}
+
+
+void BytecodeGraphBuilder::VisitExchangeWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
+ iterator.GetRegisterOperand(1));
+}
+
+
+void BytecodeGraphBuilder::BuildLoadGlobal(
+ const interpreter::BytecodeArrayIterator& iterator,
+ TypeofMode typeof_mode) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+
+ const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
+ Node* node = NewNode(op, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+}
+
+
+void BytecodeGraphBuilder::BuildStoreGlobal(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+ Node* value = environment()->LookupAccumulator();
+
+ const Operator* op =
+ javascript()->StoreGlobal(language_mode(), name, feedback);
+ Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
}
void BytecodeGraphBuilder::VisitStaGlobalSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildStoreGlobal(iterator);
}
void BytecodeGraphBuilder::VisitStaGlobalStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildStoreGlobal(iterator);
}
void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildStoreGlobal(iterator);
}
void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildStoreGlobal(iterator);
}
void BytecodeGraphBuilder::VisitLdaContextSlot(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ // TODO(mythria): LoadContextSlots are unrolled by the required depth when
+ // generating bytecode. Hence the value of depth is always 0. Update this
+ // code when the implementation changes.
+ // TODO(mythria): The immutable flag is also set to false. This information
+ // is not available in the bytecode array. Update this code when the
+ // implementation changes.
+ const Operator* op =
+ javascript()->LoadContext(0, iterator.GetIndexOperand(1), false);
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* node = NewNode(op, context);
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaContextSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaContextSlot(iterator);
}
void BytecodeGraphBuilder::VisitStaContextSlot(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ // TODO(mythria): StoreContextSlots are unrolled by the required depth when
+ // generating bytecode. Hence the value of depth is always 0. Update this
+ // code when the implementation changes.
+ const Operator* op =
+ javascript()->StoreContext(0, iterator.GetIndexOperand(1));
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* value = environment()->LookupAccumulator();
+ NewNode(op, context, value);
}
-void BytecodeGraphBuilder::VisitLoadICSloppy(
+void BytecodeGraphBuilder::VisitStaContextSlotWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ VisitStaContextSlot(iterator);
}
-void BytecodeGraphBuilder::VisitLoadICStrict(
+void BytecodeGraphBuilder::BuildLdaLookupSlot(
+ TypeofMode typeof_mode,
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ Handle<String> name =
+ Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
+ const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
+ Node* value =
+ NewNode(op, BuildLoadFeedbackVector(), environment()->Context());
+ environment()->BindAccumulator(value, &states);
}
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
+void BytecodeGraphBuilder::VisitLdaLookupSlot(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF, iterator);
}
-void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF, iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildStaLookupSlot(
+ LanguageMode language_mode,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* name = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+ Node* language = jsgraph()->Constant(language_mode);
+ const Operator* op = javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
+ Node* store = NewNode(op, value, environment()->Context(), name, language);
+ environment()->BindAccumulator(store, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaLookupSlot(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitLdaLookupSlotInsideTypeof(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildStaLookupSlot(LanguageMode::SLOPPY, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildStaLookupSlot(LanguageMode::STRICT, iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ VisitStaLookupSlotSloppy(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitStaLookupSlotStrict(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildNamedLoad(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildNamedLoad(iterator);
}
void BytecodeGraphBuilder::VisitLoadICSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedLoad(iterator);
}
void BytecodeGraphBuilder::VisitLoadICStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildNamedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildKeyedLoad(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* key = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+
+ const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ environment()->BindAccumulator(node, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedLoad(iterator);
}
void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedLoad(iterator);
}
void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedLoad(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildNamedStore(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Handle<Name> name =
+ Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op =
+ javascript()->StoreNamed(language_mode(), name, feedback);
+ Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
}
void BytecodeGraphBuilder::VisitStoreICSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedStore(iterator);
}
void BytecodeGraphBuilder::VisitStoreICStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildNamedStore(iterator);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
+void BytecodeGraphBuilder::VisitStoreICSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildNamedStore(iterator);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
+void BytecodeGraphBuilder::VisitStoreICStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildNamedStore(iterator);
}
-void BytecodeGraphBuilder::VisitStoreICSloppyWide(
+void BytecodeGraphBuilder::BuildKeyedStore(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* key = environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+
+ const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
+ Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ environment()->RecordAfterState(node, &states);
}
-void BytecodeGraphBuilder::VisitStoreICStrictWide(
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedStore(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedStore(iterator);
}
void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildKeyedStore(iterator);
}
void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildKeyedStore(iterator);
}
void BytecodeGraphBuilder::VisitPushContext(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Node* context = environment()->LookupAccumulator();
+ environment()->BindRegister(iterator.GetRegisterOperand(0), context);
+ environment()->SetContext(context);
}
void BytecodeGraphBuilder::VisitPopContext(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->SetContext(context);
}
void BytecodeGraphBuilder::VisitCreateClosure(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Handle<SharedFunctionInfo> shared_info =
+ Handle<SharedFunctionInfo>::cast(iterator.GetConstantForIndexOperand(0));
+ PretenureFlag tenured =
+ iterator.GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+ const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+ Node* closure = NewNode(op);
+ environment()->BindAccumulator(closure);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateClosureWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ VisitCreateClosure(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateArguments(
+ CreateArgumentsParameters::Type type,
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* op = javascript()->CreateArguments(type, 0);
+ Node* object = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(object, &states);
}
void BytecodeGraphBuilder::VisitCreateMappedArguments(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateArguments(CreateArgumentsParameters::kMappedArguments, iterator);
}
void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateArguments(CreateArgumentsParameters::kUnmappedArguments, iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateLiteral(
+ const Operator* op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* literal = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(literal, &states);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<String> constant_pattern =
+ Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ constant_pattern, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateRegExpLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateRegExpLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<FixedArray> constant_elements =
+ Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralArray(
+ constant_elements, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateArrayLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateArrayLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateArrayLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::BuildCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Handle<FixedArray> constant_properties =
+ Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
+ int literal_index = iterator.GetIndexOperand(1);
+ int literal_flags = iterator.GetImmediateOperand(2);
+ const Operator* op = javascript()->CreateLiteralObject(
+ constant_properties, literal_flags, literal_index);
+ BuildCreateLiteral(op, iterator);
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCreateObjectLiteral(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitCreateObjectLiteralWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildCreateObjectLiteral(iterator);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
+ Node* callee,
+ interpreter::Register receiver,
+ size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(static_cast<int>(arity));
+ all[0] = callee;
+ all[1] = environment()->LookupRegister(receiver);
+ int receiver_index = receiver.index();
+ for (int i = 2; i < static_cast<int>(arity); ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(receiver_index + i - 1));
+ }
+ Node* value = MakeNode(call_op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+
+void BytecodeGraphBuilder::BuildCall(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
+ // register has been loaded with null / undefined explicitly or we are sure it
+ // is not null / undefined.
+ ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+ Node* callee = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ interpreter::Register receiver = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+ VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(3));
+
+ const Operator* call = javascript()->CallFunction(
+ arg_count + 2, language_mode(), feedback, receiver_hint);
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
}
void BytecodeGraphBuilder::VisitCall(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCall(iterator);
}
-void BytecodeGraphBuilder::VisitCallRuntime(
+void BytecodeGraphBuilder::VisitCallWide(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCall(iterator);
}
void BytecodeGraphBuilder::VisitCallJSRuntime(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* callee = BuildLoadNativeContextField(iterator.GetIndexOperand(0));
+ interpreter::Register receiver = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // Create node to perform the JS runtime call.
+ const Operator* call =
+ javascript()->CallFunction(arg_count + 2, language_mode());
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
+ const Operator* call_runtime_op, interpreter::Register first_arg,
+ size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(arity);
+ int first_arg_index = first_arg.index();
+ for (int i = 0; i < static_cast<int>(arity); ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i));
+ }
+ Node* value = MakeNode(call_runtime_op, static_cast<int>(arity), all, false);
+ return value;
+}
+
+
+void BytecodeGraphBuilder::VisitCallRuntime(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Runtime::FunctionId functionId =
+ static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitCallRuntimeForPair(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Runtime::FunctionId functionId =
+ static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+ interpreter::Register first_return = iterator.GetRegisterOperand(3);
+
+ // Create node to perform the runtime call.
+ const Operator* call = javascript()->CallRuntime(functionId, arg_count);
+ Node* return_pair = ProcessCallRuntimeArguments(call, first_arg, arg_count);
+ environment()->BindRegistersToProjections(first_return, return_pair, &states);
+}
+
+
+Node* BytecodeGraphBuilder::ProcessCallNewArguments(
+ const Operator* call_new_op, interpreter::Register callee,
+ interpreter::Register first_arg, size_t arity) {
+ Node** all = info()->zone()->NewArray<Node*>(arity);
+ all[0] = environment()->LookupRegister(callee);
+ int first_arg_index = first_arg.index();
+ for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
+ all[i] = environment()->LookupRegister(
+ interpreter::Register(first_arg_index + i - 1));
+ }
+ // Original constructor is the same as the callee.
+ all[arity - 1] = environment()->LookupRegister(callee);
+ Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
+ return value;
}
void BytecodeGraphBuilder::VisitNew(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ interpreter::Register callee = iterator.GetRegisterOperand(0);
+ interpreter::Register first_arg = iterator.GetRegisterOperand(1);
+ size_t arg_count = iterator.GetCountOperand(2);
+
+ // TODO(turbofan): Pass the feedback here.
+ const Operator* call = javascript()->CallConstruct(
+ static_cast<int>(arg_count) + 2, VectorSlotPair());
+ Node* value = ProcessCallNewArguments(call, callee, first_arg, arg_count + 2);
+ environment()->BindAccumulator(value, &states);
}
void BytecodeGraphBuilder::VisitThrow(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* value = environment()->LookupAccumulator();
+ // TODO(mythria): Change to Runtime::kThrow when we have deoptimization
+ // information support in the interpreter.
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), value);
+ Node* control = NewNode(common()->Throw(), value);
+ environment()->RecordAfterState(control, &states);
+ UpdateControlDependencyToLeaveFunction(control);
}
void BytecodeGraphBuilder::BuildBinaryOp(
const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
Node* node = NewNode(js_op, left, right);
-
- // TODO(oth): Real frame state and environment check pointing.
- int frame_state_count =
- OperatorProperties::GetFrameStateInputCount(node->op());
- for (int i = 0; i < frame_state_count; i++) {
- NodeProperties::ReplaceFrameStateInput(node, i,
- jsgraph()->EmptyFrameState());
- }
- environment()->BindAccumulator(node);
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitAdd(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Add(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Add(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitSub(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Subtract(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Subtract(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitMul(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Multiply(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Multiply(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitDiv(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Divide(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Divide(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitMod(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->Modulus(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->Modulus(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitBitwiseOr(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->BitwiseOr(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseOr(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitBitwiseXor(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->BitwiseXor(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseXor(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitBitwiseAnd(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->BitwiseAnd(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->BitwiseAnd(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitShiftLeft(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->ShiftLeft(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftLeft(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitShiftRight(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->ShiftRight(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftRight(language_mode(), hints), iterator);
}
void BytecodeGraphBuilder::VisitShiftRightLogical(
const interpreter::BytecodeArrayIterator& iterator) {
- BuildBinaryOp(javascript()->ShiftRightLogical(language_mode()), iterator);
+ BinaryOperationHints hints = BinaryOperationHints::Any();
+ BuildBinaryOp(javascript()->ShiftRightLogical(language_mode(), hints),
+ iterator);
}
void BytecodeGraphBuilder::VisitInc(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* js_op =
+ javascript()->Add(language_mode(), BinaryOperationHints::Any());
+ Node* node = NewNode(js_op, environment()->LookupAccumulator(),
+ jsgraph()->OneConstant());
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitDec(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ const Operator* js_op =
+ javascript()->Subtract(language_mode(), BinaryOperationHints::Any());
+ Node* node = NewNode(js_op, environment()->LookupAccumulator(),
+ jsgraph()->OneConstant());
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitLogicalNot(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ environment()->LookupAccumulator());
+ Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ environment()->BindAccumulator(node);
}
void BytecodeGraphBuilder::VisitTypeOf(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Node* node =
+ NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::BuildDelete(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* key = environment()->LookupAccumulator();
+ Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* node =
+ NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitDeletePropertyStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_strict(language_mode()));
+ BuildDelete(iterator);
}
void BytecodeGraphBuilder::VisitDeletePropertySloppy(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ DCHECK(is_sloppy(language_mode()));
+ BuildDelete(iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDeleteLookupSlot(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* name = environment()->LookupAccumulator();
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ Node* result = NewNode(op, environment()->Context(), name);
+ environment()->BindAccumulator(result, &states);
+}
+
+
+void BytecodeGraphBuilder::BuildCompareOp(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+ Node* node = NewNode(js_op, left, right);
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitTestEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->Equal(), iterator);
}
void BytecodeGraphBuilder::VisitTestNotEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->NotEqual(), iterator);
}
void BytecodeGraphBuilder::VisitTestEqualStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->StrictEqual(), iterator);
}
void BytecodeGraphBuilder::VisitTestNotEqualStrict(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->StrictNotEqual(), iterator);
}
void BytecodeGraphBuilder::VisitTestLessThan(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->LessThan(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestGreaterThan(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->GreaterThan(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->LessThanOrEqual(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->GreaterThanOrEqual(language_mode()), iterator);
}
void BytecodeGraphBuilder::VisitTestIn(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->HasProperty(), iterator);
}
void BytecodeGraphBuilder::VisitTestInstanceOf(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCompareOp(javascript()->InstanceOf(), iterator);
}
-void BytecodeGraphBuilder::VisitToBoolean(
- const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+void BytecodeGraphBuilder::BuildCastOperator(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* node = NewNode(js_op, environment()->LookupAccumulator());
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitToName(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCastOperator(javascript()->ToName(), iterator);
}
-void BytecodeGraphBuilder::VisitToNumber(
+void BytecodeGraphBuilder::VisitToObject(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCastOperator(javascript()->ToObject(), iterator);
}
-void BytecodeGraphBuilder::VisitToObject(
+void BytecodeGraphBuilder::VisitToNumber(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildCastOperator(javascript()->ToNumber(), iterator);
}
void BytecodeGraphBuilder::VisitJump(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJump();
}
void BytecodeGraphBuilder::VisitJumpConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJump();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJump();
}
void BytecodeGraphBuilder::VisitJumpIfTrue(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::VisitJumpIfFalse(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::VisitJumpIfNull(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->NullConstant());
}
void BytecodeGraphBuilder::VisitJumpIfNullConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfNullConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
}
void BytecodeGraphBuilder::VisitJumpIfUndefined(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
@@ -805,24 +1718,156 @@ void BytecodeGraphBuilder::VisitReturn(
Node* control =
NewNode(common()->Return(), environment()->LookupAccumulator());
UpdateControlDependencyToLeaveFunction(control);
+ set_environment(nullptr);
}
void BytecodeGraphBuilder::VisitForInPrepare(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ Node* prepare = nullptr;
+ {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* receiver = environment()->LookupAccumulator();
+ prepare = NewNode(javascript()->ForInPrepare(), receiver);
+ environment()->RecordAfterState(prepare, &states);
+ }
+ // Project cache_type, cache_array, cache_length into register
+ // operands 1, 2, 3.
+ for (int i = 0; i < 3; i++) {
+ environment()->BindRegister(iterator.GetRegisterOperand(i),
+ NewNode(common()->Projection(i), prepare));
+ }
}
-void BytecodeGraphBuilder::VisitForInNext(
+void BytecodeGraphBuilder::VisitForInDone(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* cache_length =
+ environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
+ environment()->BindAccumulator(exit_cond, &states);
}
-void BytecodeGraphBuilder::VisitForInDone(
+void BytecodeGraphBuilder::VisitForInNext(
const interpreter::BytecodeArrayIterator& iterator) {
- UNIMPLEMENTED();
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* receiver =
+ environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* cache_type =
+ environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ Node* cache_array =
+ environment()->LookupRegister(iterator.GetRegisterOperand(2));
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(3));
+ Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
+ cache_type, index);
+ environment()->BindAccumulator(value, &states);
+}
+
+
+void BytecodeGraphBuilder::VisitForInStep(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ FrameStateBeforeAndAfter states(this, iterator);
+ Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ index = NewNode(javascript()->ForInStep(), index);
+ environment()->BindAccumulator(index, &states);
+}
+
+
+void BytecodeGraphBuilder::MergeEnvironmentsOfBackwardBranches(
+ int source_offset, int target_offset) {
+ DCHECK_GE(source_offset, target_offset);
+ const ZoneVector<int>* branch_sites =
+ branch_analysis()->BackwardBranchesTargetting(target_offset);
+ if (branch_sites->back() == source_offset) {
+ // The set of back branches is complete, merge them.
+ DCHECK_GE(branch_sites->at(0), target_offset);
+ Environment* merged = merge_environments_[branch_sites->at(0)];
+ for (size_t i = 1; i < branch_sites->size(); i++) {
+ DCHECK_GE(branch_sites->at(i), target_offset);
+ merged->Merge(merge_environments_[branch_sites->at(i)]);
+ }
+ // And now merge with loop header environment created when loop
+ // header was visited.
+ loop_header_environments_[target_offset]->Merge(merged);
+ }
+}
+
+
+void BytecodeGraphBuilder::MergeEnvironmentsOfForwardBranches(
+ int source_offset) {
+ if (branch_analysis()->forward_branches_target(source_offset)) {
+ // Merge environments of branches that reach this bytecode.
+ auto branch_sites =
+ branch_analysis()->ForwardBranchesTargetting(source_offset);
+ DCHECK_LT(branch_sites->at(0), source_offset);
+ Environment* merged = merge_environments_[branch_sites->at(0)];
+ for (size_t i = 1; i < branch_sites->size(); i++) {
+ DCHECK_LT(branch_sites->at(i), source_offset);
+ merged->Merge(merge_environments_[branch_sites->at(i)]);
+ }
+ if (environment()) {
+ merged->Merge(environment());
+ }
+ set_environment(merged);
+ }
+}
+
+
+void BytecodeGraphBuilder::BuildLoopHeaderForBackwardBranches(
+ int source_offset) {
+ if (branch_analysis()->backward_branches_target(source_offset)) {
+ // Add loop header and store a copy so we can connect merged back
+ // edge inputs to the loop header.
+ loop_header_environments_[source_offset] = environment()->CopyForLoop();
+ }
+}
+
+
+void BytecodeGraphBuilder::BuildJump(int source_offset, int target_offset) {
+ DCHECK_NULL(merge_environments_[source_offset]);
+ merge_environments_[source_offset] = environment();
+ if (source_offset >= target_offset) {
+ MergeEnvironmentsOfBackwardBranches(source_offset, target_offset);
+ }
+ set_environment(nullptr);
+}
+
+
+void BytecodeGraphBuilder::BuildJump() {
+ int source_offset = bytecode_iterator()->current_offset();
+ int target_offset = bytecode_iterator()->GetJumpTargetOffset();
+ BuildJump(source_offset, target_offset);
+}
+
+
+void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
+ int source_offset = bytecode_iterator()->current_offset();
+ NewBranch(condition);
+ Environment* if_false_environment = environment()->CopyForConditional();
+ NewIfTrue();
+ BuildJump(source_offset, bytecode_iterator()->GetJumpTargetOffset());
+ set_environment(if_false_environment);
+ NewIfFalse();
+}
+
+
+void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition =
+ NewNode(javascript()->StrictEqual(), accumulator, comperand);
+ BuildConditionalJump(condition);
+}
+
+
+void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comperand) {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* to_boolean =
+ NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
+ Node* condition = NewNode(javascript()->StrictEqual(), to_boolean, comperand);
+ BuildConditionalJump(condition);
}
@@ -836,6 +1881,16 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
+void BytecodeGraphBuilder::PrepareEntryFrameState(Node* node) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(
+ node, 0, environment()->Checkpoint(BailoutId(0),
+ OutputFrameStateCombine::Ignore()));
+}
+
+
Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
Node** value_inputs, bool incomplete) {
DCHECK_EQ(op->ValueInputCount(), value_input_count);
@@ -848,7 +1903,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK_LT(op->ControlInputCount(), 2);
DCHECK_LT(op->EffectInputCount(), 2);
- Node* result = NULL;
+ Node* result = nullptr;
if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
@@ -898,6 +1953,25 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
}
+Node* BytecodeGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kTagged, count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
+Node* BytecodeGraphBuilder::NewEffectPhi(int count, Node* input,
+ Node* control) {
+ const Operator* phi_op = common()->EffectPhi(count);
+ Node** buffer = EnsureInputBufferSize(count + 1);
+ MemsetPointer(buffer, input, count);
+ buffer[count] = control;
+ return graph()->NewNode(phi_op, count + 1, buffer, true);
+}
+
+
Node* BytecodeGraphBuilder::MergeControl(Node* control, Node* other) {
int inputs = control->op()->ControlInputCount() + 1;
if (control->opcode() == IrOpcode::kLoop) {
@@ -920,6 +1994,41 @@ Node* BytecodeGraphBuilder::MergeControl(Node* control, Node* other) {
}
+Node* BytecodeGraphBuilder::MergeEffect(Node* value, Node* other,
+ Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(value, common()->EffectPhi(inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewEffectPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
+Node* BytecodeGraphBuilder::MergeValue(Node* value, Node* other,
+ Node* control) {
+ int inputs = control->op()->ControlInputCount();
+ if (value->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(value) == control) {
+ // Phi already exists, add input.
+ value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(
+ value, common()->Phi(MachineRepresentation::kTagged, inputs));
+ } else if (value != other) {
+ // Phi does not exist yet, introduce one.
+ value = NewPhi(inputs, value, control);
+ value->ReplaceInput(inputs - 1, other);
+ }
+ return value;
+}
+
+
void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
if (environment()->IsMarkedAsUnreachable()) return;
environment()->MarkAsUnreachable();
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 4e479ba3e6..94a278c3cf 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
#include "src/compiler.h"
+#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/js-graph.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
@@ -28,21 +29,43 @@ class BytecodeGraphBuilder {
private:
class Environment;
+ class FrameStateBeforeAndAfter;
void CreateGraphBody(bool stack_check);
void VisitBytecodes();
Node* LoadAccumulator(Node* value);
+ // Get or create the node that represents the outer function closure.
+ Node* GetFunctionClosure();
+
+ // Get or create the node that represents the outer function context.
Node* GetFunctionContext();
+ // Get or create the node that represents the incoming new target value.
+ Node* GetNewTarget();
+
+ // Builder for accessing a (potentially immutable) object field.
+ Node* BuildLoadObjectField(Node* object, int offset);
+ Node* BuildLoadImmutableObjectField(Node* object, int offset);
+
+ // Builder for accessing type feedback vector.
+ Node* BuildLoadFeedbackVector();
+
+ // Builder for loading the a native context field.
+ Node* BuildLoadNativeContextField(int index);
+
+ // Helper function for creating a pair containing type feedback vector and
+ // a feedback slot.
+ VectorSlotPair CreateVectorSlotPair(int slot_id);
+
void set_environment(Environment* env) { environment_ = env; }
const Environment* environment() const { return environment_; }
Environment* environment() { return environment_; }
// Node creation helpers
Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
}
Node* NewNode(const Operator* op, Node* n1) {
@@ -55,17 +78,98 @@ class BytecodeGraphBuilder {
return MakeNode(op, arraysize(buffer), buffer, false);
}
- Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
- bool incomplete);
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
+ }
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* effect, Node* other_effect, Node* control);
+ Node* MergeValue(Node* value, Node* other_value, Node* control);
- Node** EnsureInputBufferSize(int size);
+ // The main node creation chokepoint. Adds context, frame state, effect,
+ // and control dependencies depending on the operator.
+ Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete);
+ // Helper to indicate a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
+ Node** EnsureInputBufferSize(int size);
+
+ Node* ProcessCallArguments(const Operator* call_op, Node* callee,
+ interpreter::Register receiver, size_t arity);
+ Node* ProcessCallNewArguments(const Operator* call_new_op,
+ interpreter::Register callee,
+ interpreter::Register first_arg, size_t arity);
+ Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
+ interpreter::Register first_arg,
+ size_t arity);
+
+ void BuildCreateLiteral(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateRegExpLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateArrayLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateObjectLiteral(
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateArguments(CreateArgumentsParameters::Type type,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildLoadGlobal(const interpreter::BytecodeArrayIterator& iterator,
+ TypeofMode typeof_mode);
+ void BuildStoreGlobal(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildNamedLoad(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildKeyedLoad(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildNamedStore(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildKeyedStore(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildLdaLookupSlot(TypeofMode typeof_mode,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildStaLookupSlot(LanguageMode language_mode,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCall(const interpreter::BytecodeArrayIterator& iterator);
void BuildBinaryOp(const Operator* op,
const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCompareOp(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+ void BuildDelete(const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCastOperator(const Operator* js_op,
+ const interpreter::BytecodeArrayIterator& iterator);
+
+ // Control flow plumbing.
+ void BuildJump(int source_offset, int target_offset);
+ void BuildJump();
+ void BuildConditionalJump(Node* condition);
+ void BuildJumpIfEqual(Node* comperand);
+ void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+
+ // Constructing merge and loop headers.
+ void MergeEnvironmentsOfBackwardBranches(int source_offset,
+ int target_offset);
+ void MergeEnvironmentsOfForwardBranches(int source_offset);
+ void BuildLoopHeaderForBackwardBranches(int source_offset);
+
+ // Attaches a frame state to |node| for the entry to the function.
+ void PrepareEntryFrameState(Node* node);
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
@@ -81,10 +185,30 @@ class BytecodeGraphBuilder {
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
+ const FrameStateFunctionInfo* frame_state_function_info() const {
+ return frame_state_function_info_;
+ }
LanguageMode language_mode() const {
- // TODO(oth): need to propagate language mode through
- return LanguageMode::SLOPPY;
+ // TODO(mythria): Don't rely on parse information to get language mode.
+ return info()->language_mode();
+ }
+
+ const interpreter::BytecodeArrayIterator* bytecode_iterator() const {
+ return bytecode_iterator_;
+ }
+
+ void set_bytecode_iterator(
+ const interpreter::BytecodeArrayIterator* bytecode_iterator) {
+ bytecode_iterator_ = bytecode_iterator;
+ }
+
+ const BytecodeBranchAnalysis* branch_analysis() const {
+ return branch_analysis_;
+ }
+
+ void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
+ branch_analysis_ = branch_analysis;
}
#define DECLARE_VISIT_BYTECODE(name, ...) \
@@ -96,14 +220,31 @@ class BytecodeGraphBuilder {
CompilationInfo* info_;
JSGraph* jsgraph_;
Handle<BytecodeArray> bytecode_array_;
+ const FrameStateFunctionInfo* frame_state_function_info_;
+ const interpreter::BytecodeArrayIterator* bytecode_iterator_;
+ const BytecodeBranchAnalysis* branch_analysis_;
Environment* environment_;
+
+ // Merge environments are snapshots of the environment at a particular
+ // bytecode offset to be merged into a later environment.
+ ZoneMap<int, Environment*> merge_environments_;
+
+ // Loop header environments are environments created for bytecodes
+ // where it is known there are back branches, ie a loop header.
+ ZoneMap<int, Environment*> loop_header_environments_;
+
// Temporary storage for building node input lists.
int input_buffer_size_;
Node** input_buffer_;
// Nodes representing values in the activation record.
SetOncePointer<Node> function_context_;
+ SetOncePointer<Node> function_closure_;
+ SetOncePointer<Node> new_target_;
+
+ // Optimization to cache loaded feedback vector.
+ SetOncePointer<Node> feedback_vector_;
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
@@ -120,11 +261,18 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
int parameter_count() const { return parameter_count_; }
int register_count() const { return register_count_; }
- void BindRegister(interpreter::Register the_register, Node* node);
+ Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
- void BindAccumulator(Node* node);
- Node* LookupAccumulator() const;
+ void ExchangeRegisters(interpreter::Register reg0,
+ interpreter::Register reg1);
+
+ void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegister(interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
bool IsMarkedAsUnreachable() const;
void MarkAsUnreachable();
@@ -135,6 +283,14 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
effect_dependency_ = dependency;
}
+ // Preserve a checkpoint of the environment for the IR graph. Any
+ // further mutation of the environment will not affect checkpoints.
+ Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+
+ // Returns true if the state values are up to date with the current
+ // environment.
+ bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
+
// Control dependency tracked by this environment.
Node* GetControlDependency() const { return control_dependency_; }
void UpdateControlDependency(Node* dependency) {
@@ -142,8 +298,20 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
}
Node* Context() const { return context_; }
+ void SetContext(Node* new_context) { context_ = new_context; }
+
+ Environment* CopyForConditional() const;
+ Environment* CopyForLoop();
+ void Merge(Environment* other);
private:
+ explicit Environment(const Environment* copy);
+ void PrepareForLoop();
+ bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
+ int output_poke_start, int output_poke_end);
+ bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
+ void UpdateStateValues(Node** state_values, int offset, int count);
+
int RegisterToValuesIndex(interpreter::Register the_register) const;
Zone* zone() const { return builder_->local_zone(); }
@@ -152,21 +320,23 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
BytecodeGraphBuilder* builder() const { return builder_; }
const NodeVector* values() const { return &values_; }
NodeVector* values() { return &values_; }
- Node* accumulator() { return accumulator_; }
int register_base() const { return register_base_; }
+ int accumulator_base() const { return accumulator_base_; }
BytecodeGraphBuilder* builder_;
int register_count_;
int parameter_count_;
- Node* accumulator_;
Node* context_;
Node* control_dependency_;
Node* effect_dependency_;
NodeVector values_;
+ Node* parameters_state_values_;
+ Node* registers_state_values_;
+ Node* accumulator_state_values_;
int register_base_;
+ int accumulator_base_;
};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 1c91f7c118..44e0bf1672 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -208,7 +208,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
#endif
// The target for C calls is always an address (i.e. machine pointer).
- MachineType target_type = kMachPtr;
+ MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallAddress, // kind
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index 8552cdf792..f791db1fdc 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -4,12 +4,14 @@
#include "src/compiler/change-lowering.h"
+#include "src/address-map.h"
#include "src/code-factory.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -37,6 +39,16 @@ Reduction ChangeLowering::Reduce(Node* node) {
return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
case IrOpcode::kChangeUint32ToTagged:
return ChangeUint32ToTagged(node->InputAt(0), control);
+ case IrOpcode::kLoadField:
+ return LoadField(node);
+ case IrOpcode::kStoreField:
+ return StoreField(node);
+ case IrOpcode::kLoadElement:
+ return LoadElement(node);
+ case IrOpcode::kStoreElement:
+ return StoreElement(node);
+ case IrOpcode::kAllocate:
+ return Allocate(node);
default:
return NoChange();
}
@@ -76,7 +88,8 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
target, context, effect, control);
Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+ machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier)),
heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
return graph()->NewNode(common()->FinishRegion(), heap_number, store);
}
@@ -123,7 +136,7 @@ Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
- return graph()->NewNode(machine()->Load(kMachFloat64), value,
+ return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
HeapNumberValueIndexConstant(), graph()->start(),
control);
}
@@ -138,9 +151,9 @@ Node* ChangeLowering::TestNotSmi(Node* value) {
Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
- return Replace(graph()->NewNode(common()->Select(kMachAnyTagged), value,
- jsgraph()->TrueConstant(),
- jsgraph()->FalseConstant()));
+ return Replace(
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
}
@@ -220,8 +233,8 @@ Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
vbox = AllocateHeapNumberWithValue(value, if_box);
control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
- value =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vsmi, vbox, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, vbox, control);
return Replace(value);
}
@@ -246,8 +259,8 @@ Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
Node* vfalse = graph()->NewNode(common()->Projection(0), add);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
return Replace(phi);
}
@@ -259,7 +272,6 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
return Replace(ChangeSmiToInt32(value));
}
- const MachineType type = (signedness == kSigned) ? kMachInt32 : kMachUint32;
const Operator* op = (signedness == kSigned)
? machine()->ChangeFloat64ToInt32()
: machine()->ChangeFloat64ToUint32();
@@ -279,7 +291,8 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
Node* vfalse = ChangeSmiToInt32(value);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(type, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, merge);
return Replace(phi);
}
@@ -318,7 +331,7 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
const Operator* merge_op = common()->Merge(2);
const Operator* ephi_op = common()->EffectPhi(2);
- const Operator* phi_op = common()->Phi(kMachFloat64, 2);
+ const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);
Node* check1 = TestNotSmi(object);
Node* branch1 =
@@ -375,8 +388,8 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
Node* vfalse = ChangeSmiToFloat64(value);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachFloat64, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
return Replace(phi);
}
@@ -400,13 +413,176 @@ Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
return Replace(phi);
}
+namespace {
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineRepresentation representation,
+ Type* field_type, Type* input_type) {
+ if (field_type->Is(Type::TaggedSigned()) ||
+ input_type->Is(Type::TaggedSigned())) {
+ // Write barriers are only for writes of heap objects.
+ return kNoWriteBarrier;
+ }
+ if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // Write barriers are not necessary when storing true, false, null or
+ // undefined, because these special oddballs are always in the root set.
+ return kNoWriteBarrier;
+ }
+ if (base_is_tagged == kTaggedBase &&
+ representation == MachineRepresentation::kTagged) {
+ if (input_type->IsConstant() &&
+ input_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<HeapObject> input =
+ Handle<HeapObject>::cast(input_type->AsConstant()->Value());
+ if (input->IsMap()) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ Isolate* const isolate = input->GetIsolate();
+ RootIndexMap root_index_map(isolate);
+ int root_index = root_index_map.Lookup(*input);
+ if (root_index != RootIndexMap::kInvalidRootIndex &&
+ isolate->heap()->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
+ }
+ if (field_type->Is(Type::TaggedPointer()) ||
+ input_type->Is(Type::TaggedPointer())) {
+ // Write barriers for heap objects don't need a Smi check.
+ return kPointerWriteBarrier;
+ }
+ // Write barriers are only for writes into heap objects (i.e. tagged base).
+ return kFullWriteBarrier;
+ }
+ return kNoWriteBarrier;
+}
+
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineRepresentation representation,
+ int field_offset, Type* field_type,
+ Type* input_type) {
+ if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
+ input_type);
+}
+
+} // namespace
+
+
+Reduction ChangeLowering::LoadField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::StoreField(Node* node) {
+ const FieldAccess& access = FieldAccessOf(node->op());
+ Type* type = NodeProperties::GetType(node->InputAt(1));
+ WriteBarrierKind kind = ComputeWriteBarrierKind(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.offset, access.type, type);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node,
+ machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), kind)));
+ return Changed(node);
+}
+
+
+Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
+ Node* const key) {
+ Node* index = key;
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = graph()->NewNode(machine()->Word32Shl(), index,
+ jsgraph()->Int32Constant(element_size_shift));
+ }
+ const int fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = graph()->NewNode(machine()->Int32Add(), index,
+ jsgraph()->Int32Constant(fixed_offset));
+ }
+ if (machine()->Is64()) {
+ // TODO(turbofan): This is probably only correct for typed arrays, and only
+ // if the typed arrays are at most 2GiB in size, which happens to match
+ // exactly our current situation.
+ index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+ }
+ return index;
+}
+
+
+Reduction ChangeLowering::LoadElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::StoreElement(Node* node) {
+ const ElementAccess& access = ElementAccessOf(node->op());
+ Type* type = NodeProperties::GetType(node->InputAt(2));
+ node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(),
+ ComputeWriteBarrierKind(access.base_is_tagged,
+ access.machine_type.representation(),
+ access.type, type))));
+ return Changed(node);
+}
+
+
+Reduction ChangeLowering::Allocate(Node* node) {
+ PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+ if (pretenure == NOT_TENURED) {
+ Callable callable = CodeFactory::AllocateInNewSpace(isolate());
+ Node* target = jsgraph()->HeapConstant(callable.code());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ const Operator* op = common()->Call(descriptor);
+ node->InsertInput(graph()->zone(), 0, target);
+ node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, op);
+ } else {
+ DCHECK_EQ(TENURED, pretenure);
+ AllocationSpace space = OLD_SPACE;
+ Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
+ Operator::Properties props = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
+ ExternalReference ref(f, jsgraph()->isolate());
+ int32_t flags = AllocateTargetSpace::encode(space);
+ node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
+ node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ }
+ return Changed(node);
+}
+
+
Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
index ead41b1a00..6d607768d9 100644
--- a/deps/v8/src/compiler/change-lowering.h
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -13,6 +13,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+struct ElementAccess;
class JSGraph;
class Linkage;
class MachineOperatorBuilder;
@@ -49,6 +50,13 @@ class ChangeLowering final : public Reducer {
Signedness signedness);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
+ Reduction LoadField(Node* node);
+ Reduction StoreField(Node* node);
+ Reduction LoadElement(Node* node);
+ Reduction StoreElement(Node* node);
+ Reduction Allocate(Node* node);
+
+ Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.cc b/deps/v8/src/compiler/coalesced-live-ranges.cc
index 44dd336c83..4ac3e2118d 100644
--- a/deps/v8/src/compiler/coalesced-live-ranges.cc
+++ b/deps/v8/src/compiler/coalesced-live-ranges.cc
@@ -27,7 +27,7 @@ LiveRange* LiveRangeConflictIterator::Current() const {
void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
- DCHECK(query_ != nullptr);
+ DCHECK_NOT_NULL(query_);
auto end = intervals_->end();
LifetimePosition q_start = query_->start();
LifetimePosition q_end = query_->end();
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.h b/deps/v8/src/compiler/coalesced-live-ranges.h
index e617c0a251..54bbce2055 100644
--- a/deps/v8/src/compiler/coalesced-live-ranges.h
+++ b/deps/v8/src/compiler/coalesced-live-ranges.h
@@ -89,7 +89,7 @@ class LiveRangeConflictIterator {
}
bool QueryIntersectsAllocatedInterval() const {
- DCHECK(query_ != nullptr);
+ DCHECK_NOT_NULL(query_);
return pos_ != intervals_->end() &&
Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
}
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index b801bdfc70..7295948399 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -128,6 +128,9 @@ class InstructionOperandConverter {
}
Frame* frame() const { return gen_->frame(); }
+ FrameAccessState* frame_access_state() const {
+ return gen_->frame_access_state();
+ }
Isolate* isolate() const { return gen_->isolate(); }
Linkage* linkage() const { return gen_->linkage(); }
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 97f780d1cb..313567ed87 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -34,14 +34,14 @@ class CodeGenerator::JumpTable final : public ZoneObject {
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
- : frame_(frame),
+ : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
linkage_(linkage),
code_(code),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
current_source_position_(SourcePosition::Unknown()),
- masm_(info->isolate(), NULL, 0),
+ masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kYes),
resolver_(this),
safepoints_(code->zone()),
handlers_(code->zone()),
@@ -52,11 +52,13 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
- osr_pc_offset_(-1),
- needs_frame_(frame->GetSpillSlotCount() > 0 || code->ContainsCall()) {
+ osr_pc_offset_(-1) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
+ if (code->ContainsCall()) {
+ frame->MarkNeedsFrame();
+ }
}
@@ -90,6 +92,14 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
inlined_function_count_ = deoptimization_literals_.size();
+ // Define deoptimization literals for all unoptimized code objects of inlined
+ // functions. This ensures unoptimized code is kept alive by optimized code.
+ for (auto& inlined : info->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+ }
+ }
+
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (auto const block : code()->instruction_blocks()) {
@@ -206,8 +216,10 @@ Handle<Code> CodeGenerator::GenerateCode() {
bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
- return code()->InstructionBlockAt(current_block_)->ao_number().IsNext(
- code()->InstructionBlockAt(block)->ao_number());
+ return code()
+ ->InstructionBlockAt(current_block_)
+ ->ao_number()
+ .IsNext(code()->InstructionBlockAt(block)->ao_number());
}
@@ -479,62 +491,84 @@ FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
}
-namespace {
-
-struct OperandAndType {
- InstructionOperand* const operand;
- MachineType const type;
-};
+void CodeGenerator::TranslateStateValueDescriptor(
+ StateValueDescriptor* desc, Translation* translation,
+ InstructionOperandIterator* iter) {
+ if (desc->IsNested()) {
+ translation->BeginCapturedObject(static_cast<int>(desc->size()));
+ for (size_t index = 0; index < desc->fields().size(); index++) {
+ TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
+ }
+ } else if (desc->IsDuplicate()) {
+ translation->DuplicateObject(static_cast<int>(desc->id()));
+ } else {
+ DCHECK(desc->IsPlain());
+ AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
+ desc->type());
+ }
+}
-OperandAndType TypedOperandForFrameState(FrameStateDescriptor* descriptor,
- Instruction* instr,
- size_t frame_state_offset,
- size_t index,
- OutputFrameStateCombine combine) {
- DCHECK(index < descriptor->GetSize(combine));
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput: {
- DCHECK(combine.GetPushCount() <= instr->OutputCount());
- size_t size_without_output =
- descriptor->GetSize(OutputFrameStateCombine::Ignore());
- // If the index is past the existing stack items, return the output.
- if (index >= size_without_output) {
- return {instr->OutputAt(index - size_without_output), kMachAnyTagged};
+void CodeGenerator::TranslateFrameStateDescriptorOperands(
+ FrameStateDescriptor* desc, InstructionOperandIterator* iter,
+ OutputFrameStateCombine combine, Translation* translation) {
+ for (size_t index = 0; index < desc->GetSize(combine); index++) {
+ switch (combine.kind()) {
+ case OutputFrameStateCombine::kPushOutput: {
+ DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+ size_t size_without_output =
+ desc->GetSize(OutputFrameStateCombine::Ignore());
+ // If the index is past the existing stack items in values_.
+ if (index >= size_without_output) {
+ // Materialize the result of the call instruction in this slot.
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - size_without_output),
+ MachineType::AnyTagged());
+ continue;
+ }
+ break;
}
- break;
+ case OutputFrameStateCombine::kPokeAt:
+ // The result of the call should be placed at position
+ // [index_from_top] in the stack (overwriting whatever was
+ // previously there).
+ size_t index_from_top =
+ desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ if (index >= index_from_top &&
+ index < index_from_top + iter->instruction()->OutputCount()) {
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - index_from_top),
+ MachineType::AnyTagged());
+ iter->Advance(); // We do not use this input, but we need to
+ // advace, as the input got replaced.
+ continue;
+ }
+ break;
}
- case OutputFrameStateCombine::kPokeAt:
- size_t index_from_top =
- descriptor->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
- if (index >= index_from_top &&
- index < index_from_top + instr->OutputCount()) {
- return {instr->OutputAt(index - index_from_top), kMachAnyTagged};
- }
- break;
+ StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
+ TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
+ iter);
}
- return {instr->InputAt(frame_state_offset + index),
- descriptor->GetType(index)};
}
-} // namespace
-
void CodeGenerator::BuildTranslationForFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Instruction* instr,
- Translation* translation, size_t frame_state_offset,
- OutputFrameStateCombine state_combine) {
+ FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
+ Translation* translation, OutputFrameStateCombine state_combine) {
// Outer-most state must be added to translation first.
if (descriptor->outer_state() != nullptr) {
- BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
- translation, frame_state_offset,
+ BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
+ translation,
OutputFrameStateCombine::Ignore());
}
- frame_state_offset += descriptor->outer_state()->GetTotalSize();
Handle<SharedFunctionInfo> shared_info;
if (!descriptor->shared_info().ToHandle(&shared_info)) {
- if (!info()->has_shared_info()) return; // Stub with no SharedFunctionInfo.
+ if (!info()->has_shared_info()) {
+ return; // Stub with no SharedFunctionInfo.
+ }
shared_info = info()->shared_info();
}
int shared_info_id = DefineDeoptimizationLiteral(shared_info);
@@ -546,18 +580,25 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
static_cast<unsigned int>(descriptor->GetSize(state_combine) -
(1 + descriptor->parameters_count())));
break;
+ case FrameStateType::kInterpretedFunction:
+ translation->BeginInterpretedFrame(
+ descriptor->bailout_id(), shared_info_id,
+ static_cast<unsigned int>(descriptor->locals_count()));
+ break;
case FrameStateType::kArgumentsAdaptor:
translation->BeginArgumentsAdaptorFrame(
shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
+ case FrameStateType::kConstructStub:
+ translation->BeginConstructStubFrame(
+ shared_info_id,
+ static_cast<unsigned int>(descriptor->parameters_count()));
+ break;
}
- for (size_t i = 0; i < descriptor->GetSize(state_combine); i++) {
- OperandAndType op = TypedOperandForFrameState(
- descriptor, instr, frame_state_offset, i, state_combine);
- AddTranslationForOperand(translation, instr, op.operand, op.type);
- }
+ TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
+ translation);
}
@@ -571,8 +612,9 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
Translation translation(
&translations_, static_cast<int>(descriptor->GetFrameCount()),
static_cast<int>(descriptor->GetJSFrameCount()), zone());
- BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
- frame_state_offset, state_combine);
+ InstructionOperandIterator iter(instr, frame_state_offset);
+ BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
+ state_combine);
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
@@ -588,37 +630,39 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
InstructionOperand* op,
MachineType type) {
if (op->IsStackSlot()) {
- if (type == kMachBool || type == kRepBit) {
+ if (type.representation() == MachineRepresentation::kBit) {
translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
- } else if (type == kMachInt32 || type == kMachInt8 || type == kMachInt16) {
+ } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
+ type == MachineType::Int32()) {
translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
- } else if (type == kMachUint32 || type == kMachUint16 ||
- type == kMachUint8) {
+ } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
+ type == MachineType::Uint32()) {
translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
- } else if ((type & kRepMask) == kRepTagged) {
+ } else if (type.representation() == MachineRepresentation::kTagged) {
translation->StoreStackSlot(LocationOperand::cast(op)->index());
} else {
CHECK(false);
}
} else if (op->IsDoubleStackSlot()) {
- DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
+ DCHECK(IsFloatingPoint(type.representation()));
translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
- if (type == kMachBool || type == kRepBit) {
+ if (type.representation() == MachineRepresentation::kBit) {
translation->StoreBoolRegister(converter.ToRegister(op));
- } else if (type == kMachInt32 || type == kMachInt8 || type == kMachInt16) {
+ } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
+ type == MachineType::Int32()) {
translation->StoreInt32Register(converter.ToRegister(op));
- } else if (type == kMachUint32 || type == kMachUint16 ||
- type == kMachUint8) {
+ } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
+ type == MachineType::Uint32()) {
translation->StoreUint32Register(converter.ToRegister(op));
- } else if ((type & kRepMask) == kRepTagged) {
+ } else if (type.representation() == MachineRepresentation::kTagged) {
translation->StoreRegister(converter.ToRegister(op));
} else {
CHECK(false);
}
} else if (op->IsDoubleRegister()) {
- DCHECK((type & (kRepFloat32 | kRepFloat64)) != 0);
+ DCHECK(IsFloatingPoint(type.representation()));
InstructionOperandConverter converter(this, instr);
translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
} else if (op->IsImmediate()) {
@@ -627,20 +671,23 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- DCHECK(type == kMachInt32 || type == kMachUint32 || type == kRepBit);
+ DCHECK(type == MachineType::Int32() || type == MachineType::Uint32() ||
+ type.representation() == MachineRepresentation::kBit);
constant_object =
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
case Constant::kFloat32:
- DCHECK((type & (kRepFloat32 | kRepTagged)) != 0);
+ DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+ type.representation() == MachineRepresentation::kTagged);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
break;
case Constant::kFloat64:
- DCHECK((type & (kRepFloat64 | kRepTagged)) != 0);
+ DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+ type.representation() == MachineRepresentation::kTagged);
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
- DCHECK((type & kRepMask) == kRepTagged);
+ DCHECK(type.representation() == MachineRepresentation::kTagged);
constant_object = constant.ToHeapObject();
break;
default:
@@ -663,6 +710,20 @@ void CodeGenerator::MarkLazyDeoptSite() {
}
+int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int spill_slots = frame()->GetSpillSlotCount();
+ bool has_frame = descriptor->IsJSFunctionCall() || spill_slots > 0;
+ // Leave the PC on the stack on platforms that have that as part of their ABI
+ int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
+ int sp_slot_delta =
+ has_frame ? (frame()->GetTotalFrameSlotCount() - pc_slots) : 0;
+ // Discard only slots that won't be used by new parameters.
+ sp_slot_delta += stack_param_delta;
+ return sp_slot_delta;
+}
+
+
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index d1545d10b9..70bf81f5af 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -16,6 +16,7 @@ namespace internal {
namespace compiler {
// Forward declarations.
+class FrameAccessState;
class Linkage;
class OutOfLineCode;
@@ -27,6 +28,20 @@ struct BranchInfo {
};
+class InstructionOperandIterator {
+ public:
+ InstructionOperandIterator(Instruction* instr, size_t pos)
+ : instr_(instr), pos_(pos) {}
+
+ Instruction* instruction() const { return instr_; }
+ InstructionOperand* Advance() { return instr_->InputAt(pos_++); }
+
+ private:
+ Instruction* instr_;
+ size_t pos_;
+};
+
+
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
public:
@@ -37,7 +52,8 @@ class CodeGenerator final : public GapResolver::Assembler {
Handle<Code> GenerateCode();
InstructionSequence* code() const { return code_; }
- Frame* frame() const { return frame_; }
+ FrameAccessState* frame_access_state() const { return frame_access_state_; }
+ Frame* frame() const { return frame_access_state_->frame(); }
Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
@@ -94,7 +110,10 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleReturn();
// Generates code to deconstruct a the caller's frame, including arguments.
- void AssembleDeconstructActivationRecord();
+ void AssembleDeconstructActivationRecord(int stack_param_delta);
+
+ // Generates code to manipulate the stack in preparation for a tail call.
+ void AssemblePrepareTailCall(int stack_param_delta);
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
@@ -125,21 +144,33 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
- size_t frame_state_offset);
+ FrameStateDescriptor* GetFrameStateDescriptor(
+ Instruction* instr, size_t frame_access_state_offset);
int BuildTranslation(Instruction* instr, int pc_offset,
- size_t frame_state_offset,
+ size_t frame_access_state_offset,
OutputFrameStateCombine state_combine);
void BuildTranslationForFrameStateDescriptor(
- FrameStateDescriptor* descriptor, Instruction* instr,
- Translation* translation, size_t frame_state_offset,
- OutputFrameStateCombine state_combine);
+ FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
+ Translation* translation, OutputFrameStateCombine state_combine);
+ void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+ Translation* translation,
+ InstructionOperandIterator* iter);
+ void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
+ InstructionOperandIterator* iter,
+ OutputFrameStateCombine combine,
+ Translation* translation);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op, MachineType type);
void AddNopForSmiCodeInlining();
void EnsureSpaceForLazyDeopt();
void MarkLazyDeoptSite();
+ // Converts the delta in the number of stack parameter passed from a tail
+ // caller to the callee into the distance (in pointers) the SP must be
+ // adjusted, taking frame elision and other relevant factors into
+ // consideration.
+ int TailCallFrameStackSlotDelta(int stack_param_delta);
+
// ===========================================================================
struct DeoptimizationState : ZoneObject {
@@ -167,7 +198,7 @@ class CodeGenerator final : public GapResolver::Assembler {
friend class OutOfLineCode;
- Frame* const frame_;
+ FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
CompilationInfo* const info_;
@@ -187,7 +218,6 @@ class CodeGenerator final : public GapResolver::Assembler {
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
- bool needs_frame_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-stub-assembler.cc b/deps/v8/src/compiler/code-stub-assembler.cc
new file mode 100644
index 0000000000..b2a05b64f8
--- /dev/null
+++ b/deps/v8/src/compiler/code-stub-assembler.cc
@@ -0,0 +1,176 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-stub-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Kind kind, const char* name)
+ : raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone),
+ Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
+ CallDescriptor::kNoFlags))),
+ kind_(kind),
+ name_(name),
+ code_generated_(false) {}
+
+
+CodeStubAssembler::~CodeStubAssembler() {}
+
+
+Handle<Code> CodeStubAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ Schedule* schedule = raw_assembler_->Export();
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
+ name_);
+
+ code_generated_ = true;
+ return code;
+}
+
+
+Node* CodeStubAssembler::Int32Constant(int value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+
+Node* CodeStubAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+
+Node* CodeStubAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+
+Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+
+Node* CodeStubAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+
+Node* CodeStubAssembler::Parameter(int value) {
+ return raw_assembler_->Parameter(value);
+}
+
+
+void CodeStubAssembler::Return(Node* value) {
+ return raw_assembler_->Return(value);
+}
+
+
+Node* CodeStubAssembler::SmiShiftBitsConstant() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
+
+Node* CodeStubAssembler::SmiTag(Node* value) {
+ return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
+}
+
+
+Node* CodeStubAssembler::SmiUntag(Node* value) {
+ return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
+}
+
+
+Node* CodeStubAssembler::IntPtrAdd(Node* a, Node* b) {
+ return raw_assembler_->IntPtrAdd(a, b);
+}
+
+
+Node* CodeStubAssembler::IntPtrSub(Node* a, Node* b) {
+ return raw_assembler_->IntPtrSub(a, b);
+}
+
+
+Node* CodeStubAssembler::WordShl(Node* value, int shift) {
+ return raw_assembler_->WordShl(value, Int32Constant(shift));
+}
+
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(), object,
+ IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ return raw_assembler_->CallN(descriptor, code_target, args);
+}
+
+
+Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
+ Node* code_target, Node** args) {
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->CallRuntime1(function_id, arg1, context);
+}
+
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2) {
+ return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+}
+
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
+}
+
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1,
+ Node* arg2) {
+ return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+}
+
+
+// RawMachineAssembler delegate helpers:
+Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
+
+
+Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
+
+
+Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/code-stub-assembler.h b/deps/v8/src/compiler/code-stub-assembler.h
new file mode 100644
index 0000000000..3c4ae05eaa
--- /dev/null
+++ b/deps/v8/src/compiler/code-stub-assembler.h
@@ -0,0 +1,96 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/builtins.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+class CallInterfaceDescriptor;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class Schedule;
+
+class CodeStubAssembler {
+ public:
+ CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor, Code::Kind kind,
+ const char* name);
+ virtual ~CodeStubAssembler();
+
+ Handle<Code> GenerateCode();
+
+ // Constants.
+ Node* Int32Constant(int value);
+ Node* IntPtrConstant(intptr_t value);
+ Node* NumberConstant(double value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
+
+ Node* Parameter(int value);
+ void Return(Node* value);
+
+ // Tag and untag Smi values.
+ Node* SmiTag(Node* value);
+ Node* SmiUntag(Node* value);
+
+ // Basic arithmetic operations.
+ Node* IntPtrAdd(Node* a, Node* b);
+ Node* IntPtrSub(Node* a, Node* b);
+ Node* WordShl(Node* value, int shift);
+
+ // Load a field from an object on the heap.
+ Node* LoadObjectField(Node* object, int offset);
+
+ // Call runtime function.
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2);
+
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2);
+
+ private:
+ friend class CodeStubAssemblerTester;
+
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+
+ Node* SmiShiftBitsConstant();
+
+ // Private helpers which delegate to RawMachineAssembler.
+ Graph* graph();
+ Isolate* isolate();
+ Zone* zone();
+
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+ Code::Kind kind_;
+ const char* name_;
+ bool code_generated_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index e7f7436a0b..a0ae6e8ad7 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -16,6 +16,11 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
}
+Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
+ return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
+}
+
+
void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
int32_constants_.GetCachedNodes(nodes);
int64_constants_.GetCachedNodes(nodes);
@@ -23,6 +28,7 @@ void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
float64_constants_.GetCachedNodes(nodes);
external_constants_.GetCachedNodes(nodes);
number_constants_.GetCachedNodes(nodes);
+ heap_constants_.GetCachedNodes(nodes);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index b0100aaac6..720bc1531d 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -12,6 +12,9 @@ namespace internal {
// Forward declarations.
class ExternalReference;
+class HeapObject;
+template <typename>
+class Handle;
namespace compiler {
@@ -47,6 +50,8 @@ class CommonNodeCache final {
return number_constants_.Find(zone(), bit_cast<int64_t>(value));
}
+ Node** FindHeapConstant(Handle<HeapObject> value);
+
// Return all nodes from the cache.
void GetCachedNodes(ZoneVector<Node*>* nodes);
@@ -59,7 +64,8 @@ class CommonNodeCache final {
Int64NodeCache float64_constants_;
IntPtrNodeCache external_constants_;
Int64NodeCache number_constants_;
- Zone* zone_;
+ IntPtrNodeCache heap_constants_;
+ Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
};
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index e3c2ecad6e..2334541f8a 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -204,6 +204,8 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
if_false->opcode() == IrOpcode::kIfFalse &&
if_true->InputAt(0) == if_false->InputAt(0)) {
Node* const branch = if_true->InputAt(0);
+ // Check that the branch is not dead already.
+ if (branch->opcode() != IrOpcode::kBranch) return NoChange();
Node* const cond = branch->InputAt(0);
if (cond->opcode() == IrOpcode::kFloat32LessThan) {
Float32BinopMatcher mcond(cond);
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 51f93da3b4..be7730962f 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -36,6 +36,27 @@ BranchHint BranchHintOf(const Operator* const op) {
}
+size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
+
+
+std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return os << "Eager";
+ case DeoptimizeKind::kSoft:
+ return os << "Soft";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+DeoptimizeKind DeoptimizeKindOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+ return OpParameter<DeoptimizeKind>(op);
+}
+
+
size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
@@ -52,7 +73,8 @@ std::ostream& operator<<(std::ostream& os, IfExceptionHint hint) {
bool operator==(SelectParameters const& lhs, SelectParameters const& rhs) {
- return lhs.type() == rhs.type() && lhs.hint() == rhs.hint();
+ return lhs.representation() == rhs.representation() &&
+ lhs.hint() == rhs.hint();
}
@@ -62,12 +84,12 @@ bool operator!=(SelectParameters const& lhs, SelectParameters const& rhs) {
size_t hash_value(SelectParameters const& p) {
- return base::hash_combine(p.type(), p.hint());
+ return base::hash_combine(p.representation(), p.hint());
}
std::ostream& operator<<(std::ostream& os, SelectParameters const& p) {
- return os << p.type() << "|" << p.hint();
+ return os << p.representation() << "|" << p.hint();
}
@@ -83,6 +105,12 @@ size_t ProjectionIndexOf(const Operator* const op) {
}
+MachineRepresentation PhiRepresentationOf(const Operator* const op) {
+ DCHECK_EQ(IrOpcode::kPhi, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
+
int ParameterIndexOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kParameter, op->opcode());
return OpParameter<ParameterInfo>(op).index();
@@ -122,7 +150,6 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
- V(Deoptimize, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
@@ -183,15 +210,15 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
#define CACHED_PHI_LIST(V) \
- V(kMachAnyTagged, 1) \
- V(kMachAnyTagged, 2) \
- V(kMachAnyTagged, 3) \
- V(kMachAnyTagged, 4) \
- V(kMachAnyTagged, 5) \
- V(kMachAnyTagged, 6) \
- V(kMachBool, 2) \
- V(kMachFloat64, 2) \
- V(kMachInt32, 2)
+ V(kTagged, 1) \
+ V(kTagged, 2) \
+ V(kTagged, 3) \
+ V(kTagged, 4) \
+ V(kTagged, 5) \
+ V(kTagged, 6) \
+ V(kBit, 2) \
+ V(kFloat64, 2) \
+ V(kWord32, 2)
#define CACHED_PROJECTION_LIST(V) \
@@ -231,6 +258,18 @@ struct CommonOperatorGlobalCache final {
CACHED_OP_LIST(CACHED)
#undef CACHED
+ template <DeoptimizeKind kKind>
+ struct DeoptimizeOperator final : public Operator1<DeoptimizeKind> {
+ DeoptimizeOperator()
+ : Operator1<DeoptimizeKind>( // --
+ IrOpcode::kDeoptimize, Operator::kNoThrow, // opcode
+ "Deoptimize", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ kKind) {} // parameter
+ };
+ DeoptimizeOperator<DeoptimizeKind::kEager> kDeoptimizeEagerOperator;
+ DeoptimizeOperator<DeoptimizeKind::kSoft> kDeoptimizeSoftOperator;
+
template <IfExceptionHint kCaughtLocally>
struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
IfExceptionOperator()
@@ -321,17 +360,18 @@ struct CommonOperatorGlobalCache final {
CACHED_MERGE_LIST(CACHED_MERGE)
#undef CACHED_MERGE
- template <MachineType kType, int kInputCount>
- struct PhiOperator final : public Operator1<MachineType> {
+ template <MachineRepresentation kRep, int kInputCount>
+ struct PhiOperator final : public Operator1<MachineRepresentation> {
PhiOperator()
- : Operator1<MachineType>( //--
+ : Operator1<MachineRepresentation>( //--
IrOpcode::kPhi, Operator::kPure, // opcode
"Phi", // name
kInputCount, 0, 1, 1, 0, 0, // counts
- kType) {} // parameter
+ kRep) {} // parameter
};
-#define CACHED_PHI(type, input_count) \
- PhiOperator<type, input_count> kPhi##type##input_count##Operator;
+#define CACHED_PHI(rep, input_count) \
+ PhiOperator<MachineRepresentation::rep, input_count> \
+ kPhi##rep##input_count##Operator;
CACHED_PHI_LIST(CACHED_PHI)
#undef CACHED_PHI
@@ -399,7 +439,6 @@ CACHED_OP_LIST(CACHED)
const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
- DCHECK_NE(0u, control_input_count); // Disallow empty ends.
switch (control_input_count) {
#define CACHED_END(input_count) \
case input_count: \
@@ -449,6 +488,18 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
}
+const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind) {
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ return &cache_.kDeoptimizeEagerOperator;
+ case DeoptimizeKind::kSoft:
+ return &cache_.kDeoptimizeSoftOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
switch (hint) {
case IfExceptionHint::kLocallyCaught:
@@ -462,7 +513,6 @@ const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
- DCHECK_GE(control_output_count, 3u); // Disallow trivial switches.
return new (zone()) Operator( // --
IrOpcode::kSwitch, Operator::kKontrol, // opcode
"Switch", // name
@@ -573,22 +623,20 @@ const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
- return new (zone())
- Operator1<float, base::bit_equal_to<float>, base::bit_hash<float>>( // --
- IrOpcode::kFloat32Constant, Operator::kPure, // opcode
- "Float32Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<float>( // --
+ IrOpcode::kFloat32Constant, Operator::kPure, // opcode
+ "Float32Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
- return new (zone()) Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>( // --
- IrOpcode::kFloat64Constant, Operator::kPure, // opcode
- "Float64Constant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<double>( // --
+ IrOpcode::kFloat64Constant, Operator::kPure, // opcode
+ "Float64Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
@@ -603,52 +651,50 @@ const Operator* CommonOperatorBuilder::ExternalConstant(
const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
- return new (zone()) Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>( // --
- IrOpcode::kNumberConstant, Operator::kPure, // opcode
- "NumberConstant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<double>( // --
+ IrOpcode::kNumberConstant, Operator::kPure, // opcode
+ "NumberConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
const Operator* CommonOperatorBuilder::HeapConstant(
const Handle<HeapObject>& value) {
- return new (zone())
- Operator1<Handle<HeapObject>, Handle<HeapObject>::equal_to,
- Handle<HeapObject>::hash>( // --
- IrOpcode::kHeapConstant, Operator::kPure, // opcode
- "HeapConstant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ return new (zone()) Operator1<Handle<HeapObject>>( // --
+ IrOpcode::kHeapConstant, Operator::kPure, // opcode
+ "HeapConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
-const Operator* CommonOperatorBuilder::Select(MachineType type,
+const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
BranchHint hint) {
return new (zone()) Operator1<SelectParameters>( // --
IrOpcode::kSelect, Operator::kPure, // opcode
"Select", // name
3, 0, 0, 1, 0, 0, // counts
- SelectParameters(type, hint)); // parameter
+ SelectParameters(rep, hint)); // parameter
}
-const Operator* CommonOperatorBuilder::Phi(MachineType type,
+const Operator* CommonOperatorBuilder::Phi(MachineRepresentation rep,
int value_input_count) {
DCHECK(value_input_count > 0); // Disallow empty phis.
-#define CACHED_PHI(kType, kValueInputCount) \
- if (kType == type && kValueInputCount == value_input_count) { \
- return &cache_.kPhi##kType##kValueInputCount##Operator; \
+#define CACHED_PHI(kRep, kValueInputCount) \
+ if (MachineRepresentation::kRep == rep && \
+ kValueInputCount == value_input_count) { \
+ return &cache_.kPhi##kRep##kValueInputCount##Operator; \
}
CACHED_PHI_LIST(CACHED_PHI)
#undef CACHED_PHI
// Uncached.
- return new (zone()) Operator1<MachineType>( // --
- IrOpcode::kPhi, Operator::kPure, // opcode
- "Phi", // name
- value_input_count, 0, 1, 1, 0, 0, // counts
- type); // parameter
+ return new (zone()) Operator1<MachineRepresentation>( // --
+ IrOpcode::kPhi, Operator::kPure, // opcode
+ "Phi", // name
+ value_input_count, 0, 1, 1, 0, 0, // counts
+ rep); // parameter
}
@@ -707,6 +753,14 @@ const Operator* CommonOperatorBuilder::StateValues(int arguments) {
}
+const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots, int id) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kObjectState, Operator::kPure, // opcode
+ "ObjectState", // name
+ pointer_slots, 0, 0, 1, 0, 0, id); // counts
+}
+
+
const Operator* CommonOperatorBuilder::TypedStateValues(
const ZoneVector<MachineType>* types) {
return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
@@ -795,7 +849,7 @@ const Operator* CommonOperatorBuilder::Projection(size_t index) {
const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
int size) {
if (op->opcode() == IrOpcode::kPhi) {
- return Phi(OpParameter<MachineType>(op), size);
+ return Phi(PhiRepresentationOf(op), size);
} else if (op->opcode() == IrOpcode::kEffectPhi) {
return EffectPhi(size);
} else if (op->opcode() == IrOpcode::kMerge) {
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 2ef2880f6d..83cb5b2c66 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_COMMON_OPERATOR_H_
#include "src/compiler/frame-states.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -51,6 +51,16 @@ std::ostream& operator<<(std::ostream&, BranchHint);
BranchHint BranchHintOf(const Operator* const);
+// Deoptimize bailout kind.
+enum class DeoptimizeKind : uint8_t { kEager, kSoft };
+
+size_t hash_value(DeoptimizeKind kind);
+
+std::ostream& operator<<(std::ostream&, DeoptimizeKind);
+
+DeoptimizeKind DeoptimizeKindOf(const Operator* const);
+
+
// Prediction whether throw-site is surrounded by any local catch-scope.
enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
@@ -61,15 +71,15 @@ std::ostream& operator<<(std::ostream&, IfExceptionHint);
class SelectParameters final {
public:
- explicit SelectParameters(MachineType type,
+ explicit SelectParameters(MachineRepresentation representation,
BranchHint hint = BranchHint::kNone)
- : type_(type), hint_(hint) {}
+ : representation_(representation), hint_(hint) {}
- MachineType type() const { return type_; }
+ MachineRepresentation representation() const { return representation_; }
BranchHint hint() const { return hint_; }
private:
- const MachineType type_;
+ const MachineRepresentation representation_;
const BranchHint hint_;
};
@@ -85,6 +95,8 @@ SelectParameters const& SelectParametersOf(const Operator* const);
size_t ProjectionIndexOf(const Operator* const);
+MachineRepresentation PhiRepresentationOf(const Operator* const);
+
// The {IrOpcode::kParameter} opcode represents an incoming parameter to the
// function. This class bundles the index and a debug name for such operators.
@@ -124,7 +136,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
- const Operator* Deoptimize();
+ const Operator* Deoptimize(DeoptimizeKind kind);
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
@@ -145,14 +157,16 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* NumberConstant(volatile double);
const Operator* HeapConstant(const Handle<HeapObject>&);
- const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
- const Operator* Phi(MachineType type, int value_input_count);
+ const Operator* Select(MachineRepresentation, BranchHint = BranchHint::kNone);
+ const Operator* Phi(MachineRepresentation representation,
+ int value_input_count);
const Operator* EffectPhi(int effect_input_count);
const Operator* EffectSet(int arguments);
const Operator* Guard(Type* type);
const Operator* BeginRegion();
const Operator* FinishRegion();
const Operator* StateValues(int arguments);
+ const Operator* ObjectState(int pointer_slots, int id);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
OutputFrameStateCombine state_combine,
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index 0c2fa73936..6ff00be596 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -41,8 +41,8 @@ class IfBuilder final : public ControlBuilder {
public:
explicit IfBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- then_environment_(NULL),
- else_environment_(NULL) {}
+ then_environment_(nullptr),
+ else_environment_(nullptr) {}
// Primitive control commands.
void If(Node* condition, BranchHint hint = BranchHint::kNone);
@@ -61,9 +61,9 @@ class LoopBuilder final : public ControlBuilder {
public:
explicit LoopBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- loop_environment_(NULL),
- continue_environment_(NULL),
- break_environment_(NULL) {}
+ loop_environment_(nullptr),
+ continue_environment_(nullptr),
+ break_environment_(nullptr) {}
// Primitive control commands.
void BeginLoop(BitVector* assigned, bool is_osr = false);
@@ -90,9 +90,9 @@ class SwitchBuilder final : public ControlBuilder {
public:
explicit SwitchBuilder(AstGraphBuilder* builder, int case_count)
: ControlBuilder(builder),
- body_environment_(NULL),
- label_environment_(NULL),
- break_environment_(NULL),
+ body_environment_(nullptr),
+ label_environment_(nullptr),
+ break_environment_(nullptr),
body_environments_(case_count, zone()) {}
// Primitive control commands.
@@ -122,7 +122,7 @@ class SwitchBuilder final : public ControlBuilder {
class BlockBuilder final : public ControlBuilder {
public:
explicit BlockBuilder(AstGraphBuilder* builder)
- : ControlBuilder(builder), break_environment_(NULL) {}
+ : ControlBuilder(builder), break_environment_(nullptr) {}
// Primitive control commands.
void BeginBlock();
@@ -145,9 +145,9 @@ class TryCatchBuilder final : public ControlBuilder {
public:
explicit TryCatchBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- catch_environment_(NULL),
- exit_environment_(NULL),
- exception_node_(NULL) {}
+ catch_environment_(nullptr),
+ exit_environment_(nullptr),
+ exception_node_(nullptr) {}
// Primitive control commands.
void BeginTry();
@@ -170,9 +170,9 @@ class TryFinallyBuilder final : public ControlBuilder {
public:
explicit TryFinallyBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
- finally_environment_(NULL),
- token_node_(NULL),
- value_node_(NULL) {}
+ finally_environment_(nullptr),
+ token_node_(nullptr),
+ value_node_(nullptr) {}
// Primitive control commands.
void BeginTry();
diff --git a/deps/v8/src/compiler/control-equivalence.cc b/deps/v8/src/compiler/control-equivalence.cc
index 718de4cb12..af1a11565c 100644
--- a/deps/v8/src/compiler/control-equivalence.cc
+++ b/deps/v8/src/compiler/control-equivalence.cc
@@ -71,7 +71,7 @@ void ControlEquivalence::VisitPost(Node* node, Node* parent_node,
BracketListDelete(blist, node, direction);
// Propagate bracket list up the DFS tree [line:13].
- if (parent_node != NULL) {
+ if (parent_node != nullptr) {
BracketList& parent_blist = GetBracketList(parent_node);
parent_blist.splice(parent_blist.end(), blist);
}
@@ -91,7 +91,7 @@ void ControlEquivalence::VisitBackedge(Node* from, Node* to,
void ControlEquivalence::RunUndirectedDFS(Node* exit) {
ZoneStack<DFSStackEntry> stack(zone_);
- DFSPush(stack, exit, NULL, kInputDirection);
+ DFSPush(stack, exit, nullptr, kInputDirection);
VisitPre(exit);
while (!stack.empty()) { // Undirected depth-first backwards traversal.
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index f562092a8a..e1333052d7 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -49,8 +49,8 @@ struct Diamond {
}
}
- Node* Phi(MachineType machine_type, Node* tv, Node* fv) {
- return graph->NewNode(common->Phi(machine_type, 2), tv, fv, merge);
+ Node* Phi(MachineRepresentation rep, Node* tv, Node* fv) {
+ return graph->NewNode(common->Phi(rep, 2), tv, fv, merge);
}
};
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
new file mode 100644
index 0000000000..df8b65dab2
--- /dev/null
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -0,0 +1,313 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/escape-analysis-reducer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/counters.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysis* escape_analysis,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ escape_analysis_(escape_analysis),
+ zone_(zone),
+ visited_(static_cast<int>(jsgraph->graph()->NodeCount()), zone) {}
+
+
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kLoadField:
+ case IrOpcode::kLoadElement:
+ return ReduceLoad(node);
+ case IrOpcode::kStoreField:
+ case IrOpcode::kStoreElement:
+ return ReduceStore(node);
+ case IrOpcode::kAllocate:
+ return ReduceAllocate(node);
+ case IrOpcode::kFinishRegion:
+ return ReduceFinishRegion(node);
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
+ case IrOpcode::kObjectIsSmi:
+ return ReduceObjectIsSmi(node);
+ default:
+ // TODO(sigurds): Change this to GetFrameStateInputCount once
+ // it is working. For now we use EffectInputCount > 0 to determine
+ // whether a node might have a frame state input.
+ if (node->op()->EffectInputCount() > 0) {
+ return ReduceFrameStateUses(node);
+ }
+ break;
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kLoadField ||
+ node->opcode() == IrOpcode::kLoadElement);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (Node* rep = escape_analysis()->GetReplacement(node)) {
+ visited_.Add(node->id());
+ counters()->turbo_escape_loads_replaced()->Increment();
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+ node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
+ }
+ ReplaceWithValue(node, rep);
+ return Changed(rep);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kStoreField ||
+ node->opcode() == IrOpcode::kStoreElement);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed #%d (%s) from effect chain\n", node->id(),
+ node->op()->mnemonic());
+ }
+ RelaxEffectsAndControls(node);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ if (escape_analysis()->IsVirtual(node)) {
+ RelaxEffectsAndControls(node);
+ counters()->turbo_escape_allocs_replaced()->Increment();
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed allocate #%d from effect chain\n", node->id());
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+ Node* effect = NodeProperties::GetEffectInput(node, 0);
+ if (effect->opcode() == IrOpcode::kBeginRegion) {
+ RelaxEffectsAndControls(effect);
+ RelaxEffectsAndControls(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
+ node->id());
+ PrintF(" %d user(s) of #%d remain(s):", node->UseCount(), node->id());
+ for (Edge edge : node->use_edges()) {
+ PrintF(" #%d", edge.from()->id());
+ }
+ PrintF("\n");
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
+ Node* left = NodeProperties::GetValueInput(node, 0);
+ Node* right = NodeProperties::GetValueInput(node, 1);
+ if (escape_analysis()->IsVirtual(left)) {
+ if (escape_analysis()->IsVirtual(right) &&
+ escape_analysis()->CompareVirtualObjects(left, right)) {
+ ReplaceWithValue(node, jsgraph()->TrueConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with true\n", node->id());
+ }
+ }
+ // Right-hand side is not a virtual object, or a different one.
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with false\n", node->id());
+ }
+ return Replace(node);
+ } else if (escape_analysis()->IsVirtual(right)) {
+ // Left-hand side is not a virtual object.
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ref eq #%d with false\n", node->id());
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ if (escape_analysis()->IsVirtual(input)) {
+ ReplaceWithValue(node, jsgraph()->FalseConstant());
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced ObjectIsSmi #%d with false\n", node->id());
+ }
+ return Replace(node);
+ }
+ return NoChange();
+}
+
+
+Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
+ if (visited_.Contains(node->id())) return NoChange();
+ visited_.Add(node->id());
+ DCHECK_GE(node->op()->EffectInputCount(), 1);
+ bool changed = false;
+ for (int i = 0; i < node->InputCount(); ++i) {
+ Node* input = node->InputAt(i);
+ if (input->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret = ReduceFrameState(input, node, false)) {
+ node->ReplaceInput(i, ret);
+ changed = true;
+ }
+ }
+ }
+ if (changed) {
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceFrameState(Node* node, Node* effect,
+ bool multiple_users) {
+ DCHECK(node->opcode() == IrOpcode::kFrameState);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing FrameState %d\n", node->id());
+ }
+ Node* clone = nullptr;
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Node* ret =
+ input->opcode() == IrOpcode::kStateValues
+ ? ReduceStateValueInputs(input, effect, node->UseCount() > 1)
+ : ReduceStateValueInput(node, i, effect, node->UseCount() > 1);
+ if (ret) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ multiple_users = false; // Don't clone anymore.
+ }
+ NodeProperties::ReplaceValueInput(node, ret, i);
+ }
+ }
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret =
+ ReduceFrameState(outer_frame_state, effect, node->UseCount() > 1)) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ multiple_users = false;
+ }
+ NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+ }
+ }
+ return clone;
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
+ bool multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing StateValue #%d\n", node->id());
+ }
+ DCHECK(node->opcode() == IrOpcode::kStateValues);
+ DCHECK_NOT_NULL(effect);
+ Node* clone = nullptr;
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ Node* ret = nullptr;
+ if (input->opcode() == IrOpcode::kStateValues) {
+ ret = ReduceStateValueInputs(input, effect, multiple_users);
+ } else {
+ ret = ReduceStateValueInput(node, i, effect, multiple_users);
+ }
+ if (ret) {
+ node = ret;
+ DCHECK_NULL(clone);
+ clone = ret;
+ multiple_users = false;
+ }
+ }
+ return clone;
+}
+
+
+// Returns the clone if it duplicated the node, and null otherwise.
+Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
+ Node* effect,
+ bool multiple_users) {
+ Node* input = NodeProperties::GetValueInput(node, node_index);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Reducing State Input #%d (%s)\n", input->id(),
+ input->op()->mnemonic());
+ }
+ Node* clone = nullptr;
+ if (input->opcode() == IrOpcode::kFinishRegion ||
+ input->opcode() == IrOpcode::kAllocate) {
+ if (escape_analysis()->IsVirtual(input)) {
+ if (Node* object_state =
+ escape_analysis()->GetOrCreateObjectState(effect, input)) {
+ if (node->UseCount() > 1 || multiple_users) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Cloning #%d", node->id());
+ }
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" to #%d\n", node->id());
+ }
+ }
+ NodeProperties::ReplaceValueInput(node, object_state, node_index);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Replaced state #%d input #%d with object state #%d\n",
+ node->id(), input->id(), object_state->id());
+ }
+ } else {
+ if (FLAG_trace_turbo_escape) {
+ PrintF("No object state replacement available.\n");
+ }
+ }
+ }
+ }
+ return clone;
+}
+
+
+Counters* EscapeAnalysisReducer::counters() const {
+ return jsgraph_->isolate()->counters();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
new file mode 100644
index 0000000000..1c0da165fb
--- /dev/null
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+#define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+
+#include "src/bit-vector.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/graph-reducer.h"
+
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Counters;
+
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+
+class EscapeAnalysisReducer final : public AdvancedReducer {
+ public:
+ EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
+ EscapeAnalysis* escape_analysis, Zone* zone);
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceLoad(Node* node);
+ Reduction ReduceStore(Node* node);
+ Reduction ReduceAllocate(Node* node);
+ Reduction ReduceFinishRegion(Node* node);
+ Reduction ReduceReferenceEqual(Node* node);
+ Reduction ReduceObjectIsSmi(Node* node);
+ Reduction ReduceFrameStateUses(Node* node);
+ Node* ReduceFrameState(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceStateValueInputs(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
+ bool multiple_users);
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
+ Zone* zone() const { return zone_; }
+ Counters* counters() const;
+
+ JSGraph* const jsgraph_;
+ EscapeAnalysis* escape_analysis_;
+ Zone* const zone_;
+ BitVector visited_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
new file mode 100644
index 0000000000..af0ba6a639
--- /dev/null
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -0,0 +1,1471 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/escape-analysis.h"
+
+#include <limits>
+
+#include "src/base/flags.h"
+#include "src/bootstrapper.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects-inl.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const EscapeAnalysis::Alias EscapeAnalysis::kNotReachable =
+ std::numeric_limits<Alias>::max();
+const EscapeAnalysis::Alias EscapeAnalysis::kUntrackable =
+ std::numeric_limits<Alias>::max() - 1;
+
+
+class VirtualObject : public ZoneObject {
+ public:
+ enum Status { kUntracked = 0, kTracked = 1 };
+ VirtualObject(NodeId id, Zone* zone)
+ : id_(id),
+ status_(kUntracked),
+ fields_(zone),
+ phi_(zone),
+ object_state_(nullptr) {}
+
+ VirtualObject(const VirtualObject& other)
+ : id_(other.id_),
+ status_(other.status_),
+ fields_(other.fields_),
+ phi_(other.phi_),
+ object_state_(other.object_state_) {}
+
+ VirtualObject(NodeId id, Zone* zone, size_t field_number)
+ : id_(id),
+ status_(kTracked),
+ fields_(zone),
+ phi_(zone),
+ object_state_(nullptr) {
+ fields_.resize(field_number);
+ phi_.resize(field_number, false);
+ }
+
+ Node* GetField(size_t offset) {
+ if (offset < fields_.size()) {
+ return fields_[offset];
+ }
+ return nullptr;
+ }
+
+ bool IsCreatedPhi(size_t offset) {
+ if (offset < phi_.size()) {
+ return phi_[offset];
+ }
+ return false;
+ }
+
+ bool SetField(size_t offset, Node* node, bool created_phi = false) {
+ bool changed = fields_[offset] != node || phi_[offset] != created_phi;
+ fields_[offset] = node;
+ phi_[offset] = created_phi;
+ if (changed && FLAG_trace_turbo_escape && node) {
+ PrintF("Setting field %zu of #%d to #%d (%s)\n", offset, id(), node->id(),
+ node->op()->mnemonic());
+ }
+ return changed;
+ }
+ bool IsVirtual() const { return status_ == kTracked; }
+ bool IsTracked() const { return status_ != kUntracked; }
+
+ Node** fields_array() { return &fields_.front(); }
+ size_t field_count() { return fields_.size(); }
+ bool ResizeFields(size_t field_count) {
+ if (field_count != fields_.size()) {
+ fields_.resize(field_count);
+ phi_.resize(field_count);
+ return true;
+ }
+ return false;
+ }
+ bool ClearAllFields() {
+ bool changed = false;
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ if (fields_[i] != nullptr) {
+ fields_[i] = nullptr;
+ changed = true;
+ }
+ phi_[i] = false;
+ }
+ return changed;
+ }
+ bool UpdateFrom(const VirtualObject& other);
+ void SetObjectState(Node* node) { object_state_ = node; }
+ Node* GetObjectState() const { return object_state_; }
+
+ NodeId id() const { return id_; }
+ void id(NodeId id) { id_ = id; }
+
+ private:
+ NodeId id_;
+ Status status_;
+ ZoneVector<Node*> fields_;
+ ZoneVector<bool> phi_;
+ Node* object_state_;
+};
+
+
+bool VirtualObject::UpdateFrom(const VirtualObject& other) {
+ bool changed = status_ != other.status_;
+ status_ = other.status_;
+ if (fields_.size() != other.fields_.size()) {
+ fields_ = other.fields_;
+ return true;
+ }
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ if (fields_[i] != other.fields_[i]) {
+ changed = true;
+ fields_[i] = other.fields_[i];
+ }
+ }
+ return changed;
+}
+
+
+class VirtualState : public ZoneObject {
+ public:
+ VirtualState(Zone* zone, size_t size);
+ VirtualState(const VirtualState& states);
+
+ VirtualObject* VirtualObjectFromAlias(size_t alias);
+ VirtualObject* GetOrCreateTrackedVirtualObject(EscapeAnalysis::Alias alias,
+ NodeId id, Zone* zone);
+ void SetVirtualObject(EscapeAnalysis::Alias alias, VirtualObject* state);
+ void LastChangedAt(Node* node) { last_changed_ = node; }
+ Node* GetLastChanged() { return last_changed_; }
+ bool UpdateFrom(VirtualState* state, Zone* zone);
+ bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
+ CommonOperatorBuilder* common, Node* control);
+ size_t size() const { return info_.size(); }
+
+ private:
+ ZoneVector<VirtualObject*> info_;
+ Node* last_changed_;
+};
+
+
+class MergeCache : public ZoneObject {
+ public:
+ explicit MergeCache(Zone* zone)
+ : states_(zone), objects_(zone), fields_(zone) {
+ states_.reserve(4);
+ objects_.reserve(4);
+ fields_.reserve(4);
+ }
+ ZoneVector<VirtualState*>& states() { return states_; }
+ ZoneVector<VirtualObject*>& objects() { return objects_; }
+ ZoneVector<Node*>& fields() { return fields_; }
+ void Clear() {
+ states_.clear();
+ objects_.clear();
+ fields_.clear();
+ }
+ size_t LoadVirtualObjectsFromStatesFor(EscapeAnalysis::Alias alias);
+ void LoadVirtualObjectsForFieldsFrom(
+ VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases);
+ Node* GetFields(size_t pos);
+
+ private:
+ ZoneVector<VirtualState*> states_;
+ ZoneVector<VirtualObject*> objects_;
+ ZoneVector<Node*> fields_;
+};
+
+
+size_t MergeCache::LoadVirtualObjectsFromStatesFor(
+ EscapeAnalysis::Alias alias) {
+ objects_.clear();
+ DCHECK_GT(states_.size(), 0u);
+ size_t min = std::numeric_limits<size_t>::max();
+ for (VirtualState* state : states_) {
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ objects_.push_back(obj);
+ min = std::min(obj->field_count(), min);
+ }
+ }
+ return min;
+}
+
+
+void MergeCache::LoadVirtualObjectsForFieldsFrom(
+ VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases) {
+ objects_.clear();
+ size_t max_alias = state->size();
+ for (Node* field : fields_) {
+ EscapeAnalysis::Alias alias = aliases[field->id()];
+ if (alias >= max_alias) continue;
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ objects_.push_back(obj);
+ }
+ }
+}
+
+
+Node* MergeCache::GetFields(size_t pos) {
+ fields_.clear();
+ Node* rep = objects_.front()->GetField(pos);
+ for (VirtualObject* obj : objects_) {
+ Node* field = obj->GetField(pos);
+ if (field) {
+ fields_.push_back(field);
+ }
+ if (field != rep) {
+ rep = nullptr;
+ }
+ }
+ return rep;
+}
+
+
+VirtualState::VirtualState(Zone* zone, size_t size)
+ : info_(size, nullptr, zone), last_changed_(nullptr) {}
+
+
+VirtualState::VirtualState(const VirtualState& state)
+ : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+ last_changed_(state.last_changed_) {
+ for (size_t i = 0; i < state.info_.size(); ++i) {
+ if (state.info_[i]) {
+ info_[i] =
+ new (info_.get_allocator().zone()) VirtualObject(*state.info_[i]);
+ }
+ }
+}
+
+
+VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
+ return info_[alias];
+}
+
+
+VirtualObject* VirtualState::GetOrCreateTrackedVirtualObject(
+ EscapeAnalysis::Alias alias, NodeId id, Zone* zone) {
+ if (VirtualObject* obj = VirtualObjectFromAlias(alias)) {
+ return obj;
+ }
+ VirtualObject* obj = new (zone) VirtualObject(id, zone, 0);
+ SetVirtualObject(alias, obj);
+ return obj;
+}
+
+
+void VirtualState::SetVirtualObject(EscapeAnalysis::Alias alias,
+ VirtualObject* obj) {
+ info_[alias] = obj;
+}
+
+
+bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
+ bool changed = false;
+ for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ VirtualObject* ls = VirtualObjectFromAlias(alias);
+ VirtualObject* rs = from->VirtualObjectFromAlias(alias);
+
+ if (rs == nullptr) {
+ continue;
+ }
+
+ if (ls == nullptr) {
+ ls = new (zone) VirtualObject(*rs);
+ SetVirtualObject(alias, ls);
+ changed = true;
+ continue;
+ }
+
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Updating fields of @%d\n", alias);
+ }
+
+ changed = ls->UpdateFrom(*rs) || changed;
+ }
+ return false;
+}
+
+
+namespace {
+
+bool IsEquivalentPhi(Node* node1, Node* node2) {
+ if (node1 == node2) return true;
+ if (node1->opcode() != IrOpcode::kPhi || node2->opcode() != IrOpcode::kPhi ||
+ node1->op()->ValueInputCount() != node2->op()->ValueInputCount()) {
+ return false;
+ }
+ for (int i = 0; i < node1->op()->ValueInputCount(); ++i) {
+ Node* input1 = NodeProperties::GetValueInput(node1, i);
+ Node* input2 = NodeProperties::GetValueInput(node2, i);
+ if (!IsEquivalentPhi(input1, input2)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
+ if (phi->opcode() != IrOpcode::kPhi) return false;
+ if (phi->op()->ValueInputCount() != inputs.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ Node* input = NodeProperties::GetValueInput(phi, static_cast<int>(i));
+ if (!IsEquivalentPhi(input, inputs[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+
+Node* EscapeAnalysis::GetReplacementIfSame(ZoneVector<VirtualObject*>& objs) {
+ Node* rep = GetReplacement(objs.front()->id());
+ for (VirtualObject* obj : objs) {
+ if (GetReplacement(obj->id()) != rep) {
+ return nullptr;
+ }
+ }
+ return rep;
+}
+
+
+bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
+ CommonOperatorBuilder* common, Node* control) {
+ DCHECK_GT(cache->states().size(), 0u);
+ bool changed = false;
+ for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ size_t fields = cache->LoadVirtualObjectsFromStatesFor(alias);
+ if (cache->objects().size() == cache->states().size()) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Merging virtual objects of @%d\n", alias);
+ }
+ VirtualObject* mergeObject = GetOrCreateTrackedVirtualObject(
+ alias, cache->objects().front()->id(), zone);
+ changed = mergeObject->ResizeFields(fields) || changed;
+ for (size_t i = 0; i < fields; ++i) {
+ if (Node* field = cache->GetFields(i)) {
+ changed = mergeObject->SetField(i, field) || changed;
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Field %zu agree on rep #%d\n", i, field->id());
+ }
+ } else {
+ int value_input_count = static_cast<int>(cache->fields().size());
+ if (cache->fields().size() == cache->objects().size()) {
+ Node* rep = mergeObject->GetField(i);
+ if (!rep || !mergeObject->IsCreatedPhi(i)) {
+ cache->fields().push_back(control);
+ Node* phi = graph->NewNode(
+ common->Phi(MachineRepresentation::kTagged,
+ value_input_count),
+ value_input_count + 1, &cache->fields().front());
+ mergeObject->SetField(i, phi, true);
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Creating Phi #%d as merge of", phi->id());
+ for (int i = 0; i < value_input_count; i++) {
+ PrintF(" #%d (%s)", cache->fields()[i]->id(),
+ cache->fields()[i]->op()->mnemonic());
+ }
+ PrintF("\n");
+ }
+ changed = true;
+ } else {
+ DCHECK(rep->opcode() == IrOpcode::kPhi);
+ for (int n = 0; n < value_input_count; ++n) {
+ if (n < rep->op()->ValueInputCount()) {
+ Node* old = NodeProperties::GetValueInput(rep, n);
+ if (old != cache->fields()[n]) {
+ changed = true;
+ NodeProperties::ReplaceValueInput(rep, cache->fields()[n],
+ n);
+ }
+ } else {
+ changed = true;
+ rep->InsertInput(graph->zone(), n, cache->fields()[n]);
+ }
+ }
+ if (rep->op()->ValueInputCount() != value_input_count) {
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Widening Phi #%d of arity %d to %d", rep->id(),
+ rep->op()->ValueInputCount(), value_input_count);
+ }
+ NodeProperties::ChangeOp(
+ rep, common->Phi(MachineRepresentation::kTagged,
+ value_input_count));
+ }
+ }
+ } else {
+ changed = mergeObject->SetField(i, nullptr) || changed;
+ }
+ }
+ }
+ } else {
+ SetVirtualObject(alias, nullptr);
+ }
+ }
+ return changed;
+}
+
+
+EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
+ Graph* graph, Zone* zone)
+ : object_analysis_(object_analysis),
+ graph_(graph),
+ zone_(zone),
+ status_(graph->NodeCount(), kUnknown, zone),
+ queue_(zone) {}
+
+
+EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
+
+
+bool EscapeStatusAnalysis::HasEntry(Node* node) {
+ return status_[node->id()] & (kTracked | kEscaped);
+}
+
+
+bool EscapeStatusAnalysis::IsVirtual(Node* node) {
+ return (status_[node->id()] & kTracked) && !(status_[node->id()] & kEscaped);
+}
+
+
+bool EscapeStatusAnalysis::IsEscaped(Node* node) {
+ return status_[node->id()] & kEscaped;
+}
+
+
+bool EscapeStatusAnalysis::IsAllocation(Node* node) {
+ return node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion;
+}
+
+
+bool EscapeStatusAnalysis::SetEscaped(Node* node) {
+ bool changed = !(status_[node->id()] & kEscaped);
+ status_[node->id()] |= kEscaped | kTracked;
+ return changed;
+}
+
+
+void EscapeStatusAnalysis::Resize() {
+ status_.resize(graph()->NodeCount(), kUnknown);
+}
+
+
+size_t EscapeStatusAnalysis::size() { return status_.size(); }
+
+
+void EscapeStatusAnalysis::Run() {
+ Resize();
+ queue_.push_back(graph()->end());
+ status_[graph()->end()->id()] |= kOnStack;
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop_front();
+ status_[node->id()] &= ~kOnStack;
+ Process(node);
+ status_[node->id()] |= kVisited;
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!(status_[input->id()] & (kVisited | kOnStack))) {
+ queue_.push_back(input);
+ status_[input->id()] |= kOnStack;
+ }
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::RevisitInputs(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (!(status_[input->id()] & kOnStack)) {
+ queue_.push_back(input);
+ status_[input->id()] |= kOnStack;
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::RevisitUses(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (!(status_[use->id()] & kOnStack)) {
+ queue_.push_back(use);
+ status_[use->id()] |= kOnStack;
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::Process(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ ProcessAllocate(node);
+ break;
+ case IrOpcode::kFinishRegion:
+ ProcessFinishRegion(node);
+ break;
+ case IrOpcode::kStoreField:
+ ProcessStoreField(node);
+ break;
+ case IrOpcode::kStoreElement:
+ ProcessStoreElement(node);
+ break;
+ case IrOpcode::kLoadField:
+ case IrOpcode::kLoadElement: {
+ if (Node* rep = object_analysis_->GetReplacement(node)) {
+ if (IsAllocation(rep) && CheckUsesForEscape(node, rep)) {
+ RevisitInputs(rep);
+ RevisitUses(rep);
+ }
+ }
+ break;
+ }
+ case IrOpcode::kPhi:
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ if (!IsAllocationPhi(node)) {
+ SetEscaped(node);
+ RevisitUses(node);
+ }
+ }
+ CheckUsesForEscape(node);
+ default:
+ break;
+ }
+}
+
+
+bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (input->opcode() == IrOpcode::kPhi && !IsEscaped(input)) continue;
+ if (IsAllocation(input)) continue;
+ return false;
+ }
+ return true;
+}
+
+
+void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 1);
+ if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
+ RevisitUses(val);
+ RevisitInputs(val);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
+ Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* val = NodeProperties::GetValueInput(node, 2);
+ if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
+ RevisitUses(val);
+ RevisitInputs(val);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
+ }
+ }
+}
+
+
+void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+ if (!HasEntry(node)) {
+ status_[node->id()] |= kTracked;
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Created status entry for node #%d (%s)\n", node->id(),
+ node->op()->mnemonic());
+ }
+ NumberMatcher size(node->InputAt(0));
+ DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
+ node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+ if (!size.HasValue() && SetEscaped(node)) {
+ RevisitUses(node);
+ if (FLAG_trace_turbo_escape) {
+ PrintF("Setting #%d to escaped because of non-const alloc\n",
+ node->id());
+ }
+ // This node is known to escape, uses do not have to be checked.
+ return;
+ }
+ }
+ if (CheckUsesForEscape(node, true)) {
+ RevisitUses(node);
+ }
+}
+
+
+// Scans the use edges of |uses| and marks |rep| as escaped when a use can
+// cause the underlying allocation to escape. Returns true iff the escape
+// status of |rep| changed (callers then revisit dependent nodes).
+// |phi_escaping| additionally treats any use by a phi as escaping.
+bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
+                                              bool phi_escaping) {
+  for (Edge edge : uses->use_edges()) {
+    Node* use = edge.from();
+    // Only value and context inputs can propagate the object; skip effect
+    // and control edges.
+    if (edge.index() >= use->op()->ValueInputCount() +
+                            OperatorProperties::GetContextInputCount(use->op()))
+      continue;
+    switch (use->opcode()) {
+      case IrOpcode::kPhi:
+        if (phi_escaping && SetEscaped(rep)) {
+          if (FLAG_trace_turbo_escape) {
+            PrintF(
+                "Setting #%d (%s) to escaped because of use by phi node "
+                "#%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
+          }
+          return true;
+        }
+      // Fallthrough.
+      case IrOpcode::kStoreField:
+      case IrOpcode::kLoadField:
+      case IrOpcode::kStoreElement:
+      case IrOpcode::kLoadElement:
+      case IrOpcode::kFrameState:
+      case IrOpcode::kStateValues:
+      case IrOpcode::kReferenceEqual:
+      case IrOpcode::kFinishRegion:
+        // These uses keep the object virtual unless the user itself escapes.
+        if (IsEscaped(use) && SetEscaped(rep)) {
+          if (FLAG_trace_turbo_escape) {
+            PrintF(
+                "Setting #%d (%s) to escaped because of use by escaping node "
+                "#%d (%s)\n",
+                rep->id(), rep->op()->mnemonic(), use->id(),
+                use->op()->mnemonic());
+          }
+          return true;
+        }
+        break;
+      case IrOpcode::kObjectIsSmi:
+        if (!IsAllocation(rep) && SetEscaped(rep)) {
+          // Guard tracing behind the flag, consistent with all other cases.
+          if (FLAG_trace_turbo_escape) {
+            PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                   rep->id(), rep->op()->mnemonic(), use->id(),
+                   use->op()->mnemonic());
+          }
+          return true;
+        }
+        break;
+      default:
+        // Pure (effect-free) users of an effectful node are unexpected here;
+        // report unconditionally before aborting.
+        if (use->op()->EffectInputCount() == 0 &&
+            uses->op()->EffectInputCount() > 0) {
+          PrintF("Encountered unaccounted use by #%d (%s)\n", use->id(),
+                 use->op()->mnemonic());
+          UNREACHABLE();
+        }
+        // Any other use is conservatively treated as escaping.
+        if (SetEscaped(rep)) {
+          if (FLAG_trace_turbo_escape) {
+            PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+                   rep->id(), rep->op()->mnemonic(), use->id(),
+                   use->op()->mnemonic());
+          }
+          return true;
+        }
+    }
+  }
+  return false;
+}
+
+
+// Marks a FinishRegion node as tracked on first visit and re-queues its
+// uses; if any use lets the region's value escape, its inputs are revisited.
+void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+  if (!HasEntry(node)) {
+    status_[node->id()] |= kTracked;
+    RevisitUses(node);
+  }
+  // phi_escaping=true: a phi use of the finished region counts as an escape.
+  if (CheckUsesForEscape(node, true)) {
+    RevisitInputs(node);
+  }
+}
+
+
+// Prints the final escape status (escaping/virtual) of every tracked node.
+void EscapeStatusAnalysis::DebugPrint() {
+  for (NodeId id = 0; id < status_.size(); id++) {
+    if (status_[id] & kTracked) {
+      PrintF("Node #%d is %s\n", id,
+             (status_[id] & kEscaped) ? "escaping" : "virtual");
+    }
+  }
+}
+
+
+// All analysis state (states, replacements, aliases, merge cache) lives in
+// the supplied compilation zone; the embedded status analysis shares it.
+EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
+                               Zone* zone)
+    : graph_(graph),
+      common_(common),
+      zone_(zone),
+      virtual_states_(zone),
+      replacements_(zone),
+      escape_status_(this, graph, zone),
+      cache_(new (zone) MergeCache(zone)),
+      aliases_(zone),
+      next_free_alias_(0) {}
+
+
+// Nothing to free explicitly: everything was zone-allocated.
+EscapeAnalysis::~EscapeAnalysis() {}
+
+
+// Entry point: assign aliases to allocations, simulate stores/loads over the
+// effect chain, then run the escape status fixpoint.
+void EscapeAnalysis::Run() {
+  replacements_.resize(graph()->NodeCount());
+  AssignAliases();
+  RunObjectAnalysis();
+  escape_status_.Run();
+}
+
+
+// Walks the graph backwards from end and assigns an alias id to every
+// reachable allocation (a FinishRegion over an allocation shares the
+// allocation's alias). All other reachable nodes get kUntrackable;
+// unreachable nodes keep kNotReachable.
+void EscapeAnalysis::AssignAliases() {
+  ZoneVector<Node*> stack(zone());
+  stack.push_back(graph()->end());
+  // Alias ids must stay below the kUntrackable sentinel.
+  CHECK_LT(graph()->NodeCount(), kUntrackable);
+  aliases_.resize(graph()->NodeCount(), kNotReachable);
+  aliases_[graph()->end()->id()] = kUntrackable;
+  while (!stack.empty()) {
+    Node* node = stack.back();
+    stack.pop_back();
+    switch (node->opcode()) {
+      case IrOpcode::kAllocate:
+        if (aliases_[node->id()] >= kUntrackable) {
+          aliases_[node->id()] = NextAlias();
+        }
+        break;
+      case IrOpcode::kFinishRegion: {
+        // Share the alias with the wrapped allocation, assigning one first
+        // if the allocation has not been visited yet.
+        Node* allocate = NodeProperties::GetValueInput(node, 0);
+        if (allocate->opcode() == IrOpcode::kAllocate) {
+          if (aliases_[allocate->id()] >= kUntrackable) {
+            if (aliases_[allocate->id()] == kNotReachable) {
+              stack.push_back(allocate);
+            }
+            aliases_[allocate->id()] = NextAlias();
+          }
+          aliases_[node->id()] = aliases_[allocate->id()];
+        } else {
+          aliases_[node->id()] = NextAlias();
+        }
+        break;
+      }
+      default:
+        DCHECK_EQ(aliases_[node->id()], kUntrackable);
+        break;
+    }
+    // Schedule not-yet-seen inputs; they default to untrackable until a
+    // case above upgrades them.
+    for (Edge edge : node->input_edges()) {
+      Node* input = edge.to();
+      if (aliases_[input->id()] == kNotReachable) {
+        stack.push_back(input);
+        aliases_[input->id()] = kUntrackable;
+      }
+    }
+  }
+
+  if (FLAG_trace_turbo_escape) {
+    PrintF("Discovered trackable nodes");
+    for (EscapeAnalysis::Alias id = 0; id < graph()->NodeCount(); ++id) {
+      if (aliases_[id] < kUntrackable) {
+        // No inner flag re-check needed: this block is already guarded.
+        PrintF(" #%u", id);
+      }
+    }
+    PrintF("\n");
+  }
+}
+
+
+// Simulates the program along effect edges, starting at the start node.
+// When Process() reports a change, effect successors are re-queued.
+void EscapeAnalysis::RunObjectAnalysis() {
+  virtual_states_.resize(graph()->NodeCount());
+  ZoneVector<Node*> stack(zone());
+  stack.push_back(graph()->start());
+  while (!stack.empty()) {
+    Node* node = stack.back();
+    stack.pop_back();
+    if (aliases_[node->id()] != kNotReachable && Process(node)) {
+      // Push non-load (or non-dangling) effect successors first ...
+      for (Edge edge : node->use_edges()) {
+        if (NodeProperties::IsEffectEdge(edge)) {
+          Node* use = edge.from();
+          if ((use->opcode() != IrOpcode::kLoadField &&
+               use->opcode() != IrOpcode::kLoadElement) ||
+              !IsDanglingEffectNode(use)) {
+            stack.push_back(use);
+          }
+        }
+      }
+      // First process loads: dangling loads are a problem otherwise.
+      // (Pushed last so the LIFO stack pops them first.)
+      for (Edge edge : node->use_edges()) {
+        if (NodeProperties::IsEffectEdge(edge)) {
+          Node* use = edge.from();
+          if ((use->opcode() == IrOpcode::kLoadField ||
+               use->opcode() == IrOpcode::kLoadElement) &&
+              IsDanglingEffectNode(use)) {
+            stack.push_back(use);
+          }
+        }
+      }
+    }
+  }
+  if (FLAG_trace_turbo_escape) {
+    DebugPrint();
+  }
+}
+
+
+// A node is "dangling" when it sits on the effect chain (has effect input
+// and output) but nothing consumes its effect output.
+bool EscapeAnalysis::IsDanglingEffectNode(Node* node) {
+  if (node->op()->EffectInputCount() == 0) return false;
+  if (node->op()->EffectOutputCount() == 0) return false;
+  if (node->op()->EffectInputCount() == 1 &&
+      NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart) {
+    // The start node is used as sentinel for nodes that are in general
+    // effectful, but of which an analysis has determined that they do not
+    // produce effects in this instance. We don't consider these nodes dangling.
+    return false;
+  }
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+// Dispatches one node to its handler during object analysis. Returns true
+// when effect successors should be (re)visited; only an unchanged effect
+// phi returns false.
+bool EscapeAnalysis::Process(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kAllocate:
+      ProcessAllocation(node);
+      break;
+    case IrOpcode::kBeginRegion:
+      ForwardVirtualState(node);
+      break;
+    case IrOpcode::kFinishRegion:
+      ProcessFinishRegion(node);
+      break;
+    case IrOpcode::kStoreField:
+      ProcessStoreField(node);
+      break;
+    case IrOpcode::kLoadField:
+      ProcessLoadField(node);
+      break;
+    case IrOpcode::kStoreElement:
+      ProcessStoreElement(node);
+      break;
+    case IrOpcode::kLoadElement:
+      ProcessLoadElement(node);
+      break;
+    case IrOpcode::kStart:
+      ProcessStart(node);
+      break;
+    case IrOpcode::kEffectPhi:
+      // Note: no break needed; the return ends the case directly.
+      return ProcessEffectPhi(node);
+    default:
+      // Any other effectful node just forwards the state; its value/context
+      // inputs may still invalidate tracked fields.
+      if (node->op()->EffectInputCount() > 0) {
+        ForwardVirtualState(node);
+      }
+      ProcessAllocationUsers(node);
+      break;
+  }
+  return true;
+}
+
+
+// For a node not explicitly modeled by the analysis, conservatively clear
+// all tracked fields of any virtual object flowing into it via a value or
+// context edge (the node may mutate the object arbitrarily).
+void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
+  for (Edge edge : node->input_edges()) {
+    Node* input = edge.to();
+    if (!NodeProperties::IsValueEdge(edge) &&
+        !NodeProperties::IsContextEdge(edge))
+      continue;
+    switch (node->opcode()) {
+      // These opcodes are modeled precisely elsewhere; nothing to clear.
+      case IrOpcode::kStoreField:
+      case IrOpcode::kLoadField:
+      case IrOpcode::kStoreElement:
+      case IrOpcode::kLoadElement:
+      case IrOpcode::kFrameState:
+      case IrOpcode::kStateValues:
+      case IrOpcode::kReferenceEqual:
+      case IrOpcode::kFinishRegion:
+      case IrOpcode::kPhi:
+        break;
+      default:
+        VirtualState* state = virtual_states_[node->id()];
+        if (VirtualObject* obj = ResolveVirtualObject(state, input)) {
+          if (obj->ClearAllFields()) {
+            state->LastChangedAt(node);
+          }
+        }
+        break;
+    }
+  }
+}
+
+
+// Returns true when more than one effect edge leaves |node|, i.e. the
+// effect chain branches here and states must be copied rather than shared.
+bool EscapeAnalysis::IsEffectBranchPoint(Node* node) {
+  int count = 0;
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) {
+      if (++count > 1) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+// Propagates the virtual state along the effect chain from |node|'s effect
+// input to |node| itself. At effect branch points the state is copied (or
+// merged into an existing copy); otherwise it is shared by pointer.
+void EscapeAnalysis::ForwardVirtualState(Node* node) {
+  DCHECK_EQ(node->op()->EffectInputCount(), 1);
+  // Dangling effect nodes other than loads indicate a malformed graph.
+  if (node->opcode() != IrOpcode::kLoadField &&
+      node->opcode() != IrOpcode::kLoadElement &&
+      node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
+    PrintF("Dangling effect node: #%d (%s)\n", node->id(),
+           node->op()->mnemonic());
+    UNREACHABLE();
+  }
+  Node* effect = NodeProperties::GetEffectInput(node);
+  // Break the cycle for effect phis.
+  if (effect->opcode() == IrOpcode::kEffectPhi) {
+    if (virtual_states_[effect->id()] == nullptr) {
+      virtual_states_[effect->id()] =
+          new (zone()) VirtualState(zone(), AliasCount());
+    }
+  }
+  DCHECK_NOT_NULL(virtual_states_[effect->id()]);
+  if (IsEffectBranchPoint(effect)) {
+    if (FLAG_trace_turbo_escape) {
+      PrintF("Copying object state %p from #%d (%s) to #%d (%s)\n",
+             static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
+             effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+    }
+    if (!virtual_states_[node->id()]) {
+      virtual_states_[node->id()] =
+          new (zone()) VirtualState(*virtual_states_[effect->id()]);
+    } else {
+      virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
+                                              zone());
+    }
+  } else {
+    virtual_states_[node->id()] = virtual_states_[effect->id()];
+    if (FLAG_trace_turbo_escape) {
+      PrintF("Forwarding object state %p from #%d (%s) to #%d (%s)\n",
+             static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
+             effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+    }
+  }
+}
+
+
+// The start node seeds the analysis with an empty virtual state.
+void EscapeAnalysis::ProcessStart(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kStart);
+  virtual_states_[node->id()] = new (zone()) VirtualState(zone(), AliasCount());
+}
+
+
+// Merges the virtual states of all effect inputs into the phi's own state.
+// Returns true when the merged state changed, so that effect successors
+// get revisited.
+bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+  bool changed = false;
+
+  VirtualState* mergeState = virtual_states_[node->id()];
+  if (!mergeState) {
+    // First visit: allocate a fresh state for this phi.
+    mergeState = new (zone()) VirtualState(zone(), AliasCount());
+    virtual_states_[node->id()] = mergeState;
+    changed = true;
+    if (FLAG_trace_turbo_escape) {
+      PrintF("Effect Phi #%d got new states map %p.\n", node->id(),
+             static_cast<void*>(mergeState));
+    }
+  } else if (mergeState->GetLastChanged() != node) {
+    changed = true;
+  }
+
+  cache_->Clear();
+
+  if (FLAG_trace_turbo_escape) {
+    PrintF("At Effect Phi #%d, merging states into %p:", node->id(),
+           static_cast<void*>(mergeState));
+  }
+
+  // Collect the (already computed) input states; inputs not yet visited
+  // contribute nothing.
+  for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+    Node* input = NodeProperties::GetEffectInput(node, i);
+    VirtualState* state = virtual_states_[input->id()];
+    if (state) {
+      cache_->states().push_back(state);
+    }
+    if (FLAG_trace_turbo_escape) {
+      PrintF(" %p (from %d %s)", static_cast<void*>(state), input->id(),
+             input->op()->mnemonic());
+    }
+  }
+  if (FLAG_trace_turbo_escape) {
+    PrintF("\n");
+  }
+
+  if (cache_->states().size() == 0) {
+    return changed;
+  }
+
+  changed = mergeState->MergeFrom(cache_, zone(), graph(), common(),
+                                  NodeProperties::GetControlInput(node)) ||
+            changed;
+
+  if (FLAG_trace_turbo_escape) {
+    PrintF("Merge %s the node.\n", changed ? "changed" : "did not change");
+  }
+
+  if (changed) {
+    mergeState->LastChangedAt(node);
+    // Merging may have created phi nodes; grow the status table.
+    escape_status_.Resize();
+  }
+  return changed;
+}
+
+
+// Creates a virtual object for an allocation, sized in pointer-width fields
+// when the byte size is a known constant, untracked otherwise.
+void EscapeAnalysis::ProcessAllocation(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
+  ForwardVirtualState(node);
+
+  // Check if we have already processed this node.
+  if (virtual_states_[node->id()]->VirtualObjectFromAlias(
+          aliases_[node->id()])) {
+    return;
+  }
+
+  // Constant sizes are expected to be canonicalized to NumberConstant, not
+  // raw machine constants (see DCHECK below).
+  NumberMatcher size(node->InputAt(0));
+  DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
+         node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
+         node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
+         node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+  if (size.HasValue()) {
+    // Byte size converted to a field count in pointer-size units.
+    virtual_states_[node->id()]->SetVirtualObject(
+        aliases_[node->id()],
+        new (zone())
+            VirtualObject(node->id(), zone(), size.Value() / kPointerSize));
+  } else {
+    virtual_states_[node->id()]->SetVirtualObject(
+        aliases_[node->id()], new (zone()) VirtualObject(node->id(), zone()));
+  }
+  virtual_states_[node->id()]->LastChangedAt(node);
+}
+
+
+// Links a FinishRegion node to the virtual object of the allocation it
+// wraps, so both aliases resolve to the same object.
+void EscapeAnalysis::ProcessFinishRegion(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
+  ForwardVirtualState(node);
+  Node* allocation = NodeProperties::GetValueInput(node, 0);
+  if (allocation->opcode() == IrOpcode::kAllocate) {
+    VirtualState* state = virtual_states_[node->id()];
+    if (!state->VirtualObjectFromAlias(aliases_[node->id()])) {
+      VirtualObject* vobj_alloc =
+          state->VirtualObjectFromAlias(aliases_[allocation->id()]);
+      DCHECK_NOT_NULL(vobj_alloc);
+      state->SetVirtualObject(aliases_[node->id()], vobj_alloc);
+      if (FLAG_trace_turbo_escape) {
+        PrintF("Linked finish region node #%d to node #%d\n", node->id(),
+               allocation->id());
+      }
+      state->LastChangedAt(node);
+    }
+  }
+}
+
+
+// Returns the recorded replacement for node |id|, or nullptr if none.
+Node* EscapeAnalysis::replacement(NodeId id) {
+  if (id >= replacements_.size()) return nullptr;
+  return replacements_[id];
+}
+
+
+// Convenience overload keyed by node pointer.
+Node* EscapeAnalysis::replacement(Node* node) {
+  return replacement(node->id());
+}
+
+
+// Records |rep| as the replacement of |node|; returns true if it changed.
+bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
+  bool changed = replacements_[node->id()] != rep;
+  replacements_[node->id()] = rep;
+  return changed;
+}
+
+
+// Like SetReplacement, but also stamps the state as changed at |node| and
+// traces the update. Returns true iff the replacement changed.
+bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
+                                       Node* rep) {
+  if (SetReplacement(node, rep)) {
+    state->LastChangedAt(node);
+    if (FLAG_trace_turbo_escape) {
+      if (rep) {
+        PrintF("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
+               rep->op()->mnemonic());
+      } else {
+        PrintF("Replacement of #%d cleared\n", node->id());
+      }
+    }
+    return true;
+  }
+  return false;
+}
+
+
+// Follows the replacement chain to its end; returns |node| itself when it
+// has no replacement.
+Node* EscapeAnalysis::ResolveReplacement(Node* node) {
+  while (replacement(node)) {
+    node = replacement(node);
+  }
+  return node;
+}
+
+
+Node* EscapeAnalysis::GetReplacement(Node* node) {
+  return GetReplacement(node->id());
+}
+
+
+// Follows the replacement chain; unlike ResolveReplacement, returns nullptr
+// when |id| has no replacement at all.
+Node* EscapeAnalysis::GetReplacement(NodeId id) {
+  Node* node = nullptr;
+  while (replacement(id)) {
+    node = replacement(id);
+    id = node->id();
+  }
+  return node;
+}
+
+
+// Nodes beyond the status table (added after analysis) are not virtual.
+bool EscapeAnalysis::IsVirtual(Node* node) {
+  if (node->id() >= escape_status_.size()) {
+    return false;
+  }
+  return escape_status_.IsVirtual(node);
+}
+
+
+// Nodes beyond the status table (added after analysis) are not escaped.
+bool EscapeAnalysis::IsEscaped(Node* node) {
+  if (node->id() >= escape_status_.size()) {
+    return false;
+  }
+  return escape_status_.IsEscaped(node);
+}
+
+
+// Delegates to the status analysis; returns true iff the status changed.
+bool EscapeAnalysis::SetEscaped(Node* node) {
+  return escape_status_.SetEscaped(node);
+}
+
+
+// Looks up the virtual object for node |id| in the state recorded at |at|.
+VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
+  if (VirtualState* states = virtual_states_[at->id()]) {
+    return states->VirtualObjectFromAlias(aliases_[id]);
+  }
+  return nullptr;
+}
+
+
+// Resolves |node| through its replacement chain to a virtual object, then
+// keeps following replacements of the object's originating node.
+VirtualObject* EscapeAnalysis::ResolveVirtualObject(VirtualState* state,
+                                                    Node* node) {
+  VirtualObject* obj = GetVirtualObject(state, ResolveReplacement(node));
+  while (obj && replacement(obj->id())) {
+    if (VirtualObject* next = GetVirtualObject(state, replacement(obj->id()))) {
+      obj = next;
+    } else {
+      break;
+    }
+  }
+  return obj;
+}
+
+
+// Two virtual objects compare equal when their resolved representatives are
+// equivalent phis (or the same node).
+bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
+  DCHECK(IsVirtual(left) && IsVirtual(right));
+  left = ResolveReplacement(left);
+  right = ResolveReplacement(right);
+  if (IsEquivalentPhi(left, right)) {
+    return true;
+  }
+  return false;
+}
+
+
+// Converts a FieldAccess byte offset to a pointer-size field index; the
+// offset must be pointer-aligned.
+int EscapeAnalysis::OffsetFromAccess(Node* node) {
+  DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
+  return OpParameter<FieldAccess>(node).offset / kPointerSize;
+}
+
+
+// Handles a load whose object input is a phi: if every phi input maps to a
+// tracked virtual object with a known field at |offset|, a value phi over
+// those fields replaces the load.
+void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* node,
+                                        VirtualState* state) {
+  if (FLAG_trace_turbo_escape) {
+    PrintF("Load #%d from phi #%d", node->id(), from->id());
+  }
+
+  // Collect the load's value inputs (the phi operands).
+  cache_->fields().clear();
+  for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+    Node* input = NodeProperties::GetValueInput(node, i);
+    cache_->fields().push_back(input);
+  }
+
+  cache_->LoadVirtualObjectsForFieldsFrom(state, aliases_);
+  // Proceed only if every operand resolved to a virtual object ...
+  if (cache_->objects().size() == cache_->fields().size()) {
+    cache_->GetFields(offset);
+    // ... and every object had a value at the requested field.
+    if (cache_->fields().size() == cache_->objects().size()) {
+      Node* rep = replacement(node);
+      if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
+        int value_input_count = static_cast<int>(cache_->fields().size());
+        // The phi takes the field values plus the control input of |from|.
+        cache_->fields().push_back(NodeProperties::GetControlInput(from));
+        Node* phi = graph()->NewNode(
+            common()->Phi(MachineRepresentation::kTagged, value_input_count),
+            value_input_count + 1, &cache_->fields().front());
+        escape_status_.Resize();
+        SetReplacement(node, phi);
+        state->LastChangedAt(node);
+        if (FLAG_trace_turbo_escape) {
+          PrintF(" got phi created.\n");
+        }
+      } else if (FLAG_trace_turbo_escape) {
+        PrintF(" has already phi #%d.\n", rep->id());
+      }
+    } else if (FLAG_trace_turbo_escape) {
+      PrintF(" has incomplete field info.\n");
+    }
+  } else if (FLAG_trace_turbo_escape) {
+    PrintF(" has incomplete virtual object info.\n");
+  }
+}
+
+
+// Replaces a field load from a tracked virtual object with the last value
+// stored at that offset; loads through a phi are handled separately.
+void EscapeAnalysis::ProcessLoadField(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
+  ForwardVirtualState(node);
+  Node* from = NodeProperties::GetValueInput(node, 0);
+  VirtualState* state = virtual_states_[node->id()];
+  if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+    int offset = OffsetFromAccess(node);
+    if (!object->IsTracked()) return;
+    Node* value = object->GetField(offset);
+    if (value) {
+      value = ResolveReplacement(value);
+    }
+    // Record that the load has this alias.
+    UpdateReplacement(state, node, value);
+  } else {
+    // Phi loads are only handled for pointer-aligned offsets.
+    if (from->opcode() == IrOpcode::kPhi &&
+        OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
+      int offset = OffsetFromAccess(node);
+      // Only binary phis are supported for now.
+      ProcessLoadFromPhi(offset, from, node, state);
+    }
+  }
+}
+
+
+// Replaces an element load from a tracked virtual object with the stored
+// value when the index is a compile-time constant; a non-constant index
+// forces the source object to escape.
+void EscapeAnalysis::ProcessLoadElement(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
+  ForwardVirtualState(node);
+  Node* from = NodeProperties::GetValueInput(node, 0);
+  VirtualState* state = virtual_states_[node->id()];
+  Node* index_node = node->InputAt(1);
+  // Constant indices are expected to be canonicalized to NumberConstant.
+  NumberMatcher index(index_node);
+  DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
+         index_node->opcode() != IrOpcode::kInt64Constant &&
+         index_node->opcode() != IrOpcode::kFloat32Constant &&
+         index_node->opcode() != IrOpcode::kFloat64Constant);
+  ElementAccess access = OpParameter<ElementAccess>(node);
+  if (index.HasValue()) {
+    int offset = index.Value() + access.header_size / kPointerSize;
+    if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+      CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+               kPointerSizeLog2);
+      CHECK_EQ(access.header_size % kPointerSize, 0);
+
+      if (!object->IsTracked()) return;
+      Node* value = object->GetField(offset);
+      if (value) {
+        value = ResolveReplacement(value);
+      }
+      // Record that the load has this alias.
+      UpdateReplacement(state, node, value);
+    } else if (from->opcode() == IrOpcode::kPhi) {
+      // |access| and |offset| from above are reused; the previous inner
+      // declarations merely shadowed them with identical values.
+      ProcessLoadFromPhi(offset, from, node, state);
+    }
+  } else {
+    // We have a load from a non-const index, cannot eliminate object.
+    if (SetEscaped(from)) {
+      if (FLAG_trace_turbo_escape) {
+        PrintF(
+            "Setting #%d (%s) to escaped because load element #%d from "
+            "non-const "
+            "index #%d (%s)\n",
+            from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
+            index_node->op()->mnemonic());
+      }
+    }
+  }
+}
+
+
+// Records a field store into the tracked virtual object instead of keeping
+// the store; the state is stamped when the field value actually changed.
+void EscapeAnalysis::ProcessStoreField(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
+  ForwardVirtualState(node);
+  Node* to = NodeProperties::GetValueInput(node, 0);
+  Node* val = NodeProperties::GetValueInput(node, 1);
+  VirtualState* state = virtual_states_[node->id()];
+  if (VirtualObject* obj = ResolveVirtualObject(state, to)) {
+    if (!obj->IsTracked()) return;
+    int offset = OffsetFromAccess(node);
+    if (obj->SetField(offset, ResolveReplacement(val))) {
+      state->LastChangedAt(node);
+    }
+  }
+}
+
+
+// Records an element store into the tracked virtual object when the index
+// is a compile-time constant; otherwise the target object must escape.
+void EscapeAnalysis::ProcessStoreElement(Node* node) {
+  DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
+  ForwardVirtualState(node);
+  Node* to = NodeProperties::GetValueInput(node, 0);
+  Node* index_node = node->InputAt(1);
+  // Constant indices are expected to be canonicalized to NumberConstant.
+  NumberMatcher index(index_node);
+  DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
+         index_node->opcode() != IrOpcode::kInt64Constant &&
+         index_node->opcode() != IrOpcode::kFloat32Constant &&
+         index_node->opcode() != IrOpcode::kFloat64Constant);
+  ElementAccess access = OpParameter<ElementAccess>(node);
+  Node* val = NodeProperties::GetValueInput(node, 2);
+  if (index.HasValue()) {
+    int offset = index.Value() + access.header_size / kPointerSize;
+    VirtualState* states = virtual_states_[node->id()];
+    if (VirtualObject* obj = ResolveVirtualObject(states, to)) {
+      if (!obj->IsTracked()) return;
+      CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+               kPointerSizeLog2);
+      CHECK_EQ(access.header_size % kPointerSize, 0);
+      if (obj->SetField(offset, ResolveReplacement(val))) {
+        states->LastChangedAt(node);
+      }
+    }
+  } else {
+    // We have a store to a non-const index, cannot eliminate object.
+    if (SetEscaped(to)) {
+      if (FLAG_trace_turbo_escape) {
+        PrintF(
+            "Setting #%d (%s) to escaped because store element #%d to "
+            "non-const "
+            "index #%d (%s)\n",
+            to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
+            index_node->op()->mnemonic());
+      }
+    }
+  }
+}
+
+
+// Materializes (or returns the cached) ObjectState node describing the
+// fields of the virtual object behind |node|, for use in deopt frame states.
+// Field values that are themselves virtual objects are recursively replaced
+// by their own ObjectState nodes.
+Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
+  if ((node->opcode() == IrOpcode::kFinishRegion ||
+       node->opcode() == IrOpcode::kAllocate) &&
+      IsVirtual(node)) {
+    if (VirtualObject* vobj =
+            ResolveVirtualObject(virtual_states_[effect->id()], node)) {
+      if (Node* object_state = vobj->GetObjectState()) {
+        return object_state;
+      } else {
+        cache_->fields().clear();
+        // NOTE(review): only non-null fields are collected here, but the
+        // fix-up loop below indexes new_object_state with the raw field
+        // index i — this appears to assume no null field precedes a
+        // non-null one; TODO confirm.
+        for (size_t i = 0; i < vobj->field_count(); ++i) {
+          if (Node* field = vobj->GetField(i)) {
+            cache_->fields().push_back(field);
+          }
+        }
+        int input_count = static_cast<int>(cache_->fields().size());
+        Node* new_object_state =
+            graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
+                             input_count, &cache_->fields().front());
+        vobj->SetObjectState(new_object_state);
+        if (FLAG_trace_turbo_escape) {
+          PrintF(
+              "Creating object state #%d for vobj %p (from node #%d) at effect "
+              "#%d\n",
+              new_object_state->id(), static_cast<void*>(vobj), node->id(),
+              effect->id());
+        }
+        // Now fix uses of other objects.
+        for (size_t i = 0; i < vobj->field_count(); ++i) {
+          if (Node* field = vobj->GetField(i)) {
+            if (Node* field_object_state =
+                    GetOrCreateObjectState(effect, field)) {
+              NodeProperties::ReplaceValueInput(
+                  new_object_state, field_object_state, static_cast<int>(i));
+            }
+          }
+        }
+        return new_object_state;
+      }
+    }
+  }
+  return nullptr;
+}
+
+
+// Dumps one virtual object: its alias, id, and all known field values.
+void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
+  PrintF("  Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
+         object->field_count());
+  for (size_t i = 0; i < object->field_count(); ++i) {
+    if (Node* f = object->GetField(i)) {
+      PrintF("    Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
+    }
+  }
+}
+
+
+// Dumps every virtual object recorded in |state|.
+void EscapeAnalysis::DebugPrintState(VirtualState* state) {
+  PrintF("Dumping object state %p\n", static_cast<void*>(state));
+  for (Alias alias = 0; alias < AliasCount(); ++alias) {
+    if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
+      DebugPrintObject(object, alias);
+    }
+  }
+}
+
+
+// Dumps each distinct virtual state exactly once (states are shared across
+// nodes, so duplicates are filtered first).
+void EscapeAnalysis::DebugPrint() {
+  ZoneVector<VirtualState*> object_states(zone());
+  for (NodeId id = 0; id < virtual_states_.size(); id++) {
+    if (VirtualState* states = virtual_states_[id]) {
+      if (std::find(object_states.begin(), object_states.end(), states) ==
+          object_states.end()) {
+        object_states.push_back(states);
+      }
+    }
+  }
+  for (size_t n = 0; n < object_states.size(); n++) {
+    DebugPrintState(object_states[n]);
+  }
+}
+
+
+// Bounds-checked lookup of |node|'s virtual object in |state|; returns
+// nullptr for nodes without an alias or aliases outside the state.
+VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
+                                                Node* node) {
+  if (node->id() >= aliases_.size()) return nullptr;
+  Alias alias = aliases_[node->id()];
+  if (alias >= state->size()) return nullptr;
+  return state->VirtualObjectFromAlias(alias);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
new file mode 100644
index 0000000000..ea7b11ecdf
--- /dev/null
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -0,0 +1,169 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
+#define V8_COMPILER_ESCAPE_ANALYSIS_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class EscapeAnalysis;
+class VirtualState;
+class VirtualObject;
+
+
+// EscapeStatusAnalysis determines for each allocation whether it escapes.
+class EscapeStatusAnalysis {
+ public:
+  ~EscapeStatusAnalysis();
+
+  // Per-node status bits, stored compactly in status_.
+  enum EscapeStatusFlag {
+    kUnknown = 0u,
+    kTracked = 1u << 0,
+    kEscaped = 1u << 1,
+    kOnStack = 1u << 2,
+    kVisited = 1u << 3,
+  };
+  typedef base::Flags<EscapeStatusFlag, unsigned char> EscapeStatusFlags;
+
+  // Runs the worklist algorithm to a fixpoint.
+  void Run();
+
+  // Queries on the computed result.
+  bool IsVirtual(Node* node);
+  bool IsEscaped(Node* node);
+  bool IsAllocation(Node* node);
+
+  void DebugPrint();
+
+  friend class EscapeAnalysis;
+
+ private:
+  // Construction is reserved for EscapeAnalysis (see friend declaration).
+  EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
+                       Zone* zone);
+  // Per-opcode worklist handlers.
+  void Process(Node* node);
+  void ProcessAllocate(Node* node);
+  void ProcessFinishRegion(Node* node);
+  void ProcessStoreField(Node* node);
+  void ProcessStoreElement(Node* node);
+  // Checks uses of |node| itself; the two-argument form checks uses of
+  // |node| while attributing escapes to |rep|.
+  bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
+    return CheckUsesForEscape(node, node, phi_escaping);
+  }
+  bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
+  void RevisitUses(Node* node);
+  void RevisitInputs(Node* node);
+  bool SetEscaped(Node* node);
+  bool HasEntry(Node* node);
+  void Resize();
+  size_t size();
+  bool IsAllocationPhi(Node* node);
+
+  Graph* graph() const { return graph_; }
+  Zone* zone() const { return zone_; }
+
+  EscapeAnalysis* object_analysis_;
+  Graph* const graph_;
+  Zone* const zone_;
+  // Status flags indexed by NodeId.
+  ZoneVector<EscapeStatusFlags> status_;
+  // Worklist of nodes to (re)process.
+  ZoneDeque<Node*> queue_;
+
+  DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::EscapeStatusFlags)
+
+
+// Forward Declaration.
+class MergeCache;
+
+
+// EscapeObjectAnalysis simulates stores to determine values of loads if
+// an object is virtual and eliminated.
+class EscapeAnalysis {
+ public:
+  // Alias ids identify allocations; NodeId-sized for direct indexing.
+  typedef NodeId Alias;
+
+  EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
+  ~EscapeAnalysis();
+
+  // Runs alias assignment, object analysis and the escape-status fixpoint.
+  void Run();
+
+  // Query interface used by the reducer after Run().
+  Node* GetReplacement(Node* node);
+  bool IsVirtual(Node* node);
+  bool IsEscaped(Node* node);
+  bool CompareVirtualObjects(Node* left, Node* right);
+  Node* GetOrCreateObjectState(Node* effect, Node* node);
+
+ private:
+  void RunObjectAnalysis();
+  void AssignAliases();
+  // Per-opcode handlers for the object analysis pass.
+  bool Process(Node* node);
+  void ProcessLoadField(Node* node);
+  void ProcessStoreField(Node* node);
+  void ProcessLoadElement(Node* node);
+  void ProcessStoreElement(Node* node);
+  void ProcessAllocationUsers(Node* node);
+  void ProcessAllocation(Node* node);
+  void ProcessFinishRegion(Node* node);
+  void ProcessCall(Node* node);
+  void ProcessStart(Node* node);
+  bool ProcessEffectPhi(Node* node);
+  void ProcessLoadFromPhi(int offset, Node* from, Node* node,
+                          VirtualState* states);
+
+  void ForwardVirtualState(Node* node);
+  bool IsEffectBranchPoint(Node* node);
+  bool IsDanglingEffectNode(Node* node);
+  int OffsetFromAccess(Node* node);
+
+  VirtualObject* GetVirtualObject(Node* at, NodeId id);
+  VirtualObject* ResolveVirtualObject(VirtualState* state, Node* node);
+  Node* GetReplacementIfSame(ZoneVector<VirtualObject*>& objs);
+
+  // Replacement bookkeeping (virtual loads are replaced by stored values).
+  bool SetEscaped(Node* node);
+  Node* replacement(NodeId id);
+  Node* replacement(Node* node);
+  Node* ResolveReplacement(Node* node);
+  Node* GetReplacement(NodeId id);
+  bool SetReplacement(Node* node, Node* rep);
+  bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
+
+  VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
+
+  void DebugPrint();
+  void DebugPrintState(VirtualState* state);
+  void DebugPrintObject(VirtualObject* state, Alias id);
+
+  // Monotonically increasing alias allocator.
+  Alias NextAlias() { return next_free_alias_++; }
+  Alias AliasCount() const { return next_free_alias_; }
+
+  Graph* graph() const { return graph_; }
+  CommonOperatorBuilder* common() const { return common_; }
+  Zone* zone() const { return zone_; }
+
+  // Sentinel alias values (see AssignAliases).
+  static const Alias kNotReachable;
+  static const Alias kUntrackable;
+  Graph* const graph_;
+  CommonOperatorBuilder* const common_;
+  Zone* const zone_;
+  // Virtual state per node, indexed by NodeId; shared along effect chains.
+  ZoneVector<VirtualState*> virtual_states_;
+  // Replacement node per node, indexed by NodeId.
+  ZoneVector<Node*> replacements_;
+  EscapeStatusAnalysis escape_status_;
+  MergeCache* cache_;
+  // Alias per node, indexed by NodeId.
+  ZoneVector<Alias> aliases_;
+  Alias next_free_alias_;
+
+  DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ESCAPE_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/fast-accessor-assembler.cc b/deps/v8/src/compiler/fast-accessor-assembler.cc
new file mode 100644
index 0000000000..09d513fdc6
--- /dev/null
+++ b/deps/v8/src/compiler/fast-accessor-assembler.cc
@@ -0,0 +1,220 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/fast-accessor-assembler.h"
+
+#include "src/base/logging.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/verifier.h"
+#include "src/handles-inl.h"
+#include "src/objects.h" // For FAA::GetInternalField impl.
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Sets up a RawMachineAssembler with a 1-parameter JS call descriptor; all
+// graph data lives in the assembler's own zone_.
+FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
+    : zone_(),
+      assembler_(new RawMachineAssembler(
+          isolate, new (zone()) Graph(zone()),
+          Linkage::GetJSCallDescriptor(&zone_, false, 1,
+                                       CallDescriptor::kNoFlags))),
+      state_(kBuilding) {}
+
+
+FastAccessorAssembler::~FastAccessorAssembler() {}
+
+
+// Emits an integer constant (as a tagged NumberConstant) and returns its id.
+FastAccessorAssembler::ValueId FastAccessorAssembler::IntegerConstant(
+    int const_value) {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(assembler_->NumberConstant(const_value));
+}
+
+
+// Returns the receiver object of the accessor call.
+FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
+  CHECK_EQ(kBuilding, state_);
+
+  // For JS call descriptor, the receiver is parameter 0. If we use other
+  // call descriptors, this may or may not hold. So let's check.
+  CHECK(assembler_->call_descriptor()->IsJSFunctionCall());
+  return FromRaw(assembler_->Parameter(0));
+}
+
+
+// Loads internal field |field_no| of |value| if it is a JSObject; yields
+// undefined otherwise (merged via a phi).
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
+    ValueId value, int field_no) {
+  CHECK_EQ(kBuilding, state_);
+  // Determine the 'value' object's instance type.
+  Node* object_map =
+      assembler_->Load(MachineType::Pointer(), FromId(value),
+                       assembler_->IntPtrConstant(
+                           Internals::kHeapObjectMapOffset - kHeapObjectTag));
+  // Instance type is the low byte of the combined type-and-bitfield word.
+  Node* instance_type = assembler_->WordAnd(
+      assembler_->Load(
+          MachineType::Uint16(), object_map,
+          assembler_->IntPtrConstant(
+              Internals::kMapInstanceTypeAndBitFieldOffset - kHeapObjectTag)),
+      assembler_->IntPtrConstant(0xff));
+
+  // Check whether we have a proper JSObject.
+  RawMachineLabel is_jsobject, is_not_jsobject, merge;
+  assembler_->Branch(
+      assembler_->WordEqual(
+          instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+      &is_jsobject, &is_not_jsobject);
+
+  // JSObject? Then load the internal field field_no.
+  assembler_->Bind(&is_jsobject);
+  Node* internal_field = assembler_->Load(
+      MachineType::Pointer(), FromId(value),
+      assembler_->IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag +
+                                 kPointerSize * field_no));
+  assembler_->Goto(&merge);
+
+  // No JSObject? Return undefined.
+  // TODO(vogelheim): Check whether this is the appropriate action, or whether
+  //                  the method should take a label instead.
+  assembler_->Bind(&is_not_jsobject);
+  Node* fail_value = assembler_->UndefinedConstant();
+  assembler_->Goto(&merge);
+
+  // Return.
+  assembler_->Bind(&merge);
+  Node* phi = assembler_->Phi(MachineRepresentation::kTagged, internal_field,
+                              fail_value);
+  return FromRaw(phi);
+}
+
+
+// Loads a word-sized value at |offset| from the object behind |value|.
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
+                                                                int offset) {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(assembler_->Load(MachineType::IntPtr(), FromId(value),
+                                  assembler_->IntPtrConstant(offset)));
+}
+
+
+// Double load: reads a pointer at |offset| and then the tagged object it
+// points to.
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
+                                                                 int offset) {
+  CHECK_EQ(kBuilding, state_);
+  return FromRaw(
+      assembler_->Load(MachineType::AnyTagged(),
+                       assembler_->Load(MachineType::Pointer(), FromId(value),
+                                        assembler_->IntPtrConstant(offset))));
+}
+
+
+// Emits a return of the given value.
+void FastAccessorAssembler::ReturnValue(ValueId value) {
+  CHECK_EQ(kBuilding, state_);
+  assembler_->Return(FromId(value));
+}
+
+
+// Returns null from the accessor unless (|value| & |mask|) == 0.
+void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
+  CHECK_EQ(kBuilding, state_);
+  RawMachineLabel pass, fail;
+  assembler_->Branch(
+      assembler_->Word32Equal(
+          assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
+          assembler_->Int32Constant(0)),
+      &pass, &fail);
+  assembler_->Bind(&fail);
+  assembler_->Return(assembler_->NullConstant());
+  assembler_->Bind(&pass);
+}
+
+
+// Returns null from the accessor when |value| is zero; otherwise falls
+// through and continues building.
+void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
+  CHECK_EQ(kBuilding, state_);
+  RawMachineLabel is_null, not_null;
+  assembler_->Branch(
+      assembler_->IntPtrEqual(FromId(value), assembler_->IntPtrConstant(0)),
+      &is_null, &not_null);
+  assembler_->Bind(&is_null);
+  assembler_->Return(assembler_->NullConstant());
+  assembler_->Bind(&not_null);
+}
+
+
+// Allocates a label in the zone (placement new keeps it alive as long as
+// the zone) and returns its id.
+FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
+  CHECK_EQ(kBuilding, state_);
+  RawMachineLabel* label =
+      new (zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
+  return FromRaw(label);
+}
+
+
+// Binds the given label at the current position.
+void FastAccessorAssembler::SetLabel(LabelId label_id) {
+  CHECK_EQ(kBuilding, state_);
+  assembler_->Bind(FromId(label_id));
+}
+
+
+// Jumps to |label_id| when |value_id| is non-zero; falls through otherwise.
+void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
+                                               LabelId label_id) {
+  CHECK_EQ(kBuilding, state_);
+  RawMachineLabel pass;
+  assembler_->Branch(
+      assembler_->IntPtrEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+      &pass, FromId(label_id));
+  assembler_->Bind(&pass);
+}
+
+
+// Finalizes the accessor: compiles the built graph to a code stub and
+// transitions the assembler into kBuilt (or kError on failure). No builder
+// methods may be called afterwards (they CHECK on state_).
+MaybeHandle<Code> FastAccessorAssembler::Build() {
+  CHECK_EQ(kBuilding, state_);
+
+  // Cleanup: We no longer need this.
+  nodes_.clear();
+  labels_.clear();
+
+  // Export the schedule and call the compiler.
+  Schedule* schedule = assembler_->Export();
+  MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
+      assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
+      schedule, Code::STUB, "FastAccessorAssembler");
+
+  // Update state & return.
+  state_ = !code.is_null() ? kBuilt : kError;
+  return code;
+}
+
+
+// Registers |node| and returns its id (index into nodes_).
+FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
+  nodes_.push_back(node);
+  ValueId value = {nodes_.size() - 1};
+  return value;
+}
+
+
+// Registers |label| and returns its id (index into labels_).
+FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
+    RawMachineLabel* label) {
+  labels_.push_back(label);
+  LabelId label_id = {labels_.size() - 1};
+  return label_id;
+}
+
+
+// Resolves a value id back to its node; CHECKs on invalid or null entries.
+Node* FastAccessorAssembler::FromId(ValueId value) const {
+  CHECK_LT(value.value_id, nodes_.size());
+  CHECK_NOT_NULL(nodes_.at(value.value_id));
+  return nodes_.at(value.value_id);
+}
+
+
+// Resolves a label id back to its label; CHECKs on invalid or null entries.
+RawMachineLabel* FastAccessorAssembler::FromId(LabelId label) const {
+  CHECK_LT(label.label_id, labels_.size());
+  CHECK_NOT_NULL(labels_.at(label.label_id));
+  return labels_.at(label.label_id);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/fast-accessor-assembler.h b/deps/v8/src/compiler/fast-accessor-assembler.h
new file mode 100644
index 0000000000..a9df3f0749
--- /dev/null
+++ b/deps/v8/src/compiler/fast-accessor-assembler.h
@@ -0,0 +1,106 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+#define V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
+
+#include <stdint.h>
+#include <vector>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "include/v8-experimental.h"
+#include "src/base/macros.h"
+#include "src/base/smart-pointers.h"
+#include "src/handles.h"
+
+
+namespace v8 {
+namespace internal {
+
+class Code;
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class Node;
+class RawMachineAssembler;
+class RawMachineLabel;
+
+
+// This interface "exports" an aggregated subset of RawMachineAssembler, for
+// use by the API to implement Fast DOM Accessors.
+//
+// This interface is made for this single purpose only and does not attempt
+// to implement a general purpose solution. If you need one, please look at
+// RawMachineAssembler instead.
+//
+// The life cycle of a FastAccessorAssembler has two phases:
+// - After creating the instance, you can call an arbitrary sequence of
+// builder functions to build the desired function.
+// - When done, you can Build() the accessor and query for the build results.
+//
+// You cannot call any result getters before Build() was called & successful;
+// and you cannot call any builder functions after Build() was called.
+class FastAccessorAssembler {
+ public:
+ typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
+ typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
+
+ explicit FastAccessorAssembler(Isolate* isolate);
+ ~FastAccessorAssembler();
+
+ // Builder / assembler functions:
+ ValueId IntegerConstant(int int_constant);
+ ValueId GetReceiver();
+ ValueId LoadInternalField(ValueId value_id, int field_no);
+ ValueId LoadValue(ValueId value_id, int offset);
+ ValueId LoadObject(ValueId value_id, int offset);
+
+ // Builder / assembler functions for control flow.
+ void ReturnValue(ValueId value_id);
+ void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
+ void CheckNotZeroOrReturnNull(ValueId value_id);
+
+ // TODO(vogelheim): Implement a C++ callback.
+ // void CheckNotNullOrCallback(ValueId value_id, ..c++-callback type...,
+ // ValueId arg1, ValueId arg2, ...);
+
+ LabelId MakeLabel();
+ void SetLabel(LabelId label_id);
+ void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+
+ // Assemble the code.
+ MaybeHandle<Code> Build();
+
+ private:
+ ValueId FromRaw(Node* node);
+ LabelId FromRaw(RawMachineLabel* label);
+ Node* FromId(ValueId value) const;
+ RawMachineLabel* FromId(LabelId value) const;
+
+ Zone* zone() { return &zone_; }
+
+ Zone zone_;
+ base::SmartPointer<RawMachineAssembler> assembler_;
+
+ // To prevent exposing the RMA internals to the outside world, we'll map
+  // Node and Label pointers to integers wrapped in ValueId and LabelId
+ // These vectors maintain this mapping.
+ std::vector<Node*> nodes_;
+ std::vector<RawMachineLabel*> labels_;
+
+ // Remember the current state for easy error checking. (We prefer to be
+ // strict as this class will be exposed at the API.)
+ enum { kBuilding, kBuilt, kError } state_;
+
+ DISALLOW_COPY_AND_ASSIGN(FastAccessorAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_FAST_ACCESSOR_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index f800b7786f..7c3f9b2741 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -22,7 +22,8 @@ void FrameElider::MarkBlocks() {
for (auto block : instruction_blocks()) {
if (block->needs_frame()) continue;
for (auto i = block->code_start(); i < block->code_end(); ++i) {
- if (InstructionAt(i)->IsCall()) {
+ if (InstructionAt(i)->IsCall() ||
+ InstructionAt(i)->opcode() == ArchOpcode::kArchDeoptimize) {
block->mark_needs_frame();
break;
}
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 7170a845f7..387d6a9bbb 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/functional.h"
#include "src/compiler/frame-states.h"
+
+#include "src/base/functional.h"
#include "src/handles-inl.h"
namespace v8 {
@@ -51,9 +52,15 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kJavaScriptFunction:
os << "JS_FRAME";
break;
+ case FrameStateType::kInterpretedFunction:
+ os << "INTERPRETED_FRAME";
+ break;
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
break;
+ case FrameStateType::kConstructStub:
+ os << "CONSTRUCT_STUB";
+ break;
}
return os;
}
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 0684f112aa..ddb55c35d2 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -76,8 +76,10 @@ class OutputFrameStateCombine {
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
- kArgumentsAdaptor // Represents an ArgumentsAdaptorFrame.
+ kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
+ kInterpretedFunction, // Represents an InterpretedFrame.
+ kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
+ kConstructStub // Represents a ConstructStubFrame.
};
@@ -107,6 +109,11 @@ class FrameStateFunctionInfo {
return context_calling_mode_;
}
+ static bool IsJSFunctionType(FrameStateType type) {
+ return type == FrameStateType::kJavaScriptFunction ||
+ type == FrameStateType::kInterpretedFunction;
+ }
+
private:
FrameStateType const type_;
int const parameter_count_;
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index 079fccb71c..b08030b8c6 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -12,12 +12,40 @@ namespace v8 {
namespace internal {
namespace compiler {
-Frame::Frame(int fixed_frame_size_in_slots)
- : frame_slot_count_(fixed_frame_size_in_slots),
- spilled_callee_register_slot_count_(0),
- stack_slot_count_(0),
- allocated_registers_(NULL),
- allocated_double_registers_(NULL) {}
+Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
+ : needs_frame_((descriptor != nullptr) &&
+ descriptor->RequiresFrameAsIncoming()),
+ frame_slot_count_(fixed_frame_size_in_slots),
+ callee_saved_slot_count_(0),
+ spill_slot_count_(0),
+ allocated_registers_(nullptr),
+ allocated_double_registers_(nullptr) {}
+
+
+void FrameAccessState::SetFrameAccessToDefault() {
+ if (frame()->needs_frame() && !FLAG_turbo_sp_frame_access) {
+ SetFrameAccessToFP();
+ } else {
+ SetFrameAccessToSP();
+ }
+}
+
+
+FrameOffset FrameAccessState::GetFrameOffset(int spill_slot) const {
+ const int offset =
+ (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
+ kPointerSize;
+ if (access_frame_with_fp()) {
+ DCHECK(frame()->needs_frame());
+ return FrameOffset::FromFramePointer(offset);
+ } else {
+ // No frame. Retrieve all parameters relative to stack pointer.
+ int sp_offset =
+ offset + ((frame()->GetSpToFpSlotCount() + sp_delta()) * kPointerSize);
+ return FrameOffset::FromStackPointer(sp_offset);
+ }
+}
+
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 9764b261ef..72f756b0dc 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -12,19 +12,27 @@ namespace v8 {
namespace internal {
namespace compiler {
+class CallDescriptor;
+
// Collects the spill slot and other frame slot requirements for a compiled
// function. Frames are usually populated by the register allocator and are used
-// by Linkage to generate code for the prologue and epilogue to compiled code.
+// by Linkage to generate code for the prologue and epilogue to compiled
+// code. Frame objects must be considered immutable once they've been
+// instantiated and the basic information about the frame has been collected
+// into them. Mutable state associated with the frame is stored separately in
+// FrameAccessState.
//
-// Frames are divided up into three regions. The first is the fixed header,
-// which always has a constant size and can be predicted before code generation
-// begins depending on the type of code being generated. The second is the
-// region for spill slots, which is immediately below the fixed header and grows
-// as the register allocator needs to spill to the stack and asks the frame for
-// more space. The third region, which contains the callee-saved registers must
-// be reserved after register allocation, since its size can only be precisely
-// determined after register allocation once the number of used callee-saved
-// register is certain.
+// Frames are divided up into three regions.
+// - The first is the fixed header, which always has a constant size and can be
+// predicted before code generation begins depending on the type of code being
+// generated.
+// - The second is the region for spill slots, which is immediately below the
+// fixed header and grows as the register allocator needs to spill to the
+// stack and asks the frame for more space.
+// - The third region, which contains the callee-saved registers must be
+// reserved after register allocation, since its size can only be precisely
+// determined after register allocation once the number of used callee-saved
+// register is certain.
//
// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
// two slots.
@@ -35,10 +43,10 @@ namespace compiler {
// for example JSFunctions store the function context and marker in the fixed
// header, with slot index 2 corresponding to the current function context and 3
// corresponding to the frame marker/JSFunction. The frame region immediately
-// below the fixed header contains spill slots starting a 4 for JsFunctions. The
-// callee-saved frame region below that starts at 4+spilled_slot_count. Callee
-// stack slots corresponding to parameters are accessible through negative slot
-// ids.
+// below the fixed header contains spill slots starting at 4 for JsFunctions.
+// The callee-saved frame region below that starts at 4+spill_slot_count_.
+// Callee stack slots corresponding to parameters are accessible through
+// negative slot ids.
//
// Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its
@@ -47,95 +55,120 @@ namespace compiler {
// Below an example JSFunction Frame with slot ids, frame regions and contents:
//
// slot JS frame
-// +-----------------+----------------------------
-// -n-1 | parameter 0 | ^
-// |- - - - - - - - -| |
-// -n | | Caller
-// ... | ... | frame slots
-// -2 | parameter n-1 | (slot < 0)
-// |- - - - - - - - -| |
-// -1 | parameter n | v
-// -----+-----------------+----------------------------
-// 0 | return addr | ^ ^
-// |- - - - - - - - -| | |
-// 1 | saved frame ptr | Fixed |
-// |- - - - - - - - -| Header <-- frame ptr |
-// 2 | Context | | |
-// |- - - - - - - - -| | |
-// 3 |JSFunction/Marker| v |
-// +-----------------+---- |
-// 4 | spill 1 | ^ Callee
-// |- - - - - - - - -| | frame slots
-// ... | ... | Spill slots (slot >= 0)
-// |- - - - - - - - -| | |
-// m+4 | spill m | v |
-// +-----------------+---- |
-// m+5 | callee-saved 1 | ^ |
-// |- - - - - - - - -| | |
-// | ... | Callee-saved |
-// |- - - - - - - - -| | |
-// m+r+4 | callee-saved r | v v
-// -----+-----------------+----- <-- stack ptr ---------
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | Context | | |
+// |- - - - - - - - -| | |
+// 3 |JSFunction/Marker| v |
+// +-----------------+---- |
+// 4 | spill 1 | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | ... | Spill slots (slot >= 0)
+// |- - - - - - - - -| | |
+// m+4 | spill m | v |
+// +-----------------+---- |
+// m+5 | callee-saved 1 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Callee-saved |
+// |- - - - - - - - -| | |
+// m+r+4 | callee-saved r | v v
+// -----+-----------------+----- <-- stack ptr -------------
//
class Frame : public ZoneObject {
public:
- explicit Frame(int fixed_frame_size_in_slots);
+ explicit Frame(int fixed_frame_size_in_slots,
+ const CallDescriptor* descriptor);
- inline int GetTotalFrameSlotCount() { return frame_slot_count_; }
+ static int FPOffsetToSlot(int frame_offset) {
+ return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
+ frame_offset / kPointerSize;
+ }
- inline int GetSavedCalleeRegisterSlotCount() {
- return spilled_callee_register_slot_count_;
+ static int SlotToFPOffset(int slot) {
+ return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
+ kPointerSize;
}
- inline int GetSpillSlotCount() { return stack_slot_count_; }
+
+ inline bool needs_frame() const { return needs_frame_; }
+ inline void MarkNeedsFrame() { needs_frame_ = true; }
+
+ inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
+
+ inline int GetSpToFpSlotCount() const {
+ return GetTotalFrameSlotCount() -
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ }
+ inline int GetSavedCalleeRegisterSlotCount() const {
+ return callee_saved_slot_count_;
+ }
+ inline int GetSpillSlotCount() const { return spill_slot_count_; }
inline void SetElidedFrameSizeInSlots(int slots) {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
- DCHECK_EQ(0, stack_slot_count_);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ DCHECK_EQ(0, spill_slot_count_);
frame_slot_count_ = slots;
}
void SetAllocatedRegisters(BitVector* regs) {
- DCHECK(allocated_registers_ == NULL);
+ DCHECK(allocated_registers_ == nullptr);
allocated_registers_ = regs;
}
void SetAllocatedDoubleRegisters(BitVector* regs) {
- DCHECK(allocated_double_registers_ == NULL);
+ DCHECK(allocated_double_registers_ == nullptr);
allocated_double_registers_ = regs;
}
- bool DidAllocateDoubleRegisters() {
+ bool DidAllocateDoubleRegisters() const {
return !allocated_double_registers_->IsEmpty();
}
int AlignSavedCalleeRegisterSlots() {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ needs_frame_ = true;
int delta = frame_slot_count_ & 1;
frame_slot_count_ += delta;
return delta;
}
void AllocateSavedCalleeRegisterSlots(int count) {
+ needs_frame_ = true;
frame_slot_count_ += count;
- spilled_callee_register_slot_count_ += count;
+ callee_saved_slot_count_ += count;
}
int AllocateSpillSlot(int width) {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ needs_frame_ = true;
int frame_slot_count_before = frame_slot_count_;
int slot = AllocateAlignedFrameSlot(width);
- stack_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
+ spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
return slot;
}
int ReserveSpillSlots(size_t slot_count) {
- DCHECK_EQ(0, spilled_callee_register_slot_count_);
- DCHECK_EQ(0, stack_slot_count_);
- stack_slot_count_ += static_cast<int>(slot_count);
+ DCHECK_EQ(0, callee_saved_slot_count_);
+ DCHECK_EQ(0, spill_slot_count_);
+ needs_frame_ = true;
+ spill_slot_count_ += static_cast<int>(slot_count);
frame_slot_count_ += static_cast<int>(slot_count);
return frame_slot_count_ - 1;
}
+ static const int kContextSlot = 2 + StandardFrameConstants::kCPSlotCount;
+ static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
+
private:
int AllocateAlignedFrameSlot(int width) {
DCHECK(width == 4 || width == 8);
@@ -149,9 +182,10 @@ class Frame : public ZoneObject {
}
private:
+ bool needs_frame_;
int frame_slot_count_;
- int spilled_callee_register_slot_count_;
- int stack_slot_count_;
+ int callee_saved_slot_count_;
+ int spill_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
@@ -184,6 +218,38 @@ class FrameOffset {
static const int kFromSp = 1;
static const int kFromFp = 0;
};
+
+// Encapsulates the mutable state maintained during code generation about the
+// current function's frame.
+class FrameAccessState : public ZoneObject {
+ public:
+ explicit FrameAccessState(Frame* const frame)
+ : frame_(frame), access_frame_with_fp_(false), sp_delta_(0) {
+ SetFrameAccessToDefault();
+ }
+
+ Frame* frame() const { return frame_; }
+
+ int sp_delta() const { return sp_delta_; }
+ void ClearSPDelta() { sp_delta_ = 0; }
+ void IncreaseSPDelta(int amount) { sp_delta_ += amount; }
+
+ bool access_frame_with_fp() const { return access_frame_with_fp_; }
+ void SetFrameAccessToDefault();
+ void SetFrameAccessToFP() { access_frame_with_fp_ = true; }
+ void SetFrameAccessToSP() { access_frame_with_fp_ = false; }
+
+ // Get the frame offset for a given spill slot. The location depends on the
+ // calling convention and the specific frame layout, and may thus be
+ // architecture-specific. Negative spill slots indicate arguments on the
+ // caller's frame.
+ FrameOffset GetFrameOffset(int spill_slot) const;
+
+ private:
+ Frame* const frame_;
+ bool access_frame_with_fp_;
+ int sp_delta_;
+};
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 273b5dd0cf..683c345c14 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -73,7 +73,8 @@ class AdvancedReducer : public Reducer {
// Revisit the {node} again later.
virtual void Revisit(Node* node) = 0;
// Replace value uses of {node} with {value} and effect uses of {node} with
- // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // {effect}. If {effect == nullptr}, then use the effect input to {node}.
+ // All
// control uses will be relaxed assuming {node} cannot throw.
virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
Node* control) = 0;
@@ -149,7 +150,7 @@ class GraphReducer : public AdvancedReducer::Editor {
void Replace(Node* node, Node* replacement) final;
// Replace value uses of {node} with {value} and effect uses of {node} with
- // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // {effect}. If {effect == nullptr}, then use the effect input to {node}. All
// control uses will be relaxed assuming {node} cannot throw.
void ReplaceWithValue(Node* node, Node* value, Node* effect,
Node* control) final;
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 1b0997a6bd..07851768b3 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -41,7 +41,7 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
'_');
EmbeddedVector<char, 256> full_filename;
- if (phase == NULL) {
+ if (phase == nullptr) {
SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
} else {
SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
@@ -50,9 +50,9 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
}
-static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+static int SafeId(Node* node) { return node == nullptr ? -1 : node->id(); }
static const char* SafeMnemonic(Node* node) {
- return node == NULL ? "null" : node->op()->mnemonic();
+ return node == nullptr ? "null" : node->op()->mnemonic();
}
#define DEAD_COLOR "#999999"
@@ -158,7 +158,7 @@ class JSONGraphEdgeWriter {
void PrintEdges(Node* node) {
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- if (input == NULL) continue;
+ if (input == nullptr) continue;
PrintEdge(node, i, input);
}
}
@@ -169,7 +169,7 @@ class JSONGraphEdgeWriter {
} else {
os_ << ",\n";
}
- const char* edge_type = NULL;
+ const char* edge_type = nullptr;
if (index < NodeProperties::FirstValueIndex(from)) {
edge_type = "unknown";
} else if (index < NodeProperties::FirstContextIndex(from)) {
@@ -207,190 +207,6 @@ std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
}
-class GraphVisualizer {
- public:
- GraphVisualizer(std::ostream& os, Zone* zone, const Graph* graph)
- : all_(zone, graph), os_(os) {}
-
- void Print();
-
- void PrintNode(Node* node, bool gray);
-
- private:
- void PrintEdge(Edge edge);
-
- AllNodes all_;
- std::ostream& os_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
-};
-
-
-static Node* GetControlCluster(Node* node) {
- if (OperatorProperties::IsBasicBlockBegin(node->op())) {
- return node;
- } else if (node->op()->ControlInputCount() == 1) {
- Node* control = NodeProperties::GetControlInput(node, 0);
- return control != NULL &&
- OperatorProperties::IsBasicBlockBegin(control->op())
- ? control
- : NULL;
- } else {
- return NULL;
- }
-}
-
-
-void GraphVisualizer::PrintNode(Node* node, bool gray) {
- Node* control_cluster = GetControlCluster(node);
- if (control_cluster != NULL) {
- os_ << " subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
- }
- os_ << " ID" << SafeId(node) << " [\n";
-
- os_ << " shape=\"record\"\n";
- switch (node->opcode()) {
- case IrOpcode::kEnd:
- case IrOpcode::kDead:
- case IrOpcode::kStart:
- os_ << " style=\"diagonals\"\n";
- break;
- case IrOpcode::kMerge:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kLoop:
- os_ << " style=\"rounded\"\n";
- break;
- default:
- break;
- }
-
- if (gray) {
- os_ << " style=\"filled\"\n"
- << " fillcolor=\"" DEAD_COLOR "\"\n";
- }
-
- std::ostringstream label;
- label << *node->op();
- os_ << " label=\"{{#" << SafeId(node) << ":" << Escaped(label);
-
- auto i = node->input_edges().begin();
- for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">#" << SafeId((*i).to());
- }
- for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
- ++i, j--) {
- os_ << "|<I" << (*i).index() << ">X #" << SafeId((*i).to());
- }
- for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
- ++i, j--) {
- os_ << "|<I" << (*i).index() << ">F #" << SafeId((*i).to());
- }
- for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">E #" << SafeId((*i).to());
- }
-
- if (OperatorProperties::IsBasicBlockBegin(node->op()) ||
- GetControlCluster(node) == NULL) {
- for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
- os_ << "|<I" << (*i).index() << ">C #" << SafeId((*i).to());
- }
- }
- os_ << "}";
-
- if (FLAG_trace_turbo_types && NodeProperties::IsTyped(node)) {
- Type* type = NodeProperties::GetType(node);
- std::ostringstream type_out;
- type->PrintTo(type_out);
- os_ << "|" << Escaped(type_out);
- }
- os_ << "}\"\n";
-
- os_ << " ]\n";
- if (control_cluster != NULL) os_ << " }\n";
-}
-
-
-static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
- if (NodeProperties::IsPhi(from)) {
- Node* control = NodeProperties::GetControlInput(from, 0);
- return control != NULL && control->opcode() != IrOpcode::kMerge &&
- control != to && index != 0;
- } else if (from->opcode() == IrOpcode::kLoop) {
- return index != 0;
- } else {
- return false;
- }
-}
-
-
-void GraphVisualizer::PrintEdge(Edge edge) {
- Node* from = edge.from();
- int index = edge.index();
- Node* to = edge.to();
-
- if (!all_.IsLive(to)) return; // skip inputs that point to dead or NULL.
-
- bool unconstrained = IsLikelyBackEdge(from, index, to);
- os_ << " ID" << SafeId(from);
-
- if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
- GetControlCluster(from) == NULL ||
- (from->op()->ControlInputCount() > 0 &&
- NodeProperties::GetControlInput(from) != to)) {
- os_ << ":I" << index << ":n -> ID" << SafeId(to) << ":s"
- << "[" << (unconstrained ? "constraint=false, " : "")
- << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
- << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
- << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
- } else {
- os_ << " -> ID" << SafeId(to) << ":s [color=transparent, "
- << (unconstrained ? "constraint=false, " : "")
- << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
- }
- os_ << "\n";
-}
-
-
-void GraphVisualizer::Print() {
- os_ << "digraph D {\n"
- << " node [fontsize=8,height=0.25]\n"
- << " rankdir=\"BT\"\n"
- << " ranksep=\"1.2 equally\"\n"
- << " overlap=\"false\"\n"
- << " splines=\"true\"\n"
- << " concentrate=\"true\"\n"
- << " \n";
-
- // Find all nodes that are not reachable from end that use live nodes.
- std::set<Node*> gray;
- for (Node* const node : all_.live) {
- for (Node* const use : node->uses()) {
- if (!all_.IsLive(use)) gray.insert(use);
- }
- }
-
- // Make sure all nodes have been output before writing out the edges.
- for (Node* const node : all_.live) PrintNode(node, false);
- for (Node* const node : gray) PrintNode(node, true);
-
- // With all the nodes written, add the edges.
- for (Node* const node : all_.live) {
- for (Edge edge : node->use_edges()) {
- PrintEdge(edge);
- }
- }
- os_ << "}\n";
-}
-
-
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad) {
- Zone tmp_zone;
- GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
- return os;
-}
-
-
class GraphC1Visualizer {
public:
GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
@@ -581,7 +397,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
PrintIndent();
os_ << "flags\n";
- if (current->dominator() != NULL) {
+ if (current->dominator() != nullptr) {
PrintBlockProperty("dominator", current->dominator()->rpo_number());
}
@@ -639,7 +455,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
os_ << " ";
PrintType(node);
}
- if (positions != NULL) {
+ if (positions != nullptr) {
SourcePosition position = positions->GetSourcePosition(node);
if (position.IsKnown()) {
os_ << " pos:" << position.raw();
@@ -652,7 +468,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (control != BasicBlock::kNone) {
PrintIndent();
os_ << "0 0 ";
- if (current->control_input() != NULL) {
+ if (current->control_input() != nullptr) {
PrintNode(current->control_input());
} else {
os_ << -1 - current->rpo_number() << " Goto";
@@ -661,7 +477,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
for (BasicBlock* successor : current->successors()) {
os_ << " B" << successor->rpo_number();
}
- if (FLAG_trace_turbo_types && current->control_input() != NULL) {
+ if (FLAG_trace_turbo_types && current->control_input() != nullptr) {
os_ << " ";
PrintType(current->control_input());
}
@@ -669,7 +485,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
}
}
- if (instructions != NULL) {
+ if (instructions != nullptr) {
Tag LIR_tag(this, "LIR");
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
@@ -715,7 +531,7 @@ void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
int vreg) {
- if (range != NULL && !range->IsEmpty()) {
+ if (range != nullptr && !range->IsEmpty()) {
PrintIndent();
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
@@ -755,7 +571,7 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
}
UsePosition* current_pos = range->first_pos();
- while (current_pos != NULL) {
+ while (current_pos != nullptr) {
if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
os_ << " " << current_pos->pos().value() << " M";
}
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index d719540e23..1a971a55ed 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -24,14 +24,6 @@ class SourcePositionTable;
FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
const char* suffix, const char* mode);
-struct AsDOT {
- explicit AsDOT(const Graph& g) : graph(g) {}
- const Graph& graph;
-};
-
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
-
-
struct AsJSON {
AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
const Graph& graph;
@@ -56,8 +48,8 @@ struct AsC1VCompilation {
struct AsC1V {
AsC1V(const char* phase, const Schedule* schedule,
- const SourcePositionTable* positions = NULL,
- const InstructionSequence* instructions = NULL)
+ const SourcePositionTable* positions = nullptr,
+ const InstructionSequence* instructions = nullptr)
: schedule_(schedule),
instructions_(instructions),
positions_(positions),
@@ -76,7 +68,6 @@ struct AsC1VRegisterAllocationData {
const RegisterAllocationData* data_;
};
-std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac);
std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
std::ostream& operator<<(std::ostream& os,
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 28686aa2ca..b53c7fd308 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -87,9 +87,6 @@ class Graph : public ZoneObject {
// Clone the {node}, and assign a new node id to the copy.
Node* CloneNode(const Node* node);
- template <class Visitor>
- inline void VisitNodeInputsFromEnd(Visitor* visitor);
-
Zone* zone() const { return zone_; }
Node* start() const { return start_; }
Node* end() const { return end_; }
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 5ca9c20396..f63bc22e43 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -4,6 +4,7 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -11,7 +12,6 @@
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -48,12 +48,18 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return Operand(offset.from_stack_pointer() ? esp : ebp,
offset.offset() + extra);
}
+ Operand ToMaterializableOperand(int materializable_offset) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ Frame::FPOffsetToSlot(materializable_offset));
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
Operand HighOperand(InstructionOperand* op) {
DCHECK(op->IsDoubleStackSlot());
return ToOperand(op, kPointerSize);
@@ -326,13 +332,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} while (false)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(esp, ebp);
- __ pop(ebp);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(esp, Immediate(sp_slot_delta * kPointerSize));
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ mov(ebp, MemOperand(ebp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -352,10 +370,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(reg);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -364,6 +384,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -376,6 +397,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -385,8 +407,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -395,10 +419,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -408,6 +437,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -420,12 +451,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -619,6 +653,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
+ case kSSEFloat32Round: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -959,14 +1000,51 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kIA32PushFloat32:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ Move(kScratchDoubleReg, i.InputDouble(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ movsd(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movss(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
+ break;
+ case kIA32PushFloat64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else if (HasImmediateInput(instr, 0)) {
+ __ Move(kScratchDoubleReg, i.InputDouble(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ __ movsd(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), kScratchDoubleReg);
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
+ break;
case kIA32Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
__ push(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kIA32Poke: {
@@ -1337,20 +1415,20 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
// Assemble a prologue similar the to cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
- // TODO(turbofan): this prologue is redundant with OSR, but needed for
+ // TODO(turbofan): this prologue is redundant with OSR, but still needed for
// code aging.
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1398,10 +1476,10 @@ void CodeGenerator::AssembleReturn() {
}
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -1421,7 +1499,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1448,11 +1526,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromFrame(src, &offset)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ mov(dst, Operand(ebp, offset));
+ __ mov(dst, g.ToMaterializableOperand(offset));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- __ push(Operand(ebp, offset));
+ __ push(g.ToMaterializableOperand(offset));
__ pop(dst);
}
} else if (destination->IsRegister()) {
@@ -1531,25 +1609,38 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- IA32OperandConverter g(this, NULL);
+ IA32OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
Register src = g.ToRegister(source);
Register dst = g.ToRegister(destination);
- __ xchg(dst, src);
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
} else if (source->IsRegister() && destination->IsStackSlot()) {
// Register-memory.
- __ xchg(g.ToRegister(source), g.ToOperand(destination));
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- __ push(dst);
+ Register src = g.ToRegister(source);
__ push(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand dst = g.ToOperand(destination);
+ __ mov(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
__ pop(dst);
- __ pop(src);
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory.
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 97dec17c03..816487db8c 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -43,6 +43,7 @@ namespace compiler {
V(SSEFloat32Abs) \
V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
+ V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
@@ -95,6 +96,8 @@ namespace compiler {
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
+ V(IA32PushFloat32) \
+ V(IA32PushFloat64) \
V(IA32Poke) \
V(IA32StackCheck)
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
new file mode 100644
index 0000000000..0a8fcac59a
--- /dev/null
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -0,0 +1,135 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kIA32Add:
+ case kIA32And:
+ case kIA32Cmp:
+ case kIA32Test:
+ case kIA32Or:
+ case kIA32Xor:
+ case kIA32Sub:
+ case kIA32Imul:
+ case kIA32ImulHigh:
+ case kIA32UmulHigh:
+ case kIA32Idiv:
+ case kIA32Udiv:
+ case kIA32Not:
+ case kIA32Neg:
+ case kIA32Shl:
+ case kIA32Shr:
+ case kIA32Sar:
+ case kIA32Ror:
+ case kIA32Lzcnt:
+ case kIA32Tzcnt:
+ case kIA32Popcnt:
+ case kIA32Lea:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Mul:
+ case kSSEFloat32Div:
+ case kSSEFloat32Max:
+ case kSSEFloat32Min:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat32Round:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Mul:
+ case kSSEFloat64Div:
+ case kSSEFloat64Mod:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ case kSSEFloat64Sqrt:
+ case kSSEFloat64Round:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ case kSSEInt32ToFloat64:
+ case kSSEUint32ToFloat64:
+ case kSSEFloat64ExtractLowWord32:
+ case kSSEFloat64ExtractHighWord32:
+ case kSSEFloat64InsertLowWord32:
+ case kSSEFloat64InsertHighWord32:
+ case kSSEFloat64LoadLowWord32:
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ case kAVXFloat32Div:
+ case kAVXFloat32Max:
+ case kAVXFloat32Min:
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ case kAVXFloat64Div:
+ case kAVXFloat64Max:
+ case kAVXFloat64Min:
+ case kAVXFloat64Abs:
+ case kAVXFloat64Neg:
+ case kAVXFloat32Abs:
+ case kAVXFloat32Neg:
+ case kIA32BitcastFI:
+ case kIA32BitcastIF:
+ return (instr->addressing_mode() == kMode_None)
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
+
+ case kIA32Movsxbl:
+ case kIA32Movzxbl:
+ case kIA32Movb:
+ case kIA32Movsxwl:
+ case kIA32Movzxwl:
+ case kIA32Movw:
+ case kIA32Movl:
+ case kIA32Movss:
+ case kIA32Movsd:
+ // Moves are used for memory load/store operations.
+ return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
+
+ case kIA32StackCheck:
+ return kIsLoadOperation;
+
+ case kIA32Push:
+ case kIA32PushFloat32:
+ case kIA32PushFloat64:
+ case kIA32Poke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 8225c96b12..090645212e 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -50,18 +50,18 @@ class IA32OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == NULL)
+ int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
- if (base != NULL) {
+ if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
- base = NULL;
+ base = nullptr;
}
}
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
@@ -84,7 +84,7 @@ class IA32OperandGenerator final : public OperandGenerator {
}
} else {
DCHECK(scale >= 0 && scale <= 3);
- if (index != NULL) {
+ if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
@@ -109,7 +109,7 @@ class IA32OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -169,29 +169,29 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -214,12 +214,12 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -256,26 +256,27 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kIA32Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kIA32Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kIA32Movb;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kIA32Movw;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -283,7 +284,8 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
+ } else if (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@@ -296,36 +298,39 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -345,38 +350,42 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
IA32OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
InstructionOperand value_operand =
- g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
- : g.UseRegister(value));
+ g.CanBeImmediate(value) ? g.UseImmediate(value)
+ : ((rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)
+ ? g.UseByteRegister(value)
+ : g.UseRegister(value));
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -547,8 +556,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
VisitShift(this, node, kIA32Shl);
@@ -594,7 +603,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
// Try to match the Add to a lea pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
@@ -631,8 +640,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
IA32OperandGenerator g(this);
@@ -851,11 +860,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
@@ -866,9 +895,19 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
IA32OperandGenerator g(this);
// Prepare for C function call.
@@ -881,29 +920,34 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
- if (Node* input = (*arguments)[n]) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(node)
- ? g.UseImmediate(input)
- : g.UseRegister(input);
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(*arguments)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input == nullptr) continue;
- // TODO(titzer): IA32Push cannot handle stack->stack double moves
- // because there is no way to encode fixed double slots.
+ if (input.node() == nullptr) continue;
InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input))
- ? g.UseRegister(input)
- : g.Use(input);
- Emit(kIA32Push, g.NoOutput(), value);
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
+ if (input.type() == MachineType::Float32()) {
+ Emit(kIA32PushFloat32, g.NoOutput(), value);
+ } else if (input.type() == MachineType::Float64()) {
+ Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else {
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
}
}
}
@@ -1061,12 +1105,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1274,8 +1318,14 @@ InstructionSelector::SupportedMachineOperatorFlags() {
flags |= MachineOperatorBuilder::kWord32Popcnt;
}
if (CpuFeatures::IsSupported(SSE4_1)) {
- flags |= MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index d4cc2db9a6..6c31ac8f9d 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -39,38 +39,43 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchTailCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchTailCallJSFunction) \
- V(ArchPrepareCallCFunction) \
- V(ArchCallCFunction) \
- V(ArchLazyBailout) \
- V(ArchJmp) \
- V(ArchLookupSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(ArchStoreWithWriteBarrier) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchPrepareTailCall) \
+ V(ArchLazyBailout) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchThrowTerminator) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(ArchStoreWithWriteBarrier) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadWord64) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreWord64) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64)
+
+#define ARCH_OPCODE_LIST(V) \
+ COMMON_ARCH_OPCODE_LIST(V) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
new file mode 100644
index 0000000000..2f329ead41
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+#include "src/base/adapters.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
+ Zone* zone,
+ Instruction* instr)
+ : instr_(instr),
+ successors_(zone),
+ unscheduled_predecessors_count_(0),
+ latency_(GetInstructionLatency(instr)),
+ total_latency_(-1),
+ start_cycle_(-1) {
+}
+
+
+void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
+ ScheduleGraphNode* node) {
+ successors_.push_back(node);
+ node->unscheduled_predecessors_count_++;
+}
+
+
+InstructionScheduler::InstructionScheduler(Zone* zone,
+ InstructionSequence* sequence)
+ : zone_(zone),
+ sequence_(sequence),
+ graph_(zone),
+ last_side_effect_instr_(nullptr),
+ pending_loads_(zone),
+ last_live_in_reg_marker_(nullptr) {
+}
+
+
+void InstructionScheduler::StartBlock(RpoNumber rpo) {
+ DCHECK(graph_.empty());
+ DCHECK(last_side_effect_instr_ == nullptr);
+ DCHECK(pending_loads_.empty());
+ DCHECK(last_live_in_reg_marker_ == nullptr);
+ sequence()->StartBlock(rpo);
+}
+
+
+void InstructionScheduler::EndBlock(RpoNumber rpo) {
+ ScheduleBlock();
+ sequence()->EndBlock(rpo);
+ graph_.clear();
+ last_side_effect_instr_ = nullptr;
+ pending_loads_.clear();
+ last_live_in_reg_marker_ = nullptr;
+}
+
+
+void InstructionScheduler::AddInstruction(Instruction* instr) {
+ ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
+
+ if (IsBlockTerminator(instr)) {
+ // Make sure that basic block terminators are not moved by adding them
+ // as a successor of every instruction.
+ for (auto node : graph_) {
+ node->AddSuccessor(new_node);
+ }
+ } else if (IsFixedRegisterParameter(instr)) {
+ if (last_live_in_reg_marker_ != nullptr) {
+ last_live_in_reg_marker_->AddSuccessor(new_node);
+ }
+ last_live_in_reg_marker_ = new_node;
+ } else {
+ if (last_live_in_reg_marker_ != nullptr) {
+ last_live_in_reg_marker_->AddSuccessor(new_node);
+ }
+
+ // Instructions with side effects and memory operations can't be
+ // reordered with respect to each other.
+ if (HasSideEffect(instr)) {
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ for (auto load : pending_loads_) {
+ load->AddSuccessor(new_node);
+ }
+ pending_loads_.clear();
+ last_side_effect_instr_ = new_node;
+ } else if (IsLoadOperation(instr)) {
+ // Load operations can't be reordered with side-effect instructions, but
+ // independent loads can be reordered with respect to each other.
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ pending_loads_.push_back(new_node);
+ }
+
+ // Look for operand dependencies.
+ for (auto node : graph_) {
+ if (HasOperandDependency(node->instruction(), instr)) {
+ node->AddSuccessor(new_node);
+ }
+ }
+ }
+
+ graph_.push_back(new_node);
+}
+
+
+bool InstructionScheduler::CompareNodes(ScheduleGraphNode *node1,
+ ScheduleGraphNode *node2) const {
+ return node1->total_latency() > node2->total_latency();
+}
+
+
+void InstructionScheduler::ScheduleBlock() {
+ ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
+
+ // Compute total latencies so that we can schedule the critical path first.
+ ComputeTotalLatencies();
+
+ // Add nodes which don't have dependencies to the ready list.
+ for (auto node : graph_) {
+ if (!node->HasUnscheduledPredecessor()) {
+ ready_list.push_back(node);
+ }
+ }
+
+ // Go through the ready list and schedule the instructions.
+ int cycle = 0;
+ while (!ready_list.empty()) {
+ auto candidate = ready_list.end();
+ for (auto iterator = ready_list.begin(); iterator != ready_list.end();
+ ++iterator) {
+ // Look for the best candidate to schedule.
+ // We only consider instructions that have all their operands ready and
+ // we try to schedule the critical path first (we look for the instruction
+ // with the highest latency on the path to reach the end of the graph).
+ if (cycle >= (*iterator)->start_cycle()) {
+ if ((candidate == ready_list.end()) ||
+ CompareNodes(*iterator, *candidate)) {
+ candidate = iterator;
+ }
+ }
+ }
+
+ if (candidate != ready_list.end()) {
+ sequence()->AddInstruction((*candidate)->instruction());
+
+ for (auto successor : (*candidate)->successors()) {
+ successor->DropUnscheduledPredecessor();
+ successor->set_start_cycle(
+ std::max(successor->start_cycle(),
+ cycle + (*candidate)->latency()));
+
+ if (!successor->HasUnscheduledPredecessor()) {
+ ready_list.push_back(successor);
+ }
+ }
+
+ ready_list.erase(candidate);
+ }
+
+ cycle++;
+ }
+}
+
+
+int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kArchNop:
+ case kArchStackPointer:
+ case kArchFramePointer:
+ case kArchTruncateDoubleToI:
+ return kNoOpcodeFlags;
+
+ case kArchPrepareCallCFunction:
+ case kArchPrepareTailCall:
+ case kArchCallCFunction:
+ case kArchCallCodeObject:
+ case kArchCallJSFunction:
+ case kArchLazyBailout:
+ return kHasSideEffect;
+
+ case kArchTailCallCodeObject:
+ case kArchTailCallJSFunction:
+ return kHasSideEffect | kIsBlockTerminator;
+
+ case kArchDeoptimize:
+ case kArchJmp:
+ case kArchLookupSwitch:
+ case kArchTableSwitch:
+ case kArchRet:
+ case kArchThrowTerminator:
+ return kIsBlockTerminator;
+
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ return kIsLoadOperation;
+
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ case kArchStoreWithWriteBarrier:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ return GetTargetInstructionFlags(instr);
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+bool InstructionScheduler::HasOperandDependency(
+ const Instruction* instr1, const Instruction* instr2) const {
+ for (size_t i = 0; i < instr1->OutputCount(); ++i) {
+ for (size_t j = 0; j < instr2->InputCount(); ++j) {
+ const InstructionOperand* output = instr1->OutputAt(i);
+ const InstructionOperand* input = instr2->InputAt(j);
+
+ if (output->IsUnallocated() && input->IsUnallocated() &&
+ (UnallocatedOperand::cast(output)->virtual_register() ==
+ UnallocatedOperand::cast(input)->virtual_register())) {
+ return true;
+ }
+
+ if (output->IsConstant() && input->IsUnallocated() &&
+ (ConstantOperand::cast(output)->virtual_register() ==
+ UnallocatedOperand::cast(input)->virtual_register())) {
+ return true;
+ }
+ }
+ }
+
+ // TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies?
+
+ return false;
+}
+
+
+bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
+ return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
+ (instr->flags_mode() == kFlags_branch));
+}
+
+
+void InstructionScheduler::ComputeTotalLatencies() {
+ for (auto node : base::Reversed(graph_)) {
+ int max_latency = 0;
+
+ for (auto successor : node->successors()) {
+ DCHECK(successor->total_latency() != -1);
+ if (successor->total_latency() > max_latency) {
+ max_latency = successor->total_latency();
+ }
+ }
+
+ node->set_total_latency(max_latency + node->latency());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
new file mode 100644
index 0000000000..fafbe47908
--- /dev/null
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -0,0 +1,162 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+#define V8_COMPILER_INSTRUCTION_SCHEDULER_H_
+
+#include "src/compiler/instruction.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A set of flags describing properties of the instructions so that the
+// scheduler is aware of dependencies between instructions.
+enum ArchOpcodeFlags {
+ kNoOpcodeFlags = 0,
+ kIsBlockTerminator = 1, // The instruction marks the end of a basic block
+ // e.g.: jump and return instructions.
+ kHasSideEffect = 2, // The instruction has some side effects (memory
+ // store, function call...)
+ kIsLoadOperation = 4, // The instruction is a memory load.
+};
+
+
+class InstructionScheduler final : public ZoneObject {
+ public:
+ InstructionScheduler(Zone* zone, InstructionSequence* sequence);
+
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
+
+ void AddInstruction(Instruction* instr);
+
+ static bool SchedulerSupported();
+
+ private:
+ // A scheduling graph node.
+ // Represents an instruction and its dependencies.
+ class ScheduleGraphNode: public ZoneObject {
+ public:
+ ScheduleGraphNode(Zone* zone, Instruction* instr);
+
+ // Mark the instruction represented by 'node' as a dependency of this one.
+ // The current instruction will be registered as an unscheduled predecessor
+ // of 'node' (i.e. it must be scheduled before 'node').
+ void AddSuccessor(ScheduleGraphNode* node);
+
+ // Check if all the predecessors of this instruction have been scheduled.
+ bool HasUnscheduledPredecessor() {
+ return unscheduled_predecessors_count_ != 0;
+ }
+
+ // Record that we have scheduled one of the predecessors of this node.
+ void DropUnscheduledPredecessor() {
+ DCHECK(unscheduled_predecessors_count_ > 0);
+ unscheduled_predecessors_count_--;
+ }
+
+ Instruction* instruction() { return instr_; }
+ ZoneDeque<ScheduleGraphNode*>& successors() { return successors_; }
+ int latency() const { return latency_; }
+
+ int total_latency() const { return total_latency_; }
+ void set_total_latency(int latency) { total_latency_ = latency; }
+
+ int start_cycle() const { return start_cycle_; }
+ void set_start_cycle(int start_cycle) { start_cycle_ = start_cycle; }
+
+ private:
+ Instruction* instr_;
+ ZoneDeque<ScheduleGraphNode*> successors_;
+
+ // Number of unscheduled predecessors for this node.
+ int unscheduled_predecessors_count_;
+
+ // Estimate of the instruction latency (the number of cycles it takes for
+ // the instruction to complete).
+ int latency_;
+
+ // The sum of all the latencies on the path from this node to the end of
+ // the graph (i.e. a node with no successor).
+ int total_latency_;
+
+ // The scheduler keeps a nominal cycle count to keep track of when the
+ // result of an instruction is available. This field is updated by the
+ // scheduler to indicate when the value of all the operands of this
+ // instruction will be available.
+ int start_cycle_;
+ };
+
+ // Compare the two nodes and return true if node1 is a better candidate than
+ // node2 (i.e. node1 should be scheduled before node2).
+ bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+
+ // Perform scheduling for the current block.
+ void ScheduleBlock();
+
+ // Return the scheduling properties of the given instruction.
+ int GetInstructionFlags(const Instruction* instr) const;
+ int GetTargetInstructionFlags(const Instruction* instr) const;
+
+ // Return true if instr2 uses any value defined by instr1.
+ bool HasOperandDependency(const Instruction* instr1,
+ const Instruction* instr2) const;
+
+ // Return true if the instruction is a basic block terminator.
+ bool IsBlockTerminator(const Instruction* instr) const;
+
+ // Check whether the given instruction has side effects (e.g. function call,
+ // memory store).
+ bool HasSideEffect(const Instruction* instr) const {
+ return GetInstructionFlags(instr) & kHasSideEffect;
+ }
+
+ // Return true if the instruction is a memory load.
+ bool IsLoadOperation(const Instruction* instr) const {
+ return GetInstructionFlags(instr) & kIsLoadOperation;
+ }
+
+ // Identify nops used as a definition point for live-in registers at
+ // function entry.
+ bool IsFixedRegisterParameter(const Instruction* instr) const {
+ return (instr->arch_opcode() == kArchNop) &&
+ (instr->OutputCount() == 1) &&
+ (instr->OutputAt(0)->IsUnallocated()) &&
+ UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
+ }
+
+ void ComputeTotalLatencies();
+
+ static int GetInstructionLatency(const Instruction* instr);
+
+ Zone* zone() { return zone_; }
+ InstructionSequence* sequence() { return sequence_; }
+
+ Zone* zone_;
+ InstructionSequence* sequence_;
+ ZoneVector<ScheduleGraphNode*> graph_;
+
+ // Last side effect instruction encountered while building the graph.
+ ScheduleGraphNode* last_side_effect_instr_;
+
+ // Set of load instructions encountered since the last side effect instruction
+ // which will be added as predecessors of the next instruction with side
+ // effects.
+ ZoneVector<ScheduleGraphNode*> pending_loads_;
+
+ // Live-in register markers are nop instructions which are emitted at the
+ // beginning of a basic block so that the register allocator will find a
+ // defining instruction for live-in values. They must not be moved.
+ // All these nops are chained together and added as a predecessor of every
+ // other instruction in the basic block.
+ ScheduleGraphNode* last_live_in_reg_marker_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INSTRUCTION_SCHEDULER_H_
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index cd41e42eff..5cca8880d5 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -68,8 +68,16 @@ class OperandGenerator {
}
InstructionOperand DefineAsLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Define(node, ToUnallocatedOperand(location, type, GetVReg(node)));
+ MachineRepresentation rep) {
+ return Define(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ }
+
+ InstructionOperand DefineAsDualLocation(Node* node,
+ LinkageLocation primary_location,
+ LinkageLocation secondary_location) {
+ return Define(node,
+ ToDualLocationUnallocatedOperand(
+ primary_location, secondary_location, GetVReg(node)));
}
InstructionOperand Use(Node* node) {
@@ -120,9 +128,15 @@ class OperandGenerator {
reg.code(), GetVReg(node)));
}
- InstructionOperand UseExplicit(Register reg) {
- MachineType machine_type = InstructionSequence::DefaultRepresentation();
- return ExplicitOperand(LocationOperand::REGISTER, machine_type, reg.code());
+ InstructionOperand UseExplicit(LinkageLocation location) {
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+ if (location.IsRegister()) {
+ return ExplicitOperand(LocationOperand::REGISTER, rep,
+ location.AsRegister());
+ } else {
+ return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
+ location.GetLocation());
+ }
}
InstructionOperand UseImmediate(Node* node) {
@@ -130,8 +144,20 @@ class OperandGenerator {
}
InstructionOperand UseLocation(Node* node, LinkageLocation location,
- MachineType type) {
- return Use(node, ToUnallocatedOperand(location, type, GetVReg(node)));
+ MachineRepresentation rep) {
+ return Use(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ }
+
+ // Used to force gap moves from the from_location to the to_location
+ // immediately before an instruction.
+ InstructionOperand UsePointerLocation(LinkageLocation to_location,
+ LinkageLocation from_location) {
+ MachineRepresentation rep = MachineType::PointerRepresentation();
+ UnallocatedOperand casted_from_operand =
+ UnallocatedOperand::cast(TempLocation(from_location, rep));
+ selector_->Emit(kArchNop, casted_from_operand);
+ return ToUnallocatedOperand(to_location, rep,
+ casted_from_operand.virtual_register());
}
InstructionOperand TempRegister() {
@@ -144,7 +170,8 @@ class OperandGenerator {
UnallocatedOperand op = UnallocatedOperand(
UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
- sequence()->MarkAsRepresentation(kRepFloat64, op.virtual_register());
+ sequence()->MarkAsRepresentation(MachineRepresentation::kFloat64,
+ op.virtual_register());
return op;
}
@@ -157,8 +184,9 @@ class OperandGenerator {
return sequence()->AddImmediate(Constant(imm));
}
- InstructionOperand TempLocation(LinkageLocation location, MachineType type) {
- return ToUnallocatedOperand(location, type,
+ InstructionOperand TempLocation(LinkageLocation location,
+ MachineRepresentation rep) {
+ return ToUnallocatedOperand(location, rep,
sequence()->NextVirtualRegister());
}
@@ -211,8 +239,20 @@ class OperandGenerator {
return operand;
}
+ UnallocatedOperand ToDualLocationUnallocatedOperand(
+ LinkageLocation primary_location, LinkageLocation secondary_location,
+ int virtual_register) {
+ // We only support the primary location being a register and the secondary
+ // one a slot.
+ DCHECK(primary_location.IsRegister() &&
+ secondary_location.IsCalleeFrameSlot());
+ int reg_id = primary_location.AsRegister();
+ int slot_id = secondary_location.AsCalleeFrameSlot();
+ return UnallocatedOperand(reg_id, slot_id, virtual_register);
+ }
+
UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
- MachineType type,
+ MachineRepresentation rep,
int virtual_register) {
if (location.IsAnyRegister()) {
// any machine register.
@@ -230,8 +270,7 @@ class OperandGenerator {
location.AsCalleeFrameSlot(), virtual_register);
}
// a fixed register.
- MachineType rep = RepresentationOf(type);
- if (rep == kRepFloat64 || rep == kRepFloat32) {
+ if (IsFloatingPoint(rep)) {
return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
location.AsRegister(), virtual_register);
}
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index eac5571e9c..86868e59ee 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -12,6 +12,7 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
+#include "src/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -29,12 +30,13 @@ InstructionSelector::InstructionSelector(
source_position_mode_(source_position_mode),
features_(features),
schedule_(schedule),
- current_block_(NULL),
+ current_block_(nullptr),
instructions_(zone),
defined_(node_count, false, zone),
used_(node_count, false, zone),
virtual_registers_(node_count,
- InstructionOperand::kInvalidVirtualRegister, zone) {
+ InstructionOperand::kInvalidVirtualRegister, zone),
+ scheduler_(nullptr) {
instructions_.reserve(node_count);
}
@@ -61,17 +63,55 @@ void InstructionSelector::SelectInstructions() {
}
// Schedule the selected instructions.
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
+ }
+
for (auto const block : *blocks) {
InstructionBlock* instruction_block =
sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
DCHECK_LE(end, start);
- sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
+ StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
- sequence()->AddInstruction(instructions_[start]);
+ AddInstruction(instructions_[start]);
}
- sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
+ EndBlock(RpoNumber::FromInt(block->rpo_number()));
+ }
+}
+
+
+void InstructionSelector::StartBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->StartBlock(rpo);
+ } else {
+ sequence()->StartBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::EndBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->EndBlock(rpo);
+ } else {
+ sequence()->EndBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::AddInstruction(Instruction* instr) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->AddInstruction(instr);
+ } else {
+ sequence()->AddInstruction(instr);
}
}
@@ -81,7 +121,7 @@ Instruction* InstructionSelector::Emit(InstructionCode opcode,
size_t temp_count,
InstructionOperand* temps) {
size_t output_count = output.IsInvalid() ? 0 : 1;
- return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+ return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
}
@@ -240,16 +280,15 @@ void InstructionSelector::MarkAsUsed(Node* node) {
}
-void InstructionSelector::MarkAsRepresentation(MachineType rep,
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
const InstructionOperand& op) {
UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
- rep = RepresentationOf(rep);
sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
}
-void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
- rep = RepresentationOf(rep);
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ Node* node) {
sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
@@ -268,6 +307,9 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
case IrOpcode::kFloat64Constant:
case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
+ case IrOpcode::kObjectState:
+ UNREACHABLE();
+ break;
default:
switch (kind) {
case FrameStateInputKind::kStackSlot:
@@ -275,21 +317,94 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
case FrameStateInputKind::kAny:
return g->UseAny(input);
}
- UNREACHABLE();
- return InstructionOperand();
+ }
+ UNREACHABLE();
+ return InstructionOperand();
+}
+
+
+class StateObjectDeduplicator {
+ public:
+ explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
+ static const size_t kNotDuplicated = SIZE_MAX;
+
+ size_t GetObjectId(Node* node) {
+ for (size_t i = 0; i < objects_.size(); ++i) {
+ if (objects_[i] == node) {
+ return i;
+ }
+ }
+ return kNotDuplicated;
+ }
+
+ size_t InsertObject(Node* node) {
+ size_t id = objects_.size();
+ objects_.push_back(node);
+ return id;
+ }
+
+ private:
+ ZoneVector<Node*> objects_;
+};
+
+
+// Returns the number of instruction operands added to inputs.
+size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* input, MachineType type,
+ FrameStateInputKind kind, Zone* zone) {
+ switch (input->opcode()) {
+ case IrOpcode::kObjectState: {
+ size_t id = deduplicator->GetObjectId(input);
+ if (id == StateObjectDeduplicator::kNotDuplicated) {
+ size_t entries = 0;
+ id = deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Recursive(zone, id));
+ StateValueDescriptor* new_desc = &descriptor->fields().back();
+ for (Edge edge : input->input_edges()) {
+ entries += AddOperandToStateValueDescriptor(
+ new_desc, inputs, g, deduplicator, edge.to(),
+ MachineType::AnyTagged(), kind, zone);
+ }
+ return entries;
+ } else {
+ // Crankshaft counts duplicate objects for the running id, so we have
+ // to push the input again.
+ deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Duplicate(zone, id));
+ return 0;
+ }
+ break;
+ }
+ default: {
+ inputs->push_back(OperandForDeopt(g, input, kind));
+ descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
+ return 1;
+ }
}
}
-void AddFrameStateInputs(Node* state, OperandGenerator* g,
- InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor,
- FrameStateInputKind kind, Zone* zone) {
+// Returns the number of instruction operands added to inputs.
+size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+ Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+ size_t entries = 0;
+ size_t initial_size = inputs->size();
+ USE(initial_size); // initial_size is only used for debug.
+
if (descriptor->outer_state()) {
- AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), g, inputs,
- descriptor->outer_state(), kind, zone);
+ entries += AddInputsToFrameStateDescriptor(
+ descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
+ g, deduplicator, inputs, kind, zone);
}
Node* parameters = state->InputAt(kFrameStateParametersInput);
@@ -303,30 +418,34 @@ void AddFrameStateInputs(Node* state, OperandGenerator* g,
DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
- ZoneVector<MachineType> types(zone);
- types.reserve(descriptor->GetSize());
-
- size_t value_index = 0;
- inputs->push_back(OperandForDeopt(g, function, kind));
- descriptor->SetType(value_index++, kMachAnyTagged);
+ StateValueDescriptor* values_descriptor =
+ descriptor->GetStateValueDescriptor();
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, function,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
for (StateValuesAccess::TypedNode input_node :
StateValuesAccess(parameters)) {
- inputs->push_back(OperandForDeopt(g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
}
if (descriptor->HasContext()) {
- inputs->push_back(OperandForDeopt(g, context, kind));
- descriptor->SetType(value_index++, kMachAnyTagged);
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, context,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
}
for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
- inputs->push_back(OperandForDeopt(g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
}
for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
- inputs->push_back(OperandForDeopt(g, input_node.node, kind));
- descriptor->SetType(value_index++, input_node.type);
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
}
- DCHECK(value_index == descriptor->GetSize());
+ DCHECK_EQ(initial_size + entries, inputs->size());
+ return entries;
}
} // namespace
@@ -356,14 +475,14 @@ struct CallBuffer {
NodeVector output_nodes;
InstructionOperandVector outputs;
InstructionOperandVector instruction_args;
- NodeVector pushed_nodes;
+ ZoneVector<PushParameter> pushed_nodes;
size_t input_count() const { return descriptor->InputCount(); }
size_t frame_state_count() const { return descriptor->FrameStateCount(); }
size_t frame_state_value_count() const {
- return (frame_state_descriptor == NULL)
+ return (frame_state_descriptor == nullptr)
? 0
: (frame_state_descriptor->GetTotalSize() +
1); // Include deopt id.
@@ -374,8 +493,8 @@ struct CallBuffer {
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate) {
+ CallBufferFlags flags,
+ int stack_param_delta) {
OperandGenerator g(this);
DCHECK_LE(call->op()->ValueOutputCount(),
static_cast<int>(buffer->descriptor->ReturnCount()));
@@ -400,13 +519,13 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// Filter out the outputs that aren't live because no projection uses them.
size_t outputs_needed_by_framestate =
- buffer->frame_state_descriptor == NULL
+ buffer->frame_state_descriptor == nullptr
? 0
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live =
- buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+ bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ i < outputs_needed_by_framestate;
if (output_is_live) {
MachineType type =
buffer->descriptor->GetReturnType(static_cast<int>(i));
@@ -415,9 +534,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* output = buffer->output_nodes[i];
InstructionOperand op =
- output == NULL ? g.TempLocation(location, type)
- : g.DefineAsLocation(output, location, type);
- MarkAsRepresentation(type, op);
+ output == nullptr
+ ? g.TempLocation(location, type.representation())
+ : g.DefineAsLocation(output, location, type.representation());
+ MarkAsRepresentation(type.representation(), op);
buffer->outputs.push_back(op);
}
@@ -426,6 +546,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// The first argument is always the callee code.
Node* callee = call->InputAt(0);
+ bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
+ bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
buffer->instruction_args.push_back(
@@ -443,7 +565,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
- buffer->descriptor->GetInputType(0)));
+ buffer->descriptor->GetInputType(0).representation()));
break;
case CallDescriptor::kLazyBailout:
// The target is ignored, but we still need to pass a value here.
@@ -456,19 +578,26 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// follows (n is the number of value inputs to the frame state):
// arg 1 : deoptimization id.
// arg 2 - arg (n + 1) : value inputs to the frame state.
- if (buffer->frame_state_descriptor != NULL) {
+ size_t frame_state_entries = 0;
+ USE(frame_state_entries); // frame_state_entries is only used for debug.
+ if (buffer->frame_state_descriptor != nullptr) {
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
- AddFrameStateInputs(frame_state, &g, &buffer->instruction_args,
- buffer->frame_state_descriptor,
- FrameStateInputKind::kStackSlot, instruction_zone());
+
+ StateObjectDeduplicator deduplicator(instruction_zone());
+
+ frame_state_entries =
+ 1 + AddInputsToFrameStateDescriptor(
+ buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
+ &buffer->instruction_args, FrameStateInputKind::kStackSlot,
+ instruction_zone());
+
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
- DCHECK(1 + buffer->frame_state_value_count() ==
- buffer->instruction_args.size());
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -478,27 +607,47 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
size_t pushed_count = 0;
+ bool call_tail = (flags & kCallTail) != 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
+
+ LinkageLocation location = buffer->descriptor->GetInputLocation(index);
+ if (call_tail) {
+ location = LinkageLocation::ConvertToTailCallerLocation(
+ location, stack_param_delta);
+ }
InstructionOperand op =
- g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
- buffer->descriptor->GetInputType(index));
- if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+ g.UseLocation(*iter, location,
+ buffer->descriptor->GetInputType(index).representation());
+ if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
- buffer->pushed_nodes.resize(stack_index + 1, NULL);
+ buffer->pushed_nodes.resize(stack_index + 1);
}
- DCHECK(!buffer->pushed_nodes[stack_index]);
- buffer->pushed_nodes[stack_index] = *iter;
+ PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
+ buffer->pushed_nodes[stack_index] = parameter;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- buffer->frame_state_value_count());
+ frame_state_entries);
+ if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
+ stack_param_delta != 0) {
+ // For tail calls that change the size of their parameter list and keep
+ // their return address on the stack, move the return address to just above
+ // the parameters.
+ LinkageLocation saved_return_location =
+ LinkageLocation::ForSavedCallerReturnAddress();
+ InstructionOperand return_address =
+ g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
+ saved_return_location, stack_param_delta),
+ saved_return_location);
+ buffer->instruction_args.push_back(return_address);
+ }
}
@@ -539,7 +688,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
instruction_block->set_code_start(static_cast<int>(instructions_.size()));
instruction_block->set_code_end(current_block_end);
- current_block_ = NULL;
+ current_block_ = nullptr;
}
@@ -585,7 +734,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
// All other successors must be cases.
sw.case_count = block->SuccessorCount() - 1;
- DCHECK_LE(1u, sw.case_count);
sw.case_branches = &block->successors().front();
// Determine case values and their min/max.
sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
@@ -611,12 +759,9 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
return VisitReturn(input);
}
case BasicBlock::kDeoptimize: {
- // If the result itself is a return, return its input.
- Node* value =
- (input != nullptr && input->opcode() == IrOpcode::kDeoptimize)
- ? input->InputAt(0)
- : input;
- return VisitDeoptimize(value);
+ DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+ Node* value = input->InputAt(0);
+ return VisitDeoptimize(kind, value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -661,14 +806,14 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kParameter: {
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
- MarkAsRepresentation(type, node);
+ MarkAsRepresentation(type.representation(), node);
return VisitParameter(node);
}
case IrOpcode::kOsrValue:
return MarkAsReference(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
- MachineType type = OpParameter<MachineType>(node);
- MarkAsRepresentation(type, node);
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
case IrOpcode::kProjection:
@@ -692,10 +837,11 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitCall(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
return;
case IrOpcode::kLoad: {
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- MarkAsRepresentation(rep, node);
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
case IrOpcode::kStore:
@@ -776,8 +922,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return MarkAsWord64(node), VisitInt64Add(node);
+ case IrOpcode::kInt64AddWithOverflow:
+ return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
return MarkAsWord64(node), VisitInt64Sub(node);
+ case IrOpcode::kInt64SubWithOverflow:
+ return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
@@ -806,6 +956,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kChangeInt32ToInt64:
return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
@@ -822,6 +980,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kRoundUint64ToFloat32:
+ return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
+ case IrOpcode::kRoundUint64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
case IrOpcode::kBitcastFloat64ToInt64:
return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
case IrOpcode::kBitcastInt32ToFloat32:
@@ -874,12 +1036,24 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
+ case IrOpcode::kFloat32RoundDown:
+ return MarkAsFloat32(node), VisitFloat32RoundDown(node);
case IrOpcode::kFloat64RoundDown:
return MarkAsFloat64(node), VisitFloat64RoundDown(node);
+ case IrOpcode::kFloat32RoundUp:
+ return MarkAsFloat32(node), VisitFloat32RoundUp(node);
+ case IrOpcode::kFloat64RoundUp:
+ return MarkAsFloat64(node), VisitFloat64RoundUp(node);
+ case IrOpcode::kFloat32RoundTruncate:
+ return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
+ case IrOpcode::kFloat32RoundTiesEven:
+ return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
+ case IrOpcode::kFloat64RoundTiesEven:
+ return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
case IrOpcode::kFloat64ExtractLowWord32:
return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
case IrOpcode::kFloat64ExtractHighWord32:
@@ -893,7 +1067,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kLoadFramePointer:
return VisitLoadFramePointer(node);
case IrOpcode::kCheckedLoad: {
- MachineType rep = OpParameter<MachineType>(node);
+ MachineRepresentation rep =
+ CheckedLoadRepresentationOf(node->op()).representation();
MarkAsRepresentation(rep, node);
return VisitCheckedLoad(node);
}
@@ -994,9 +1169,19 @@ void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
@@ -1038,6 +1223,26 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
@@ -1053,6 +1258,16 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
@@ -1082,9 +1297,16 @@ void InstructionSelector::VisitGuard(Node* node) {
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
int index = ParameterIndexOf(node->op());
- Emit(kArchNop,
- g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
- linkage()->GetParameterType(index)));
+ InstructionOperand op =
+ linkage()->ParameterHasSecondaryLocation(index)
+ ? g.DefineAsDualLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterSecondaryLocation(index))
+ : g.DefineAsLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterType(index).representation());
+
+ Emit(kArchNop, op);
}
@@ -1093,8 +1315,9 @@ void InstructionSelector::VisitIfException(Node* node) {
Node* call = node->InputAt(1);
DCHECK_EQ(IrOpcode::kCall, call->opcode());
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
- Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
- descriptor->GetReturnType(0)));
+ Emit(kArchNop,
+ g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
+ descriptor->GetReturnType(0).representation()));
}
@@ -1102,7 +1325,7 @@ void InstructionSelector::VisitOsrValue(Node* node) {
OperandGenerator g(this);
int index = OpParameter<int>(node);
Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
- kMachAnyTagged));
+ MachineRepresentation::kTagged));
}
@@ -1128,6 +1351,12 @@ void InstructionSelector::VisitProjection(Node* node) {
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
@@ -1166,7 +1395,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
// the code object in a register if there are multiple uses of it.
// Improve constant pool and the heuristics in the register allocator
// for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, true);
+ CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
+ InitializeCallBuffer(node, &buffer, call_buffer_flags);
EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
@@ -1219,11 +1449,17 @@ void InstructionSelector::VisitTailCall(Node* node) {
// TODO(turbofan): Relax restriction for stack parameters.
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node)) {
+ int stack_param_delta = 0;
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
+ &stack_param_delta)) {
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, IsTailCallAddressImmediate());
+ CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
@@ -1240,6 +1476,11 @@ void InstructionSelector::VisitTailCall(Node* node) {
}
opcode |= MiscField::encode(descriptor->flags());
+ buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+
+ Emit(kArchPrepareTailCall, g.NoOutput(),
+ g.TempImmediate(stack_param_delta));
+
// Emit the tailcall instruction.
Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
&buffer.instruction_args.front());
@@ -1253,7 +1494,11 @@ void InstructionSelector::VisitTailCall(Node* node) {
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, IsTailCallAddressImmediate());
+ CallBufferFlags flags = kCallCodeImmediate;
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags);
EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
@@ -1300,38 +1545,47 @@ void InstructionSelector::VisitReturn(Node* ret) {
for (int i = 0; i < ret_count; ++i) {
value_locations[i] =
g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
- linkage()->GetReturnType(i));
+ linkage()->GetReturnType(i).representation());
}
Emit(kArchRet, 0, nullptr, ret_count, value_locations);
}
}
-void InstructionSelector::VisitDeoptimize(Node* value) {
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
OperandGenerator g(this);
FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
- size_t arg_count = desc->GetTotalSize() + 1; // Include deopt id.
InstructionOperandVector args(instruction_zone());
- args.reserve(arg_count);
+ args.reserve(desc->GetTotalSize() + 1); // Include deopt id.
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(desc);
args.push_back(g.TempImmediate(state_id.ToInt()));
- AddFrameStateInputs(value, &g, &args, desc, FrameStateInputKind::kAny,
- instruction_zone());
+ StateObjectDeduplicator deduplicator(instruction_zone());
- DCHECK_EQ(args.size(), arg_count);
+ AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
+ FrameStateInputKind::kAny,
+ instruction_zone());
- Emit(kArchDeoptimize, 0, nullptr, arg_count, &args.front(), 0, nullptr);
+ InstructionCode opcode = kArchDeoptimize;
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ opcode |= MiscField::encode(Deoptimizer::EAGER);
+ break;
+ case DeoptimizeKind::kSoft:
+ opcode |= MiscField::encode(Deoptimizer::SOFT);
+ break;
+ }
+ Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
}
void InstructionSelector::VisitThrow(Node* value) {
OperandGenerator g(this);
- Emit(kArchNop, g.NoOutput()); // TODO(titzer)
+ Emit(kArchThrowTerminator, g.NoOutput()); // TODO(titzer)
}
@@ -1351,7 +1605,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
DCHECK_EQ(parameters, state_info.parameter_count());
DCHECK_EQ(locals, state_info.local_count());
- FrameStateDescriptor* outer_state = NULL;
+ FrameStateDescriptor* outer_state = nullptr;
Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
if (outer_node->opcode() == IrOpcode::kFrameState) {
outer_state = GetFrameStateDescriptor(outer_node);
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 68a45157f9..52aea70eb6 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -9,6 +9,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/zone-containers.h"
@@ -25,8 +26,20 @@ class Linkage;
class OperandGenerator;
struct SwitchInfo;
-typedef ZoneVector<InstructionOperand> InstructionOperandVector;
+// This struct connects nodes of parameters which are going to be pushed on the
+// call stack with their parameter index in the call descriptor of the callee.
+class PushParameter {
+ public:
+ PushParameter() : node_(nullptr), type_(MachineType::None()) {}
+ PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
+
+ Node* node() const { return node_; }
+ MachineType type() const { return type_; }
+ private:
+ Node* node_;
+ MachineType type_;
+};
// Instruction selection generates an InstructionSequence for a given Schedule.
class InstructionSelector final {
@@ -46,40 +59,44 @@ class InstructionSelector final {
// Visit code for the entire graph with the included schedule.
void SelectInstructions();
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
+ void AddInstruction(Instruction* instr);
+
// ===========================================================================
// ============= Architecture-independent code emission methods. =============
// ===========================================================================
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
InstructionOperand e, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
InstructionOperand c, InstructionOperand d,
InstructionOperand e, InstructionOperand f,
- size_t temp_count = 0, InstructionOperand* temps = NULL);
+ size_t temp_count = 0, InstructionOperand* temps = nullptr);
Instruction* Emit(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count = 0,
- InstructionOperand* temps = NULL);
+ InstructionOperand* temps = nullptr);
Instruction* Emit(Instruction* instr);
// ===========================================================================
@@ -153,16 +170,34 @@ class InstructionSelector final {
// Inform the register allocation of the representation of the value produced
// by {node}.
- void MarkAsRepresentation(MachineType rep, Node* node);
- void MarkAsWord32(Node* node) { MarkAsRepresentation(kRepWord32, node); }
- void MarkAsWord64(Node* node) { MarkAsRepresentation(kRepWord64, node); }
- void MarkAsFloat32(Node* node) { MarkAsRepresentation(kRepFloat32, node); }
- void MarkAsFloat64(Node* node) { MarkAsRepresentation(kRepFloat64, node); }
- void MarkAsReference(Node* node) { MarkAsRepresentation(kRepTagged, node); }
+ void MarkAsRepresentation(MachineRepresentation rep, Node* node);
+ void MarkAsWord32(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kWord32, node);
+ }
+ void MarkAsWord64(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kWord64, node);
+ }
+ void MarkAsFloat32(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kFloat32, node);
+ }
+ void MarkAsFloat64(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kFloat64, node);
+ }
+ void MarkAsReference(Node* node) {
+ MarkAsRepresentation(MachineRepresentation::kTagged, node);
+ }
// Inform the register allocation of the representation of the unallocated
// operand {op}.
- void MarkAsRepresentation(MachineType rep, const InstructionOperand& op);
+ void MarkAsRepresentation(MachineRepresentation rep,
+ const InstructionOperand& op);
+
+ enum CallBufferFlag {
+ kCallCodeImmediate = 1u << 0,
+ kCallAddressImmediate = 1u << 1,
+ kCallTail = 1u << 2
+ };
+ typedef base::Flags<CallBufferFlag> CallBufferFlags;
// Initialize the call buffer with the InstructionOperands, nodes, etc,
// corresponding
@@ -170,8 +205,7 @@ class InstructionSelector final {
// {call_code_immediate} to generate immediate operands to calls of code.
// {call_address_immediate} to generate immediate operands to address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate);
+ CallBufferFlags flags, int stack_param_delta = 0);
bool IsTailCallAddressImmediate();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
@@ -207,11 +241,11 @@ class InstructionSelector final {
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
- void VisitDeoptimize(Node* value);
+ void VisitDeoptimize(DeoptimizeKind kind, Node* value);
void VisitReturn(Node* ret);
void VisitThrow(Node* value);
- void EmitPrepareArguments(NodeVector* arguments,
+ void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
// ===========================================================================
@@ -236,6 +270,7 @@ class InstructionSelector final {
BoolVector defined_;
BoolVector used_;
IntVector virtual_registers_;
+ InstructionScheduler* scheduler_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 1f9543a635..383e27dac6 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -6,6 +6,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/instruction.h"
#include "src/compiler/schedule.h"
+#include "src/compiler/state-values-utils.h"
namespace v8 {
namespace internal {
@@ -59,6 +60,22 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
+void InstructionOperand::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.op_ = *this;
+ os << wrapper << std::endl;
+}
+
+
+void InstructionOperand::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
const InstructionOperand& op = printable.op_;
@@ -122,25 +139,34 @@ std::ostream& operator<<(std::ostream& os,
if (allocated.IsExplicit()) {
os << "|E";
}
- switch (allocated.machine_type()) {
- case kRepWord32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kNone:
+ os << "|-";
+ break;
+ case MachineRepresentation::kBit:
+ os << "|b";
+ break;
+ case MachineRepresentation::kWord8:
+ os << "|w8";
+ break;
+ case MachineRepresentation::kWord16:
+ os << "|w16";
+ break;
+ case MachineRepresentation::kWord32:
os << "|w32";
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
os << "|w64";
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
os << "|f32";
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
os << "|f64";
break;
- case kRepTagged:
+ case MachineRepresentation::kTagged:
os << "|t";
break;
- default:
- os << "|?";
- break;
}
return os << "]";
}
@@ -152,6 +178,24 @@ std::ostream& operator<<(std::ostream& os,
}
+void MoveOperands::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionOperand wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.op_ = destination();
+ os << wrapper << " = ";
+ wrapper.op_ = source();
+ os << wrapper << std::endl;
+}
+
+
+void MoveOperands::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableMoveOperands& printable) {
const MoveOperands& mo = *printable.move_operands_;
@@ -195,12 +239,12 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
}
-ExplicitOperand::ExplicitOperand(LocationKind kind, MachineType machine_type,
+ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
int index)
- : LocationOperand(EXPLICIT, kind, machine_type, index) {
- DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(machine_type),
+ : LocationOperand(EXPLICIT, kind, rep, index) {
+ DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
Register::from_code(index).IsAllocatable());
- DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(machine_type),
+ DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(rep),
DoubleRegister::from_code(index).IsAllocatable());
}
@@ -209,7 +253,7 @@ Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
TempCountField::encode(0) | IsCallField::encode(false)),
- reference_map_(NULL) {
+ reference_map_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
}
@@ -224,7 +268,7 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
InputCountField::encode(input_count) |
TempCountField::encode(temp_count) |
IsCallField::encode(false)),
- reference_map_(NULL) {
+ reference_map_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
size_t offset = 0;
@@ -254,6 +298,22 @@ bool Instruction::AreMovesRedundant() const {
}
+void Instruction::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstruction wrapper;
+ wrapper.instr_ = this;
+ wrapper.register_configuration_ = config;
+ os << wrapper << std::endl;
+}
+
+
+void Instruction::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
@@ -399,7 +459,7 @@ std::ostream& operator<<(std::ostream& os,
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
os << "(";
- if (instr.parallel_moves()[i] != NULL) {
+ if (instr.parallel_moves()[i] != nullptr) {
PrintableParallelMove ppm = {printable.register_configuration_,
instr.parallel_moves()[i]};
os << ppm;
@@ -508,7 +568,7 @@ size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
static RpoNumber GetRpo(const BasicBlock* block) {
- if (block == NULL) return RpoNumber::Invalid();
+ if (block == nullptr) return RpoNumber::Invalid();
return RpoNumber::FromInt(block->rpo_number());
}
@@ -543,7 +603,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
Zone* zone, const Schedule* schedule) {
InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
new (blocks) InstructionBlocks(
- static_cast<int>(schedule->rpo_order()->size()), NULL, zone);
+ static_cast<int>(schedule->rpo_order()->size()), nullptr, zone);
size_t rpo_number = 0;
for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
@@ -629,7 +689,7 @@ int InstructionSequence::AddInstruction(Instruction* instr) {
int index = static_cast<int>(instructions_.size());
instructions_.push_back(instr);
if (instr->NeedsReferenceMap()) {
- DCHECK(instr->reference_map() == NULL);
+ DCHECK(instr->reference_map() == nullptr);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
reference_map->set_instruction_position(index);
instr->set_reference_map(reference_map);
@@ -656,28 +716,28 @@ InstructionBlock* InstructionSequence::GetInstructionBlock(
}
-static MachineType FilterRepresentation(MachineType rep) {
- DCHECK_EQ(rep, RepresentationOf(rep));
+static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
switch (rep) {
- case kRepBit:
- case kRepWord8:
- case kRepWord16:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
return InstructionSequence::DefaultRepresentation();
- case kRepWord32:
- case kRepWord64:
- case kRepFloat32:
- case kRepFloat64:
- case kRepTagged:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kTagged:
return rep;
- default:
+ case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return kMachNone;
+ return MachineRepresentation::kNone;
}
-MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
+MachineRepresentation InstructionSequence::GetRepresentation(
+ int virtual_register) const {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
@@ -687,17 +747,17 @@ MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
}
-void InstructionSequence::MarkAsRepresentation(MachineType machine_type,
+void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int virtual_register) {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
}
- machine_type = FilterRepresentation(machine_type);
- DCHECK_IMPLIES(representations_[virtual_register] != machine_type,
+ rep = FilterRepresentation(rep);
+ DCHECK_IMPLIES(representations_[virtual_register] != rep,
representations_[virtual_register] == DefaultRepresentation());
- representations_[virtual_register] = machine_type;
+ representations_[virtual_register] = rep;
}
@@ -744,6 +804,22 @@ void InstructionSequence::SetSourcePosition(const Instruction* instr,
}
+void InstructionSequence::Print(const RegisterConfiguration* config) const {
+ OFStream os(stdout);
+ PrintableInstructionSequence wrapper;
+ wrapper.register_configuration_ = config;
+ wrapper.sequence_ = this;
+ os << wrapper << std::endl;
+}
+
+
+void InstructionSequence::Print() const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config);
+}
+
+
FrameStateDescriptor::FrameStateDescriptor(
Zone* zone, FrameStateType type, BailoutId bailout_id,
OutputFrameStateCombine state_combine, size_t parameters_count,
@@ -756,11 +832,9 @@ FrameStateDescriptor::FrameStateDescriptor(
parameters_count_(parameters_count),
locals_count_(locals_count),
stack_count_(stack_count),
- types_(zone),
+ values_(zone),
shared_info_(shared_info),
- outer_state_(outer_state) {
- types_.resize(GetSize(), kMachNone);
-}
+ outer_state_(outer_state) {}
size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
@@ -779,7 +853,7 @@ size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
size_t FrameStateDescriptor::GetTotalSize() const {
size_t total_size = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
total_size += iter->GetSize();
}
@@ -789,7 +863,7 @@ size_t FrameStateDescriptor::GetTotalSize() const {
size_t FrameStateDescriptor::GetFrameCount() const {
size_t count = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
++count;
}
@@ -799,9 +873,9 @@ size_t FrameStateDescriptor::GetFrameCount() const {
size_t FrameStateDescriptor::GetJSFrameCount() const {
size_t count = 0;
- for (const FrameStateDescriptor* iter = this; iter != NULL;
+ for (const FrameStateDescriptor* iter = this; iter != nullptr;
iter = iter->outer_state_) {
- if (iter->type_ == FrameStateType::kJavaScriptFunction) {
+ if (FrameStateFunctionInfo::IsJSFunctionType(iter->type_)) {
++count;
}
}
@@ -809,17 +883,6 @@ size_t FrameStateDescriptor::GetJSFrameCount() const {
}
-MachineType FrameStateDescriptor::GetType(size_t index) const {
- return types_[index];
-}
-
-
-void FrameStateDescriptor::SetType(size_t index, MachineType type) {
- DCHECK(index < GetSize());
- types_[index] = type;
-}
-
-
std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
return os << rpo.ToSize();
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 7ab2b90778..8a6a0ae92a 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -23,8 +23,10 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
class Schedule;
+
class InstructionOperand {
public:
static const int kInvalidVirtualRegister = -1;
@@ -62,6 +64,7 @@ class InstructionOperand {
INSTRUCTION_OPERAND_PREDICATE(Allocated, ALLOCATED)
#undef INSTRUCTION_OPERAND_PREDICATE
+ inline bool IsAnyRegister() const;
inline bool IsRegister() const;
inline bool IsDoubleRegister() const;
inline bool IsStackSlot() const;
@@ -94,6 +97,9 @@ class InstructionOperand {
return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
@@ -105,6 +111,9 @@ class InstructionOperand {
};
+typedef ZoneVector<InstructionOperand> InstructionOperandVector;
+
+
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
InstructionOperand op_;
@@ -192,6 +201,12 @@ class UnallocatedOperand : public InstructionOperand {
value_ |= LifetimeField::encode(lifetime);
}
+ UnallocatedOperand(int reg_id, int slot_id, int virtual_register)
+ : UnallocatedOperand(FIXED_REGISTER, reg_id, virtual_register) {
+ value_ |= HasSecondaryStorageField::encode(true);
+ value_ |= SecondaryStorageField::encode(slot_id);
+ }
+
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
@@ -222,6 +237,15 @@ class UnallocatedOperand : public InstructionOperand {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_DOUBLE_REGISTER;
}
+ bool HasSecondaryStorage() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == FIXED_REGISTER &&
+ HasSecondaryStorageField::decode(value_);
+ }
+ int GetSecondaryStorage() const {
+ DCHECK(HasSecondaryStorage());
+ return SecondaryStorageField::decode(value_);
+ }
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
BasicPolicy basic_policy() const {
@@ -301,7 +325,9 @@ class UnallocatedOperand : public InstructionOperand {
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {};
class LifetimeField : public BitField64<Lifetime, 39, 1> {};
- class FixedRegisterField : public BitField64<int, 40, 6> {};
+ class HasSecondaryStorageField : public BitField64<bool, 40, 1> {};
+ class FixedRegisterField : public BitField64<int, 41, 6> {};
+ class SecondaryStorageField : public BitField64<int, 47, 3> {};
private:
explicit UnallocatedOperand(int virtual_register)
@@ -375,12 +401,12 @@ class LocationOperand : public InstructionOperand {
LocationOperand(InstructionOperand::Kind operand_kind,
LocationOperand::LocationKind location_kind,
- MachineType machine_type, int index)
+ MachineRepresentation rep, int index)
: InstructionOperand(operand_kind) {
DCHECK_IMPLIES(location_kind == REGISTER, index >= 0);
- DCHECK(IsSupportedMachineType(machine_type));
+ DCHECK(IsSupportedRepresentation(rep));
value_ |= LocationKindField::encode(location_kind);
- value_ |= MachineTypeField::encode(machine_type);
+ value_ |= RepresentationField::encode(rep);
value_ |= static_cast<int64_t>(index) << IndexField::kShift;
}
@@ -405,20 +431,26 @@ class LocationOperand : public InstructionOperand {
return LocationKindField::decode(value_);
}
- MachineType machine_type() const { return MachineTypeField::decode(value_); }
+ MachineRepresentation representation() const {
+ return RepresentationField::decode(value_);
+ }
- static bool IsSupportedMachineType(MachineType machine_type) {
- if (RepresentationOf(machine_type) != machine_type) return false;
- switch (machine_type) {
- case kRepWord32:
- case kRepWord64:
- case kRepFloat32:
- case kRepFloat64:
- case kRepTagged:
+ static bool IsSupportedRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kTagged:
return true;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kNone:
return false;
}
+ UNREACHABLE();
+ return false;
}
static LocationOperand* cast(InstructionOperand* op) {
@@ -438,19 +470,18 @@ class LocationOperand : public InstructionOperand {
STATIC_ASSERT(KindField::kSize == 3);
class LocationKindField : public BitField64<LocationKind, 3, 2> {};
- class MachineTypeField : public BitField64<MachineType, 5, 16> {};
+ class RepresentationField : public BitField64<MachineRepresentation, 5, 8> {};
class IndexField : public BitField64<int32_t, 35, 29> {};
};
class ExplicitOperand : public LocationOperand {
public:
- ExplicitOperand(LocationKind kind, MachineType machine_type, int index);
+ ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
static ExplicitOperand* New(Zone* zone, LocationKind kind,
- MachineType machine_type, int index) {
- return InstructionOperand::New(zone,
- ExplicitOperand(kind, machine_type, index));
+ MachineRepresentation rep, int index) {
+ return InstructionOperand::New(zone, ExplicitOperand(kind, rep, index));
}
INSTRUCTION_OPERAND_CASTS(ExplicitOperand, EXPLICIT);
@@ -459,13 +490,12 @@ class ExplicitOperand : public LocationOperand {
class AllocatedOperand : public LocationOperand {
public:
- AllocatedOperand(LocationKind kind, MachineType machine_type, int index)
- : LocationOperand(ALLOCATED, kind, machine_type, index) {}
+ AllocatedOperand(LocationKind kind, MachineRepresentation rep, int index)
+ : LocationOperand(ALLOCATED, kind, rep, index) {}
static AllocatedOperand* New(Zone* zone, LocationKind kind,
- MachineType machine_type, int index) {
- return InstructionOperand::New(zone,
- AllocatedOperand(kind, machine_type, index));
+ MachineRepresentation rep, int index) {
+ return InstructionOperand::New(zone, AllocatedOperand(kind, rep, index));
}
INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
@@ -475,44 +505,47 @@ class AllocatedOperand : public LocationOperand {
#undef INSTRUCTION_OPERAND_CASTS
-bool InstructionOperand::IsRegister() const {
+bool InstructionOperand::IsAnyRegister() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
- LocationOperand::REGISTER &&
- !IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+ LocationOperand::REGISTER;
+}
+
+
+bool InstructionOperand::IsRegister() const {
+ return IsAnyRegister() &&
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
}
bool InstructionOperand::IsDoubleRegister() const {
- return (IsAllocated() || IsExplicit()) &&
- LocationOperand::cast(this)->location_kind() ==
- LocationOperand::REGISTER &&
- IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+ return IsAnyRegister() &&
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
}
bool InstructionOperand::IsStackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
- !IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+ !IsFloatingPoint(LocationOperand::cast(this)->representation());
}
bool InstructionOperand::IsDoubleStackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
- IsFloatingPoint(LocationOperand::cast(this)->machine_type());
+ IsFloatingPoint(LocationOperand::cast(this)->representation());
}
uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAllocated() || IsExplicit()) {
// TODO(dcarney): put machine type last and mask.
- MachineType canonicalized_machine_type =
- IsFloatingPoint(LocationOperand::cast(this)->machine_type())
- ? kMachFloat64
- : kMachNone;
+ MachineRepresentation canonicalized_representation =
+ IsFloatingPoint(LocationOperand::cast(this)->representation())
+ ? MachineRepresentation::kFloat64
+ : MachineRepresentation::kNone;
return InstructionOperand::KindField::update(
- LocationOperand::MachineTypeField::update(this->value_,
- canonicalized_machine_type),
+ LocationOperand::RepresentationField::update(
+ this->value_, canonicalized_representation),
LocationOperand::EXPLICIT);
}
return this->value_;
@@ -572,6 +605,9 @@ class MoveOperands final : public ZoneObject {
return source_.IsInvalid();
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
private:
InstructionOperand source_;
InstructionOperand destination_;
@@ -698,7 +734,7 @@ class Instruction final {
// TODO(titzer): make call into a flags.
static Instruction* New(Zone* zone, InstructionCode opcode) {
- return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+ return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
}
static Instruction* New(Zone* zone, InstructionCode opcode,
@@ -706,9 +742,9 @@ class Instruction final {
size_t input_count, InstructionOperand* inputs,
size_t temp_count, InstructionOperand* temps) {
DCHECK(opcode >= 0);
- DCHECK(output_count == 0 || outputs != NULL);
- DCHECK(input_count == 0 || inputs != NULL);
- DCHECK(temp_count == 0 || temps != NULL);
+ DCHECK(output_count == 0 || outputs != nullptr);
+ DCHECK(input_count == 0 || inputs != nullptr);
+ DCHECK(temp_count == 0 || temps != nullptr);
size_t total_extra_ops = output_count + input_count + temp_count;
if (total_extra_ops != 0) total_extra_ops--;
int size = static_cast<int>(
@@ -724,7 +760,7 @@ class Instruction final {
}
bool IsCall() const { return IsCallField::decode(bit_field_); }
bool NeedsReferenceMap() const { return IsCall(); }
- bool HasReferenceMap() const { return reference_map_ != NULL; }
+ bool HasReferenceMap() const { return reference_map_ != nullptr; }
bool ClobbersRegisters() const { return IsCall(); }
bool ClobbersTemps() const { return IsCall(); }
@@ -740,7 +776,7 @@ class Instruction final {
void OverwriteWithNop() {
opcode_ = ArchOpcodeField::encode(kArchNop);
bit_field_ = 0;
- reference_map_ = NULL;
+ reference_map_ = nullptr;
}
bool IsNop() const {
@@ -775,6 +811,9 @@ class Instruction final {
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
+
private:
explicit Instruction(InstructionCode opcode);
@@ -911,6 +950,59 @@ class Constant final {
};
+std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+
+// Forward declarations.
+class FrameStateDescriptor;
+
+
+enum class StateValueKind { kPlain, kNested, kDuplicate };
+
+
+class StateValueDescriptor {
+ public:
+ explicit StateValueDescriptor(Zone* zone)
+ : kind_(StateValueKind::kPlain),
+ type_(MachineType::AnyTagged()),
+ id_(0),
+ fields_(zone) {}
+
+ static StateValueDescriptor Plain(Zone* zone, MachineType type) {
+ return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+ }
+ static StateValueDescriptor Recursive(Zone* zone, size_t id) {
+ return StateValueDescriptor(StateValueKind::kNested, zone,
+ MachineType::AnyTagged(), id);
+ }
+ static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
+ return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+ MachineType::AnyTagged(), id);
+ }
+
+ size_t size() { return fields_.size(); }
+ ZoneVector<StateValueDescriptor>& fields() { return fields_; }
+ int IsPlain() { return kind_ == StateValueKind::kPlain; }
+ int IsNested() { return kind_ == StateValueKind::kNested; }
+ int IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
+ MachineType type() const { return type_; }
+ MachineType GetOperandType(size_t index) const {
+ return fields_[index].type_;
+ }
+ size_t id() const { return id_; }
+
+ private:
+ StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
+ size_t id)
+ : kind_(kind), type_(type), id_(id), fields_(zone) {}
+
+ StateValueKind kind_;
+ MachineType type_;
+ size_t id_;
+ ZoneVector<StateValueDescriptor> fields_;
+};
+
+
class FrameStateDescriptor : public ZoneObject {
public:
FrameStateDescriptor(Zone* zone, FrameStateType type, BailoutId bailout_id,
@@ -929,7 +1021,7 @@ class FrameStateDescriptor : public ZoneObject {
MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateDescriptor* outer_state() const { return outer_state_; }
bool HasContext() const {
- return type_ == FrameStateType::kJavaScriptFunction;
+ return FrameStateFunctionInfo::IsJSFunctionType(type_);
}
size_t GetSize(OutputFrameStateCombine combine =
@@ -938,8 +1030,10 @@ class FrameStateDescriptor : public ZoneObject {
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
- MachineType GetType(size_t index) const;
- void SetType(size_t index, MachineType type);
+ MachineType GetType(size_t index) const {
+ return values_.GetOperandType(index);
+ }
+ StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
private:
FrameStateType type_;
@@ -948,12 +1042,13 @@ class FrameStateDescriptor : public ZoneObject {
size_t parameters_count_;
size_t locals_count_;
size_t stack_count_;
- ZoneVector<MachineType> types_;
+ StateValueDescriptor values_;
MaybeHandle<SharedFunctionInfo> const shared_info_;
FrameStateDescriptor* outer_state_;
};
-std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
class PhiInstruction final : public ZoneObject {
@@ -1066,13 +1161,14 @@ class InstructionBlock final : public ZoneObject {
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
- zone_allocator<std::pair<int, Constant> > > ConstantMap;
+ zone_allocator<std::pair<const int, Constant> > > ConstantMap;
typedef ZoneDeque<Instruction*> InstructionDeque;
typedef ZoneDeque<ReferenceMap*> ReferenceMapDeque;
-typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
typedef ZoneVector<InstructionBlock*> InstructionBlocks;
+
+// Forward declarations.
struct PrintableInstructionSequence;
@@ -1114,23 +1210,18 @@ class InstructionSequence final : public ZoneObject {
InstructionBlock* GetInstructionBlock(int instruction_index) const;
- static MachineType DefaultRepresentation() {
- return kPointerSize == 8 ? kRepWord64 : kRepWord32;
+ static MachineRepresentation DefaultRepresentation() {
+ return MachineType::PointerRepresentation();
}
- MachineType GetRepresentation(int virtual_register) const;
- void MarkAsRepresentation(MachineType machine_type, int virtual_register);
+ MachineRepresentation GetRepresentation(int virtual_register) const;
+ void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
bool IsReference(int virtual_register) const {
- return GetRepresentation(virtual_register) == kRepTagged;
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kTagged;
}
bool IsFloat(int virtual_register) const {
- switch (GetRepresentation(virtual_register)) {
- case kRepFloat32:
- case kRepFloat64:
- return true;
- default:
- return false;
- }
+ return IsFloatingPoint(GetRepresentation(virtual_register));
}
Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1229,6 +1320,8 @@ class InstructionSequence final : public ZoneObject {
}
return false;
}
+ void Print(const RegisterConfiguration* config) const;
+ void Print() const;
private:
friend std::ostream& operator<<(std::ostream& os,
@@ -1246,7 +1339,7 @@ class InstructionSequence final : public ZoneObject {
InstructionDeque instructions_;
int next_virtual_register_;
ReferenceMapDeque reference_maps_;
- ZoneVector<MachineType> representations_;
+ ZoneVector<MachineRepresentation> representations_;
DeoptimizationVector deoptimization_entries_;
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
diff --git a/deps/v8/src/compiler/interpreter-assembler.cc b/deps/v8/src/compiler/interpreter-assembler.cc
index ed056cfe56..7080d02120 100644
--- a/deps/v8/src/compiler/interpreter-assembler.cc
+++ b/deps/v8/src/compiler/interpreter-assembler.cc
@@ -10,13 +10,13 @@
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
@@ -30,11 +30,13 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
: bytecode_(bytecode),
raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone),
- Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
+ Linkage::GetInterpreterDispatchDescriptor(zone),
+ MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags())),
- end_nodes_(zone),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
+ bytecode_offset_(raw_assembler_->Parameter(
+ Linkage::kInterpreterBytecodeOffsetParameter)),
context_(
raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
code_generated_(false) {}
@@ -46,14 +48,14 @@ InterpreterAssembler::~InterpreterAssembler() {}
Handle<Code> InterpreterAssembler::GenerateCode() {
DCHECK(!code_generated_);
- End();
+ // Disallow empty handlers that never return.
+ DCHECK_NE(0, graph()->end()->InputCount());
const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
Schedule* schedule = raw_assembler_->Export();
- // TODO(rmcilroy): use a non-testing code generator.
- Handle<Code> code = Pipeline::GenerateCodeForInterpreter(
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
- bytecode_name);
+ Code::STUB, bytecode_name);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
@@ -80,6 +82,9 @@ Node* InterpreterAssembler::GetContext() { return context_; }
void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
+Node* InterpreterAssembler::BytecodeOffset() { return bytecode_offset_; }
+
+
Node* InterpreterAssembler::RegisterFileRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
}
@@ -90,44 +95,62 @@ Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
}
-Node* InterpreterAssembler::BytecodeOffset() {
- return raw_assembler_->Parameter(
- Linkage::kInterpreterBytecodeOffsetParameter);
-}
-
-
Node* InterpreterAssembler::DispatchTableRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
}
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return WordShl(index, kPointerSizeLog2);
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+ return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
}
-Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+Node* InterpreterAssembler::LoadRegister(int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(),
+ RegisterFileRawPointer(), Int32Constant(offset));
}
Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
- return raw_assembler_->Load(
- kMachAnyTagged, RegisterFileRawPointer(),
- RegisterFrameOffset(Int32Constant(reg.ToOperand())));
+ return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+}
+
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return WordShl(index, kPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return raw_assembler_->Load(kMachAnyTagged, RegisterFileRawPointer(),
+ return raw_assembler_->Load(MachineType::AnyTagged(),
+ RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
}
+Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
+ return raw_assembler_->Store(MachineRepresentation::kTagged,
+ RegisterFileRawPointer(), Int32Constant(offset),
+ value, kNoWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value,
+ interpreter::Register reg) {
+ return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+}
+
+
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- return raw_assembler_->Store(kMachAnyTagged, RegisterFileRawPointer(),
- RegisterFrameOffset(reg_index), value,
- kNoWriteBarrier);
+ return raw_assembler_->Store(
+ MachineRepresentation::kTagged, RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
+}
+
+
+Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+ // Register indexes are negative, so the next index is minus one.
+ return IntPtrAdd(reg_index, Int32Constant(-1));
}
@@ -136,7 +159,7 @@ Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
DCHECK_EQ(interpreter::OperandSize::kByte,
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
return raw_assembler_->Load(
- kMachUint8, BytecodeArrayTaggedPointer(),
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(),
Int32Constant(interpreter::Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
@@ -148,7 +171,7 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
DCHECK_EQ(interpreter::OperandSize::kByte,
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
Node* load = raw_assembler_->Load(
- kMachInt8, BytecodeArrayTaggedPointer(),
+ MachineType::Int8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(),
Int32Constant(interpreter::Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
@@ -166,7 +189,7 @@ Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
if (TargetSupportsUnalignedAccess()) {
return raw_assembler_->Load(
- kMachUint16, BytecodeArrayTaggedPointer(),
+ MachineType::Uint16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(),
Int32Constant(interpreter::Bytecodes::GetOperandOffset(
bytecode_, operand_index))));
@@ -174,10 +197,10 @@ Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
int offset =
interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
Node* first_byte = raw_assembler_->Load(
- kMachUint8, BytecodeArrayTaggedPointer(),
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
Node* second_byte = raw_assembler_->Load(
- kMachUint8, BytecodeArrayTaggedPointer(),
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
@@ -192,10 +215,62 @@ Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
}
+Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
+ int operand_index) {
+ DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ int operand_offset =
+ interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* load;
+ if (TargetSupportsUnalignedAccess()) {
+ load = raw_assembler_->Load(
+ MachineType::Int16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset + 1);
+ Node* lo_byte_offset = Int32Constant(operand_offset);
+#elif V8_TARGET_BIG_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset);
+ Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+#else
+#error "Unknown Architecture"
+#endif
+ Node* hi_byte =
+ raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), hi_byte_offset));
+ Node* lo_byte =
+ raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), lo_byte_offset));
+ hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
+ load = raw_assembler_->Word32Or(hi_byte, lo_byte);
+ }
+
+ // Ensure that we sign extend to full pointer size
+ if (kPointerSize == 8) {
+ load = raw_assembler_->ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kCount8,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
+ switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case interpreter::OperandSize::kByte:
+ DCHECK_EQ(
+ interpreter::OperandType::kCount8,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case interpreter::OperandSize::kShort:
+ DCHECK_EQ(
+ interpreter::OperandType::kCount16,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
}
@@ -226,13 +301,23 @@ Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
-#ifdef DEBUG
- interpreter::OperandType operand_type =
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index);
- DCHECK(operand_type == interpreter::OperandType::kReg8 ||
- operand_type == interpreter::OperandType::kMaybeReg8);
-#endif
- return BytecodeOperandSignExtended(operand_index);
+ switch (interpreter::Bytecodes::GetOperandType(bytecode_, operand_index)) {
+ case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kRegPair8:
+ case interpreter::OperandType::kMaybeReg8:
+ DCHECK_EQ(
+ interpreter::OperandSize::kByte,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+ case interpreter::OperandType::kReg16:
+ DCHECK_EQ(
+ interpreter::OperandSize::kShort,
+ interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return BytecodeOperandShortSignExtended(operand_index);
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
}
@@ -297,7 +382,8 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(index, kPointerSizeLog2));
- return raw_assembler_->Load(kMachAnyTagged, constant_pool, entry_offset);
+ return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
+ entry_offset);
}
@@ -306,18 +392,19 @@ Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
Node* entry_offset =
IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
WordShl(Int32Constant(index), kPointerSizeLog2));
- return raw_assembler_->Load(kMachAnyTagged, fixed_array, entry_offset);
+ return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
+ entry_offset);
}
Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
- return raw_assembler_->Load(kMachAnyTagged, object,
+ return raw_assembler_->Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag));
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
- return raw_assembler_->Load(kMachAnyTagged, context,
+ return raw_assembler_->Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
}
@@ -326,7 +413,7 @@ Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
- return raw_assembler_->Load(kMachAnyTagged, context, offset);
+ return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
}
@@ -335,14 +422,14 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
Int32Constant(Context::kHeaderSize - kHeapObjectTag));
- return raw_assembler_->Store(kMachAnyTagged, context, offset, value,
- kFullWriteBarrier);
+ return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
+ value, kFullWriteBarrier);
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
Node* function = raw_assembler_->Load(
- kMachAnyTagged, RegisterFileRawPointer(),
+ MachineType::AnyTagged(), RegisterFileRawPointer(),
IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
Node* shared_info =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
@@ -352,9 +439,13 @@ Node* InterpreterAssembler::LoadTypeFeedbackVector() {
}
-Node* InterpreterAssembler::CallConstruct(Node* original_constructor,
- Node* constructor, Node* first_arg,
- Node* arg_count) {
+Node* InterpreterAssembler::Projection(int index, Node* node) {
+ return raw_assembler_->Projection(index, node);
+}
+
+
+Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
+ Node* first_arg, Node* arg_count) {
Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
@@ -363,7 +454,7 @@ Node* InterpreterAssembler::CallConstruct(Node* original_constructor,
Node** args = zone()->NewArray<Node*>(5);
args[0] = arg_count;
- args[1] = original_constructor;
+ args[1] = new_target;
args[2] = constructor;
args[3] = first_arg;
args[4] = GetContext();
@@ -372,8 +463,23 @@ Node* InterpreterAssembler::CallConstruct(Node* original_constructor,
}
+void InterpreterAssembler::CallPrologue() {
+ StoreRegister(SmiTag(bytecode_offset_),
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+}
+
+
+void InterpreterAssembler::CallEpilogue() {
+ // Restore the bytecode offset from the stack frame.
+ bytecode_offset_ = SmiUntag(LoadRegister(
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+}
+
+
Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
Node** args) {
+ CallPrologue();
+
Node* stack_pointer_before_call = nullptr;
if (FLAG_debug_code) {
stack_pointer_before_call = raw_assembler_->LoadStackPointer();
@@ -384,6 +490,8 @@ Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
kUnexpectedStackPointer);
}
+
+ CallEpilogue();
return return_val;
}
@@ -454,11 +562,11 @@ Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
- Node* arg_count) {
- Callable callable = CodeFactory::InterpreterCEntry(isolate());
+ Node* arg_count, int result_size) {
+ Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
+ isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoProperties, MachineType::AnyTagged(), result_size);
Node* code_target = HeapConstant(callable.code());
// Get the function entry from the function id.
@@ -467,8 +575,9 @@ Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
Node* function_offset = raw_assembler_->Int32Mul(
function_id, Int32Constant(sizeof(Runtime::Function)));
Node* function = IntPtrAdd(function_table, function_offset);
- Node* function_entry = raw_assembler_->Load(
- kMachPtr, function, Int32Constant(offsetof(Runtime::Function, entry)));
+ Node* function_entry =
+ raw_assembler_->Load(MachineType::Pointer(), function,
+ Int32Constant(offsetof(Runtime::Function, entry)));
Node** args = zone()->NewArray<Node*>(4);
args[0] = arg_count;
@@ -482,21 +591,32 @@ Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1) {
- return raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
+ CallPrologue();
+ Node* return_val =
+ raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
+ CallEpilogue();
+ return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2) {
- return raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
+ CallPrologue();
+ Node* return_val =
+ raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
+ CallEpilogue();
+ return return_val;
}
Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* arg1, Node* arg2, Node* arg3,
Node* arg4) {
- return raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3, arg4,
- GetContext());
+ CallPrologue();
+ Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
+ arg4, GetContext());
+ CallEpilogue();
+ return return_val;
}
@@ -516,10 +636,8 @@ void InterpreterAssembler::Return() {
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
GetContext() };
- Node* tail_call = raw_assembler_->TailCallN(
- call_descriptor(), exit_trampoline_code_object, args);
- // This should always be the end node.
- AddEndInput(tail_call);
+ raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
+ args);
}
@@ -537,7 +655,7 @@ void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
- RawMachineAssembler::Label match, no_match;
+ RawMachineLabel match, no_match;
Node* condition = raw_assembler_->WordEqual(lhs, rhs);
raw_assembler_->Branch(condition, &match, &no_match);
raw_assembler_->Bind(&match);
@@ -554,12 +672,12 @@ void InterpreterAssembler::Dispatch() {
void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = raw_assembler_->Load(
- kMachUint8, BytecodeArrayTaggedPointer(), new_bytecode_offset);
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
// TODO(rmcilroy): Create a code target dispatch table to avoid conversion
// from code object on every dispatch.
Node* target_code_object = raw_assembler_->Load(
- kMachPtr, DispatchTableRawPointer(),
+ MachineType::Pointer(), DispatchTableRawPointer(),
raw_assembler_->Word32Shl(target_bytecode,
Int32Constant(kPointerSizeLog2)));
@@ -576,23 +694,21 @@ void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
BytecodeArrayTaggedPointer(),
DispatchTableRawPointer(),
GetContext() };
- Node* tail_call =
- raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
- // This should always be the end node.
- AddEndInput(tail_call);
+ raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
Node* abort_id = SmiTag(Int32Constant(bailout_reason));
- CallRuntime(Runtime::kAbort, abort_id);
- Return();
+ Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
+ // Unreached, but keeps turbofan happy.
+ raw_assembler_->Return(ret_value);
}
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
- RawMachineAssembler::Label match, no_match;
+ RawMachineLabel match, no_match;
Node* condition = raw_assembler_->WordEqual(lhs, rhs);
raw_assembler_->Branch(condition, &match, &no_match);
raw_assembler_->Bind(&no_match);
@@ -601,21 +717,6 @@ void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
}
-void InterpreterAssembler::AddEndInput(Node* input) {
- DCHECK_NOT_NULL(input);
- end_nodes_.push_back(input);
-}
-
-
-void InterpreterAssembler::End() {
- DCHECK(!end_nodes_.empty());
- int end_count = static_cast<int>(end_nodes_.size());
- Node* end = graph()->NewNode(raw_assembler_->common()->End(end_count),
- end_count, &end_nodes_[0]);
- graph()->SetEnd(end);
-}
-
-
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
@@ -642,11 +743,6 @@ CallDescriptor* InterpreterAssembler::call_descriptor() const {
}
-Schedule* InterpreterAssembler::schedule() {
- return raw_assembler_->schedule();
-}
-
-
Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
diff --git a/deps/v8/src/compiler/interpreter-assembler.h b/deps/v8/src/compiler/interpreter-assembler.h
index 65955a92ba..fb79d3eaa2 100644
--- a/deps/v8/src/compiler/interpreter-assembler.h
+++ b/deps/v8/src/compiler/interpreter-assembler.h
@@ -13,7 +13,6 @@
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -61,10 +60,16 @@ class InterpreterAssembler {
void SetContext(Node* value);
// Loads from and stores to the interpreter register file.
+ Node* LoadRegister(int offset);
Node* LoadRegister(interpreter::Register reg);
Node* LoadRegister(Node* reg_index);
+ Node* StoreRegister(Node* value, int offset);
+ Node* StoreRegister(Node* value, interpreter::Register reg);
Node* StoreRegister(Node* value, Node* reg_index);
+ // Returns the next consecutive register.
+ Node* NextRegister(Node* reg_index);
+
// Returns the location in memory of the register |reg_index| in the
// interpreter register file.
Node* RegisterLocation(Node* reg_index);
@@ -103,13 +108,16 @@ class InterpreterAssembler {
// Load the TypeFeedbackVector for the current function.
Node* LoadTypeFeedbackVector();
+ // Project the output value at index |index|
+ Node* Projection(int index, Node* node);
+
// Call constructor |constructor| with |arg_count| arguments (not
// including receiver) and the first argument located at
- // |first_arg|. The |original_constructor| is the same as the
+ // |first_arg|. The |new_target| is the same as the
// |constructor| for the new keyword, but differs for the super
// keyword.
- Node* CallConstruct(Node* original_constructor, Node* constructor,
- Node* first_arg, Node* arg_count);
+ Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
+ Node* arg_count);
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
@@ -125,7 +133,8 @@ class InterpreterAssembler {
Node* arg2, Node* arg3, Node* arg4, Node* arg5);
// Call runtime function.
- Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count);
+ Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
+ int return_size = 1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
@@ -148,9 +157,6 @@ class InterpreterAssembler {
void Abort(BailoutReason bailout_reason);
protected:
- // Close the graph.
- void End();
-
static bool TargetSupportsUnalignedAccess();
// Protected helpers (for testing) which delegate to RawMachineAssembler.
@@ -167,6 +173,11 @@ class InterpreterAssembler {
// Returns a raw pointer to first entry in the interpreter dispatch table.
Node* DispatchTableRawPointer();
+ // Saves and restores interpreter bytecode offset to the interpreter stack
+ // frame when performing a call.
+ void CallPrologue();
+ void CallEpilogue();
+
// Returns the offset of register |index| relative to RegisterFilePointer().
Node* RegisterFrameOffset(Node* index);
@@ -174,11 +185,10 @@ class InterpreterAssembler {
Node* BytecodeOperand(int operand_index);
Node* BytecodeOperandSignExtended(int operand_index);
Node* BytecodeOperandShort(int operand_index);
+ Node* BytecodeOperandShortSignExtended(int operand_index);
Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
- Node* CallJSBuiltin(int context_index, Node* receiver, Node** js_args,
- int js_arg_count);
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
@@ -191,19 +201,17 @@ class InterpreterAssembler {
// Abort operations for debug code.
void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
- // Adds an end node of the graph.
- void AddEndInput(Node* input);
-
// Private helpers which delegate to RawMachineAssembler.
Isolate* isolate();
- Schedule* schedule();
Zone* zone();
interpreter::Bytecode bytecode_;
base::SmartPointer<RawMachineAssembler> raw_assembler_;
- ZoneVector<Node*> end_nodes_;
+
Node* accumulator_;
+ Node* bytecode_offset_;
Node* context_;
+
bool code_generated_;
DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 6db5f99e3b..a7a7da57cd 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/diamond.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
@@ -91,41 +90,6 @@ JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
: AdvancedReducer(editor), jsgraph_(jsgraph) {}
-// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
-Reduction JSBuiltinReducer::ReduceFunctionCall(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
- Handle<JSFunction> apply = Handle<JSFunction>::cast(
- HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
- // Change context of {node} to the Function.prototype.call context,
- // to ensure any exception is thrown in the correct context.
- NodeProperties::ReplaceContextInput(
- node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
- // Remove the target from {node} and use the receiver as target instead, and
- // the thisArg becomes the new target. If thisArg was not provided, insert
- // undefined instead.
- size_t arity = p.arity();
- DCHECK_LE(2u, arity);
- ConvertReceiverMode convert_mode;
- if (arity == 2) {
- // The thisArg was not provided, use undefined as receiver.
- convert_mode = ConvertReceiverMode::kNullOrUndefined;
- node->ReplaceInput(0, node->InputAt(1));
- node->ReplaceInput(1, jsgraph()->UndefinedConstant());
- } else {
- // Just remove the target, which is the first value input.
- convert_mode = ConvertReceiverMode::kAny;
- node->RemoveInput(0);
- --arity;
- }
- // TODO(turbofan): Migrate the call count to the new operator?
- NodeProperties::ChangeOp(node, javascript()->CallFunction(
- arity, p.language_mode(), VectorSlotPair(),
- convert_mode, p.tail_call_mode()));
- return Changed(node);
-}
-
-
// ECMA-262, section 15.8.2.11.
Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
JSCallReduction r(node);
@@ -143,7 +107,7 @@ Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
for (int i = 1; i < r.GetJSCallArity(); i++) {
Node* const input = r.GetJSCallInput(i);
value = graph()->NewNode(
- common()->Select(kMachNone),
+ common()->Select(MachineRepresentation::kNone),
graph()->NewNode(simplified()->NumberLessThan(), input, value), value,
input);
}
@@ -185,8 +149,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
- case kFunctionCall:
- return ReduceFunctionCall(node);
case kMathMax:
reduction = ReduceMathMax(node);
break;
@@ -228,11 +190,6 @@ SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
return jsgraph()->simplified();
}
-
-JSOperatorBuilder* JSBuiltinReducer::javascript() const {
- return jsgraph()->javascript();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 772cbdbf25..cfacdc1e8c 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -14,7 +14,6 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class JSGraph;
-class JSOperatorBuilder;
class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
@@ -38,7 +37,6 @@ class JSBuiltinReducer final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
- JSOperatorBuilder* javascript() const;
JSGraph* jsgraph_;
};
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
new file mode 100644
index 0000000000..a15d6fd6fd
--- /dev/null
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -0,0 +1,557 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-call-reducer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
+#include "src/type-feedback-vector-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+VectorSlotPair CallCountFeedback(VectorSlotPair p) {
+ // Extract call count from {p}.
+ if (!p.IsValid()) return VectorSlotPair();
+ CallICNexus n(p.vector(), p.slot());
+ int const call_count = n.ExtractCallCount();
+ if (call_count <= 0) return VectorSlotPair();
+
+ // Create megamorphic CallIC feedback with the given {call_count}.
+ StaticFeedbackVectorSpec spec;
+ FeedbackVectorSlot slot = spec.AddCallICSlot();
+ Handle<TypeFeedbackMetadata> metadata =
+ TypeFeedbackMetadata::New(n.GetIsolate(), &spec);
+ Handle<TypeFeedbackVector> vector =
+ TypeFeedbackVector::New(n.GetIsolate(), metadata);
+ CallICNexus nexus(vector, slot);
+ nexus.ConfigureMegamorphic(call_count);
+ return VectorSlotPair(vector, slot);
+}
+
+} // namespace
+
+
+Reduction JSCallReducer::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCallConstruct:
+ return ReduceJSCallConstruct(node);
+ case IrOpcode::kJSCallFunction:
+ return ReduceJSCallFunction(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+// ES6 section 22.1.1 The Array Constructor
+Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+
+ // Check if we have an allocation site from the CallIC.
+ Handle<AllocationSite> site;
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsAllocationSite()) {
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ }
+
+ // Turn the {node} into a {JSCreateArray} call.
+ DCHECK_LE(2u, p.arity());
+ size_t const arity = p.arity() - 2;
+ NodeProperties::ReplaceValueInput(node, target, 0);
+ NodeProperties::ReplaceValueInput(node, target, 1);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ // TODO(bmeurer): We might need to propagate the tail call mode to
+ // the JSCreateArray operator, because an Array call in tail call
+ // position must always properly consume the parent stack frame.
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+}
+
+
+// ES6 section 20.1.1 The Number Constructor
+Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+
+ // Turn the {node} into a {JSToNumber} call.
+ DCHECK_LE(2u, p.arity());
+ Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ NodeProperties::ReplaceValueInputs(node, value);
+ NodeProperties::ChangeOp(node, javascript()->ToNumber());
+ return Changed(node);
+}
+
+
+// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
+Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> apply =
+ Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
+ if (arity == 2) {
+ // Neither thisArg nor argArray was provided.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else if (arity == 3) {
+ // The argArray was not provided, just remove the {target}.
+ node->RemoveInput(0);
+ --arity;
+ } else if (arity == 4) {
+ // Check if argArray is an arguments object, and {node} is the only value
+ // user of argArray (except for value uses in frame states).
+ Node* arg_array = NodeProperties::GetValueInput(node, 3);
+ if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
+ for (Edge edge : arg_array->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (edge.from() == node) continue;
+ return NoChange();
+ }
+ // Get to the actual frame state from which to extract the arguments;
+ // we can only optimize this in case the {node} was already inlined into
+ // some other function (and same for the {arg_array}).
+ CreateArgumentsParameters const& p =
+ CreateArgumentsParametersOf(arg_array->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
+ FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+ // Need to take the parameters from the arguments adaptor.
+ frame_state = outer_state;
+ }
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ }
+ // Remove the argArray input from the {node}.
+ node->RemoveInput(static_cast<int>(--arity));
+ // Add the actual parameters to the {node}, skipping the receiver.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ for (int i = p.start_index() + 1; i < state_info.parameter_count(); ++i) {
+ node->InsertInput(graph()->zone(), static_cast<int>(arity),
+ parameters->InputAt(i));
+ ++arity;
+ }
+ // Drop the {target} from the {node}.
+ node->RemoveInput(0);
+ --arity;
+ } else {
+ return NoChange();
+ }
+ // Change {node} to the new {JSCallFunction} operator.
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Change context of {node} to the Function.prototype.apply context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+
+// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
+Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Handle<JSFunction> call = Handle<JSFunction>::cast(
+ HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
+ // Change context of {node} to the Function.prototype.call context,
+ // to ensure any exception is thrown in the correct context.
+ NodeProperties::ReplaceContextInput(
+ node, jsgraph()->HeapConstant(handle(call->context(), isolate())));
+ // Remove the target from {node} and use the receiver as target instead, and
+ // the thisArg becomes the new target. If thisArg was not provided, insert
+ // undefined instead.
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ ConvertReceiverMode convert_mode;
+ if (arity == 2) {
+ // The thisArg was not provided, use undefined as receiver.
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ node->ReplaceInput(0, node->InputAt(1));
+ node->ReplaceInput(1, jsgraph()->UndefinedConstant());
+ } else {
+ // Just remove the target, which is the first value input.
+ convert_mode = ConvertReceiverMode::kAny;
+ node->RemoveInput(0);
+ --arity;
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+
+Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Try to specialize JSCallFunction {node}s with constant {target}s.
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ if (m.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Raise a TypeError if the {target} is a "classConstructor".
+ if (IsClassConstructor(shared->kind())) {
+ NodeProperties::RemoveFrameStateInput(node, 0);
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructorNonCallableError, 1));
+ return Changed(node);
+ }
+
+ // Check for known builtin functions.
+ if (shared->HasBuiltinFunctionId()) {
+ switch (shared->builtin_function_id()) {
+ case kFunctionApply:
+ return ReduceFunctionPrototypeApply(node);
+ case kFunctionCall:
+ return ReduceFunctionPrototypeCall(node);
+ default:
+ break;
+ }
+ }
+
+ // Check for the Array constructor.
+ if (*function == function->native_context()->array_function()) {
+ return ReduceArrayConstructor(node);
+ }
+
+ // Check for the Number constructor.
+ if (*function == function->native_context()->number_function()) {
+ return ReduceNumberConstructor(node);
+ }
+ } else if (m.Value()->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> function =
+ Handle<JSBoundFunction>::cast(m.Value());
+ Handle<JSReceiver> bound_target_function(
+ function->bound_target_function(), isolate());
+ Handle<Object> bound_this(function->bound_this(), isolate());
+ Handle<FixedArray> bound_arguments(function->bound_arguments(),
+ isolate());
+ CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+ ConvertReceiverMode const convert_mode =
+ (bound_this->IsNull() || bound_this->IsUndefined())
+ ? ConvertReceiverMode::kNullOrUndefined
+ : ConvertReceiverMode::kNotNullOrUndefined;
+ size_t arity = p.arity();
+ DCHECK_LE(2u, arity);
+ // Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->Constant(bound_target_function), 0);
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
+ 1);
+ // Insert the [[BoundArguments]] for {node}.
+ for (int i = 0; i < bound_arguments->length(); ++i) {
+ node->InsertInput(
+ graph()->zone(), i + 2,
+ jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
+ arity++;
+ }
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, p.language_mode(),
+ CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
+ // Don't mess with other {node}s that have a constant {target}.
+ // TODO(bmeurer): Also support proxies here.
+ return NoChange();
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // Extract feedback from the {node} using the CallICNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ Handle<Object> feedback(nexus.GetFeedback(), isolate());
+ if (feedback->IsAllocationSite()) {
+ // Retrieve the Array function from the {node}.
+ Node* array_function;
+ Handle<Context> native_context;
+ if (GetNativeContext(node).ToHandle(&native_context)) {
+ array_function = jsgraph()->HeapConstant(
+ handle(native_context->array_function(), isolate()));
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ array_function = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
+ native_context, native_context, effect);
+ }
+
+ // Check that the {target} is still the {array_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, array_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::ReplaceValueInput(node, array_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ return ReduceArrayConstructor(node);
+ } else if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ Node* target_function =
+ jsgraph()->Constant(handle(cell->value(), isolate()));
+
+ // Check that the {target} is still the {target_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, target_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallFunction node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+
+ // Try to further reduce the JSCallFunction {node}.
+ Reduction const reduction = ReduceJSCallFunction(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to specialize JSCallConstruct {node}s with constant {target}s.
+ HeapObjectMatcher m(target);
+ if (m.HasValue()) {
+ if (m.Value()->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+
+ // Raise a TypeError if the {target} is not a constructor.
+ if (!function->IsConstructor()) {
+ // Drop the lazy bailout location and use the eager bailout point for
+ // the runtime function (actually as lazy bailout point). It doesn't
+ // really matter which bailout location we use since we never really
+ // go back after throwing the exception.
+ NodeProperties::RemoveFrameStateInput(node, 0);
+ NodeProperties::ReplaceValueInputs(node, target);
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallRuntime(Runtime::kThrowCalledNonCallable, 1));
+ return Changed(node);
+ }
+
+ // Check for the ArrayConstructor.
+ if (*function == function->native_context()->array_function()) {
+ // Check if we have an allocation site.
+ Handle<AllocationSite> site;
+ if (p.feedback().IsValid()) {
+ Handle<Object> feedback(
+ p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ if (feedback->IsAllocationSite()) {
+ site = Handle<AllocationSite>::cast(feedback);
+ }
+ }
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+ }
+ }
+
+ // Don't mess with other {node}s that have a constant {target}.
+ // TODO(bmeurer): Also support optimizing bound functions and proxies here.
+ return NoChange();
+ }
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ // TODO(mvstanton): Use ConstructICNexus here, once available.
+ Handle<Object> feedback;
+ if (!p.feedback().IsValid()) return NoChange();
+ feedback = handle(p.feedback().vector()->Get(p.feedback().slot()), isolate());
+ if (feedback->IsAllocationSite()) {
+ // The feedback is an AllocationSite, which means we have called the
+ // Array function and collected transition (and pretenuring) feedback
+ // for the resulting arrays. This has to be kept in sync with the
+ // implementation of the CallConstructStub.
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
+
+ // Retrieve the Array function from the {node}.
+ Node* array_function;
+ Handle<Context> native_context;
+ if (GetNativeContext(node).ToHandle(&native_context)) {
+ array_function = jsgraph()->HeapConstant(
+ handle(native_context->array_function(), isolate()));
+ } else {
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ array_function = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
+ native_context, native_context, effect);
+ }
+
+ // Check that the {target} is still the {array_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, array_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Turn the {node} into a {JSCreateArray} call.
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ NodeProperties::RemoveFrameStateInput(node, 1);
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
+ return Changed(node);
+ } else if (feedback->IsWeakCell()) {
+ Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
+ if (cell->value()->IsJSFunction()) {
+ Node* target_function =
+ jsgraph()->Constant(handle(cell->value(), isolate()));
+
+ // Check that the {target} is still the {target_function}.
+ Node* check = effect =
+ graph()->NewNode(javascript()->StrictEqual(), target, target_function,
+ context, effect, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ control = graph()->NewNode(common()->IfTrue(), branch);
+
+ // Specialize the JSCallConstruct node to the {target_function}.
+ NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ NodeProperties::ReplaceControlInput(node, control);
+ if (target == new_target) {
+ NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
+ }
+
+ // Try to further reduce the JSCallConstruct {node}.
+ Reduction const reduction = ReduceJSCallConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+
+ return NoChange();
+}
+
+
+MaybeHandle<Context> JSCallReducer::GetNativeContext(Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context());
+}
+
+
+Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
+
+
+Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
+
+
+CommonOperatorBuilder* JSCallReducer::common() const {
+ return jsgraph()->common();
+}
+
+
+JSOperatorBuilder* JSCallReducer::javascript() const {
+ return jsgraph()->javascript();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
new file mode 100644
index 0000000000..9ffae152ac
--- /dev/null
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CALL_REDUCER_H_
+#define V8_COMPILER_JS_CALL_REDUCER_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+
+
+// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
+// which might allow inlining or other optimizations to be performed afterwards.
+class JSCallReducer final : public Reducer {
+ public:
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSCallReducer(JSGraph* jsgraph, Flags flags,
+ MaybeHandle<Context> native_context)
+ : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceArrayConstructor(Node* node);
+ Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceFunctionPrototypeApply(Node* node);
+ Reduction ReduceFunctionPrototypeCall(Node* node);
+ Reduction ReduceJSCallConstruct(Node* node);
+ Reduction ReduceJSCallFunction(Node* node);
+
+ MaybeHandle<Context> GetNativeContext(Node* node);
+
+ Graph* graph() const;
+ Flags flags() const { return flags_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ MaybeHandle<Context> native_context() const { return native_context_; }
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+
+ JSGraph* const jsgraph_;
+ Flags const flags_;
+ MaybeHandle<Context> const native_context_;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_CALL_REDUCER_H_
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index a4f3ff4986..4d9d1d9504 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -34,25 +34,7 @@ MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
node->opcode() == IrOpcode::kJSStoreContext);
Node* const object = NodeProperties::GetValueInput(node, 0);
- switch (object->opcode()) {
- case IrOpcode::kHeapConstant:
- return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(object));
- case IrOpcode::kParameter: {
- Node* const start = NodeProperties::GetValueInput(object, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(object->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- if (index == start->op()->ValueOutputCount() - 2) {
- return context();
- }
- break;
- }
- default:
- break;
- }
- return MaybeHandle<Context>();
+ return NodeProperties::GetSpecializationContext(object, context());
}
@@ -95,9 +77,6 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Success. The context load can be replaced with the constant.
// TODO(titzer): record the specialization for sharing code across multiple
// contexts that have the same value in the corresponding context slot.
- if (value->IsConsString()) {
- value = String::Flatten(Handle<String>::cast(value), TENURED);
- }
Node* constant = jsgraph_->Constant(value);
ReplaceWithValue(node, constant);
return Replace(constant);
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 6e4b0def56..769d615e4a 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -27,41 +27,44 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
- DisallowHeapAllocation no_gc;
- Object* object;
+ Handle<Object> value;
int const index = OpParameter<int>(node);
int const parameters_count = frame()->ComputeParametersCount() + 1;
if (index == Linkage::kOsrContextSpillSlotIndex) {
- object = frame()->context();
+ value = handle(frame()->context(), isolate());
} else if (index >= parameters_count) {
- object = frame()->GetExpression(index - parameters_count);
+ value = handle(frame()->GetExpression(index - parameters_count), isolate());
} else {
// The OsrValue index 0 is the receiver.
- object = index ? frame()->GetParameter(index - 1) : frame()->receiver();
+ value =
+ handle(index ? frame()->GetParameter(index - 1) : frame()->receiver(),
+ isolate());
}
- return Replace(jsgraph()->Constant(handle(object, isolate())));
+ return Replace(jsgraph()->Constant(value));
}
Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
- DisallowHeapAllocation no_gc;
- Object* object;
+ Handle<Object> value;
int const index = ParameterIndexOf(node->op());
int const parameters_count = frame()->ComputeParametersCount() + 1;
- if (index == Linkage::kJSFunctionCallClosureParamIndex) {
- object = frame()->function();
- } else if (index == parameters_count) {
- // The Parameter index (arity + 1) is the parameter count.
- object = Smi::FromInt(parameters_count - 1);
- } else if (index == parameters_count + 1) {
- // The Parameter index (arity + 2) is the context.
- object = frame()->context();
+ if (index == Linkage::kJSCallClosureParamIndex) {
+ // The Parameter index references the closure.
+ value = handle(frame()->function(), isolate());
+ } else if (index == Linkage::GetJSCallArgCountParamIndex(parameters_count)) {
+ // The Parameter index references the parameter count.
+ value = handle(Smi::FromInt(parameters_count - 1), isolate());
+ } else if (index == Linkage::GetJSCallContextParamIndex(parameters_count)) {
+ // The Parameter index references the context.
+ value = handle(frame()->context(), isolate());
} else {
// The Parameter index 0 is the receiver.
- object = index ? frame()->GetParameter(index - 1) : frame()->receiver();
+ value =
+ handle(index ? frame()->GetParameter(index - 1) : frame()->receiver(),
+ isolate());
}
- return Replace(jsgraph()->Constant(handle(object, isolate())));
+ return Replace(jsgraph()->Constant(value));
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 8c363d3e8b..15ce908a1c 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -63,12 +63,14 @@ Reduction JSGenericLowering::Reduce(Node* node) {
}
-#define REPLACE_BINARY_OP_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
+#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
+ void JSGenericLowering::Lower##Op(Node* node) { \
+ BinaryOperationParameters const& p = \
+ BinaryOperationParametersOf(node->op()); \
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
- ReplaceWithStubCall(node, CodeFactory::BinaryOpIC( \
- isolate(), token, \
- strength(OpParameter<LanguageMode>(node))), \
+ ReplaceWithStubCall(node, \
+ CodeFactory::BinaryOpIC(isolate(), token, \
+ strength(p.language_mode())), \
CallDescriptor::kPatchableCallSiteWithNop | flags); \
}
REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
@@ -157,7 +159,7 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0,
CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
- Operator::kNoProperties, kMachIntPtr);
+ Operator::kNoProperties, MachineType::IntPtr());
Node* compare =
graph()->NewNode(common()->Call(desc_compare),
static_cast<int>(inputs.size()), &inputs.front());
@@ -202,7 +204,8 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
node->ReplaceInput(0, booleanize);
node->ReplaceInput(1, true_value);
node->ReplaceInput(2, false_value);
- NodeProperties::ChangeOp(node, common()->Select(kMachAnyTagged));
+ NodeProperties::ChangeOp(node,
+ common()->Select(MachineRepresentation::kTagged));
}
@@ -223,8 +226,8 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
- CallDescriptor* desc =
- Linkage::GetRuntimeCallDescriptor(zone(), f, nargs, properties);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), f, nargs, properties, CallDescriptor::kNeedsFrameState);
Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
Node* arity = jsgraph()->Int32Constant(nargs);
node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
@@ -234,15 +237,6 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
}
-void JSGenericLowering::LowerJSUnaryNot(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable = CodeFactory::ToBoolean(
- isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
-}
-
-
void JSGenericLowering::LowerJSTypeOf(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::Typeof(isolate());
@@ -252,8 +246,7 @@ void JSGenericLowering::LowerJSTypeOf(Node* node) {
void JSGenericLowering::LowerJSToBoolean(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Callable callable =
- CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+ Callable callable = CodeFactory::ToBoolean(isolate());
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
@@ -314,10 +307,15 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
Callable callable = CodeFactory::LoadICInOptimizedCode(
isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
// Load global object from the context.
- Node* global = graph()->NewNode(machine()->Load(kMachAnyTagged), context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(
- Context::GLOBAL_OBJECT_INDEX)),
- effect, graph()->start());
+ Node* native_context =
+ graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
+ effect, graph()->start());
+ Node* global = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), native_context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
+ effect, graph()->start());
node->InsertInput(zone(), 0, global);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -331,12 +329,8 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
LanguageMode language_mode = p.language_mode();
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), language_mode, UNINITIALIZED);
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
@@ -348,12 +342,8 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
@@ -367,18 +357,19 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
// Load global object from the context.
- Node* global = graph()->NewNode(machine()->Load(kMachAnyTagged), context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(
- Context::GLOBAL_OBJECT_INDEX)),
- effect, graph()->start());
+ Node* native_context =
+ graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
+ jsgraph()->IntPtrConstant(
+ Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
+ effect, graph()->start());
+ Node* global = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), native_context,
+ jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
+ effect, graph()->start());
node->InsertInput(zone(), 0, global);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- if (FLAG_vector_stores) {
- DCHECK(p.feedback().index() != -1);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- } else {
- node->RemoveInput(3);
- }
+ DCHECK(p.feedback().index() != -1);
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable,
CallDescriptor::kPatchableCallSite | flags);
}
@@ -408,7 +399,7 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+ 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
NodeProperties::GetValueInput(node, 0),
jsgraph()->Int32Constant(
Context::SlotOffset(Context::PREVIOUS_INDEX)),
@@ -418,7 +409,7 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
node->AppendInput(zone(), graph()->start());
- NodeProperties::ChangeOp(node, machine()->Load(kMachAnyTagged));
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
}
@@ -426,7 +417,7 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
const ContextAccess& access = ContextAccessOf(node->op());
for (size_t i = 0; i < access.depth(); ++i) {
node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+ 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
NodeProperties::GetValueInput(node, 0),
jsgraph()->Int32Constant(
Context::SlotOffset(Context::PREVIOUS_INDEX)),
@@ -436,8 +427,9 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
- NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
- kMachAnyTagged, kFullWriteBarrier)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier)));
}
@@ -456,7 +448,9 @@ void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
}
-void JSGenericLowering::LowerJSCreate(Node* node) { UNIMPLEMENTED(); }
+void JSGenericLowering::LowerJSCreate(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kNewObject);
+}
void JSGenericLowering::LowerJSCreateArguments(Node* node) {
@@ -469,12 +463,33 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
break;
case CreateArgumentsParameters::kRestArray:
- UNIMPLEMENTED();
+ node->InsertInput(zone(), 1, jsgraph()->Constant(p.start_index()));
+ ReplaceWithRuntimeCall(node, Runtime::kNewRestArguments_Generic);
break;
}
}
+void JSGenericLowering::LowerJSCreateArray(Node* node) {
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity());
+ Node* new_target = node->InputAt(1);
+ // TODO(turbofan): We embed the AllocationSite from the Operator at this
+ // point, which we should not do once we want to both consume the feedback
+ // but at the same time shared the optimized code across native contexts,
+ // as the AllocationSite is associated with a single native context (it's
+ // stored in the type feedback vector after all). Once we go for cross
+ // context code generation, we should somehow find a way to get to the
+ // allocation site for the actual native context at runtime.
+ Node* type_info = p.site().is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(p.site());
+ node->RemoveInput(1);
+ node->InsertInput(zone(), 1 + arity, new_target);
+ node->InsertInput(zone(), 2 + arity, type_info);
+ ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+}
+
+
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters p = CreateClosureParametersOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
@@ -484,20 +499,43 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
}
+void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
+}
+
+
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
- int literal_flags = OpParameter<int>(node->op());
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(literal_flags));
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
}
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
- int literal_flags = OpParameter<int>(node->op());
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(literal_flags));
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
}
+void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* literal_flags = jsgraph()->SmiConstant(p.flags());
+ Node* pattern = jsgraph()->HeapConstant(p.constant());
+ node->InsertInput(graph()->zone(), 1, literal_index);
+ node->InsertInput(graph()->zone(), 2, pattern);
+ node->InsertInput(graph()->zone(), 3, literal_flags);
+ ReplaceWithStubCall(node, callable, flags);
+}
+
+
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
Handle<String> name = OpParameter<Handle<String>>(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
@@ -520,22 +558,21 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
- // TODO(bmeurer): Use the Construct builtin here.
- int arity = OpParameter<int>(node);
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- CallDescriptor* desc =
- Linkage::GetStubCallDescriptor(isolate(), zone(), d, arity - 1, flags);
- Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
- Node* actual_construct = NodeProperties::GetValueInput(node, 0);
- Node* original_construct = NodeProperties::GetValueInput(node, arity - 1);
- node->RemoveInput(arity - 1); // Drop original constructor.
+ Callable callable = CodeFactory::Construct(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 1, jsgraph()->Int32Constant(arity - 2));
- node->InsertInput(zone(), 2, actual_construct);
- node->InsertInput(zone(), 3, original_construct);
- node->InsertInput(zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, receiver);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -587,7 +624,8 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
Runtime::Function const* function =
Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function->function_id, 1, Operator::kNoProperties);
+ zone(), function->function_id, 1, Operator::kNoProperties,
+ CallDescriptor::kNeedsFrameState);
Node* cache_type = effect = graph()->NewNode(
common()->Call(descriptor),
jsgraph()->CEntryStubConstant(function->result_size), object,
@@ -596,11 +634,11 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
control = graph()->NewNode(common()->IfSuccess(), cache_type);
Node* object_map = effect = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object,
+ machine()->Load(MachineType::AnyTagged()), object,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
effect, control);
Node* cache_type_map = effect = graph()->NewNode(
- machine()->Load(kMachAnyTagged), cache_type,
+ machine()->Load(MachineType::AnyTagged()), cache_type,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
effect, control);
Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
@@ -621,7 +659,7 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
{
// Enum cache case.
Node* cache_type_enum_length = etrue0 = graph()->NewNode(
- machine()->Load(kMachUint32), cache_type,
+ machine()->Load(MachineType::Uint32()), cache_type,
jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
effect, if_true0);
cache_type_enum_length =
@@ -650,16 +688,16 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
{
// Load the enumeration cache from the instance descriptors of {object}.
Node* object_map_descriptors = efalse1 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object_map,
+ machine()->Load(MachineType::AnyTagged()), object_map,
jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
etrue0, if_false1);
Node* object_map_enum_cache = efalse1 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object_map_descriptors,
+ machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
kHeapObjectTag),
efalse1, if_false1);
cache_array_false1 = efalse1 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), object_map_enum_cache,
+ machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
jsgraph()->IntPtrConstant(
DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
efalse1, if_false1);
@@ -669,8 +707,8 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
etrue0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
cache_array_true0 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true1,
- cache_array_false1, if_true0);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true1, cache_array_false1, if_true0);
cache_length_true0 = graph()->NewNode(
machine()->WordShl(),
@@ -689,46 +727,25 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
Node* efalse0;
{
// FixedArray case.
- Node* object_instance_type = efalse0 = graph()->NewNode(
- machine()->Load(kMachUint8), object_map,
- jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
- effect, if_false0);
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- Node* check1 = graph()->NewNode(
- machine()->Uint32LessThanOrEqual(), object_instance_type,
- jsgraph()->Uint32Constant(LAST_JS_PROXY_TYPE));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* cache_type_true1 = jsgraph()->ZeroConstant(); // Zero indicates proxy
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* cache_type_false1 = jsgraph()->OneConstant(); // One means slow check
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- cache_type_false0 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_type_true1,
- cache_type_false1, if_false0);
-
+ cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
- machine()->Load(kMachAnyTagged), cache_array_false0,
+ machine()->Load(MachineType::AnyTagged()), cache_array_false0,
jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
- efalse0, if_false0);
+ effect, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
Node* cache_array =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true0,
- cache_array_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true0, cache_array_false0, control);
Node* cache_length =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_length_true0,
- cache_length_false0, control);
- cache_type = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
- cache_type_true0, cache_type_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true0, cache_length_false0, control);
+ cache_type =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_type_true0, cache_type_false0, control);
for (auto edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
@@ -778,7 +795,7 @@ void JSGenericLowering::LowerJSLoadMessage(Node* node) {
node->RemoveInput(NodeProperties::FirstContextIndex(node));
node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
- NodeProperties::ChangeOp(node, machine()->Load(kMachAnyTagged));
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
}
@@ -788,7 +805,8 @@ void JSGenericLowering::LowerJSStoreMessage(Node* node) {
node->RemoveInput(NodeProperties::FirstContextIndex(node));
node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
- StoreRepresentation representation(kMachAnyTagged, kNoWriteBarrier);
+ StoreRepresentation representation(MachineRepresentation::kTagged,
+ kNoWriteBarrier);
NodeProperties::ChangeOp(node, machine()->Store(representation));
}
@@ -801,7 +819,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* limit = graph()->NewNode(
- machine()->Load(kMachPtr),
+ machine()->Load(MachineType::Pointer()),
jsgraph()->ExternalConstant(
ExternalReference::address_of_stack_limit(isolate())),
jsgraph()->IntPtrConstant(0), effect, control);
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index 497f098baf..e6f01b3efb 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -28,13 +28,11 @@ struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<JSGlobalObject> global_object, CompilationDependencies* dependencies)
+ MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
flags_(flags),
- global_object_(global_object),
- script_context_table_(
- global_object->native_context()->script_context_table(), isolate()),
+ native_context_(native_context),
dependencies_(dependencies),
type_cache_(TypeCache::Get()) {}
@@ -58,9 +56,13 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Retrieve the global object from the given {node}.
+ Handle<JSGlobalObject> global_object;
+ if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
+
// Try to lookup the name on the script context table first (lexical scoping).
ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(name, &result)) {
+ if (LookupInScriptContextTable(global_object, name, &result)) {
if (result.context->is_the_hole(result.index)) return NoChange();
Node* context = jsgraph()->HeapConstant(result.context);
Node* value = effect = graph()->NewNode(
@@ -72,7 +74,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
// Lookup on the global object instead. We only deal with own data
// properties of the global object here (represented as PropertyCell).
- LookupIterator it(global_object(), name, LookupIterator::OWN);
+ LookupIterator it(global_object, name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
PropertyDetails property_details = property_cell->property_details();
@@ -145,9 +147,13 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Retrieve the global object from the given {node}.
+ Handle<JSGlobalObject> global_object;
+ if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
+
// Try to lookup the name on the script context table first (lexical scoping).
ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(name, &result)) {
+ if (LookupInScriptContextTable(global_object, name, &result)) {
if (result.context->is_the_hole(result.index)) return NoChange();
if (result.immutable) return NoChange();
Node* context = jsgraph()->HeapConstant(result.context);
@@ -159,7 +165,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
// Lookup on the global object instead. We only deal with own data
// properties of the global object here (represented as PropertyCell).
- LookupIterator it(global_object(), name, LookupIterator::OWN);
+ LookupIterator it(global_object, name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
PropertyDetails property_details = property_cell->property_details();
@@ -182,8 +188,9 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_false);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
control = graph()->NewNode(common()->IfTrue(), branch);
@@ -201,8 +208,9 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_true);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_true);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
control = graph()->NewNode(common()->IfFalse(), branch);
@@ -221,8 +229,9 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_false);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
control = graph()->NewNode(common()->IfTrue(), branch);
@@ -254,16 +263,27 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
}
+MaybeHandle<JSGlobalObject> JSGlobalObjectSpecialization::GetGlobalObject(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationGlobalObject(context,
+ native_context());
+}
+
+
bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
- Handle<Name> name, ScriptContextTableLookupResult* result) {
+ Handle<JSGlobalObject> global_object, Handle<Name> name,
+ ScriptContextTableLookupResult* result) {
if (!name->IsString()) return false;
+ Handle<ScriptContextTable> script_context_table(
+ global_object->native_context()->script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
- if (!ScriptContextTable::Lookup(script_context_table(),
+ if (!ScriptContextTable::Lookup(script_context_table,
Handle<String>::cast(name), &lookup_result)) {
return false;
}
Handle<Context> script_context = ScriptContextTable::GetContext(
- script_context_table(), lookup_result.context_index);
+ script_context_table, lookup_result.context_index);
result->context = script_context;
result->immutable = IsImmutableVariableMode(lookup_result.mode);
result->index = lookup_result.slot_index;
diff --git a/deps/v8/src/compiler/js-global-object-specialization.h b/deps/v8/src/compiler/js-global-object-specialization.h
index 49b4114676..83d890c938 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.h
+++ b/deps/v8/src/compiler/js-global-object-specialization.h
@@ -13,7 +13,6 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
-class ScriptContextTable;
class TypeCache;
@@ -39,7 +38,7 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<JSGlobalObject> global_object,
+ MaybeHandle<Context> native_context,
CompilationDependencies* dependencies);
Reduction Reduce(Node* node) final;
@@ -48,8 +47,12 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSStoreGlobal(Node* node);
+ // Retrieve the global object from the given {node} if known.
+ MaybeHandle<JSGlobalObject> GetGlobalObject(Node* node);
+
struct ScriptContextTableLookupResult;
- bool LookupInScriptContextTable(Handle<Name> name,
+ bool LookupInScriptContextTable(Handle<JSGlobalObject> global_object,
+ Handle<Name> name,
ScriptContextTableLookupResult* result);
Graph* graph() const;
@@ -59,16 +62,12 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
Flags flags() const { return flags_; }
- Handle<JSGlobalObject> global_object() const { return global_object_; }
- Handle<ScriptContextTable> script_context_table() const {
- return script_context_table_;
- }
+ MaybeHandle<Context> native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
Flags const flags_;
- Handle<JSGlobalObject> global_object_;
- Handle<ScriptContextTable> script_context_table_;
+ MaybeHandle<Context> native_context_;
CompilationDependencies* const dependencies_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 782236fe0c..e938798287 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -11,12 +11,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> value) {
- // TODO(bmeurer): Flatten cons strings here before we canonicalize them?
- return graph()->NewNode(common()->HeapConstant(value));
-}
-
-
#define CACHED(name, expr) \
cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
@@ -24,43 +18,40 @@ Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> value) {
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
return CACHED(kCEntryStubConstant,
- ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+ HeapConstant(CEntryStub(isolate(), 1).GetCode()));
}
- return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
+ return HeapConstant(CEntryStub(isolate(), result_size).GetCode());
}
Node* JSGraph::EmptyFixedArrayConstant() {
return CACHED(kEmptyFixedArrayConstant,
- ImmovableHeapConstant(factory()->empty_fixed_array()));
+ HeapConstant(factory()->empty_fixed_array()));
}
Node* JSGraph::UndefinedConstant() {
- return CACHED(kUndefinedConstant,
- ImmovableHeapConstant(factory()->undefined_value()));
+ return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
}
Node* JSGraph::TheHoleConstant() {
- return CACHED(kTheHoleConstant,
- ImmovableHeapConstant(factory()->the_hole_value()));
+ return CACHED(kTheHoleConstant, HeapConstant(factory()->the_hole_value()));
}
Node* JSGraph::TrueConstant() {
- return CACHED(kTrueConstant, ImmovableHeapConstant(factory()->true_value()));
+ return CACHED(kTrueConstant, HeapConstant(factory()->true_value()));
}
Node* JSGraph::FalseConstant() {
- return CACHED(kFalseConstant,
- ImmovableHeapConstant(factory()->false_value()));
+ return CACHED(kFalseConstant, HeapConstant(factory()->false_value()));
}
Node* JSGraph::NullConstant() {
- return CACHED(kNullConstant, ImmovableHeapConstant(factory()->null_value()));
+ return CACHED(kNullConstant, HeapConstant(factory()->null_value()));
}
@@ -81,11 +72,14 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
- // TODO(turbofan): canonicalize heap constants using <magic approach>.
- // TODO(titzer): We could also match against the addresses of immortable
- // immovables here, even without access to the heap, thus always
- // canonicalizing references to them.
- return ImmovableHeapConstant(value);
+ if (value->IsConsString()) {
+ value = String::Flatten(Handle<String>::cast(value), TENURED);
+ }
+ Node** loc = cache_.FindHeapConstant(value);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->HeapConstant(value));
+ }
+ return *loc;
}
@@ -126,7 +120,7 @@ Node* JSGraph::Constant(int32_t value) {
Node* JSGraph::Int32Constant(int32_t value) {
Node** loc = cache_.FindInt32Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Int32Constant(value));
}
return *loc;
@@ -135,7 +129,7 @@ Node* JSGraph::Int32Constant(int32_t value) {
Node* JSGraph::Int64Constant(int64_t value) {
Node** loc = cache_.FindInt64Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Int64Constant(value));
}
return *loc;
@@ -144,7 +138,7 @@ Node* JSGraph::Int64Constant(int64_t value) {
Node* JSGraph::NumberConstant(double value) {
Node** loc = cache_.FindNumberConstant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->NumberConstant(value));
}
return *loc;
@@ -153,7 +147,7 @@ Node* JSGraph::NumberConstant(double value) {
Node* JSGraph::Float32Constant(float value) {
Node** loc = cache_.FindFloat32Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Float32Constant(value));
}
return *loc;
@@ -162,7 +156,7 @@ Node* JSGraph::Float32Constant(float value) {
Node* JSGraph::Float64Constant(double value) {
Node** loc = cache_.FindFloat64Constant(value);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->Float64Constant(value));
}
return *loc;
@@ -171,7 +165,7 @@ Node* JSGraph::Float64Constant(double value) {
Node* JSGraph::ExternalConstant(ExternalReference reference) {
Node** loc = cache_.FindExternalConstant(reference);
- if (*loc == NULL) {
+ if (*loc == nullptr) {
*loc = graph()->NewNode(common()->ExternalConstant(reference));
}
return *loc;
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 16760a5a9d..5a25ed0697 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -158,7 +158,6 @@ class JSGraph : public ZoneObject {
CommonNodeCache cache_;
Node* cached_nodes_[kNumCachedNodes];
- Node* ImmovableHeapConstant(Handle<HeapObject> value);
Node* NumberConstant(double value);
DISALLOW_COPY_AND_ASSIGN(JSGraph);
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index ec00e9bde4..cd5637b0c4 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -13,7 +13,7 @@ namespace internal {
namespace compiler {
Reduction JSInliningHeuristic::Reduce(Node* node) {
- if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
// Check if we already saw that {node} before, and if so, just skip it.
if (seen_.find(node->id()) != seen_.end()) return NoChange();
@@ -26,7 +26,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Functions marked with %SetForceInlineFlag are immediately inlined.
if (function->shared()->force_inline()) {
- return inliner_.ReduceJSCallFunction(node, function);
+ return inliner_.ReduceJSCall(node, function);
}
// Handling of special inlining modes right away:
@@ -36,7 +36,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
case kRestrictedInlining:
return NoChange();
case kStressInlining:
- return inliner_.ReduceJSCallFunction(node, function);
+ return inliner_.ReduceJSCall(node, function);
case kGeneralInlining:
break;
}
@@ -48,6 +48,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Built-in functions are handled by the JSBuiltinReducer.
if (function->shared()->HasBuiltinFunctionId()) return NoChange();
+ // Don't inline builtins.
+ if (function->shared()->IsBuiltin()) return NoChange();
+
// Quick check on source code length to avoid parsing large candidate.
if (function->shared()->SourceSize() > FLAG_max_inlined_source_size) {
return NoChange();
@@ -64,18 +67,21 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// Stop inlinining once the maximum allowed level is reached.
int level = 0;
- for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = NodeProperties::GetFrameStateInput(frame_state, 0)) {
if (++level > FLAG_max_inlining_levels) return NoChange();
}
// Gather feedback on how often this call site has been hit before.
- CallFunctionParameters p = CallFunctionParametersOf(node->op());
int calls = -1; // Same default as CallICNexus::ExtractCallCount.
- if (p.feedback().IsValid()) {
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
- calls = nexus.ExtractCallCount();
+ // TODO(turbofan): We also want call counts for constructor calls.
+ if (node->opcode() == IrOpcode::kJSCallFunction) {
+ CallFunctionParameters p = CallFunctionParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ calls = nexus.ExtractCallCount();
+ }
}
// ---------------------------------------------------------------------------
@@ -92,13 +98,23 @@ void JSInliningHeuristic::Finalize() {
if (candidates_.empty()) return; // Nothing to do without candidates.
if (FLAG_trace_turbo_inlining) PrintCandidates();
+ // We inline at most one candidate in every iteration of the fixpoint.
+ // This is to ensure that we don't consume the full inlining budget
+ // on things that aren't called very often.
+ // TODO(bmeurer): Use std::priority_queue instead of std::set here.
while (!candidates_.empty()) {
- if (cumulative_count_ > FLAG_max_inlined_nodes_cumulative) break;
+ if (cumulative_count_ > FLAG_max_inlined_nodes_cumulative) return;
auto i = candidates_.begin();
- Candidate const& candidate = *i;
- inliner_.ReduceJSCallFunction(candidate.node, candidate.function);
- cumulative_count_ += candidate.function->shared()->ast_node_count();
+ Candidate candidate = *i;
candidates_.erase(i);
+ // Make sure we don't try to inline dead candidate nodes.
+ if (!candidate.node->IsDead()) {
+ Reduction r = inliner_.ReduceJSCall(candidate.node, candidate.function);
+ if (r.Changed()) {
+ cumulative_count_ += candidate.function->shared()->ast_node_count();
+ return;
+ }
+ }
}
}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index f041698ab9..99a1547b9a 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -4,25 +4,21 @@
#include "src/compiler/js-inlining.h"
-#include "src/ast.h"
-#include "src/ast-numbering.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/common-operator-reducer.h"
-#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-global-object-specialization.h"
-#include "src/compiler/js-native-context-specialization.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
namespace v8 {
namespace internal {
@@ -34,36 +30,46 @@ namespace compiler {
} while (false)
-// Provides convenience accessors for calls to JS functions.
-class JSCallFunctionAccessor {
+// Provides convenience accessors for the common layout of nodes having either
+// the {JSCallFunction} or the {JSCallConstruct} operator.
+class JSCallAccessor {
public:
- explicit JSCallFunctionAccessor(Node* call) : call_(call) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ explicit JSCallAccessor(Node* call) : call_(call) {
+ DCHECK(call->opcode() == IrOpcode::kJSCallFunction ||
+ call->opcode() == IrOpcode::kJSCallConstruct);
}
- Node* jsfunction() { return call_->InputAt(0); }
-
- Node* receiver() { return call_->InputAt(1); }
+ Node* target() {
+ // Both, {JSCallFunction} and {JSCallConstruct}, have same layout here.
+ return call_->InputAt(0);
+ }
- Node* formal_argument(size_t index) {
- DCHECK(index < formal_arguments());
- return call_->InputAt(static_cast<int>(2 + index));
+ Node* receiver() {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, call_->opcode());
+ return call_->InputAt(1);
}
- size_t formal_arguments() {
- // {value_inputs} includes jsfunction and receiver.
- size_t value_inputs = call_->op()->ValueInputCount();
- DCHECK_GE(call_->InputCount(), 2);
- return value_inputs - 2;
+ Node* new_target() {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, call_->opcode());
+ return call_->InputAt(formal_arguments() + 1);
}
Node* frame_state_before() {
return NodeProperties::GetFrameStateInput(call_, 1);
}
+
Node* frame_state_after() {
+ // Both, {JSCallFunction} and {JSCallConstruct}, have frame state after.
return NodeProperties::GetFrameStateInput(call_, 0);
}
+ int formal_arguments() {
+ // Both, {JSCallFunction} and {JSCallConstruct}, have two extra inputs:
+ // - JSCallConstruct: Includes target function and new target.
+ // - JSCallFunction: Includes target function and receiver.
+ return call_->op()->ValueInputCount() - 2;
+ }
+
private:
Node* call_;
};
@@ -126,22 +132,23 @@ class CopyVisitor {
};
-Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
- Node* start, Node* end) {
+Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, Node* start, Node* end) {
// The scheduler is smart enough to place our code; we just ensure {control}
// becomes the control input of the start of the inlinee, and {effect} becomes
// the effect input of the start of the inlinee.
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
+ int const inlinee_new_target_index =
+ static_cast<int>(start->op()->ValueOutputCount()) - 3;
int const inlinee_arity_index =
static_cast<int>(start->op()->ValueOutputCount()) - 2;
- // Context is last parameter.
int const inlinee_context_index =
static_cast<int>(start->op()->ValueOutputCount()) - 1;
- // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
- // context, effect, control.
+ // {inliner_inputs} counts JSFunction, receiver, arguments, but not
+ // new target value, argument count, context, effect or control.
int inliner_inputs = call->op()->ValueInputCount();
// Iterate over all uses of the start node.
for (Edge edge : start->use_edges()) {
@@ -150,10 +157,13 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
case IrOpcode::kParameter: {
int index = 1 + ParameterIndexOf(use->op());
DCHECK_LE(index, inlinee_context_index);
- if (index < inliner_inputs && index < inlinee_arity_index) {
+ if (index < inliner_inputs && index < inlinee_new_target_index) {
// There is an input from the call, and the index is a value
// projection but not the context, so rewire the input.
Replace(use, call->InputAt(index));
+ } else if (index == inlinee_new_target_index) {
+ // The projection is requesting the new target value.
+ Replace(use, new_target);
} else if (index == inlinee_arity_index) {
// The projection is requesting the number of arguments.
Replace(use, jsgraph_->Int32Constant(inliner_inputs - 2));
@@ -213,7 +223,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
values.push_back(control_output);
effects.push_back(control_output);
Node* value_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Phi(kMachAnyTagged, input_count),
+ jsgraph_->common()->Phi(MachineRepresentation::kTagged, input_count),
static_cast<int>(values.size()), &values.front());
Node* effect_output = jsgraph_->graph()->NewNode(
jsgraph_->common()->EffectPhi(input_count),
@@ -227,12 +237,13 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
}
-Node* JSInliner::CreateArgumentsAdaptorFrameState(
- JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info) {
+Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared) {
const FrameStateFunctionInfo* state_info =
jsgraph_->common()->CreateFrameStateFunctionInfo(
- FrameStateType::kArgumentsAdaptor,
- static_cast<int>(call->formal_arguments()) + 1, 0, shared_info,
+ frame_state_type, parameter_count + 1, 0, shared,
CALL_MAINTAINS_NATIVE_CONTEXT);
const Operator* op = jsgraph_->common()->FrameState(
@@ -240,56 +251,79 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(
const Operator* op0 = jsgraph_->common()->StateValues(0);
Node* node0 = jsgraph_->graph()->NewNode(op0);
NodeVector params(local_zone_);
- params.push_back(call->receiver());
- for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
- params.push_back(call->formal_argument(argument));
+ for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
+ params.push_back(node->InputAt(1 + parameter));
}
const Operator* op_param =
jsgraph_->common()->StateValues(static_cast<int>(params.size()));
Node* params_node = jsgraph_->graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
- return jsgraph_->graph()->NewNode(
- op, params_node, node0, node0, jsgraph_->UndefinedConstant(),
- call->jsfunction(), call->frame_state_after());
+ return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
+ jsgraph_->UndefinedConstant(),
+ node->InputAt(0), outer_frame_state);
+}
+
+
+namespace {
+
+// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
+bool NeedsImplicitReceiver(Handle<JSFunction> function, Isolate* isolate) {
+ Code* construct_stub = function->shared()->construct_stub();
+ return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
+ construct_stub != *isolate->builtins()->ConstructedNonConstructable();
}
+} // namespace
+
Reduction JSInliner::Reduce(Node* node) {
- if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
+ if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
- JSCallFunctionAccessor call(node);
- HeapObjectMatcher match(call.jsfunction());
+ // This reducer can handle both normal function calls as well a constructor
+ // calls whenever the target is a constant function object, as follows:
+ // - JSCallFunction(target:constant, receiver, args...)
+ // - JSCallConstruct(target:constant, args..., new.target)
+ HeapObjectMatcher match(node->InputAt(0));
if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- return ReduceJSCallFunction(node, function);
+ return ReduceJSCall(node, function);
}
-Reduction JSInliner::ReduceJSCallFunction(Node* node,
- Handle<JSFunction> function) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- JSCallFunctionAccessor call(node);
+Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
+ DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+ JSCallAccessor call(node);
+ // Function must be inlineable.
if (!function->shared()->IsInlineable()) {
- // Function must be inlineable.
TRACE("Not inlining %s into %s because callee is not inlineable\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+ // Constructor must be constructable.
+ if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ !function->IsConstructor()) {
+ TRACE("Not inlining %s into %s because constructor is not constructable.\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
- if (IsClassConstructor(function->shared()->kind())) {
- TRACE("Not inlining %s into %s because callee is classConstructor\n",
+ if (node->opcode() == IrOpcode::kJSCallFunction &&
+ IsClassConstructor(function->shared()->kind())) {
+ TRACE("Not inlining %s into %s because callee is a class constructor.\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+ // Function contains break points.
if (function->shared()->HasDebugInfo()) {
- // Function contains break points.
TRACE("Not inlining %s into %s because callee may contain break points\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -343,9 +377,6 @@ Reduction JSInliner::ReduceJSCallFunction(Node* node,
if (info_->is_deoptimization_enabled()) {
info.MarkAsDeoptimizationEnabled();
}
- if (info_->is_native_context_specializing()) {
- info.MarkAsNativeContextSpecializing();
- }
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
@@ -359,7 +390,7 @@ Reduction JSInliner::ReduceJSCallFunction(Node* node,
// In strong mode, in case of too few arguments we need to throw a TypeError
// so we must not inline this call.
- size_t parameter_count = info.literal()->parameter_count();
+ int parameter_count = info.literal()->parameter_count();
if (is_strong(info.language_mode()) &&
call.formal_arguments() < parameter_count) {
TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
@@ -397,32 +428,54 @@ Reduction JSInliner::ReduceJSCallFunction(Node* node,
AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
graph_builder.CreateGraph(false);
- // TODO(mstarzinger): Unify this with the Pipeline once JSInliner refactoring
- // starts.
- if (info.is_native_context_specializing()) {
- GraphReducer graph_reducer(local_zone_, &graph, jsgraph.Dead());
- DeadCodeElimination dead_code_elimination(&graph_reducer, &graph,
- jsgraph.common());
- CommonOperatorReducer common_reducer(&graph_reducer, &graph,
- jsgraph.common(), jsgraph.machine());
- JSGlobalObjectSpecialization global_object_specialization(
- &graph_reducer, &jsgraph,
- info.is_deoptimization_enabled()
- ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
- : JSGlobalObjectSpecialization::kNoFlags,
- handle(info.global_object(), info.isolate()), info_->dependencies());
- JSNativeContextSpecialization native_context_specialization(
- &graph_reducer, &jsgraph,
- info.is_deoptimization_enabled()
- ? JSNativeContextSpecialization::kDeoptimizationEnabled
- : JSNativeContextSpecialization::kNoFlags,
- handle(info.global_object()->native_context(), info.isolate()),
- info_->dependencies(), local_zone_);
- graph_reducer.AddReducer(&dead_code_elimination);
- graph_reducer.AddReducer(&common_reducer);
- graph_reducer.AddReducer(&global_object_specialization);
- graph_reducer.AddReducer(&native_context_specialization);
- graph_reducer.ReduceGraph();
+ CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
+ visitor.CopyGraph();
+
+ Node* start = visitor.GetCopy(graph.start());
+ Node* end = visitor.GetCopy(graph.end());
+ Node* frame_state = call.frame_state_after();
+ Node* new_target = jsgraph_->UndefinedConstant();
+
+ // Insert nodes around the call that model the behavior required for a
+ // constructor dispatch (allocate implicit receiver and check return value).
+ // This models the behavior usually accomplished by our {JSConstructStub}.
+ // Note that the context has to be the callers context (input to call node).
+ Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
+ if (node->opcode() == IrOpcode::kJSCallConstruct &&
+ NeedsImplicitReceiver(function, info_->isolate())) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* create = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->Create(), call.target(), call.new_target(),
+ context, call.frame_state_before(), effect);
+ NodeProperties::ReplaceEffectInput(node, create);
+ // Insert a check of the return value to determine whether the return value
+ // or the implicit receiver should be selected as a result of the call.
+ Node* check = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
+ node, context, node, start);
+ Node* select = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Select(MachineRepresentation::kTagged), check, node,
+ create);
+ NodeProperties::ReplaceUses(node, select, check, node, node);
+ NodeProperties::ReplaceValueInput(select, node, 1);
+ NodeProperties::ReplaceValueInput(check, node, 0);
+ NodeProperties::ReplaceEffectInput(check, node);
+ receiver = create; // The implicit receiver.
+ }
+
+ // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
+ // normal {JSCallFunction} node so that the rest of the inlining machinery
+ // behaves as if we were dealing with a regular function invocation.
+ if (node->opcode() == IrOpcode::kJSCallConstruct) {
+ new_target = call.new_target(); // Retrieve new target value input.
+ node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
+ node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+ // Insert a construct stub frame into the chain of frame states. This will
+ // reconstruct the proper frame when deoptimizing within the constructor.
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ FrameStateType::kConstructStub, info.shared_info());
}
// The inlinee specializes to the context from the JSFunction object.
@@ -431,20 +484,14 @@ Reduction JSInliner::ReduceJSCallFunction(Node* node,
// type feedback in the compiler.
Node* context = jsgraph_->Constant(handle(function->context()));
- CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
- visitor.CopyGraph();
-
- Node* start = visitor.GetCopy(graph.start());
- Node* end = visitor.GetCopy(graph.end());
- Node* frame_state = call.frame_state_after();
-
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
// passed into this node has to be the callees context (loaded above). Note
// that the frame state passed to the JSConvertReceiver must be the frame
// state _before_ the call; it is not necessary to fiddle with the receiver
// in that frame state tho, as the conversion of the receiver can be repeated
// any number of times, it's not observable.
- if (is_sloppy(info.language_mode()) && !function->shared()->native()) {
+ if (node->opcode() == IrOpcode::kJSCallFunction &&
+ is_sloppy(info.language_mode()) && !function->shared()->native()) {
const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* convert = jsgraph_->graph()->NewNode(
@@ -455,15 +502,17 @@ Reduction JSInliner::ReduceJSCallFunction(Node* node,
}
// Insert argument adaptor frame if required. The callees formal parameter
- // count (i.e. value outputs of start node minus target, receiver, num args
- // and context) have to match the number of arguments passed to the call.
- DCHECK_EQ(static_cast<int>(parameter_count),
- start->op()->ValueOutputCount() - 4);
+ // count (i.e. value outputs of start node minus target, receiver, new target,
+ // arguments count and context) have to match the number of arguments passed
+ // to the call.
+ DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
if (call.formal_arguments() != parameter_count) {
- frame_state = CreateArgumentsAdaptorFrameState(&call, info.shared_info());
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, call.formal_arguments(),
+ FrameStateType::kArgumentsAdaptor, info.shared_info());
}
- return InlineCall(node, context, frame_state, start, end);
+ return InlineCall(node, new_target, context, frame_state, start, end);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 2ad49842f5..99eff96c4c 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -16,9 +16,6 @@ class CompilationInfo;
namespace compiler {
-// Forward declarations.
-class JSCallFunctionAccessor;
-
// The JSInliner provides the core graph inlining machinery. Note that this
// class only deals with the mechanics of how to inline one graph into another,
// heuristics that decide what and how much to inline are beyond its scope.
@@ -36,18 +33,20 @@ class JSInliner final : public AdvancedReducer {
// Can be used by inlining heuristics or by testing code directly, without
// using the above generic reducer interface of the inlining machinery.
- Reduction ReduceJSCallFunction(Node* node, Handle<JSFunction> function);
+ Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
private:
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
- Node* CreateArgumentsAdaptorFrameState(
- JSCallFunctionAccessor* call, Handle<SharedFunctionInfo> shared_info);
+ Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
+ int parameter_count,
+ FrameStateType frame_state_type,
+ Handle<SharedFunctionInfo> shared);
- Reduction InlineCall(Node* call, Node* context, Node* frame_state,
- Node* start, Node* end);
+ Reduction InlineCall(Node* call, Node* new_target, Node* context,
+ Node* frame_state, Node* start, Node* end);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 379f8b7490..ca5cb932b4 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -37,16 +37,14 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineConstructDouble:
return ReduceConstructDouble(node);
- case Runtime::kInlineDateField:
- return ReduceDateField(node);
+ case Runtime::kInlineCreateIterResultObject:
+ return ReduceCreateIterResultObject(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
case Runtime::kInlineDoubleHi:
return ReduceDoubleHi(node);
case Runtime::kInlineDoubleLo:
return ReduceDoubleLo(node);
- case Runtime::kInlineHeapObjectGetMap:
- return ReduceHeapObjectGetMap(node);
case Runtime::kInlineIncrementStatsCounter:
return ReduceIncrementStatsCounter(node);
case Runtime::kInlineIsArray:
@@ -56,31 +54,21 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsFunction:
- return ReduceIsInstanceType(node, JS_FUNCTION_TYPE);
+ return ReduceIsFunction(node);
case Runtime::kInlineIsRegExp:
return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineIsJSReceiver:
+ return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
case Runtime::kInlineJSValueGetValue:
return ReduceJSValueGetValue(node);
- case Runtime::kInlineMapGetInstanceType:
- return ReduceMapGetInstanceType(node);
case Runtime::kInlineMathClz32:
return ReduceMathClz32(node);
case Runtime::kInlineMathFloor:
return ReduceMathFloor(node);
case Runtime::kInlineMathSqrt:
return ReduceMathSqrt(node);
- case Runtime::kInlineOneByteSeqStringGetChar:
- return ReduceSeqStringGetChar(node, String::ONE_BYTE_ENCODING);
- case Runtime::kInlineOneByteSeqStringSetChar:
- return ReduceSeqStringSetChar(node, String::ONE_BYTE_ENCODING);
- case Runtime::kInlineStringGetLength:
- return ReduceStringGetLength(node);
- case Runtime::kInlineTwoByteSeqStringGetChar:
- return ReduceSeqStringGetChar(node, String::TWO_BYTE_ENCODING);
- case Runtime::kInlineTwoByteSeqStringSetChar:
- return ReduceSeqStringSetChar(node, String::TWO_BYTE_ENCODING);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
case Runtime::kInlineIsMinusZero:
@@ -89,10 +77,16 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceFixedArrayGet(node);
case Runtime::kInlineFixedArraySet:
return ReduceFixedArraySet(node);
- case Runtime::kInlineGetTypeFeedbackVector:
- return ReduceGetTypeFeedbackVector(node);
- case Runtime::kInlineGetCallerJSFunction:
- return ReduceGetCallerJSFunction(node);
+ case Runtime::kInlineRegExpConstructResult:
+ return ReduceRegExpConstructResult(node);
+ case Runtime::kInlineRegExpExec:
+ return ReduceRegExpExec(node);
+ case Runtime::kInlineRegExpFlags:
+ return ReduceRegExpFlags(node);
+ case Runtime::kInlineRegExpSource:
+ return ReduceRegExpSource(node);
+ case Runtime::kInlineSubString:
+ return ReduceSubString(node);
case Runtime::kInlineToInteger:
return ReduceToInteger(node);
case Runtime::kInlineToLength:
@@ -107,10 +101,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToPrimitive(node);
case Runtime::kInlineToString:
return ReduceToString(node);
- case Runtime::kInlineThrowNotDateError:
- return ReduceThrowNotDateError(node);
case Runtime::kInlineCall:
return ReduceCall(node);
+ case Runtime::kInlineTailCall:
+ return ReduceTailCall(node);
+ case Runtime::kInlineGetSuperConstructor:
+ return ReduceGetSuperConstructor(node);
default:
break;
}
@@ -118,6 +114,16 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
}
+Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
+ Node* const value = NodeProperties::GetValueInput(node, 0);
+ Node* const done = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ return Change(node, javascript()->CreateIterResultObject(), value, done,
+ context, effect);
+}
+
+
Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
Node* high = NodeProperties::GetValueInput(node, 0);
Node* low = NodeProperties::GetValueInput(node, 1);
@@ -131,24 +137,6 @@ Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceDateField(Node* node) {
- Node* const value = NodeProperties::GetValueInput(node, 0);
- Node* const index = NodeProperties::GetValueInput(node, 1);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- NumberMatcher mindex(index);
- if (mindex.Is(JSDate::kDateValue)) {
- return Change(
- node,
- simplified()->LoadField(AccessBuilder::ForJSDateField(
- static_cast<JSDate::FieldIndex>(static_cast<int>(mindex.Value())))),
- value, effect, control);
- }
- // TODO(turbofan): Optimize more patterns.
- return NoChange();
-}
-
-
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
if (mode() != kDeoptimizationEnabled) return NoChange();
Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
@@ -157,7 +145,8 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
node->TrimInputCount(0);
@@ -176,15 +165,6 @@ Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceHeapObjectGetMap(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(AccessBuilder::ForMap()), value,
- effect, control);
-}
-
-
Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
if (!FLAG_native_code_counters) return ChangeToUndefined(node);
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
@@ -217,8 +197,6 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
// } else {
// return %_GetInstanceType(%_GetMap(value)) == instance_type;
// }
- MachineType const type = static_cast<MachineType>(kTypeBool | kRepTagged);
-
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -246,31 +224,108 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
ReplaceWithValue(node, node, ephi);
// Turn the {node} into a Phi.
- return Change(node, common()->Phi(type, 2), vtrue, vfalse, merge);
+ return Change(node, common()->Phi(MachineRepresentation::kTagged, 2), vtrue,
+ vfalse, merge);
}
-Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
- return Change(node, simplified()->ObjectIsSmi());
+Reduction JSIntrinsicLowering::ReduceIsFunction(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (value_type->Is(Type::Function())) {
+ value = jsgraph()->TrueConstant();
+ } else {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return FIRST_FUNCTION_TYPE <= %_GetInstanceType(%_GetMap(value))
+ // }
+ STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false),
+ effect, if_false);
+ Node* vfalse =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(FIRST_FUNCTION_TYPE), efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+ ReplaceWithValue(node, node, effect, control);
+ return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
+Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
- effect, control);
+ if (value_type->Is(Type::Receiver())) {
+ value = jsgraph()->TrueConstant();
+ } else if (!value_type->Maybe(Type::Receiver())) {
+ value = jsgraph()->FalseConstant();
+ } else {
+ // if (%_IsSmi(value)) {
+ // return false;
+ // } else {
+ // return FIRST_JS_RECEIVER_TYPE <= %_GetInstanceType(%_GetMap(value))
+ // }
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, effect, if_false),
+ effect, if_false);
+ Node* vfalse = graph()->NewNode(
+ machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Int32Constant(FIRST_JS_RECEIVER_TYPE), efalse);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+ ReplaceWithValue(node, node, effect, control);
+ return Replace(value);
}
-Reduction JSIntrinsicLowering::ReduceMapGetInstanceType(Node* node) {
+Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- return Change(node,
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- value, effect, control);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
+ effect, control);
}
@@ -290,53 +345,6 @@ Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceSeqStringGetChar(
- Node* node, String::Encoding encoding) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- RelaxControls(node);
- node->ReplaceInput(2, effect);
- node->ReplaceInput(3, control);
- node->TrimInputCount(4);
- NodeProperties::ChangeOp(
- node,
- simplified()->LoadElement(AccessBuilder::ForSeqStringChar(encoding)));
- return Changed(node);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceSeqStringSetChar(
- Node* node, String::Encoding encoding) {
- // Note: The intrinsic has a strange argument order, so we need to reshuffle.
- Node* index = NodeProperties::GetValueInput(node, 0);
- Node* chr = NodeProperties::GetValueInput(node, 1);
- Node* string = NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- ReplaceWithValue(node, string, node);
- NodeProperties::RemoveType(node);
- node->ReplaceInput(0, string);
- node->ReplaceInput(1, index);
- node->ReplaceInput(2, chr);
- node->ReplaceInput(3, effect);
- node->ReplaceInput(4, control);
- node->TrimInputCount(5);
- NodeProperties::ChangeOp(
- node,
- simplified()->StoreElement(AccessBuilder::ForSeqStringChar(encoding)));
- return Changed(node);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceStringGetLength(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(AccessBuilder::ForStringLength()),
- value, effect, control);
-}
-
-
Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;
@@ -347,7 +355,8 @@ Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// }
const Operator* const merge_op = common()->Merge(2);
const Operator* const ephi_op = common()->EffectPhi(2);
- const Operator* const phi_op = common()->Phi(kMachAnyTagged, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kTagged, 2);
Node* value = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -460,57 +469,39 @@ Reduction JSIntrinsicLowering::ReduceFixedArraySet(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceGetTypeFeedbackVector(Node* node) {
- Node* func = node->InputAt(0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- FieldAccess access = AccessBuilder::ForJSFunctionSharedFunctionInfo();
- Node* load =
- graph()->NewNode(simplified()->LoadField(access), func, effect, control);
- access = AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector();
- return Change(node, simplified()->LoadField(access), load, load, control);
+Reduction JSIntrinsicLowering::ReduceRegExpConstructResult(Node* node) {
+ // TODO(bmeurer): Introduce JSCreateRegExpResult?
+ return Change(node, CodeFactory::RegExpConstructResult(isolate()), 0);
}
-Reduction JSIntrinsicLowering::ReduceGetCallerJSFunction(Node* node) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
+Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
+ return Change(node, CodeFactory::RegExpExec(isolate()), 4);
+}
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* outer_frame = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_frame->opcode() == IrOpcode::kFrameState) {
- // Use the runtime implementation to throw the appropriate error if the
- // containing function is inlined.
- return NoChange();
- }
- // TODO(danno): This implementation forces intrinsic lowering to happen after
- // inlining, which is fine for now, but eventually the frame-querying logic
- // probably should go later, e.g. in instruction selection, so that there is
- // no phase-ordering dependency.
- FieldAccess access = AccessBuilder::ForFrameCallerFramePtr();
- Node* fp = graph()->NewNode(machine()->LoadFramePointer());
- Node* next_fp =
- graph()->NewNode(simplified()->LoadField(access), fp, effect, control);
- return Change(node, simplified()->LoadField(AccessBuilder::ForFrameMarker()),
- next_fp, effect, control);
+Reduction JSIntrinsicLowering::ReduceRegExpFlags(Node* node) {
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSRegExpFlags());
+ return Change(node, op, receiver, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceThrowNotDateError(Node* node) {
- if (mode() != kDeoptimizationEnabled) return NoChange();
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 1);
+Reduction JSIntrinsicLowering::ReduceRegExpSource(Node* node) {
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSRegExpSource());
+ return Change(node, op, receiver, effect, control);
+}
- // TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
- return Changed(node);
+Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
+ return Change(node, CodeFactory::SubString(isolate()), 3);
}
@@ -548,7 +539,7 @@ Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
} else {
if (value_type->Min() <= 0.0) {
value = graph()->NewNode(
- common()->Select(kMachAnyTagged),
+ common()->Select(MachineRepresentation::kTagged),
graph()->NewNode(simplified()->NumberLessThanOrEqual(), value,
jsgraph()->ZeroConstant()),
jsgraph()->ZeroConstant(), value);
@@ -557,7 +548,7 @@ Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
}
if (value_type->Max() > kMaxSafeInteger) {
value = graph()->NewNode(
- common()->Select(kMachAnyTagged),
+ common()->Select(MachineRepresentation::kTagged),
graph()->NewNode(simplified()->NumberLessThanOrEqual(),
jsgraph()->Constant(kMaxSafeInteger), value),
jsgraph()->Constant(kMaxSafeInteger), value);
@@ -569,14 +560,7 @@ Reduction JSIntrinsicLowering::ReduceToLength(Node* node) {
ReplaceWithValue(node, value);
return Replace(value);
}
- Callable callable = CodeFactory::ToLength(isolate());
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
+ return Change(node, CodeFactory::ToLength(isolate()), 0);
}
@@ -608,11 +592,33 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
NodeProperties::ChangeOp(
node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow));
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
+ size_t const arity = CallRuntimeParametersOf(node->op()).arity();
+ NodeProperties::ChangeOp(
+ node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
TailCallMode::kAllow));
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
+ Node* active_function = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* active_function_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ active_function, effect, control);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ active_function_map, effect, control);
+}
+
+
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
@@ -655,6 +661,18 @@ Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
}
+Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
+ int stack_parameter_count) {
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), stack_parameter_count,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+}
+
+
Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 8989ba19a1..1977a5847d 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -12,6 +12,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
+class Callable;
class TypeCache;
@@ -38,29 +39,28 @@ class JSIntrinsicLowering final : public AdvancedReducer {
private:
Reduction ReduceConstructDouble(Node* node);
- Reduction ReduceDateField(Node* node);
+ Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceDoubleHi(Node* node);
Reduction ReduceDoubleLo(Node* node);
- Reduction ReduceHeapObjectGetMap(Node* node);
Reduction ReduceIncrementStatsCounter(Node* node);
Reduction ReduceIsMinusZero(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
+ Reduction ReduceIsFunction(Node* node);
+ Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
Reduction ReduceJSValueGetValue(Node* node);
- Reduction ReduceMapGetInstanceType(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathSqrt(Node* node);
- Reduction ReduceSeqStringGetChar(Node* node, String::Encoding encoding);
- Reduction ReduceSeqStringSetChar(Node* node, String::Encoding encoding);
- Reduction ReduceStringGetLength(Node* node);
Reduction ReduceValueOf(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
- Reduction ReduceGetTypeFeedbackVector(Node* node);
- Reduction ReduceGetCallerJSFunction(Node* node);
- Reduction ReduceThrowNotDateError(Node* node);
+ Reduction ReduceRegExpConstructResult(Node* node);
+ Reduction ReduceRegExpExec(Node* node);
+ Reduction ReduceRegExpFlags(Node* node);
+ Reduction ReduceRegExpSource(Node* node);
+ Reduction ReduceSubString(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToName(Node* node);
@@ -69,6 +69,8 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceToPrimitive(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
+ Reduction ReduceTailCall(Node* node);
+ Reduction ReduceGetSuperConstructor(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
@@ -76,6 +78,8 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
Node* d);
Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
+ Reduction Change(Node* node, Callable const& callable,
+ int stack_parameter_count);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 9e687bdc07..06cf770f33 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -8,11 +8,13 @@
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/field-index-inl.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/type-cache.h"
#include "src/type-feedback-vector.h"
@@ -23,7 +25,7 @@ namespace compiler {
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<Context> native_context, CompilationDependencies* dependencies,
+ MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
@@ -31,14 +33,11 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
native_context_(native_context),
dependencies_(dependencies),
zone_(zone),
- type_cache_(TypeCache::Get()),
- access_info_factory_(dependencies, native_context, graph()->zone()) {}
+ type_cache_(TypeCache::Get()) {}
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kJSCallFunction:
- return ReduceJSCallFunction(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSStoreNamed:
@@ -54,56 +53,6 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
}
-Reduction JSNativeContextSpecialization::ReduceJSCallFunction(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
- CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- Node* control = NodeProperties::GetControlInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
- // Don't mess with JSCallFunction nodes that have a constant {target}.
- if (HeapObjectMatcher(target).HasValue()) return NoChange();
- if (!p.feedback().IsValid()) return NoChange();
- CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
- Handle<Object> feedback(nexus.GetFeedback(), isolate());
- if (feedback->IsWeakCell()) {
- Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
- if (cell->value()->IsJSFunction()) {
- // Avoid cross-context leaks, meaning don't embed references to functions
- // in other native contexts.
- Handle<JSFunction> function(JSFunction::cast(cell->value()), isolate());
- if (function->context()->native_context() != *native_context()) {
- return NoChange();
- }
-
- // Check that the {target} is still the {target_function}.
- Node* target_function = jsgraph()->HeapConstant(function);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
- target, target_function);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- effect, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- control = graph()->NewNode(common()->IfTrue(), branch);
-
- // Specialize the JSCallFunction node to the {target_function}.
- NodeProperties::ReplaceValueInput(node, target_function, 0);
- NodeProperties::ReplaceControlInput(node, control);
- return Changed(node);
- }
- // TODO(bmeurer): Also support optimizing bound functions and proxies here.
- }
- return NoChange();
-}
-
-
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
@@ -120,9 +69,15 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
// Compute property access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
ZoneVector<PropertyAccessInfo> access_infos(zone());
- if (!access_info_factory().ComputePropertyAccessInfos(
+ if (!access_info_factory.ComputePropertyAccessInfos(
receiver_maps, name, access_mode, &access_infos)) {
return NoChange();
}
@@ -234,7 +189,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(receiver_type, holder);
+ AssumePrototypesStable(receiver_type, native_context, holder);
}
// Generate the actual property access.
@@ -261,7 +216,33 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
} else {
DCHECK(access_info.IsDataField());
FieldIndex const field_index = access_info.field_index();
+ FieldCheck const field_check = access_info.field_check();
Type* const field_type = access_info.field_type();
+ switch (field_check) {
+ case FieldCheck::kNone:
+ break;
+ case FieldCheck::kJSArrayBufferViewBufferNotNeutered: {
+ Node* this_buffer = this_effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ this_receiver, this_effect, this_control);
+ Node* this_buffer_bit_field = this_effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferBitField()),
+ this_buffer, this_effect, this_control);
+ Node* check = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), this_buffer_bit_field,
+ jsgraph()->Int32Constant(
+ 1 << JSArrayBuffer::WasNeutered::kShift)),
+ jsgraph()->Int32Constant(0));
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ break;
+ }
+ }
if (access_mode == AccessMode::kLoad &&
access_info.holder().ToHandle(&holder)) {
this_receiver = jsgraph()->Constant(holder);
@@ -273,7 +254,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
this_storage, this_effect, this_control);
}
FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
- field_type, kMachAnyTagged};
+ field_type, MachineType::AnyTagged()};
if (access_mode == AccessMode::kLoad) {
if (field_type->Is(Type::UntaggedFloat64())) {
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
@@ -284,7 +265,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
}
- field_access.machine_type = kMachFloat64;
+ field_access.machine_type = MachineType::Float64();
}
this_value = this_effect =
graph()->NewNode(simplified()->LoadField(field_access),
@@ -328,11 +309,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
this_storage, this_effect, this_control);
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
- field_access.machine_type = kMachFloat64;
+ field_access.machine_type = MachineType::Float64();
}
} else {
// Unboxed double field, we store directly to the field.
- field_access.machine_type = kMachFloat64;
+ field_access.machine_type = MachineType::Float64();
}
} else if (field_type->Is(Type::TaggedSigned())) {
Node* check =
@@ -342,6 +323,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
exit_controls.push_back(
graph()->NewNode(common()->IfFalse(), branch));
this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value, this_control);
} else if (field_type->Is(Type::TaggedPointer())) {
Node* check =
graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
@@ -430,8 +413,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
? exit_controls.front()
: graph()->NewNode(common()->Merge(exit_control_count),
exit_control_count, &exit_controls.front());
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- exit_effect, exit_control);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, exit_effect, exit_control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
@@ -447,8 +431,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
control = graph()->NewNode(common()->Merge(control_count), control_count,
&controls.front());
values.push_back(control);
- value = graph()->NewNode(common()->Phi(kMachAnyTagged, control_count),
- control_count + 1, &values.front());
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
effects.push_back(control);
effect = graph()->NewNode(common()->EffectPhi(control_count),
control_count + 1, &effects.front());
@@ -496,10 +481,12 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
- AccessMode access_mode, LanguageMode language_mode) {
+ AccessMode access_mode, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -507,10 +494,19 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ // TODO(bmeurer): Add support for non-standard stores.
+ if (store_mode != STANDARD_STORE) return NoChange();
+
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
// Compute element access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
- if (!access_info_factory().ComputeElementAccessInfos(
- receiver_maps, access_mode, &access_infos)) {
+ if (!access_info_factory.ComputeElementAccessInfos(receiver_maps, access_mode,
+ &access_infos)) {
return NoChange();
}
@@ -547,30 +543,85 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* this_receiver = receiver;
Node* this_value = value;
Node* this_index = index;
- Node* this_effect = effect;
+ Node* this_effect;
Node* this_control;
// Perform map check on {receiver}.
Type* receiver_type = access_info.receiver_type();
+ bool receiver_is_jsarray = true;
{
ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
for (auto i = access_info.receiver_type()->Classes(); !i.Done();
i.Advance()) {
Handle<Map> map = i.Current();
Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
receiver_map, jsgraph()->Constant(map));
Node* branch =
graph()->NewNode(common()->Branch(), check, fallthrough_control);
this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(effect);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
+ }
+
+ // Generate possible elements kind transitions.
+ for (auto transition : access_info.transitions()) {
+ Handle<Map> transition_source = transition.first;
+ Handle<Map> transition_target = transition.second;
+
+ // Check if {receiver} has the specified {transition_source} map.
+ Node* check = graph()->NewNode(
+ simplified()->ReferenceEqual(Type::Any()), receiver_map,
+ jsgraph()->HeapConstant(transition_source));
+ Node* branch =
+ graph()->NewNode(common()->Branch(), check, fallthrough_control);
+
+ // Migrate {receiver} from {transition_source} to {transition_target}.
+ Node* transition_control = graph()->NewNode(common()->IfTrue(), branch);
+ Node* transition_effect = effect;
+ if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
+ transition_target->elements_kind())) {
+ // In-place migration, just store the {transition_target} map.
+ transition_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+ jsgraph()->HeapConstant(transition_target), transition_effect,
+ transition_control);
+ } else {
+ // Instance migration, let the stub deal with the {receiver}.
+ TransitionElementsKindStub stub(isolate(),
+ transition_source->elements_kind(),
+ transition_target->elements_kind(),
+ transition_source->IsJSArrayMap());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ transition_effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(stub.GetCode()),
+ receiver, jsgraph()->HeapConstant(transition_target), context,
+ frame_state, transition_effect, transition_control);
+ }
+ this_controls.push_back(transition_control);
+ this_effects.push_back(transition_effect);
+
fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
}
+
+ // Create single chokepoint for the control.
int const this_control_count = static_cast<int>(this_controls.size());
- this_control =
- (this_control_count == 1)
- ? this_controls.front()
- : graph()->NewNode(common()->Merge(this_control_count),
- this_control_count, &this_controls.front());
+ if (this_control_count == 1) {
+ this_control = this_controls.front();
+ this_effect = this_effects.front();
+ } else {
+ this_control =
+ graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ this_effect =
+ graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_control_count + 1, &this_effects.front());
+ }
}
// Certain stores need a prototype chain check because shape changes
@@ -578,7 +629,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// not compatible with (monomorphic) keyed stores.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(receiver_type, holder);
+ AssumePrototypesStable(receiver_type, native_context, holder);
}
// Check that the {index} is actually a Number.
@@ -632,17 +683,15 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Load the length of the {receiver}.
- FieldAccess length_access = {
- kTaggedBase, JSArray::kLengthOffset, factory()->name_string(),
- type_cache_.kJSArrayLengthType, kMachAnyTagged};
- if (IsFastDoubleElementsKind(elements_kind)) {
- length_access.type = type_cache_.kFixedDoubleArrayLengthType;
- } else if (IsFastElementsKind(elements_kind)) {
- length_access.type = type_cache_.kFixedArrayLengthType;
- }
Node* this_length = this_effect =
- graph()->NewNode(simplified()->LoadField(length_access), this_receiver,
- this_effect, this_control);
+ receiver_is_jsarray
+ ? graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ this_receiver, this_effect, this_control)
+ : graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ this_elements, this_effect, this_control);
// Check that the {index} is in the valid range for the {receiver}.
Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
@@ -654,10 +703,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Compute the element access.
Type* element_type = Type::Any();
- MachineType element_machine_type = kMachAnyTagged;
+ MachineType element_machine_type = MachineType::AnyTagged();
if (IsFastDoubleElementsKind(elements_kind)) {
element_type = type_cache_.kFloat64;
- element_machine_type = kMachFloat64;
+ element_machine_type = MachineType::Float64();
} else if (IsFastSmiElementsKind(elements_kind)) {
element_type = type_cache_.kSmi;
}
@@ -665,10 +714,88 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
element_type, element_machine_type};
// Access the actual element.
+ // TODO(bmeurer): Refactor this into separate methods or even a separate
+ // class that deals with the elements access.
if (access_mode == AccessMode::kLoad) {
+ // Compute the real element access type, which includes the hole in case
+ // of holey backing stores.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ element_access.type = Type::Union(
+ element_type,
+ Type::Constant(factory()->the_hole_value(), graph()->zone()),
+ graph()->zone());
+ }
+ // Perform the actual backing store access.
this_value = this_effect = graph()->NewNode(
simplified()->LoadElement(element_access), this_elements, this_index,
this_effect, this_control);
+ // Handle loading from holey backing stores correctly, by either mapping
+ // the hole to undefined if possible, or deoptimizing otherwise.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ // Perform the hole check on the result.
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
+ this_value, jsgraph()->TheHoleConstant());
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ // Check if we are allowed to turn the hole into undefined.
+ Type* initial_holey_array_type = Type::Class(
+ handle(isolate()->get_initial_js_array_map(elements_kind)),
+ graph()->zone());
+ if (receiver_type->NowIs(initial_holey_array_type) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ // Add a code dependency on the array protector cell.
+ AssumePrototypesStable(receiver_type, native_context,
+ isolate()->initial_object_prototype());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ // Turn the hole into undefined.
+ this_control =
+ graph()->NewNode(common()->Merge(2), if_true, if_false);
+ this_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->UndefinedConstant(), this_value, this_control);
+ element_type =
+ Type::Union(element_type, Type::Undefined(), graph()->zone());
+ } else {
+ // Deoptimize in case of the hole.
+ exit_controls.push_back(if_true);
+ this_control = if_false;
+ }
+ // Rename the result to represent the actual type (not polluted by the
+ // hole).
+ this_value = graph()->NewNode(common()->Guard(element_type), this_value,
+ this_control);
+ } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ Node* check =
+ graph()->NewNode(simplified()->NumberIsHoleNaN(), this_value);
+ // Check if we are allowed to return the hole directly.
+ Type* initial_holey_array_type = Type::Class(
+ handle(isolate()->get_initial_js_array_map(elements_kind)),
+ graph()->zone());
+ if (receiver_type->NowIs(initial_holey_array_type) &&
+ isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ // Add a code dependency on the array protector cell.
+ AssumePrototypesStable(receiver_type, native_context,
+ isolate()->initial_object_prototype());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ // Turn the hole into undefined.
+ this_value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged,
+ BranchHint::kFalse),
+ check, jsgraph()->UndefinedConstant(), this_value);
+ } else {
+ // Deoptimize in case of the hole.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check, this_control);
+ this_control = graph()->NewNode(common()->IfFalse(), branch);
+ exit_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ }
+ }
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsFastSmiElementsKind(elements_kind)) {
@@ -677,6 +804,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
check, this_control);
exit_controls.push_back(graph()->NewNode(common()->IfFalse(), branch));
this_control = graph()->NewNode(common()->IfTrue(), branch);
+ this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value, this_control);
} else if (IsFastDoubleElementsKind(elements_kind)) {
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
@@ -716,8 +845,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
? exit_controls.front()
: graph()->NewNode(common()->Merge(exit_control_count),
exit_control_count, &exit_controls.front());
- Node* deoptimize = graph()->NewNode(common()->Deoptimize(), frame_state,
- exit_effect, exit_control);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, exit_effect, exit_control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
@@ -733,8 +863,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
control = graph()->NewNode(common()->Merge(control_count), control_count,
&controls.front());
values.push_back(control);
- value = graph()->NewNode(common()->Phi(kMachAnyTagged, control_count),
- control_count + 1, &values.front());
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
effects.push_back(control);
effect = graph()->NewNode(common()->EffectPhi(control_count),
control_count + 1, &effects.front());
@@ -746,7 +877,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
- AccessMode access_mode, LanguageMode language_mode) {
+ AccessMode access_mode, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
@@ -785,7 +917,7 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
// Try to lower the element access based on the {receiver_maps}.
return ReduceElementAccess(node, index, value, receiver_maps, access_mode,
- language_mode);
+ language_mode, store_mode);
}
@@ -801,7 +933,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
// Try to lower the keyed access based on the {nexus}.
return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kLoad,
- p.language_mode());
+ p.language_mode(), STANDARD_STORE);
}
@@ -815,34 +947,29 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
if (!p.feedback().IsValid()) return NoChange();
KeyedStoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ // Extract the keyed access store mode from the KEYED_STORE_IC.
+ KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
+
// Try to lower the keyed access based on the {nexus}.
return ReduceKeyedAccess(node, index, value, nexus, AccessMode::kStore,
- p.language_mode());
+ p.language_mode(), store_mode);
}
void JSNativeContextSpecialization::AssumePrototypesStable(
- Type* receiver_type, Handle<JSObject> holder) {
+ Type* receiver_type, Handle<Context> native_context,
+ Handle<JSObject> holder) {
// Determine actual holder and perform prototype chain checks.
for (auto i = receiver_type->Classes(); !i.Done(); i.Advance()) {
Handle<Map> map = i.Current();
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context())
+ if (Map::GetConstructorFunction(map, native_context)
.ToHandle(&constructor)) {
map = handle(constructor->initial_map(), isolate());
}
- for (PrototypeIterator j(map); !j.IsAtEnd(); j.Advance()) {
- // Check that the {prototype} still has the same map. All prototype
- // maps are guaranteed to be stable, so it's sufficient to add a
- // stability dependency here.
- Handle<JSReceiver> const prototype =
- PrototypeIterator::GetCurrent<JSReceiver>(j);
- dependencies()->AssumeMapStable(handle(prototype->map(), isolate()));
- // Stop once we get to the holder.
- if (prototype.is_identical_to(holder)) break;
- }
+ dependencies()->AssumePrototypeMapsStable(map, holder);
}
}
@@ -859,6 +986,14 @@ void JSNativeContextSpecialization::MarkAsDeferred(Node* if_projection) {
}
+MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
+ Node* node) {
+ Node* const context = NodeProperties::GetContextInput(node);
+ return NodeProperties::GetSpecializationNativeContext(context,
+ native_context());
+}
+
+
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 89adcce601..45ff87f619 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_JS_NATIVE_CONTEXT_SPECIALIZATION_H_
#include "src/base/flags.h"
-#include "src/compiler/access-info.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
@@ -22,6 +21,7 @@ class TypeCache;
namespace compiler {
// Forward declarations.
+enum class AccessMode;
class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
@@ -43,14 +43,13 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
- Handle<Context> native_context,
+ MaybeHandle<Context> native_context,
CompilationDependencies* dependencies,
Zone* zone);
Reduction Reduce(Node* node) final;
private:
- Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
@@ -59,11 +58,13 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandleList const& receiver_maps,
AccessMode access_mode,
- LanguageMode language_mode);
+ LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
FeedbackNexus const& nexus,
AccessMode access_mode,
- LanguageMode language_mode);
+ LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
@@ -72,12 +73,17 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
- void AssumePrototypesStable(Type* receiver_type, Handle<JSObject> holder);
+ void AssumePrototypesStable(Type* receiver_type,
+ Handle<Context> native_context,
+ Handle<JSObject> holder);
// Assuming that {if_projection} is either IfTrue or IfFalse, adds a hint on
// the dominating Branch that {if_projection} is the unlikely (deferred) case.
void MarkAsDeferred(Node* if_projection);
+ // Retrieve the native context from the given {node} if known.
+ MaybeHandle<Context> GetNativeContext(Node* node);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
@@ -87,18 +93,16 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
Flags flags() const { return flags_; }
- Handle<Context> native_context() const { return native_context_; }
+ MaybeHandle<Context> native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
- AccessInfoFactory& access_info_factory() { return access_info_factory_; }
JSGraph* const jsgraph_;
Flags const flags_;
- Handle<Context> native_context_;
+ MaybeHandle<Context> native_context_;
CompilationDependencies* const dependencies_;
Zone* const zone_;
TypeCache const& type_cache_;
- AccessInfoFactory access_info_factory_;
DISALLOW_COPY_AND_ASSIGN(JSNativeContextSpecialization);
};
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index c4500a50bb..1455f0a9a9 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -46,6 +46,12 @@ ConvertReceiverMode ConvertReceiverModeOf(Operator const* op) {
}
+ToBooleanHints ToBooleanHintsOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSToBoolean, op->opcode());
+ return OpParameter<ToBooleanHints>(op);
+}
+
+
size_t hash_value(TailCallMode mode) {
return base::hash_value(static_cast<unsigned>(mode));
}
@@ -63,6 +69,74 @@ std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
}
+bool operator==(BinaryOperationParameters const& lhs,
+ BinaryOperationParameters const& rhs) {
+ return lhs.language_mode() == rhs.language_mode() &&
+ lhs.hints() == rhs.hints();
+}
+
+
+bool operator!=(BinaryOperationParameters const& lhs,
+ BinaryOperationParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(BinaryOperationParameters const& p) {
+ return base::hash_combine(p.language_mode(), p.hints());
+}
+
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationParameters const& p) {
+ return os << p.language_mode() << ", " << p.hints();
+}
+
+
+BinaryOperationParameters const& BinaryOperationParametersOf(
+ Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
+ op->opcode() == IrOpcode::kJSBitwiseXor ||
+ op->opcode() == IrOpcode::kJSBitwiseAnd ||
+ op->opcode() == IrOpcode::kJSShiftLeft ||
+ op->opcode() == IrOpcode::kJSShiftRight ||
+ op->opcode() == IrOpcode::kJSShiftRightLogical ||
+ op->opcode() == IrOpcode::kJSAdd ||
+ op->opcode() == IrOpcode::kJSSubtract ||
+ op->opcode() == IrOpcode::kJSMultiply ||
+ op->opcode() == IrOpcode::kJSDivide ||
+ op->opcode() == IrOpcode::kJSModulus);
+ return OpParameter<BinaryOperationParameters>(op);
+}
+
+
+bool operator==(CallConstructParameters const& lhs,
+ CallConstructParameters const& rhs) {
+ return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
+}
+
+
+bool operator!=(CallConstructParameters const& lhs,
+ CallConstructParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CallConstructParameters const& p) {
+ return base::hash_combine(p.arity(), p.feedback());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
+ return os << p.arity();
+}
+
+
+CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, op->opcode());
+ return OpParameter<CallConstructParameters>(op);
+}
+
+
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
<< ", " << p.tail_call_mode();
@@ -322,6 +396,37 @@ const CreateArgumentsParameters& CreateArgumentsParametersOf(
}
+bool operator==(CreateArrayParameters const& lhs,
+ CreateArrayParameters const& rhs) {
+ return lhs.arity() == rhs.arity() &&
+ lhs.site().location() == rhs.site().location();
+}
+
+
+bool operator!=(CreateArrayParameters const& lhs,
+ CreateArrayParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CreateArrayParameters const& p) {
+ return base::hash_combine(p.arity(), p.site().location());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
+ os << p.arity();
+ if (!p.site().is_null()) os << ", " << Brief(*p.site());
+ return os;
+}
+
+
+const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, op->opcode());
+ return OpParameter<CreateArrayParameters>(op);
+}
+
+
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
@@ -351,49 +456,68 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
}
-#define CACHED_OP_LIST(V) \
- V(Equal, Operator::kNoProperties, 2, 1) \
- V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kNoThrow, 2, 1) \
- V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
- V(UnaryNot, Operator::kEliminatable, 1, 1) \
- V(ToBoolean, Operator::kEliminatable, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kNoProperties, 1, 1) \
- V(Yield, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 0, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kEliminatable, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(ForInDone, Operator::kPure, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(ForInStep, Operator::kPure, 1, 1) \
- V(LoadMessage, Operator::kNoThrow, 0, 1) \
- V(StoreMessage, Operator::kNoThrow, 1, 0) \
- V(StackCheck, Operator::kNoProperties, 0, 0) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1) \
+bool operator==(CreateLiteralParameters const& lhs,
+ CreateLiteralParameters const& rhs) {
+ return lhs.constant().location() == rhs.constant().location() &&
+ lhs.flags() == rhs.flags() && lhs.index() == rhs.index();
+}
+
+
+bool operator!=(CreateLiteralParameters const& lhs,
+ CreateLiteralParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CreateLiteralParameters const& p) {
+ return base::hash_combine(p.constant().location(), p.flags(), p.index());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CreateLiteralParameters const& p) {
+ return os << Brief(*p.constant()) << ", " << p.flags() << ", " << p.index();
+}
+
+
+const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCreateLiteralArray ||
+ op->opcode() == IrOpcode::kJSCreateLiteralObject ||
+ op->opcode() == IrOpcode::kJSCreateLiteralRegExp);
+ return OpParameter<CreateLiteralParameters>(op);
+}
+
+
+#define CACHED_OP_LIST(V) \
+ V(Equal, Operator::kNoProperties, 2, 1) \
+ V(NotEqual, Operator::kNoProperties, 2, 1) \
+ V(StrictEqual, Operator::kNoThrow, 2, 1) \
+ V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kNoProperties, 1, 1) \
+ V(Yield, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kEliminatable, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow, 0, 1) \
+ V(StoreMessage, Operator::kNoThrow, 1, 0) \
+ V(StackCheck, Operator::kNoProperties, 0, 0) \
+ V(CreateWithContext, Operator::kNoProperties, 2, 1) \
V(CreateModuleContext, Operator::kNoProperties, 2, 1)
-#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
- V(LessThan, Operator::kNoProperties, 2, 1) \
- V(GreaterThan, Operator::kNoProperties, 2, 1) \
- V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(BitwiseOr, Operator::kNoProperties, 2, 1) \
- V(BitwiseXor, Operator::kNoProperties, 2, 1) \
- V(BitwiseAnd, Operator::kNoProperties, 2, 1) \
- V(ShiftLeft, Operator::kNoProperties, 2, 1) \
- V(ShiftRight, Operator::kNoProperties, 2, 1) \
- V(ShiftRightLogical, Operator::kNoProperties, 2, 1) \
- V(Add, Operator::kNoProperties, 2, 1) \
- V(Subtract, Operator::kNoProperties, 2, 1) \
- V(Multiply, Operator::kNoProperties, 2, 1) \
- V(Divide, Operator::kNoProperties, 2, 1) \
- V(Modulus, Operator::kNoProperties, 2, 1)
+#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
+ V(LessThan, Operator::kNoProperties, 2, 1) \
+ V(GreaterThan, Operator::kNoProperties, 2, 1) \
+ V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)
struct JSOperatorGlobalCache final {
@@ -467,6 +591,148 @@ CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
#undef CACHED_WITH_LANGUAGE_MODE
+const Operator* JSOperatorBuilder::BitwiseOr(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseOr, Operator::kNoProperties, // opcode
+ "JSBitwiseOr", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::BitwiseXor(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseXor, Operator::kNoProperties, // opcode
+ "JSBitwiseXor", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::BitwiseAnd(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSBitwiseAnd, Operator::kNoProperties, // opcode
+ "JSBitwiseAnd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftLeft(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftLeft, Operator::kNoProperties, // opcode
+ "JSShiftLeft", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftRight(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftRight, Operator::kNoProperties, // opcode
+ "JSShiftRight", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ShiftRightLogical(
+ LanguageMode language_mode, BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSShiftRightLogical, Operator::kNoProperties, // opcode
+ "JSShiftRightLogical", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Add(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSAdd, Operator::kNoProperties, // opcode
+ "JSAdd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Subtract(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSSubtract, Operator::kNoProperties, // opcode
+ "JSSubtract", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Multiply(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSMultiply, Operator::kNoProperties, // opcode
+ "JSMultiply", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Divide(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSDivide, Operator::kNoProperties, // opcode
+ "JSDivide", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::Modulus(LanguageMode language_mode,
+ BinaryOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ BinaryOperationParameters parameters(language_mode, hints);
+ return new (zone()) Operator1<BinaryOperationParameters>( //--
+ IrOpcode::kJSModulus, Operator::kNoProperties, // opcode
+ "JSModulus", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<ToBooleanHints>( //--
+ IrOpcode::kJSToBoolean, Operator::kEliminatable, // opcode
+ "JSToBoolean", // name
+ 1, 1, 0, 1, 1, 0, // inputs/outputs
+ hints); // parameter
+}
+
+
const Operator* JSOperatorBuilder::CallFunction(
size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
@@ -493,12 +759,14 @@ const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
}
-const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CallConstruct(
+ size_t arity, VectorSlotPair const& feedback) {
+ CallConstructParameters parameters(arity, feedback);
+ return new (zone()) Operator1<CallConstructParameters>( // --
IrOpcode::kJSCallConstruct, Operator::kNoProperties, // opcode
"JSCallConstruct", // name
- arguments, 1, 1, 1, 1, 2, // counts
- arguments); // parameter
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
@@ -637,6 +905,19 @@ const Operator* JSOperatorBuilder::CreateArguments(
}
+const Operator* JSOperatorBuilder::CreateArray(size_t arity,
+ Handle<AllocationSite> site) {
+ // constructor, new_target, arg1, ..., argN
+ int const value_input_count = static_cast<int>(arity) + 2;
+ CreateArrayParameters parameters(arity, site);
+ return new (zone()) Operator1<CreateArrayParameters>( // --
+ IrOpcode::kJSCreateArray, Operator::kNoProperties, // opcode
+ "JSCreateArray", // name
+ value_input_count, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
CreateClosureParameters parameters(shared_info, pretenure);
@@ -648,21 +929,41 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
-const Operator* JSOperatorBuilder::CreateLiteralArray(int literal_flags) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CreateLiteralArray(
+ Handle<FixedArray> constant_elements, int literal_flags,
+ int literal_index) {
+ CreateLiteralParameters parameters(constant_elements, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralArray, Operator::kNoProperties, // opcode
"JSCreateLiteralArray", // name
- 3, 1, 1, 1, 1, 2, // counts
- literal_flags); // parameter
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
-const Operator* JSOperatorBuilder::CreateLiteralObject(int literal_flags) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CreateLiteralObject(
+ Handle<FixedArray> constant_properties, int literal_flags,
+ int literal_index) {
+ CreateLiteralParameters parameters(constant_properties, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
IrOpcode::kJSCreateLiteralObject, Operator::kNoProperties, // opcode
"JSCreateLiteralObject", // name
- 3, 1, 1, 1, 1, 2, // counts
- literal_flags); // parameter
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateLiteralRegExp(
+ Handle<String> constant_pattern, int literal_flags, int literal_index) {
+ CreateLiteralParameters parameters(constant_pattern, literal_flags,
+ literal_index);
+ return new (zone()) Operator1<CreateLiteralParameters>( // --
+ IrOpcode::kJSCreateLiteralRegExp, Operator::kNoProperties, // opcode
+ "JSCreateLiteralRegExp", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
}
@@ -677,8 +978,7 @@ const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
const Operator* JSOperatorBuilder::CreateCatchContext(
const Handle<String>& name) {
- return new (zone()) Operator1<Handle<String>, Handle<String>::equal_to,
- Handle<String>::hash>( // --
+ return new (zone()) Operator1<Handle<String>>( // --
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
2, 1, 1, 1, 1, 2, // counts
@@ -688,8 +988,7 @@ const Operator* JSOperatorBuilder::CreateCatchContext(
const Operator* JSOperatorBuilder::CreateBlockContext(
const Handle<ScopeInfo>& scpope_info) {
- return new (zone()) Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
- Handle<ScopeInfo>::hash>( // --
+ return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
"JSCreateBlockContext", // name
1, 1, 1, 1, 1, 2, // counts
@@ -699,8 +998,7 @@ const Operator* JSOperatorBuilder::CreateBlockContext(
const Operator* JSOperatorBuilder::CreateScriptContext(
const Handle<ScopeInfo>& scpope_info) {
- return new (zone()) Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
- Handle<ScopeInfo>::hash>( // --
+ return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
"JSCreateScriptContext", // name
1, 1, 1, 1, 1, 2, // counts
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 6bd6516af3..ca7c7ea657 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_JS_OPERATOR_H_
#define V8_COMPILER_JS_OPERATOR_H_
+#include "src/compiler/type-hints.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -43,7 +44,11 @@ size_t hash_value(VectorSlotPair const&);
// The ConvertReceiverMode is used as parameter by JSConvertReceiver operators.
-ConvertReceiverMode ConvertReceiverModeOf(const Operator* op);
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+
+
+// The ToBooleanHints are used as parameter by JSToBoolean operators.
+ToBooleanHints ToBooleanHintsOf(Operator const* op);
// Defines whether tail call optimization is allowed.
@@ -54,6 +59,59 @@ size_t hash_value(TailCallMode);
std::ostream& operator<<(std::ostream&, TailCallMode);
+// Defines the language mode and hints for a JavaScript binary operations.
+// This is used as parameter by JSAdd, JSSubtract, etc. operators.
+class BinaryOperationParameters final {
+ public:
+ BinaryOperationParameters(LanguageMode language_mode,
+ BinaryOperationHints hints)
+ : language_mode_(language_mode), hints_(hints) {}
+
+ LanguageMode language_mode() const { return language_mode_; }
+ BinaryOperationHints hints() const { return hints_; }
+
+ private:
+ LanguageMode const language_mode_;
+ BinaryOperationHints const hints_;
+};
+
+bool operator==(BinaryOperationParameters const&,
+ BinaryOperationParameters const&);
+bool operator!=(BinaryOperationParameters const&,
+ BinaryOperationParameters const&);
+
+size_t hash_value(BinaryOperationParameters const&);
+
+std::ostream& operator<<(std::ostream&, BinaryOperationParameters const&);
+
+BinaryOperationParameters const& BinaryOperationParametersOf(Operator const*);
+
+
+// Defines the arity and the feedback for a JavaScript constructor call. This is
+// used as a parameter by JSCallConstruct operators.
+class CallConstructParameters final {
+ public:
+ CallConstructParameters(size_t arity, VectorSlotPair const& feedback)
+ : arity_(arity), feedback_(feedback) {}
+
+ size_t arity() const { return arity_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ size_t const arity_;
+ VectorSlotPair const feedback_;
+};
+
+bool operator==(CallConstructParameters const&, CallConstructParameters const&);
+bool operator!=(CallConstructParameters const&, CallConstructParameters const&);
+
+size_t hash_value(CallConstructParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
+
+CallConstructParameters const& CallConstructParametersOf(Operator const*);
+
+
// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCallFunction operators.
class CallFunctionParameters final {
@@ -327,6 +385,31 @@ const CreateArgumentsParameters& CreateArgumentsParametersOf(
const Operator* op);
+// Defines shared information for the array that should be created. This is
+// used as parameter by JSCreateArray operators.
+class CreateArrayParameters final {
+ public:
+ explicit CreateArrayParameters(size_t arity, Handle<AllocationSite> site)
+ : arity_(arity), site_(site) {}
+
+ size_t arity() const { return arity_; }
+ Handle<AllocationSite> site() const { return site_; }
+
+ private:
+ size_t const arity_;
+ Handle<AllocationSite> const site_;
+};
+
+bool operator==(CreateArrayParameters const&, CreateArrayParameters const&);
+bool operator!=(CreateArrayParameters const&, CreateArrayParameters const&);
+
+size_t hash_value(CreateArrayParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
+
+const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
+
+
// Defines shared information for the closure that should be created. This is
// used as a parameter by JSCreateClosure operators.
class CreateClosureParameters final {
@@ -353,6 +436,34 @@ std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
+// Defines shared information for the literal that should be created. This is
+// used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and
+// JSCreateLiteralRegExp operators.
+class CreateLiteralParameters final {
+ public:
+ CreateLiteralParameters(Handle<HeapObject> constant, int flags, int index)
+ : constant_(constant), flags_(flags), index_(index) {}
+
+ Handle<HeapObject> constant() const { return constant_; }
+ int flags() const { return flags_; }
+ int index() const { return index_; }
+
+ private:
+ Handle<HeapObject> const constant_;
+ int const flags_;
+ int const index_;
+};
+
+bool operator==(CreateLiteralParameters const&, CreateLiteralParameters const&);
+bool operator!=(CreateLiteralParameters const&, CreateLiteralParameters const&);
+
+size_t hash_value(CreateLiteralParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
+
+const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+
+
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
@@ -368,20 +479,29 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* GreaterThan(LanguageMode language_mode);
const Operator* LessThanOrEqual(LanguageMode language_mode);
const Operator* GreaterThanOrEqual(LanguageMode language_mode);
- const Operator* BitwiseOr(LanguageMode language_mode);
- const Operator* BitwiseXor(LanguageMode language_mode);
- const Operator* BitwiseAnd(LanguageMode language_mode);
- const Operator* ShiftLeft(LanguageMode language_mode);
- const Operator* ShiftRight(LanguageMode language_mode);
- const Operator* ShiftRightLogical(LanguageMode language_mode);
- const Operator* Add(LanguageMode language_mode);
- const Operator* Subtract(LanguageMode language_mode);
- const Operator* Multiply(LanguageMode language_mode);
- const Operator* Divide(LanguageMode language_mode);
- const Operator* Modulus(LanguageMode language_mode);
-
- const Operator* UnaryNot();
- const Operator* ToBoolean();
+ const Operator* BitwiseOr(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* BitwiseXor(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* BitwiseAnd(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftLeft(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftRight(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* ShiftRightLogical(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Add(LanguageMode language_mode, BinaryOperationHints hints);
+ const Operator* Subtract(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Multiply(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Divide(LanguageMode language_mode,
+ BinaryOperationHints hints);
+ const Operator* Modulus(LanguageMode language_mode,
+ BinaryOperationHints hints);
+
+ const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToNumber();
const Operator* ToString();
const Operator* ToName();
@@ -391,10 +511,16 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsParameters::Type type,
int start_index);
+ const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
PretenureFlag pretenure);
- const Operator* CreateLiteralArray(int literal_flags);
- const Operator* CreateLiteralObject(int literal_flags);
+ const Operator* CreateIterResultObject();
+ const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+ int literal_flags, int literal_index);
+ const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
+ int literal_flags, int literal_index);
+ const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
+ int literal_flags, int literal_index);
const Operator* CallFunction(
size_t arity, LanguageMode language_mode,
@@ -402,7 +528,7 @@ class JSOperatorBuilder final : public ZoneObject {
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
- const Operator* CallConstruct(int arguments);
+ const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index f221577104..5e0712a7f1 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -12,31 +12,14 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
+#include "src/type-cache.h"
#include "src/types.h"
namespace v8 {
namespace internal {
namespace compiler {
-// TODO(turbofan): js-typed-lowering improvements possible
-// - immediately put in type bounds for all new nodes
-// - relax effects from generic but not-side-effecting operations
-
-
-JSTypedLowering::JSTypedLowering(Editor* editor,
- CompilationDependencies* dependencies,
- Flags flags, JSGraph* jsgraph, Zone* zone)
- : AdvancedReducer(editor),
- dependencies_(dependencies),
- flags_(flags),
- jsgraph_(jsgraph) {
- for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
- double min = kMinInt / (1 << k);
- double max = kMaxInt / (1 << k);
- shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
- }
-}
-
+namespace {
// A helper class to construct inline allocations on the simplified operator
// level. This keeps track of the effect chain for initial stores on a newly
@@ -50,10 +33,11 @@ class AllocationBuilder final {
control_(control) {}
// Primitive allocation of static size.
- void Allocate(int size) {
+ void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
- allocation_ = graph()->NewNode(
- simplified()->Allocate(), jsgraph()->Constant(size), effect_, control_);
+ allocation_ =
+ graph()->NewNode(simplified()->Allocate(pretenure),
+ jsgraph()->Constant(size), effect_, control_);
effect_ = allocation_;
}
@@ -63,9 +47,21 @@ class AllocationBuilder final {
value, effect_, control_);
}
+ // Primitive store into an element.
+ void Store(ElementAccess const& access, Node* index, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+ index, value, effect_, control_);
+ }
+
// Compound allocation of a FixedArray.
- void AllocateArray(int length, Handle<Map> map) {
- Allocate(FixedArray::SizeFor(length));
+ void AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+ map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ Allocate(size, pretenure);
Store(AccessBuilder::ForMap(), map);
Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
@@ -100,6 +96,8 @@ class AllocationBuilder final {
Node* control_;
};
+} // namespace
+
// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
@@ -220,7 +218,16 @@ class JSBinopReduction final {
return ChangeToPureOperator(op, false, type);
}
- bool IsStrong() { return is_strong(OpParameter<LanguageMode>(node_)); }
+ // TODO(turbofan): Strong mode should be killed soonish!
+ bool IsStrong() const {
+ if (node_->opcode() == IrOpcode::kJSLessThan ||
+ node_->opcode() == IrOpcode::kJSLessThanOrEqual ||
+ node_->opcode() == IrOpcode::kJSGreaterThan ||
+ node_->opcode() == IrOpcode::kJSGreaterThanOrEqual) {
+ return is_strong(OpParameter<LanguageMode>(node_));
+ }
+ return is_strong(BinaryOperationParametersOf(node_->op()).language_mode());
+ }
bool LeftInputIs(Type* t) { return left_type()->Is(t); }
@@ -377,8 +384,8 @@ class JSBinopReduction final {
// Wire conversions to existing {IfException} continuation.
Node* exception_merge = if_exception;
Node* exception_value =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), left_exception,
- right_exception, exception_merge);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ left_exception, right_exception, exception_merge);
Node* exception_effect =
graph()->NewNode(common()->EffectPhi(2), left_exception,
right_exception, exception_merge);
@@ -417,7 +424,34 @@ class JSBinopReduction final {
};
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+
+
+JSTypedLowering::JSTypedLowering(Editor* editor,
+ CompilationDependencies* dependencies,
+ Flags flags, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ dependencies_(dependencies),
+ flags_(flags),
+ jsgraph_(jsgraph),
+ true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
+ false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+ the_hole_type_(
+ Type::Constant(factory()->the_hole_value(), graph()->zone())),
+ type_cache_(TypeCache::Get()) {
+ for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
+ double min = kMinInt / (1 << k);
+ double max = kMaxInt / (1 << k);
+ shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+ }
+}
+
+
Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
@@ -448,6 +482,8 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSModulus(x:number, x:number) => NumberModulus(x, y)
@@ -460,6 +496,8 @@ Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
const Operator* numberOp) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
if (r.BothInputsAre(Type::Number())) {
@@ -474,6 +512,8 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.IsStrong()) {
if (r.BothInputsAre(Type::Number())) {
@@ -492,6 +532,8 @@ Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
Signedness left_signedness,
const Operator* shift_op) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.IsStrong()) {
if (r.BothInputsAre(Type::Number())) {
@@ -508,6 +550,8 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
// If both inputs are definitely strings, perform a string comparison.
@@ -579,6 +623,8 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
@@ -620,6 +666,8 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+
JSBinopReduction r(this, node);
if (r.left() == r.right()) {
// x === x is always true if x != NaN
@@ -638,6 +686,10 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
return Replace(replacement);
}
}
+ if (r.OneInputIs(the_hole_type_)) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
+ invert);
+ }
if (r.OneInputIs(Type::Undefined())) {
return r.ChangeToPureOperator(
simplified()->ReferenceEqual(Type::Undefined()), invert);
@@ -674,40 +726,6 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
}
-Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
- Node* const input = node->InputAt(0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(Type::Boolean())) {
- // JSUnaryNot(x:boolean) => BooleanNot(x)
- RelaxEffectsAndControls(node);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->BooleanNot());
- return Changed(node);
- } else if (input_type->Is(Type::OrderedNumber())) {
- // JSUnaryNot(x:number) => NumberEqual(x,#0)
- RelaxEffectsAndControls(node);
- node->ReplaceInput(1, jsgraph()->ZeroConstant());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, simplified()->NumberEqual());
- return Changed(node);
- } else if (input_type->Is(Type::String())) {
- // JSUnaryNot(x:string) => NumberEqual(x.length,#0)
- FieldAccess const access = AccessBuilder::ForStringLength();
- // It is safe for the load to be effect-free (i.e. not linked into effect
- // chain) because we assume String::length to be immutable.
- Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
- ReplaceWithValue(node, node, length);
- node->ReplaceInput(0, length);
- node->ReplaceInput(1, jsgraph()->ZeroConstant());
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, simplified()->NumberEqual());
- return Changed(node);
- }
- return NoChange();
-}
-
-
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
Node* const input = node->InputAt(0);
Type* const input_type = NodeProperties::GetType(input);
@@ -747,6 +765,21 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
if (result.Changed()) return result;
return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
}
+ // Check for ToNumber truncation of signaling NaN to undefined mapping.
+ if (input->opcode() == IrOpcode::kSelect) {
+ Node* check = NodeProperties::GetValueInput(input, 0);
+ Node* vtrue = NodeProperties::GetValueInput(input, 1);
+ Type* vtrue_type = NodeProperties::GetType(vtrue);
+ Node* vfalse = NodeProperties::GetValueInput(input, 2);
+ Type* vfalse_type = NodeProperties::GetType(vfalse);
+ if (vtrue_type->Is(Type::Undefined()) && vfalse_type->Is(Type::Number())) {
+ if (check->opcode() == IrOpcode::kNumberIsHoleNaN &&
+ check->InputAt(0) == vfalse) {
+ // JSToNumber(Select(NumberIsHoleNaN(x), y:undefined, x:number)) => x
+ return Replace(vfalse);
+ }
+ }
+ }
// Check if we have a cached conversion.
Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::Number())) {
@@ -812,10 +845,10 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
return Changed(input); // JSToString(x:string) => x
}
if (input_type->Is(Type::Boolean())) {
- return Replace(
- graph()->NewNode(common()->Select(kMachAnyTagged), input,
- jsgraph()->HeapConstant(factory()->true_string()),
- jsgraph()->HeapConstant(factory()->false_string())));
+ return Replace(graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged), input,
+ jsgraph()->HeapConstant(factory()->true_string()),
+ jsgraph()->HeapConstant(factory()->false_string())));
}
if (input_type->Is(Type::Undefined())) {
return Replace(jsgraph()->HeapConstant(factory()->undefined_string()));
@@ -911,8 +944,9 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
control = graph()->NewNode(common()->Merge(2), if_convert, if_done);
effect = graph()->NewNode(common()->EffectPhi(2), econvert, edone, control);
- receiver = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), rconvert,
- rdone, control);
+ receiver =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ rconvert, rdone, control);
}
ReplaceWithValue(node, receiver, effect, control);
return Changed(receiver);
@@ -935,6 +969,27 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
}
+ // Optimize "prototype" property of functions.
+ if (name.is_identical_to(factory()->prototype_string()) &&
+ receiver_type->IsConstant() &&
+ receiver_type->AsConstant()->Value()->IsJSFunction()) {
+ // TODO(turbofan): This lowering might not kick in if we ever lower
+ // the C++ accessor for "prototype" in an earlier optimization pass.
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(receiver_type->AsConstant()->Value());
+ if (function->has_initial_map()) {
+ // We need to add a code dependency on the initial map of the {function}
+ // in order to be notified about changes to the "prototype" of {function},
+ // so it doesn't make sense to continue unless deoptimization is enabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* value =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
return NoChange();
}
@@ -950,7 +1005,8 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
if (!array->GetBuffer()->was_neutered()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
- size_t const k = ElementSizeLog2Of(access.machine_type());
+ size_t const k =
+ ElementSizeLog2Of(access.machine_type().representation());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
if (key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
@@ -996,7 +1052,8 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (!array->GetBuffer()->was_neutered()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
- size_t const k = ElementSizeLog2Of(access.machine_type());
+ size_t const k =
+ ElementSizeLog2Of(access.machine_type().representation());
double const byte_length = array->byte_length()->Number();
CHECK_LT(k, arraysize(shifted_int32_ranges_));
if (access.external_array_type() != kExternalUint8ClampedArray &&
@@ -1022,14 +1079,6 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
frame_state_for_to_number, effect, control);
}
}
- // For integer-typed arrays, convert to the integer type.
- if (TypeOf(access.machine_type()) == kTypeInt32 &&
- !value_type->Is(Type::Signed32())) {
- value = graph()->NewNode(simplified()->NumberToInt32(), value);
- } else if (TypeOf(access.machine_type()) == kTypeUint32 &&
- !value_type->Is(Type::Unsigned32())) {
- value = graph()->NewNode(simplified()->NumberToUint32(), value);
- }
// Check if we can avoid the bounds check.
if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
RelaxControls(node);
@@ -1067,126 +1116,185 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
// If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ if (!(flags() & kDeoptimizationEnabled) ||
+ (flags() & kDisableBinaryOpReduction)) {
+ return NoChange();
+ }
+
+ // If we are in a try block, don't optimize since the runtime call
+ // in the proxy case can throw.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
JSBinopReduction r(this, node);
Node* effect = r.effect();
Node* control = r.control();
- if (r.right_type()->IsConstant() &&
- r.right_type()->AsConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
- Handle<SharedFunctionInfo> shared(function->shared(), isolate());
- if (!function->map()->has_non_instance_prototype()) {
- JSFunction::EnsureHasInitialMap(function);
- DCHECK(function->has_initial_map());
- Handle<Map> initial_map(function->initial_map(), isolate());
- this->dependencies()->AssumeInitialMapCantChange(initial_map);
- Node* prototype =
- jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+ if (!r.right_type()->IsConstant() ||
+ !r.right_type()->AsConstant()->Value()->IsJSFunction()) {
+ return NoChange();
+ }
- Node* if_is_smi = nullptr;
- Node* e_is_smi = nullptr;
- // If the left hand side is an object, no smi check is needed.
- if (r.left_type()->Maybe(Type::TaggedSigned())) {
- Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
- Node* branch_is_smi = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), is_smi, control);
- if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
- e_is_smi = effect;
- control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
- }
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- r.left(), effect, control);
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- Node* loop = control =
- graph()->NewNode(common()->Loop(2), control, control);
-
- Node* loop_effect = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
-
- Node* loop_object_map = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
- object_map, r.left(), loop);
-
-
- Node* object_prototype = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapPrototype()),
- loop_object_map, loop_effect, control);
-
- // Check if object prototype is equal to function prototype.
- Node* eq_proto =
- graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
- object_prototype, prototype);
- Node* branch_eq_proto = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), eq_proto, control);
- Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
- Node* e_eq_proto = effect;
-
- control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
-
- // If not, check if object prototype is the null prototype.
- Node* null_proto =
- graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
- object_prototype, jsgraph()->NullConstant());
- Node* branch_null_proto = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), null_proto, control);
- Node* if_null_proto =
- graph()->NewNode(common()->IfTrue(), branch_null_proto);
- Node* e_null_proto = effect;
-
- control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
- Node* load_object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- object_prototype, effect, control);
- // Close the loop.
- loop_effect->ReplaceInput(1, effect);
- loop_object_map->ReplaceInput(1, load_object_map);
- loop->ReplaceInput(1, control);
-
- control =
- graph()->NewNode(common()->Merge(2), if_eq_proto, if_null_proto);
- effect = graph()->NewNode(common()->EffectPhi(2), e_eq_proto,
- e_null_proto, control);
-
-
- Node* result = graph()->NewNode(common()->Phi(kTypeBool, 2),
- jsgraph()->TrueConstant(),
- jsgraph()->FalseConstant(), control);
-
- if (if_is_smi != nullptr) {
- DCHECK(e_is_smi != nullptr);
- control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
- result = graph()->NewNode(common()->Phi(kTypeBool, 2),
- jsgraph()->FalseConstant(), result, control);
- }
- ReplaceWithValue(node, result, effect, control);
- return Changed(result);
- }
+ if (!function->IsConstructor() ||
+ function->map()->has_non_instance_prototype()) {
+ return NoChange();
}
- return NoChange();
+ JSFunction::EnsureHasInitialMap(function);
+ DCHECK(function->has_initial_map());
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ this->dependencies()->AssumeInitialMapCantChange(initial_map);
+ Node* prototype =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+
+ Node* if_is_smi = nullptr;
+ Node* e_is_smi = nullptr;
+ // If the left hand side is an object, no smi check is needed.
+ if (r.left_type()->Maybe(Type::TaggedSigned())) {
+ Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+ Node* branch_is_smi =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
+ if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+ e_is_smi = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
+ }
+
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ r.left(), effect, control);
+
+ // Loop through the {object}s prototype chain looking for the {prototype}.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+
+ Node* loop_effect = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+
+ Node* loop_object_map =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ object_map, r.left(), loop);
+
+ // Check if the lhs needs access checks.
+ Node* map_bit_field = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMapBitField()),
+ loop_object_map, loop_effect, control);
+ int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
+ Node* is_access_check_needed_num =
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
+ jsgraph()->Uint32Constant(is_access_check_needed_bit));
+ Node* is_access_check_needed =
+ graph()->NewNode(machine()->Word32Equal(), is_access_check_needed_num,
+ jsgraph()->Uint32Constant(is_access_check_needed_bit));
+
+ Node* branch_is_access_check_needed = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
+ Node* if_is_access_check_needed =
+ graph()->NewNode(common()->IfTrue(), branch_is_access_check_needed);
+ Node* e_is_access_check_needed = effect;
+
+ control =
+ graph()->NewNode(common()->IfFalse(), branch_is_access_check_needed);
+
+ // Check if the lhs is a proxy.
+ Node* map_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ loop_object_map, loop_effect, control);
+ Node* is_proxy = graph()->NewNode(machine()->Word32Equal(), map_instance_type,
+ jsgraph()->Uint32Constant(JS_PROXY_TYPE));
+ Node* branch_is_proxy =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
+ Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
+ Node* e_is_proxy = effect;
+
+
+ Node* runtime_has_in_proto_chain = control = graph()->NewNode(
+ common()->Merge(2), if_is_access_check_needed, if_is_proxy);
+ effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
+ e_is_proxy, control);
+
+ // If we need an access check or the object is a Proxy, make a runtime call
+ // to finish the lowering.
+ Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2), r.left(),
+ prototype, context, frame_state, effect, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
+
+ Node* object_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()),
+ loop_object_map, loop_effect, control);
+
+ // Check if object prototype is equal to function prototype.
+ Node* eq_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, prototype);
+ Node* branch_eq_proto =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
+ Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
+ Node* e_eq_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
+
+ // If not, check if object prototype is the null prototype.
+ Node* null_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, jsgraph()->NullConstant());
+ Node* branch_null_proto = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), null_proto, control);
+ Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
+ Node* e_null_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+ Node* load_object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ object_prototype, effect, control);
+ // Close the loop.
+ loop_effect->ReplaceInput(1, effect);
+ loop_object_map->ReplaceInput(1, load_object_map);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(3), runtime_has_in_proto_chain,
+ if_eq_proto, if_null_proto);
+ effect = graph()->NewNode(common()->EffectPhi(3),
+ bool_result_runtime_has_in_proto_chain_case,
+ e_eq_proto, e_null_proto, control);
+
+ Node* result = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 3),
+ bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
+ jsgraph()->FalseConstant(), control);
+
+ if (if_is_smi != nullptr) {
+ DCHECK_NOT_NULL(e_is_smi);
+ control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+ result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), result, control);
+ }
+
+ ReplaceWithValue(node, result, effect, control);
+ return Changed(result);
}
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = graph()->start();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control));
+ Node* previous = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetValueInput(node, 0), effect, control);
+ node->ReplaceInput(0, previous);
}
node->ReplaceInput(1, effect);
node->ReplaceInput(2, control);
@@ -1200,16 +1308,17 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = graph()->start();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control));
+ Node* previous = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetValueInput(node, 0), effect, control);
+ node->ReplaceInput(0, previous);
}
node->RemoveInput(2);
+ node->ReplaceInput(2, effect);
NodeProperties::ChangeOp(
node,
simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
@@ -1237,13 +1346,12 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
isolate());
receiver = jsgraph()->Constant(global_proxy);
} else {
- Node* global_object = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true),
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
context, context, effect);
- receiver = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSGlobalObjectGlobalProxy()),
- global_object, effect, control);
+ receiver = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, effect);
}
} else if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
mode == ConvertReceiverMode::kNotNullOrUndefined) {
@@ -1292,21 +1400,21 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
isolate());
rglobal = jsgraph()->Constant(global_proxy);
} else {
- Node* global_object = eglobal = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true),
+ Node* native_context = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
context, context, eglobal);
rglobal = eglobal = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSGlobalObjectGlobalProxy()),
- global_object, eglobal, if_global);
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, eglobal);
}
}
control = graph()->NewNode(common()->Merge(2), if_convert, if_global);
effect =
graph()->NewNode(common()->EffectPhi(2), econvert, eglobal, control);
- receiver = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), rconvert,
- rglobal, control);
+ receiver =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ rconvert, rglobal, control);
}
}
ReplaceWithValue(node, receiver, effect, control);
@@ -1316,6 +1424,75 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
namespace {
+// Maximum instance size for which allocations will be inlined.
+const int kMaxInlineInstanceSize = 64 * kPointerSize;
+
+
+// Checks whether allocation using the given constructor can be inlined.
+bool IsAllocationInlineable(Handle<JSFunction> constructor) {
+ // TODO(bmeurer): Further relax restrictions on inlining, i.e.
+ // instance type and maybe instance size (inobject properties
+ // are limited anyways by the runtime).
+ return constructor->has_initial_map() &&
+ constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
+ constructor->initial_map()->instance_size() < kMaxInlineInstanceSize;
+}
+
+} // namespace
+
+
+Reduction JSTypedLowering::ReduceJSCreate(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
+ Node* const target = NodeProperties::GetValueInput(node, 0);
+ Type* const target_type = NodeProperties::GetType(target);
+ Node* const new_target = NodeProperties::GetValueInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ // TODO(turbofan): Add support for NewTarget passed to JSCreate.
+ if (target != new_target) return NoChange();
+ // Extract constructor function.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ DCHECK(constructor->IsConstructor());
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ constructor->CompleteInobjectSlackTrackingIfActive();
+
+ // TODO(bmeurer): We fall back to the runtime in case we cannot inline
+ // the allocation here, which is sort of expensive. We should think about
+ // a soft fallback to some NewObjectCodeStub.
+ if (IsAllocationInlineable(constructor)) {
+ // Compute instance size from initial map of {constructor}.
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ int const instance_size = initial_map->instance_size();
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} of the {constructor} changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ // Emit code to allocate the JSObject instance for the {constructor}.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(instance_size);
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+
+namespace {
+
// Retrieves the frame state holding actual argument values.
Node* GetArgumentsFrameState(Node* frame_state) {
Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
@@ -1337,124 +1514,302 @@ Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
// arguments object, but only for non-inlined (i.e. outermost) frames.
- if (p.type() != CreateArgumentsParameters::kRestArray &&
- outer_state->opcode() != IrOpcode::kFrameState) {
- Handle<SharedFunctionInfo> shared;
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
Isolate* isolate = jsgraph()->isolate();
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
- Callable callable = CodeFactory::ArgumentsAccess(
- isolate, unmapped, shared->has_duplicate_parameters());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
- const Operator* new_op = common()->Call(desc);
int parameter_count = state_info.parameter_count() - 1;
int parameter_offset = parameter_count * kPointerSize;
int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* parameter_pointer = graph()->NewNode(
machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
jsgraph()->IntPtrConstant(offset));
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(parameter_count));
- node->InsertInput(graph()->zone(), 3, parameter_pointer);
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
+
+ if (p.type() != CreateArgumentsParameters::kRestArray) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
+ Callable callable = CodeFactory::ArgumentsAccess(
+ isolate, unmapped, shared->has_duplicate_parameters());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 3, parameter_pointer);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ } else {
+ Callable callable = CodeFactory::RestArgumentsAccess(isolate);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->ReplaceInput(1, jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 2, parameter_pointer);
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(p.start_index()));
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+ } else if (outer_state->opcode() == IrOpcode::kFrameState) {
+ // Use inline allocation for all mapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared->has_duplicate_parameters()) return NoChange();
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ bool has_aliased_arguments = false;
+ Node* const elements = AllocateAliasedArguments(
+ effect, control, args_state, context, shared, &has_aliased_arguments);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+ : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
+ a.Allocate(Heap::kSloppyArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (p.type() == CreateArgumentsParameters::kUnmappedArguments) {
+ // Use inline allocation for all unmapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ Node* const elements = AllocateArguments(effect, control, args_state);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::STRICT_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
+ a.Allocate(Heap::kStrictArgumentsObjectSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (p.type() == CreateArgumentsParameters::kRestArray) {
+ // Use inline allocation for all unmapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by the rest array.
+ Node* const elements =
+ AllocateRestArguments(effect, control, args_state, p.start_index());
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the JSArray object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_jsarray_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the jsarray.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // -1 to minus receiver
+ int argument_count = args_state_info.parameter_count() - 1;
+ int length = std::max(0, argument_count - p.start_index());
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+ jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
}
- // Use inline allocation for all mapped arguments objects within inlined
- // (i.e. non-outermost) frames, independent of the object size.
- if (p.type() == CreateArgumentsParameters::kMappedArguments &&
- outer_state->opcode() == IrOpcode::kFrameState) {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- Node* const callee = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- // TODO(mstarzinger): Duplicate parameters are not handled yet.
- if (shared->has_duplicate_parameters()) return NoChange();
- // Choose the correct frame state and frame state info depending on whether
- // there conceptually is an arguments adaptor frame in the call chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
- // Prepare element backing store to be used by arguments object.
- bool has_aliased_arguments = false;
- Node* const elements = AllocateAliasedArguments(
- effect, control, args_state, context, shared, &has_aliased_arguments);
- // Load the arguments object map from the current native context.
- Node* const load_global_object = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
- Node* const load_native_context =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSGlobalObjectNativeContext()),
- load_global_object, effect, control);
- Node* const load_arguments_map = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
- : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
- load_native_context, effect, control);
- // Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
- int length = args_state_info.parameter_count() - 1; // Minus receiver.
- STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
- a.Allocate(Heap::kSloppyArgumentsObjectSize);
- a.Store(AccessBuilder::ForMap(), load_arguments_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
- a.Store(AccessBuilder::ForArgumentsCallee(), callee);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
+ return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceNewArray(Node* node, Node* length,
+ int capacity,
+ Handle<AllocationSite> site) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Extract transition and tenuring feedback from the {site} and add
+ // appropriate code dependencies on the {site} if deoptimization is
+ // enabled.
+ PretenureFlag pretenure = site->GetPretenureMode();
+ ElementsKind elements_kind = site->GetElementsKind();
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeTenuringDecision(site);
+ dependencies()->AssumeTransitionStable(site);
+ }
+
+ // Retrieve the initial map for the array from the appropriate native context.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* js_array_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
+ native_context, native_context, effect);
+
+ // Setup elements and properties.
+ Node* elements;
+ if (capacity == 0) {
+ elements = jsgraph()->EmptyFixedArrayConstant();
+ } else {
+ elements = effect =
+ AllocateElements(effect, control, elements_kind, capacity, pretenure);
+ }
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArray::kSize, pretenure);
+ a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
+Reduction JSTypedLowering::ReduceJSCreateArray(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+
+ // TODO(bmeurer): Optimize the subclassing case.
+ if (target != new_target) return NoChange();
+
+ // Check if we have a feedback {site} on the {node}.
+ Handle<AllocationSite> site = p.site();
+ if (p.site().is_null()) return NoChange();
+
+ // Attempt to inline calls to the Array constructor for the relevant cases
+ // where either no arguments are provided, or exactly one unsigned number
+ // argument is given.
+ if (site->CanInlineCall()) {
+ if (p.arity() == 0) {
+ Node* length = jsgraph()->ZeroConstant();
+ int capacity = JSArray::kPreallocatedArrayElements;
+ return ReduceNewArray(node, length, capacity, site);
+ } else if (p.arity() == 1) {
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Type* length_type = NodeProperties::GetType(length);
+ if (length_type->Is(type_cache_.kElementLoopUnrollType)) {
+ int capacity = static_cast<int>(length_type->Max());
+ return ReduceNewArray(node, length, capacity, site);
+ }
+ }
}
- // Use inline allocation for all unmapped arguments objects within inlined
- // (i.e. non-outermost) frames, independent of the object size.
- if (p.type() == CreateArgumentsParameters::kUnmappedArguments &&
- outer_state->opcode() == IrOpcode::kFrameState) {
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- // Choose the correct frame state and frame state info depending on whether
- // there conceptually is an arguments adaptor frame in the call chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
- // Prepare element backing store to be used by arguments object.
- Node* const elements = AllocateArguments(effect, control, args_state);
- // Load the arguments object map from the current native context.
- Node* const load_global_object = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
- Node* const load_native_context =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSGlobalObjectNativeContext()),
- load_global_object, effect, control);
- Node* const load_arguments_map = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX)),
- load_native_context, effect, control);
- // Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
- int length = args_state_info.parameter_count() - 1; // Minus receiver.
- STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
- a.Allocate(Heap::kStrictArgumentsObjectSize);
- a.Store(AccessBuilder::ForMap(), load_arguments_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
- RelaxControls(node);
- a.FinishAndChange(node);
+ // Reduce {node} to the appropriate ArrayConstructorStub backend.
+ // Note that these stubs "behave" like JSFunctions, which means they
+ // expect a receiver on the stack, which they remove. We just push
+ // undefined for the receiver.
+ ElementsKind elements_kind = site->GetElementsKind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ if (p.arity() == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else if (p.arity() == 1) {
+ // TODO(bmeurer): Optimize for the 0 length non-holey case?
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else {
+ int const arity = static_cast<int>(p.arity());
+ ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
}
-
- return NoChange();
}
@@ -1484,11 +1839,43 @@ Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSCreateIterResultObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* done = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Load the JSIteratorResult map for the {context}.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* iterator_result_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+ native_context, native_context, effect);
+
+ // Emit code to allocate the JSIteratorResult instance.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(JSIteratorResult::kSize);
+ a.Store(AccessBuilder::ForMap(), iterator_result_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
+ a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+
Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
- HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
- int length = Handle<FixedArray>::cast(mconst.Value())->length();
- int flags = OpParameter<int>(node->op());
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
+ int const length = constants->length();
+ int const flags = p.flags();
// Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
// initial length limit for arrays with "fast" elements kind.
@@ -1505,7 +1892,11 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
: CallDescriptor::kNoFlags);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* constant_elements = jsgraph()->HeapConstant(constants);
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, literal_index);
+ node->InsertInput(graph()->zone(), 3, constant_elements);
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1516,10 +1907,11 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
- HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
// Constants are pairs, see ObjectLiteral::properties_count().
- int length = Handle<FixedArray>::cast(mconst.Value())->length() / 2;
- int flags = OpParameter<int>(node->op());
+ int const length = constants->length() / 2;
+ int const flags = p.flags();
// Use the FastCloneShallowObjectStub only for shallow boilerplates without
// elements up to the number of properties that the stubs can handle.
@@ -1534,8 +1926,13 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
: CallDescriptor::kNoFlags);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(flags));
+ Node* literal_index = jsgraph()->SmiConstant(p.index());
+ Node* literal_flags = jsgraph()->SmiConstant(flags);
+ Node* constant_elements = jsgraph()->HeapConstant(constants);
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, literal_index);
+ node->InsertInput(graph()->zone(), 3, constant_elements);
+ node->InsertInput(graph()->zone(), 4, literal_flags);
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1549,22 +1946,16 @@ Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
int slot_count = OpParameter<int>(node->op());
Node* const closure = NodeProperties::GetValueInput(node, 0);
- // The closure can be NumberConstant(0) if the closure is global code
- // (rather than a function). We exclude that case here.
- // TODO(jarin) Find a better way to check that the closure is a function.
-
// Use inline allocation for function contexts up to a size limit.
- if (slot_count < kFunctionContextAllocationLimit &&
- closure->opcode() != IrOpcode::kNumberConstant) {
+ if (slot_count < kFunctionContextAllocationLimit) {
// JSCreateFunctionContext[slot_count < limit]](fun)
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const extension = jsgraph()->ZeroConstant();
- Node* const load = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->TheHoleConstant();
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
@@ -1572,7 +1963,8 @@ Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
- a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
}
@@ -1601,38 +1993,53 @@ Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Node* const closure = NodeProperties::GetValueInput(node, 1);
- Type* input_type = NodeProperties::GetType(input);
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
- // The closure can be NumberConstant(0) if the closure is global code
- // (rather than a function). We exclude that case here.
- // TODO(jarin) Find a better way to check that the closure is a function.
-
- // Use inline allocation for with contexts for regular objects.
- if (input_type->Is(Type::Receiver()) &&
- closure->opcode() != IrOpcode::kNumberConstant) {
- // JSCreateWithContext(o:receiver, fun)
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const load = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
- a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
- a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), input);
- a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- }
- return NoChange();
+Reduction JSTypedLowering::ReduceJSCreateCatchContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
+ Handle<String> name = OpParameter<Handle<String>>(node);
+ Node* exception = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
+ factory()->catch_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
+ exception);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
}
@@ -1642,31 +2049,26 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
int context_length = scope_info->ContextLength();
Node* const closure = NodeProperties::GetValueInput(node, 0);
- // The closure can be NumberConstant(0) if the closure is global code
- // (rather than a function). We exclude that case here.
- // TODO(jarin) Find a better way to check that the closure is a function.
-
// Use inline allocation for block contexts up to a size limit.
- if (context_length < kBlockContextAllocationLimit &&
- closure->opcode() != IrOpcode::kNumberConstant) {
+ if (context_length < kBlockContextAllocationLimit) {
// JSCreateBlockContext[scope[length < limit]](fun)
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* const extension = jsgraph()->Constant(scope_info);
- Node* const load = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
- context, effect, control);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->Constant(scope_info);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(context_length, factory()->block_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
- a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
- a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->TheHoleConstant());
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
}
RelaxControls(node);
a.FinishAndChange(node);
@@ -1677,6 +2079,66 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters const& p = CallConstructParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ // Check if {target} is a known JSFunction.
+ if (target_type->IsConstant() &&
+ target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Patch {node} to an indirect call via the {function}s construct stub.
+ Callable callable(handle(shared->construct_stub(), isolate()),
+ ConstructStubDescriptor(isolate()));
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Remove the eager bailout frame state.
+ NodeProperties::RemoveFrameStateInput(node, 1);
+
+ // Patch {node} to an indirect call via the ConstructFunction builtin.
+ Callable callable = CodeFactory::ConstructFunction(isolate());
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
@@ -1708,8 +2170,10 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (IsClassConstructor(shared->kind())) return NoChange();
- // Grab the context from the {function}.
- Node* context = jsgraph()->Constant(handle(function->context(), isolate()));
+ // Load the context from the {target}.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()), target,
+ effect, control);
NodeProperties::ReplaceContextInput(node, context);
// Check if we need to convert the {receiver}.
@@ -1718,10 +2182,12 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
receiver = effect =
graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
receiver, context, frame_state, effect, control);
- NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceValueInput(node, receiver, 1);
}
+ // Update the effect dependency for the {node}.
+ NodeProperties::ReplaceEffectInput(node, effect);
+
// Remove the eager bailout frame state.
NodeProperties::RemoveFrameStateInput(node, 1);
@@ -1731,12 +2197,14 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
flags |= CallDescriptor::kSupportsTailCalls;
}
+ Node* new_target = jsgraph()->UndefinedConstant();
+ Node* argument_count = jsgraph()->Int32Constant(arity);
if (shared->internal_formal_parameter_count() == arity ||
shared->internal_formal_parameter_count() ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Patch {node} to a direct call.
- node->InsertInput(graph()->zone(), arity + 2,
- jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), arity + 2, new_target);
+ node->InsertInput(graph()->zone(), arity + 3, argument_count);
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity, flags)));
@@ -1745,9 +2213,10 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Callable callable = CodeFactory::ArgumentAdaptor(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, argument_count);
node->InsertInput(
- graph()->zone(), 3,
+ graph()->zone(), 4,
jsgraph()->Int32Constant(shared->internal_formal_parameter_count()));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1882,8 +2351,8 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
etrue0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
cache_array_true0 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true1,
- cache_array_false1, if_true0);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true1, cache_array_false1, if_true0);
cache_type_true0 = cache_type;
}
@@ -1895,35 +2364,24 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
Node* efalse0;
{
// FixedArray case.
- Node* receiver_instance_type = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, effect, if_false0);
-
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- cache_type_false0 = graph()->NewNode(
- common()->Select(kMachAnyTagged, BranchHint::kFalse),
- graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- receiver_instance_type,
- jsgraph()->Uint32Constant(LAST_JS_PROXY_TYPE)),
- jsgraph()->ZeroConstant(), // Zero indicagtes proxy.
- jsgraph()->OneConstant()); // One means slow check.
-
+ cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- cache_array_false0, efalse0, if_false0);
+ cache_array_false0, effect, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
Node* cache_array =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_array_true0,
- cache_array_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_array_true0, cache_array_false0, control);
Node* cache_length =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), cache_length_true0,
- cache_length_false0, control);
- cache_type = graph()->NewNode(common()->Phi(kMachAnyTagged, 2),
- cache_type_true0, cache_type_false0, control);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_length_true0, cache_length_false0, control);
+ cache_type =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ cache_type_true0, cache_type_false0, control);
for (auto edge : node->use_edges()) {
Node* const use = edge.from();
@@ -2036,8 +2494,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
efalse0 =
graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue1,
- vfalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@@ -2047,7 +2505,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
node->ReplaceInput(1, vfalse0);
node->ReplaceInput(2, control);
node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, common()->Phi(kMachAnyTagged, 2));
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 2));
return Changed(node);
}
@@ -2060,6 +2519,36 @@ Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
}
+Reduction JSTypedLowering::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* const condition = NodeProperties::GetValueInput(node, 0);
+ Type* const condition_type = NodeProperties::GetType(condition);
+ Node* const vtrue = NodeProperties::GetValueInput(node, 1);
+ Type* const vtrue_type = NodeProperties::GetType(vtrue);
+ Node* const vfalse = NodeProperties::GetValueInput(node, 2);
+ Type* const vfalse_type = NodeProperties::GetType(vfalse);
+ if (condition_type->Is(true_type_)) {
+ // Select(condition:true, vtrue, vfalse) => vtrue
+ return Replace(vtrue);
+ }
+ if (condition_type->Is(false_type_)) {
+ // Select(condition:false, vtrue, vfalse) => vfalse
+ return Replace(vfalse);
+ }
+ if (vtrue_type->Is(true_type_) && vfalse_type->Is(false_type_)) {
+ // Select(condition, vtrue:true, vfalse:false) => condition
+ return Replace(condition);
+ }
+ if (vtrue_type->Is(false_type_) && vfalse_type->Is(true_type_)) {
+ // Select(condition, vtrue:false, vfalse:true) => BooleanNot(condition)
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+
Reduction JSTypedLowering::Reduce(Node* node) {
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
@@ -2129,8 +2618,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceNumberBinop(node, simplified()->NumberDivide());
case IrOpcode::kJSModulus:
return ReduceJSModulus(node);
- case IrOpcode::kJSUnaryNot:
- return ReduceJSUnaryNot(node);
case IrOpcode::kJSToBoolean:
return ReduceJSToBoolean(node);
case IrOpcode::kJSToNumber:
@@ -2153,10 +2640,16 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSStoreContext(node);
case IrOpcode::kJSConvertReceiver:
return ReduceJSConvertReceiver(node);
+ case IrOpcode::kJSCreate:
+ return ReduceJSCreate(node);
case IrOpcode::kJSCreateArguments:
return ReduceJSCreateArguments(node);
+ case IrOpcode::kJSCreateArray:
+ return ReduceJSCreateArray(node);
case IrOpcode::kJSCreateClosure:
return ReduceJSCreateClosure(node);
+ case IrOpcode::kJSCreateIterResultObject:
+ return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateLiteralArray:
return ReduceJSCreateLiteralArray(node);
case IrOpcode::kJSCreateLiteralObject:
@@ -2165,8 +2658,12 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCreateFunctionContext(node);
case IrOpcode::kJSCreateWithContext:
return ReduceJSCreateWithContext(node);
+ case IrOpcode::kJSCreateCatchContext:
+ return ReduceJSCreateCatchContext(node);
case IrOpcode::kJSCreateBlockContext:
return ReduceJSCreateBlockContext(node);
+ case IrOpcode::kJSCallConstruct:
+ return ReduceJSCallConstruct(node);
case IrOpcode::kJSCallFunction:
return ReduceJSCallFunction(node);
case IrOpcode::kJSForInDone:
@@ -2177,6 +2674,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSForInPrepare(node);
case IrOpcode::kJSForInStep:
return ReduceJSForInStep(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
default:
break;
}
@@ -2202,13 +2701,43 @@ Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
// Prepare an iterator over argument values recorded in the frame state.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
StateValuesAccess parameters_access(parameters);
- auto paratemers_it = ++parameters_access.begin();
+ auto parameters_it = ++parameters_access.begin();
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
a.AllocateArray(argument_count, factory()->fixed_array_map());
- for (int i = 0; i < argument_count; ++i, ++paratemers_it) {
- a.Store(AccessBuilder::ForFixedArraySlot(i), (*paratemers_it).node);
+ for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSTypedLowering::AllocateRestArguments(Node* effect, Node* control,
+ Node* frame_state,
+ int start_index) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ int num_elements = std::max(0, argument_count - start_index);
+ if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // Skip unused arguments.
+ for (int i = 0; i < start_index; i++) {
+ ++parameters_it;
+ }
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(num_elements, factory()->fixed_array_map());
+ for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
return a.Finish();
}
@@ -2254,7 +2783,7 @@ Node* JSTypedLowering::AllocateAliasedArguments(
Node* arguments = aa.Finish();
// Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
+ AllocationBuilder a(jsgraph(), arguments, control);
a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
a.Store(AccessBuilder::ForFixedArraySlot(0), context);
a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
@@ -2266,6 +2795,34 @@ Node* JSTypedLowering::AllocateAliasedArguments(
}
+Node* JSTypedLowering::AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(1, capacity);
+ DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+ Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ ? factory()->fixed_double_array_map()
+ : factory()->fixed_array_map();
+ ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
+ Node* value =
+ IsFastDoubleElementsKind(elements_kind)
+ ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
+ : jsgraph()->TheHoleConstant();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(capacity, elements_map, pretenure);
+ for (int i = 0; i < capacity; ++i) {
+ Node* index = jsgraph()->Constant(i);
+ a.Store(access, index, value);
+ }
+ return a.Finish();
+}
+
+
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index f4e11ec03f..68ce74e624 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -15,6 +15,7 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
+class TypeCache;
namespace compiler {
@@ -34,6 +35,7 @@ class JSTypedLowering final : public AdvancedReducer {
enum Flag {
kNoFlags = 0u,
kDeoptimizationEnabled = 1u << 0,
+ kDisableBinaryOpReduction = 1u << 1,
};
typedef base::Flags<Flag> Flags;
@@ -59,7 +61,6 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSStoreContext(Node* node);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
- Reduction ReduceJSUnaryNot(Node* node);
Reduction ReduceJSToBoolean(Node* node);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToNumber(Node* node);
@@ -67,28 +68,41 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
+ Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
+ Reduction ReduceJSCreateArray(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
+ Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateLiteralArray(Node* node);
Reduction ReduceJSCreateLiteralObject(Node* node);
Reduction ReduceJSCreateFunctionContext(Node* node);
Reduction ReduceJSCreateWithContext(Node* node);
+ Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node);
Reduction ReduceJSForInPrepare(Node* node);
Reduction ReduceJSForInStep(Node* node);
+ Reduction ReduceSelect(Node* node);
Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
+ Reduction ReduceNewArray(Node* node, Node* length, int capacity,
+ Handle<AllocationSite> site);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+ Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
+ int start_index);
Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
Node* context, Handle<SharedFunctionInfo>,
bool* has_aliased_arguments);
+ Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind, int capacity,
+ PretenureFlag pretenure);
Factory* factory() const;
Graph* graph() const;
@@ -109,6 +123,10 @@ class JSTypedLowering final : public AdvancedReducer {
Flags flags_;
JSGraph* jsgraph_;
Type* shifted_int32_ranges_[4];
+ Type* const true_type_;
+ Type* const false_type_;
+ Type* const the_hole_type_;
+ TypeCache const& type_cache_;
};
DEFINE_OPERATORS_FOR_FLAGS(JSTypedLowering::Flags)
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index c3f6074fa6..2eef9291e9 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/compiler/common-operator.h"
@@ -10,7 +11,6 @@
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -25,29 +25,29 @@ LinkageLocation regloc(Register reg) {
MachineType reptyp(Representation representation) {
switch (representation.kind()) {
case Representation::kInteger8:
- return kMachInt8;
+ return MachineType::Int8();
case Representation::kUInteger8:
- return kMachUint8;
+ return MachineType::Uint8();
case Representation::kInteger16:
- return kMachInt16;
+ return MachineType::Int16();
case Representation::kUInteger16:
- return kMachUint16;
+ return MachineType::Uint16();
case Representation::kInteger32:
- return kMachInt32;
+ return MachineType::Int32();
case Representation::kSmi:
case Representation::kTagged:
case Representation::kHeapObject:
- return kMachAnyTagged;
+ return MachineType::AnyTagged();
case Representation::kDouble:
- return kMachFloat64;
+ return MachineType::Float64();
case Representation::kExternal:
- return kMachPtr;
+ return MachineType::Pointer();
case Representation::kNone:
case Representation::kNumRepresentations:
break;
}
UNREACHABLE();
- return kMachNone;
+ return MachineType::None();
}
} // namespace
@@ -89,84 +89,38 @@ bool CallDescriptor::HasSameReturnLocationsAs(
}
-bool CallDescriptor::CanTailCall(const Node* node) const {
- // Determine the number of stack parameters passed in
- size_t stack_params = 0;
- for (size_t i = 0; i < InputCount(); ++i) {
- if (!GetInputLocation(i).IsRegister()) {
- ++stack_params;
- }
- }
- // Ensure the input linkage contains the stack parameters in the right order
- size_t current_stack_param = 0;
- for (size_t i = 0; i < InputCount(); ++i) {
- if (!GetInputLocation(i).IsRegister()) {
- if (GetInputLocation(i) != LinkageLocation::ForCallerFrameSlot(
- static_cast<int>(current_stack_param) -
- static_cast<int>(stack_params))) {
- return false;
- }
- ++current_stack_param;
- }
- }
- // Tail calling is currently allowed if return locations match and all
- // parameters are either in registers or on the stack but match exactly in
- // number and content.
+bool CallDescriptor::CanTailCall(const Node* node,
+ int* stack_param_delta) const {
CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
- if (!HasSameReturnLocationsAs(other)) return false;
size_t current_input = 0;
size_t other_input = 0;
- while (true) {
- if (other_input >= other->InputCount()) {
- while (current_input < InputCount()) {
- if (!GetInputLocation(current_input).IsRegister()) {
- return false;
- }
- ++current_input;
+ *stack_param_delta = 0;
+ bool more_other = true;
+ bool more_this = true;
+ while (more_other || more_this) {
+ if (other_input < other->InputCount()) {
+ if (!other->GetInputLocation(other_input).IsRegister()) {
+ (*stack_param_delta)--;
}
- return true;
+ } else {
+ more_other = false;
}
- if (current_input >= InputCount()) {
- while (other_input < other->InputCount()) {
- if (!other->GetInputLocation(other_input).IsRegister()) {
- return false;
- }
- ++other_input;
+ if (current_input < InputCount()) {
+ if (!GetInputLocation(current_input).IsRegister()) {
+ (*stack_param_delta)++;
}
- return true;
- }
- if (GetInputLocation(current_input).IsRegister()) {
- ++current_input;
- continue;
- }
- if (other->GetInputLocation(other_input).IsRegister()) {
- ++other_input;
- continue;
- }
- if (GetInputLocation(current_input) !=
- other->GetInputLocation(other_input)) {
- return false;
- }
- Node* input = node->InputAt(static_cast<int>(other_input));
- if (input->opcode() != IrOpcode::kParameter) {
- return false;
- }
- // Make sure that the parameter input passed through to the tail call
- // corresponds to the correct stack slot.
- size_t param_index = ParameterIndexOf(input->op());
- if (param_index != current_input - 1) {
- return false;
+ } else {
+ more_this = false;
}
++current_input;
++other_input;
}
- UNREACHABLE();
- return false;
+ return HasSameReturnLocationsAs(OpParameter<CallDescriptor const*>(node));
}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
- if (info->code_stub() != NULL) {
+ if (info->code_stub() != nullptr) {
// Use the code stub interface descriptor.
CodeStub* stub = info->code_stub();
CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
@@ -189,27 +143,7 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
1 + shared->internal_formal_parameter_count(),
CallDescriptor::kNoFlags);
}
- return NULL; // TODO(titzer): ?
-}
-
-
-FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame) const {
- bool has_frame = frame->GetSpillSlotCount() > 0 ||
- incoming_->IsJSFunctionCall() ||
- incoming_->kind() == CallDescriptor::kCallAddress;
- const int offset =
- (StandardFrameConstants::kFixedSlotCountAboveFp - spill_slot - 1) *
- kPointerSize;
- if (has_frame) {
- return FrameOffset::FromFramePointer(offset);
- } else {
- // No frame. Retrieve all parameters relative to stack pointer.
- DCHECK(spill_slot < 0); // Must be a parameter.
- int offsetSpToFp =
- kPointerSize * (StandardFrameConstants::kFixedSlotCountAboveFp -
- frame->GetTotalFrameSlotCount());
- return FrameOffset::FromStackPointer(offset - offsetSpToFp);
- }
+ return nullptr; // TODO(titzer): ?
}
@@ -220,13 +154,14 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
// are blacklisted here and can be called without a FrameState.
switch (function) {
case Runtime::kAllocateInTargetSpace:
- case Runtime::kDateField:
+ case Runtime::kCreateIterResultObject:
case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
+ case Runtime::kGetSuperConstructor:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
case Runtime::kNewFunctionContext:
@@ -241,9 +176,8 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
return 0;
case Runtime::kInlineArguments:
case Runtime::kInlineArgumentsLength:
- case Runtime::kInlineDefaultConstructorCallSuper:
- case Runtime::kInlineGetCallerJSFunction:
case Runtime::kInlineGetPrototype:
+ case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
case Runtime::kInlineSubString:
case Runtime::kInlineToInteger:
@@ -257,6 +191,7 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kInlineToString:
return 1;
case Runtime::kInlineCall:
+ case Runtime::kInlineTailCall:
case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineThrowNotDateError:
return 2;
@@ -286,7 +221,7 @@ bool CallDescriptor::UsesOnlyRegisters() const {
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
- Operator::Properties properties, bool needs_frame_state) {
+ Operator::Properties properties, CallDescriptor::Flags flags) {
const size_t function_count = 1;
const size_t num_args_count = 1;
const size_t context_count = 1;
@@ -308,34 +243,34 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
locations.AddReturn(regloc(kReturnRegister1));
}
for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(kMachAnyTagged);
+ types.AddReturn(MachineType::AnyTagged());
}
// All parameters to the runtime call go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
locations.AddParam(
LinkageLocation::ForCallerFrameSlot(i - js_parameter_count));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
}
// Add runtime function itself.
locations.AddParam(regloc(kRuntimeCallFunctionRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
// Add runtime call argument count.
locations.AddParam(regloc(kRuntimeCallArgCountRegister));
- types.AddParam(kMachPtr);
+ types.AddParam(MachineType::Pointer());
// Add context.
locations.AddParam(regloc(kContextRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
- CallDescriptor::Flags flags =
- needs_frame_state && (Linkage::FrameStateInputCount(function_id) > 0)
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ if (Linkage::FrameStateInputCount(function_id) == 0) {
+ flags = static_cast<CallDescriptor::Flags>(
+ flags & ~CallDescriptor::kNeedsFrameState);
+ }
// The target for runtime calls is a code object.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
@@ -360,7 +295,7 @@ CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
MachineSignature::Builder types(zone, return_count, parameter_count);
// The target is ignored, but we need to give some values here.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = regloc(kJSFunctionRegister);
return new (zone) CallDescriptor( // --
CallDescriptor::kLazyBailout, // kind
@@ -382,34 +317,39 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
CallDescriptor::Flags flags) {
const size_t return_count = 1;
const size_t context_count = 1;
+ const size_t new_target_count = 1;
const size_t num_args_count = 1;
const size_t parameter_count =
- js_parameter_count + num_args_count + context_count;
+ js_parameter_count + new_target_count + num_args_count + context_count;
LocationSignature::Builder locations(zone, return_count, parameter_count);
MachineSignature::Builder types(zone, return_count, parameter_count);
// All JS calls have exactly one return value.
locations.AddReturn(regloc(kReturnRegister0));
- types.AddReturn(kMachAnyTagged);
+ types.AddReturn(MachineType::AnyTagged());
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
int spill_slot_index = i - js_parameter_count;
locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
}
+ // Add JavaScript call new target value.
+ locations.AddParam(regloc(kJavaScriptCallNewTargetRegister));
+ types.AddParam(MachineType::AnyTagged());
+
// Add JavaScript call argument count.
locations.AddParam(regloc(kJavaScriptCallArgCountRegister));
- types.AddParam(kMachInt32);
+ types.AddParam(MachineType::Int32());
// Add context.
locations.AddParam(regloc(kContextRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
// The target for JS function calls is the JSFunction object.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
// TODO(titzer): When entering into an OSR function from unoptimized code,
// the JSFunction is not in a register, but it is on the stack in an
// unaddressable spill slot. We hack this in the OSR prologue. Fix.
@@ -436,23 +376,23 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
// Add registers for fixed parameters passed via interpreter dispatch.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kInterpreterAccumulatorRegister));
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
- types.AddParam(kMachPtr);
+ types.AddParam(MachineType::Pointer());
locations.AddParam(regloc(kInterpreterRegisterFileRegister));
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
- types.AddParam(kMachIntPtr);
+ types.AddParam(MachineType::IntPtr());
locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- types.AddParam(kMachPtr);
+ types.AddParam(MachineType::Pointer());
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// TODO(rmcilroy): Make the context param the one spilled to the stack once
// Turbofan supports modified stack arguments in tail calls.
@@ -463,13 +403,13 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
#endif
STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
locations.AddParam(regloc(kContextRegister));
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
- kMachNone, // target MachineType
+ MachineType::None(), // target MachineType
target_loc, // target location
types.Build(), // machine_sig
locations.Build(), // location_sig
@@ -489,21 +429,28 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties, MachineType return_type) {
+ Operator::Properties properties, MachineType return_type,
+ size_t return_count) {
const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int js_parameter_count =
register_parameter_count + stack_parameter_count;
const int context_count = 1;
- const size_t return_count = 1;
const size_t parameter_count =
static_cast<size_t>(js_parameter_count + context_count);
LocationSignature::Builder locations(zone, return_count, parameter_count);
MachineSignature::Builder types(zone, return_count, parameter_count);
- // Add return location.
- locations.AddReturn(regloc(kReturnRegister0));
- types.AddReturn(return_type);
+ // Add returns.
+ if (locations.return_count_ > 0) {
+ locations.AddReturn(regloc(kReturnRegister0));
+ }
+ if (locations.return_count_ > 1) {
+ locations.AddReturn(regloc(kReturnRegister1));
+ }
+ for (size_t i = 0; i < return_count; i++) {
+ types.AddReturn(return_type);
+ }
// Add parameters in registers and on the stack.
for (int i = 0; i < js_parameter_count; i++) {
@@ -518,15 +465,15 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
}
}
// Add context.
locations.AddParam(regloc(kContextRegister));
- types.AddParam(kMachAnyTagged);
+ types.AddParam(MachineType::AnyTagged());
// The target for stub calls is a code object.
- MachineType target_type = kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
@@ -551,8 +498,8 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
if (index == kOsrContextSpillSlotIndex) {
// Context. Use the parameter location of the context spill slot.
// Parameter (arity + 2) is special for the context of the function frame.
- int context_index =
- 1 + 1 + 1 + parameter_count; // target + receiver + params + #args
+ // >> context_index = target + receiver + params + new_target + #args
+ int context_index = 1 + 1 + parameter_count + 1 + 1;
return incoming_->GetInputLocation(context_index);
} else if (index >= first_stack_slot) {
// Local variable stored in this (callee) stack.
@@ -565,6 +512,28 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
return incoming_->GetInputLocation(parameter_index);
}
}
+
+
+bool Linkage::ParameterHasSecondaryLocation(int index) const {
+ if (incoming_->kind() != CallDescriptor::kCallJSFunction) return false;
+ LinkageLocation loc = GetParameterLocation(index);
+ return (loc == regloc(kJSFunctionRegister) ||
+ loc == regloc(kContextRegister));
+}
+
+LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
+ DCHECK(ParameterHasSecondaryLocation(index));
+ LinkageLocation loc = GetParameterLocation(index);
+
+ if (loc == regloc(kJSFunctionRegister)) {
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot);
+ } else {
+ DCHECK(loc == regloc(kContextRegister));
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot);
+ }
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 0f4b8db1c4..252f044321 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -7,9 +7,9 @@
#include "src/base/flags.h"
#include "src/compiler/frame.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
#include "src/frames.h"
+#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/zone.h"
@@ -57,6 +57,34 @@ class LinkageLocation {
return LinkageLocation(STACK_SLOT, slot);
}
+ static LinkageLocation ForSavedCallerReturnAddress() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kCallerPCOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ForSavedCallerFramePtr() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kCallerFPOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ForSavedCallerConstantPool() {
+ DCHECK(V8_EMBEDDED_CONSTANT_POOL);
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kConstantPoolOffset) /
+ kPointerSize);
+ }
+
+ static LinkageLocation ConvertToTailCallerLocation(
+ LinkageLocation caller_location, int stack_param_delta) {
+ if (!caller_location.IsRegister()) {
+ return LinkageLocation(STACK_SLOT,
+ caller_location.GetLocation() - stack_param_delta);
+ }
+ return caller_location;
+ }
+
private:
friend class CallDescriptor;
friend class OperandGenerator;
@@ -125,6 +153,9 @@ class CallDescriptor final : public ZoneObject {
kHasLocalCatchHandler = 1u << 4,
kSupportsTailCalls = 1u << 5,
kCanUseRoots = 1u << 6,
+ // Indicates that the native stack should be used for a code object. This
+ // information is important for native calls on arm64.
+ kUseNativeStack = 1u << 7,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
@@ -160,6 +191,10 @@ class CallDescriptor final : public ZoneObject {
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+ bool RequiresFrameAsIncoming() const {
+ return IsCFunctionCall() || IsJSFunctionCall();
+ }
+
// The number of return values from this call.
size_t ReturnCount() const { return machine_sig_->return_count(); }
@@ -186,6 +221,7 @@ class CallDescriptor final : public ZoneObject {
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
+ bool UseNativeStack() const { return flags() & kUseNativeStack; }
LinkageLocation GetReturnLocation(size_t index) const {
return location_sig_->GetReturn(index);
@@ -222,7 +258,7 @@ class CallDescriptor final : public ZoneObject {
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
- bool CanTailCall(const Node* call) const;
+ bool CanTailCall(const Node* call, int* stack_param_delta) const;
private:
friend class Linkage;
@@ -254,11 +290,11 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
// Can be used to translate {arg_index} (i.e. index of the call node input) as
// well as {param_index} (i.e. as stored in parameter nodes) into an operator
// representing the architecture-specific location. The following call node
-// layouts are supported (where {n} is the number value inputs):
+// layouts are supported (where {n} is the number of value inputs):
//
// #0 #1 #2 #3 [...] #n
// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
-// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], #arg, context
+// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], new, #arg, context
// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
class Linkage : public ZoneObject {
public:
@@ -275,7 +311,7 @@ class Linkage : public ZoneObject {
static CallDescriptor* GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties, bool needs_frame_state = true);
+ Operator::Properties properties, CallDescriptor::Flags flags);
static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
@@ -283,7 +319,8 @@ class Linkage : public ZoneObject {
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties = Operator::kNoProperties,
- MachineType return_type = kMachAnyTagged);
+ MachineType return_type = MachineType::AnyTagged(),
+ size_t return_count = 1);
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
@@ -317,19 +354,31 @@ class Linkage : public ZoneObject {
return incoming_->GetReturnType(index);
}
- // Get the frame offset for a given spill slot. The location depends on the
- // calling convention and the specific frame layout, and may thus be
- // architecture-specific. Negative spill slots indicate arguments on the
- // caller's frame.
- FrameOffset GetFrameOffset(int spill_slot, Frame* frame) const;
+ bool ParameterHasSecondaryLocation(int index) const;
+ LinkageLocation GetParameterSecondaryLocation(int index) const;
static int FrameStateInputCount(Runtime::FunctionId function);
// Get the location where an incoming OSR value is stored.
LinkageLocation GetOsrValueLocation(int index) const;
- // A special parameter index for JSCalls that represents the closure.
- static const int kJSFunctionCallClosureParamIndex = -1;
+ // A special {Parameter} index for JSCalls that represents the new target.
+ static int GetJSCallNewTargetParamIndex(int parameter_count) {
+ return parameter_count + 0; // Parameter (arity + 0) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the argument count.
+ static int GetJSCallArgCountParamIndex(int parameter_count) {
+ return parameter_count + 1; // Parameter (arity + 1) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the context.
+ static int GetJSCallContextParamIndex(int parameter_count) {
+ return parameter_count + 2; // Parameter (arity + 2) is special.
+ }
+
+ // A special {Parameter} index for JSCalls that represents the closure.
+ static const int kJSCallClosureParamIndex = -1;
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
diff --git a/deps/v8/src/compiler/live-range-separator.cc b/deps/v8/src/compiler/live-range-separator.cc
index 6591d71e72..980c9442bc 100644
--- a/deps/v8/src/compiler/live-range-separator.cc
+++ b/deps/v8/src/compiler/live-range-separator.cc
@@ -45,7 +45,8 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
data->CreateSpillRangeForLiveRange(range);
}
if (range->splinter() == nullptr) {
- TopLevelLiveRange *splinter = data->NextLiveRange(range->machine_type());
+ TopLevelLiveRange *splinter =
+ data->NextLiveRange(range->representation());
DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
data->live_ranges()[splinter->vreg()] = splinter;
range->SetSplinter(splinter);
@@ -58,26 +59,6 @@ void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
}
-int FirstInstruction(const UseInterval *interval) {
- LifetimePosition start = interval->start();
- int ret = start.ToInstructionIndex();
- if (start.IsInstructionPosition() && start.IsEnd()) {
- ++ret;
- }
- return ret;
-}
-
-
-int LastInstruction(const UseInterval *interval) {
- LifetimePosition end = interval->end();
- int ret = end.ToInstructionIndex();
- if (end.IsGapPosition() || end.IsStart()) {
- --ret;
- }
- return ret;
-}
-
-
void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
const InstructionSequence *code = data->code();
UseInterval *interval = range->first_interval();
@@ -88,9 +69,9 @@ void SplinterLiveRange(TopLevelLiveRange *range, RegisterAllocationData *data) {
while (interval != nullptr) {
UseInterval *next_interval = interval->next();
const InstructionBlock *first_block =
- code->GetInstructionBlock(FirstInstruction(interval));
+ code->GetInstructionBlock(interval->FirstGapIndex());
const InstructionBlock *last_block =
- code->GetInstructionBlock(LastInstruction(interval));
+ code->GetInstructionBlock(interval->LastGapIndex());
int first_block_nr = first_block->rpo_number().ToInt();
int last_block_nr = last_block->rpo_number().ToInt();
for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
@@ -129,12 +110,35 @@ void LiveRangeSeparator::Splinter() {
if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
continue;
}
- SplinterLiveRange(range, data());
+ int first_instr = range->first_interval()->FirstGapIndex();
+ if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
+ SplinterLiveRange(range, data());
+ }
+ }
+}
+
+
+void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
+ for (TopLevelLiveRange *top : data()->live_ranges()) {
+ if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
+ continue;
+ }
+
+ LiveRange *child = top;
+ for (; child != nullptr; child = child->next()) {
+ if (child->spilled() ||
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ break;
+ }
+ }
+ if (child == nullptr) top->MarkSpilledInDeferredBlock();
}
}
void LiveRangeMerger::Merge() {
+ MarkRangesSpilledInDeferredBlocks();
+
int live_range_count = static_cast<int>(data()->live_ranges().size());
for (int i = 0; i < live_range_count; ++i) {
TopLevelLiveRange *range = data()->live_ranges()[i];
diff --git a/deps/v8/src/compiler/live-range-separator.h b/deps/v8/src/compiler/live-range-separator.h
index c8e6edc20b..57bc98235d 100644
--- a/deps/v8/src/compiler/live-range-separator.h
+++ b/deps/v8/src/compiler/live-range-separator.h
@@ -47,6 +47,11 @@ class LiveRangeMerger final : public ZoneObject {
RegisterAllocationData* data() const { return data_; }
Zone* zone() const { return zone_; }
+ // Mark ranges spilled in deferred blocks, that also cover non-deferred code.
+ // We do nothing special for ranges fully contained in deferred blocks,
+ // because they would "spill in deferred blocks" anyway.
+ void MarkRangesSpilledInDeferredBlocks();
+
RegisterAllocationData* const data_;
Zone* const zone_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index c78a283ca0..97f1ab0ec5 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -28,7 +28,7 @@ Reduction LoadElimination::Reduce(Node* node) {
Reduction LoadElimination::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const access = FieldAccessOf(node->op());
- Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* object = NodeProperties::GetValueInput(node, 0);
for (Node* effect = NodeProperties::GetEffectInput(node);;
effect = NodeProperties::GetEffectInput(effect)) {
switch (effect->opcode()) {
@@ -53,11 +53,24 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
}
break;
}
+ case IrOpcode::kBeginRegion:
case IrOpcode::kStoreBuffer:
case IrOpcode::kStoreElement: {
// These can never interfere with field loads.
break;
}
+ case IrOpcode::kFinishRegion: {
+ // "Look through" FinishRegion nodes to make LoadElimination capable
+ // of looking into atomic regions.
+ if (object == effect) object = NodeProperties::GetValueInput(effect, 0);
+ break;
+ }
+ case IrOpcode::kAllocate: {
+ // Allocations don't interfere with field loads. In case we see the
+ // actual allocation for the {object} we can abort.
+ if (object == effect) return NoChange();
+ break;
+ }
default: {
if (!effect->op()->HasProperty(Operator::kNoWrite) ||
effect->op()->EffectInputCount() != 1) {
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 1a06b666dd..2ed5bc2280 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -113,7 +113,7 @@ class LoopTree : public ZoneObject {
if (node->opcode() == IrOpcode::kLoop) return node;
}
UNREACHABLE();
- return NULL;
+ return nullptr;
}
private:
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 8c980aa125..b553a9ff58 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -311,8 +311,9 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
// Update all the value and effect edges at once.
if (!value_edges.empty()) {
// TODO(titzer): machine type is wrong here.
- Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), node,
- peeling.map(node), merge);
+ Node* phi =
+ graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), node,
+ peeling.map(node), merge);
for (Edge edge : value_edges) edge.UpdateTo(phi);
value_edges.clear();
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 5951fb612a..19ea062053 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -600,7 +600,8 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
node->ReplaceInput(2, Word32And(dividend, mask));
NodeProperties::ChangeOp(
- node, common()->Select(kMachInt32, BranchHint::kFalse));
+ node,
+ common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
} else {
Node* quotient = Int32Div(dividend, divisor);
DCHECK_EQ(dividend, node->InputAt(0));
@@ -648,10 +649,9 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
- if (m.IsRoundInt64ToFloat64()) return Replace(m.node()->InputAt(0));
if (m.IsPhi()) {
Node* const phi = m.node();
- DCHECK_EQ(kRepFloat64, RepresentationOf(OpParameter<MachineType>(phi)));
+ DCHECK_EQ(MachineRepresentation::kFloat64, PhiRepresentationOf(phi->op()));
if (phi->OwnedBy(node)) {
// TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
// => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
@@ -666,8 +666,9 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
if (reduction.Changed()) input = reduction.replacement();
phi->ReplaceInput(i, input);
}
- NodeProperties::ChangeOp(phi,
- common()->Phi(kMachInt32, value_input_count));
+ NodeProperties::ChangeOp(
+ phi,
+ common()->Phi(MachineRepresentation::kWord32, value_input_count));
return Replace(phi);
}
}
@@ -676,15 +677,16 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
- MachineType const rep =
- RepresentationOf(StoreRepresentationOf(node->op()).machine_type());
+ MachineRepresentation const rep =
+ StoreRepresentationOf(node->op()).representation();
Node* const value = node->InputAt(2);
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
- if (m.right().HasValue() &&
- ((rep == kRepWord8 && (m.right().Value() & 0xff) == 0xff) ||
- (rep == kRepWord16 && (m.right().Value() & 0xffff) == 0xffff))) {
+ if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
+ (m.right().Value() & 0xff) == 0xff) ||
+ (rep == MachineRepresentation::kWord16 &&
+ (m.right().Value() & 0xffff) == 0xffff))) {
node->ReplaceInput(2, m.left().node());
return Changed(node);
}
@@ -692,9 +694,10 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
}
case IrOpcode::kWord32Sar: {
Int32BinopMatcher m(value);
- if (m.left().IsWord32Shl() &&
- ((rep == kRepWord8 && m.right().IsInRange(1, 24)) ||
- (rep == kRepWord16 && m.right().IsInRange(1, 16)))) {
+ if (m.left().IsWord32Shl() && ((rep == MachineRepresentation::kWord8 &&
+ m.right().IsInRange(1, 24)) ||
+ (rep == MachineRepresentation::kWord16 &&
+ m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
node->ReplaceInput(2, mleft.left().node());
@@ -812,12 +815,14 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
}
} else if (mleft.left().IsLoad()) {
LoadRepresentation const rep =
- OpParameter<LoadRepresentation>(mleft.left().node());
- if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
+ LoadRepresentationOf(mleft.left().node()->op());
+ if (m.right().Is(24) && mleft.right().Is(24) &&
+ rep == MachineType::Int8()) {
// Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
return Replace(mleft.left().node());
}
- if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
+ if (m.right().Is(16) && mleft.right().Is(16) &&
+ rep == MachineType::Int16()) {
// Load[kMachInt16] << 16 >> 16 => Load[kMachInt8]
return Replace(mleft.left().node());
}
@@ -939,8 +944,8 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
}
if (m.LeftEqualsRight()) return Replace(m.left().node()); // x | x => x
- Node* shl = NULL;
- Node* shr = NULL;
+ Node* shl = nullptr;
+ Node* shr = nullptr;
// Recognize rotation, we are matching either:
// * x << y | x >>> (32 - y) => x ror (32 - y), i.e x rol y
// * x << (32 - y) | x >>> y => x ror y
@@ -963,8 +968,8 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
// Case where y is a constant.
if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
} else {
- Node* sub = NULL;
- Node* y = NULL;
+ Node* sub = nullptr;
+ Node* y = nullptr;
if (mshl.right().IsInt32Sub()) {
sub = mshl.right().node();
y = mshr.right().node();
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 43f7eda5c6..511a10dd02 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -47,7 +47,7 @@ std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
- return lhs.machine_type() == rhs.machine_type() &&
+ return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
}
@@ -58,16 +58,22 @@ bool operator!=(StoreRepresentation lhs, StoreRepresentation rhs) {
size_t hash_value(StoreRepresentation rep) {
- return base::hash_combine(rep.machine_type(), rep.write_barrier_kind());
+ return base::hash_combine(rep.representation(), rep.write_barrier_kind());
}
std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
- return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
+ return os << "(" << rep.representation() << " : " << rep.write_barrier_kind()
<< ")";
}
+LoadRepresentation LoadRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kLoad, op->opcode());
+ return OpParameter<LoadRepresentation>(op);
+}
+
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStore, op->opcode());
return OpParameter<StoreRepresentation>(op);
@@ -122,7 +128,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
+ 0, 2) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
@@ -135,9 +144,15 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
@@ -182,24 +197,40 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundUp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundUp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1)
+ V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
#define MACHINE_TYPE_LIST(V) \
- V(MachFloat32) \
- V(MachFloat64) \
- V(MachInt8) \
- V(MachUint8) \
- V(MachInt16) \
- V(MachUint16) \
- V(MachInt32) \
- V(MachUint32) \
- V(MachInt64) \
- V(MachUint64) \
- V(MachPtr) \
- V(MachAnyTagged)
+ V(Float32) \
+ V(Float64) \
+ V(Int8) \
+ V(Uint8) \
+ V(Int16) \
+ V(Uint16) \
+ V(Int32) \
+ V(Uint32) \
+ V(Int64) \
+ V(Uint64) \
+ V(Pointer) \
+ V(AnyTagged)
+
+
+#define MACHINE_REPRESENTATION_LIST(V) \
+ V(kFloat32) \
+ V(kFloat64) \
+ V(kWord8) \
+ V(kWord16) \
+ V(kWord32) \
+ V(kWord64) \
+ V(kTagged)
struct MachineOperatorGlobalCache {
@@ -234,14 +265,14 @@ struct MachineOperatorGlobalCache {
Load##Type##Operator() \
: Operator1<LoadRepresentation>( \
IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, k##Type) {} \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct CheckedLoad##Type##Operator final \
: public Operator1<CheckedLoadRepresentation> { \
CheckedLoad##Type##Operator() \
: Operator1<CheckedLoadRepresentation>( \
IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {} \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
@@ -254,7 +285,8 @@ struct MachineOperatorGlobalCache {
: Operator1<StoreRepresentation>( \
IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
"Store", 3, 1, 1, 0, 1, 0, \
- StoreRepresentation(k##Type, write_barrier_kind)) {} \
+ StoreRepresentation(MachineRepresentation::Type, \
+ write_barrier_kind)) {} \
}; \
struct Store##Type##NoWriteBarrier##Operator final \
: public Store##Type##Operator { \
@@ -281,7 +313,8 @@ struct MachineOperatorGlobalCache {
CheckedStore##Type##Operator() \
: Operator1<CheckedStoreRepresentation>( \
IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
- "CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {} \
+ "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
+ } \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
@@ -289,7 +322,7 @@ struct MachineOperatorGlobalCache {
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
CheckedStore##Type##Operator kCheckedStore##Type;
- MACHINE_TYPE_LIST(STORE)
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
};
@@ -298,10 +331,12 @@ static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
LAZY_INSTANCE_INITIALIZER;
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
+MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
+ MachineRepresentation word,
Flags flags)
: cache_(kCache.Get()), word_(word), flags_(flags) {
- DCHECK(word == kRepWord32 || word == kRepWord64);
+ DCHECK(word == MachineRepresentation::kWord32 ||
+ word == MachineRepresentation::kWord64);
}
@@ -334,39 +369,36 @@ const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
- switch (rep) {
-#define LOAD(Type) \
- case k##Type: \
- return &cache_.kLoad##Type;
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kLoad##Type; \
+ }
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
- default:
- break;
- }
UNREACHABLE();
return nullptr;
}
-const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
- switch (rep.machine_type()) {
-#define STORE(Type) \
- case k##Type: \
- switch (rep.write_barrier_kind()) { \
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ switch (store_rep.write_barrier_kind()) { \
case kNoWriteBarrier: \
- return &cache_.k##Store##Type##NoWriteBarrier; \
+ return &cache_.k##Store##kRep##NoWriteBarrier; \
case kMapWriteBarrier: \
- return &cache_.k##Store##Type##MapWriteBarrier; \
+ return &cache_.k##Store##kRep##MapWriteBarrier; \
case kPointerWriteBarrier: \
- return &cache_.k##Store##Type##PointerWriteBarrier; \
+ return &cache_.k##Store##kRep##PointerWriteBarrier; \
case kFullWriteBarrier: \
- return &cache_.k##Store##Type##FullWriteBarrier; \
+ return &cache_.k##Store##kRep##FullWriteBarrier; \
} \
break;
- MACHINE_TYPE_LIST(STORE)
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
@@ -376,15 +408,12 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
const Operator* MachineOperatorBuilder::CheckedLoad(
CheckedLoadRepresentation rep) {
- switch (rep) {
-#define LOAD(Type) \
- case k##Type: \
- return &cache_.kCheckedLoad##Type;
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kCheckedLoad##Type; \
+ }
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
- default:
- break;
- }
UNREACHABLE();
return nullptr;
}
@@ -393,12 +422,13 @@ const Operator* MachineOperatorBuilder::CheckedLoad(
const Operator* MachineOperatorBuilder::CheckedStore(
CheckedStoreRepresentation rep) {
switch (rep) {
-#define STORE(Type) \
- case k##Type: \
- return &cache_.kCheckedStore##Type;
- MACHINE_TYPE_LIST(STORE)
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kCheckedStore##kRep;
+ MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 1280f91544..00fefe3539 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_MACHINE_OPERATOR_H_
#include "src/base/flags.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -62,20 +62,22 @@ std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
+LoadRepresentation LoadRepresentationOf(Operator const*);
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
class StoreRepresentation final {
public:
- StoreRepresentation(MachineType machine_type,
+ StoreRepresentation(MachineRepresentation representation,
WriteBarrierKind write_barrier_kind)
- : machine_type_(machine_type), write_barrier_kind_(write_barrier_kind) {}
+ : representation_(representation),
+ write_barrier_kind_(write_barrier_kind) {}
- MachineType machine_type() const { return machine_type_; }
+ MachineRepresentation representation() const { return representation_; }
WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }
private:
- MachineType machine_type_;
+ MachineRepresentation representation_;
WriteBarrierKind write_barrier_kind_;
};
@@ -96,7 +98,7 @@ CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
// A CheckedStore needs a MachineType.
-typedef MachineType CheckedStoreRepresentation;
+typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
@@ -116,25 +118,35 @@ class MachineOperatorBuilder final : public ZoneObject {
kFloat32Min = 1u << 1,
kFloat64Max = 1u << 2,
kFloat64Min = 1u << 3,
- kFloat64RoundDown = 1u << 4,
- kFloat64RoundTruncate = 1u << 5,
- kFloat64RoundTiesAway = 1u << 6,
- kInt32DivIsSafe = 1u << 7,
- kUint32DivIsSafe = 1u << 8,
- kWord32ShiftIsSafe = 1u << 9,
- kWord32Ctz = 1u << 10,
- kWord32Popcnt = 1u << 11,
- kWord64Ctz = 1u << 12,
- kWord64Popcnt = 1u << 13,
+ kFloat32RoundDown = 1u << 4,
+ kFloat64RoundDown = 1u << 5,
+ kFloat32RoundUp = 1u << 6,
+ kFloat64RoundUp = 1u << 7,
+ kFloat32RoundTruncate = 1u << 8,
+ kFloat64RoundTruncate = 1u << 9,
+ kFloat32RoundTiesEven = 1u << 10,
+ kFloat64RoundTiesEven = 1u << 11,
+ kFloat64RoundTiesAway = 1u << 12,
+ kInt32DivIsSafe = 1u << 13,
+ kUint32DivIsSafe = 1u << 14,
+ kWord32ShiftIsSafe = 1u << 15,
+ kWord32Ctz = 1u << 16,
+ kWord64Ctz = 1u << 17,
+ kWord32Popcnt = 1u << 18,
+ kWord64Popcnt = 1u << 19,
kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
- kFloat64RoundDown | kFloat64RoundTruncate |
- kFloat64RoundTiesAway | kWord32Ctz | kWord32Popcnt |
- kWord64Ctz | kWord64Popcnt
+ kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat64RoundUp | kFloat32RoundTruncate |
+ kFloat64RoundTruncate | kFloat64RoundTiesAway |
+ kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
};
typedef base::Flags<Flag, unsigned> Flags;
- explicit MachineOperatorBuilder(Zone* zone, MachineType word = kMachPtr,
- Flags supportedOperators = kNoFlags);
+ explicit MachineOperatorBuilder(
+ Zone* zone,
+ MachineRepresentation word = MachineType::PointerRepresentation(),
+ Flags supportedOperators = kNoFlags);
const Operator* Word32And();
const Operator* Word32Or();
@@ -180,7 +192,9 @@ class MachineOperatorBuilder final : public ZoneObject {
bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
const Operator* Int64Add();
+ const Operator* Int64AddWithOverflow();
const Operator* Int64Sub();
+ const Operator* Int64SubWithOverflow();
const Operator* Int64Mul();
const Operator* Int64Div();
const Operator* Int64Mod();
@@ -199,6 +213,10 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* TryTruncateFloat32ToInt64();
+ const Operator* TryTruncateFloat64ToInt64();
+ const Operator* TryTruncateFloat32ToUint64();
+ const Operator* TryTruncateFloat64ToUint64();
const Operator* ChangeInt32ToFloat64();
const Operator* ChangeInt32ToInt64();
const Operator* ChangeUint32ToFloat64();
@@ -211,6 +229,8 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* TruncateInt64ToInt32();
const Operator* RoundInt64ToFloat32();
const Operator* RoundInt64ToFloat64();
+ const Operator* RoundUint64ToFloat32();
+ const Operator* RoundUint64ToFloat64();
// These operators reinterpret the bits of a floating point number as an
// integer and vice versa.
@@ -261,9 +281,15 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Float64Abs();
// Floating point rounding.
+ const OptionalOperator Float32RoundDown();
const OptionalOperator Float64RoundDown();
+ const OptionalOperator Float32RoundUp();
+ const OptionalOperator Float64RoundUp();
+ const OptionalOperator Float32RoundTruncate();
const OptionalOperator Float64RoundTruncate();
const OptionalOperator Float64RoundTiesAway();
+ const OptionalOperator Float32RoundTiesEven();
+ const OptionalOperator Float64RoundTiesEven();
// Floating point bit representation.
const Operator* Float64ExtractLowWord32();
@@ -287,9 +313,9 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* CheckedStore(CheckedStoreRepresentation);
// Target machine word-size assumed by this builder.
- bool Is32() const { return word() == kRepWord32; }
- bool Is64() const { return word() == kRepWord64; }
- MachineType word() const { return word_; }
+ bool Is32() const { return word() == MachineRepresentation::kWord32; }
+ bool Is64() const { return word() == MachineRepresentation::kWord64; }
+ MachineRepresentation word() const { return word_; }
// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
@@ -322,7 +348,7 @@ class MachineOperatorBuilder final : public ZoneObject {
private:
MachineOperatorGlobalCache const& cache_;
- MachineType const word_;
+ MachineRepresentation const word_;
Flags const flags_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
diff --git a/deps/v8/src/compiler/machine-type.cc b/deps/v8/src/compiler/machine-type.cc
deleted file mode 100644
index 7475a038cc..0000000000
--- a/deps/v8/src/compiler/machine-type.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/machine-type.h"
-#include "src/ostreams.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define PRINT(bit) \
- if (type & bit) { \
- if (before) os << "|"; \
- os << #bit; \
- before = true; \
- }
-
-
-std::ostream& operator<<(std::ostream& os, const MachineType& type) {
- bool before = false;
- PRINT(kRepBit);
- PRINT(kRepWord8);
- PRINT(kRepWord16);
- PRINT(kRepWord32);
- PRINT(kRepWord64);
- PRINT(kRepFloat32);
- PRINT(kRepFloat64);
- PRINT(kRepTagged);
-
- PRINT(kTypeBool);
- PRINT(kTypeInt32);
- PRINT(kTypeUint32);
- PRINT(kTypeInt64);
- PRINT(kTypeUint64);
- PRINT(kTypeNumber);
- PRINT(kTypeAny);
- return os;
-}
-
-
-#undef PRINT
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/machine-type.h b/deps/v8/src/compiler/machine-type.h
deleted file mode 100644
index 0cd2a84010..0000000000
--- a/deps/v8/src/compiler/machine-type.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_MACHINE_TYPE_H_
-#define V8_COMPILER_MACHINE_TYPE_H_
-
-#include <iosfwd>
-
-#include "src/base/bits.h"
-#include "src/globals.h"
-#include "src/signature.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Machine-level types and representations.
-// TODO(titzer): Use the real type system instead of MachineType.
-enum MachineType : uint16_t {
- // Representations.
- kRepBit = 1u << 0,
- kRepWord8 = 1u << 1,
- kRepWord16 = 1u << 2,
- kRepWord32 = 1u << 3,
- kRepWord64 = 1u << 4,
- kRepFloat32 = 1u << 5,
- kRepFloat64 = 1u << 6,
- kRepTagged = 1u << 7,
-
- // Types.
- kTypeBool = 1u << 8,
- kTypeInt32 = 1u << 9,
- kTypeUint32 = 1u << 10,
- kTypeInt64 = 1u << 11,
- kTypeUint64 = 1u << 12,
- kTypeNumber = 1u << 13,
- kTypeAny = 1u << 14,
-
- // Machine types.
- kMachNone = 0u,
- kMachBool = kRepBit | kTypeBool,
- kMachFloat32 = kRepFloat32 | kTypeNumber,
- kMachFloat64 = kRepFloat64 | kTypeNumber,
- kMachInt8 = kRepWord8 | kTypeInt32,
- kMachUint8 = kRepWord8 | kTypeUint32,
- kMachInt16 = kRepWord16 | kTypeInt32,
- kMachUint16 = kRepWord16 | kTypeUint32,
- kMachInt32 = kRepWord32 | kTypeInt32,
- kMachUint32 = kRepWord32 | kTypeUint32,
- kMachInt64 = kRepWord64 | kTypeInt64,
- kMachUint64 = kRepWord64 | kTypeUint64,
- kMachIntPtr = (kPointerSize == 4) ? kMachInt32 : kMachInt64,
- kMachUintPtr = (kPointerSize == 4) ? kMachUint32 : kMachUint64,
- kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
- kMachAnyTagged = kRepTagged | kTypeAny
-};
-
-V8_INLINE size_t hash_value(MachineType type) {
- return static_cast<size_t>(type);
-}
-
-std::ostream& operator<<(std::ostream& os, const MachineType& type);
-
-typedef uint16_t MachineTypeUnion;
-
-// Globally useful machine types and constants.
-const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
- kRepWord32 | kRepWord64 | kRepFloat32 |
- kRepFloat64 | kRepTagged;
-const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
- kTypeInt64 | kTypeUint64 | kTypeNumber |
- kTypeAny;
-
-// Gets only the type of the given type.
-inline MachineType TypeOf(MachineType machine_type) {
- int result = machine_type & kTypeMask;
- return static_cast<MachineType>(result);
-}
-
-// Gets only the representation of the given type.
-inline MachineType RepresentationOf(MachineType machine_type) {
- int result = machine_type & kRepMask;
- CHECK(base::bits::IsPowerOfTwo32(result));
- return static_cast<MachineType>(result);
-}
-
-// Gets the log2 of the element size in bytes of the machine type.
-inline int ElementSizeLog2Of(MachineType machine_type) {
- switch (RepresentationOf(machine_type)) {
- case kRepBit:
- case kRepWord8:
- return 0;
- case kRepWord16:
- return 1;
- case kRepWord32:
- case kRepFloat32:
- return 2;
- case kRepWord64:
- case kRepFloat64:
- return 3;
- case kRepTagged:
- return kPointerSizeLog2;
- default:
- break;
- }
- UNREACHABLE();
- return -1;
-}
-
-// Gets the element size in bytes of the machine type.
-inline int ElementSizeOf(MachineType machine_type) {
- const int shift = ElementSizeLog2Of(machine_type);
- DCHECK_NE(-1, shift);
- return 1 << shift;
-}
-
-inline bool IsFloatingPoint(MachineType type) {
- MachineType rep = RepresentationOf(type);
- return rep == kRepFloat32 || rep == kRepFloat64;
-}
-
-typedef Signature<MachineType> MachineSignature;
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_MACHINE_TYPE_H_
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 54bb55a146..75e4b9e7a8 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -118,10 +118,10 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -192,24 +192,21 @@ class OutOfLineRound : public OutOfLineCode {
};
-class OutOfLineTruncate final : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
-
-
-class OutOfLineFloor final : public OutOfLineRound {
- public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
-class OutOfLineCeil final : public OutOfLineRound {
- public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ private:
+ DoubleRegister const result_;
};
@@ -302,19 +299,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
}
-Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
- switch (condition) {
- case kOverflow:
- return lt;
- case kNotOverflow:
- return ge;
- default:
- break;
- }
- UNREACHABLE();
- return kNoCondition;
-}
-
FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
FlagsCondition condition) {
switch (condition) {
@@ -425,10 +409,15 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -436,22 +425,62 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
__ or_(at, at, kScratchReg2); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
+ if (IsMipsArchVariant(kMips32r6)) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
}
+
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addiu(sp, sp, sp_slot_delta * kPointerSize);
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -471,10 +500,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(at);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -482,6 +513,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -496,6 +528,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -506,9 +539,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -519,8 +554,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -530,6 +570,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -542,12 +584,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -584,15 +629,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsAddOvf:
- __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsSub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsSubOvf:
- __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
break;
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -605,9 +648,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kMipsDiv:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsDivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMipsMod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -621,6 +674,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsOr:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsNor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMipsXor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -651,6 +712,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
+ case kMipsExt:
+ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMipsIns:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
case kMipsRor:
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -770,15 +843,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputDoubleRegister(1));
break;
case kMipsFloat64RoundDown: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMipsFloat32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
break;
}
case kMipsFloat64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ break;
+ }
+ case kMipsFloat32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
break;
}
case kMipsFloat64RoundUp: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMipsFloat32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMipsFloat64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMipsFloat32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
break;
}
case kMipsFloat64Max: {
@@ -851,11 +944,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMipsCvtSW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMipsCvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
break;
}
+ case kMipsFloorWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
@@ -863,6 +980,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMipsFloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsCeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsRoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMipsTruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
case kMipsTruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
@@ -927,12 +1068,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMipsStoreToStackSlot: {
@@ -984,7 +1128,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
}
-}
+} // NOLINT(readability/fn_size)
#define UNSUPPORTED_COND(opcode, condition) \
@@ -1036,11 +1180,34 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
- } else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
- cc = FlagsConditionToConditionOvf(branch->condition);
- __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kMipsAddOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kMipsSubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+          UNSUPPORTED_COND(kMipsSubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1054,7 +1221,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ BranchF32(tlabel, NULL, cc, left, right);
+ __ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMipsCmpD) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, branch->condition);
@@ -1065,7 +1232,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ BranchF64(tlabel, NULL, cc, left, right);
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -1107,13 +1274,26 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
- cc = FlagsConditionToConditionOvf(condition);
- // Return 1 on overflow.
- __ Slt(result, kCompareReg, Operand(zero_reg));
- if (cc == ge) // Invert result on not overflow.
- __ xori(result, result, 1);
- return;
+ Label flabel, tlabel;
+ switch (instr->arch_opcode()) {
+ case kMipsAddOvf:
+ __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+
+ break;
+ case kMipsSubOvf:
+ __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
@@ -1263,17 +1443,17 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int stack_shrink_slots = frame()->GetSpillSlotCount();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1336,10 +1516,10 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopFPU(saves_fpu);
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(sp, fp);
__ Pop(ra, fp);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
@@ -1360,7 +1540,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1466,7 +1646,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index a251ba93ca..c9381775c8 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -25,11 +25,14 @@ namespace compiler {
V(MipsModU) \
V(MipsAnd) \
V(MipsOr) \
+ V(MipsNor) \
V(MipsXor) \
V(MipsClz) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsExt) \
+ V(MipsIns) \
V(MipsRor) \
V(MipsMov) \
V(MipsTst) \
@@ -54,15 +57,28 @@ namespace compiler {
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
V(MipsTruncUwD) \
V(MipsCvtDW) \
V(MipsCvtDUw) \
+ V(MipsCvtSW) \
V(MipsLb) \
V(MipsLbu) \
V(MipsSb) \
diff --git a/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
new file mode 100644
index 0000000000..af86a87ad7
--- /dev/null
+++ b/deps/v8/src/compiler/mips/instruction-scheduler-mips.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index b43edb17cd..61cea76b22 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -127,32 +127,32 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMipsLwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMipsLdc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -177,13 +177,13 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -212,26 +212,27 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kMipsSwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMipsSdc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kMipsSb;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kMipsSh;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kMipsSw;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -252,6 +253,50 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitWord32And(Node* node) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+ // Ext cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Ext with a smaller mask and the remaining bits will be
+ // zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+      // and remove constant loading of inverted mask.
+ Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0), g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMipsAnd);
}
@@ -262,16 +307,81 @@ void InstructionSelector::VisitWord32Or(Node* node) {
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ MipsOperandGenerator g(this);
+ Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMipsXor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ MipsOperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMipsShl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMipsShl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ MipsOperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMipsExt, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMipsShr, node);
}
@@ -359,7 +469,7 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
void InstructionSelector::VisitInt32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -367,7 +477,7 @@ void InstructionSelector::VisitInt32Div(Node* node) {
void InstructionSelector::VisitUint32Div(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -404,6 +514,65 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMipsFloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMipsCeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMipsRoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMipsTruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMipsFloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMipsCeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMipsRoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMipsTruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
VisitRR(this, kMipsTruncWD, node);
}
@@ -414,6 +583,16 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMipsCvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
VisitRR(this, kMipsCvtSD, node);
}
@@ -583,11 +762,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat32RoundDown, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kMipsFloat64RoundDown, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMipsFloat64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTruncate, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMipsFloat64RoundTruncate, node);
}
@@ -598,9 +797,19 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat32RoundTiesEven, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMipsFloat64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
MipsOperandGenerator g(this);
// Prepare for C function call.
@@ -611,8 +820,8 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* input : (*arguments)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ for (PushParameter input : (*arguments)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -624,8 +833,9 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
g.TempImmediate(push_count << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
- if (Node* input = (*arguments)[n]) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(n << kPointerSizeLog2));
}
}
@@ -637,30 +847,32 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -681,27 +893,27 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
default:
@@ -897,7 +1109,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
@@ -1102,12 +1314,21 @@ InstructionSelector::SupportedMachineOperatorFlags() {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode()) {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
- return flags | MachineOperatorBuilder::kFloat64Min |
+ return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat32Max;
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 62fd2f5efc..1b81aa5698 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/mips/macro-assembler-mips.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -118,10 +118,10 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -192,24 +192,21 @@ class OutOfLineRound : public OutOfLineCode {
};
-class OutOfLineTruncate final : public OutOfLineRound {
+class OutOfLineRound32 : public OutOfLineCode {
public:
- OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
-
-
-class OutOfLineFloor final : public OutOfLineRound {
- public:
- OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
-};
+ OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+ void Generate() final {
+ // Handle rounding to zero case where sign has to be preserved.
+ // High bits of float input already in kScratchReg.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ mtc1(at, result_);
+ }
-class OutOfLineCeil final : public OutOfLineRound {
- public:
- OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineRound(gen, result) {}
+ private:
+ DoubleRegister const result_;
};
@@ -426,10 +423,15 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} while (0)
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
- do { \
- auto ool = \
- new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
+ if (kArchVariant == kMips64r6) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
Label done; \
__ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
__ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
@@ -437,21 +439,60 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ Branch(USE_DELAY_SLOT, &done, hs, at, \
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
__ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
__ dmfc1(at, i.OutputDoubleRegister()); \
__ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
__ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
__ bind(ool->exit()); \
__ bind(&done); \
- } while (0)
+ }
+#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
+ if (kArchVariant == kMips64r6) { \
+ __ cfc1(kScratchReg, FCSR); \
+ __ li(at, Operand(mode_##mode)); \
+ __ ctc1(at, FCSR); \
+ __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ ctc1(kScratchReg, FCSR); \
+ } else { \
+ int32_t kFloat32ExponentBias = 127; \
+ int32_t kFloat32MantissaBits = 23; \
+ int32_t kFloat32ExponentBits = 8; \
+ auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
+ __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ mfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ }
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ daddiu(sp, sp, sp_slot_delta * kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -471,10 +512,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(at);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (instr->InputAt(0)->IsImmediate()) {
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
@@ -482,6 +525,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -495,6 +539,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -504,9 +549,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -517,8 +564,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -528,6 +580,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -540,12 +594,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -584,12 +641,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Dadd:
__ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DaddOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Dsub:
__ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DsubOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -599,11 +662,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64MulHighU:
__ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64DMulHigh:
+ __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
case kMips64Div:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64DivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64Mod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -616,9 +692,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kMips64Ddiv:
__ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64DdivU:
__ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ if (kArchVariant == kMips64r6) {
+ __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
+ }
break;
case kMips64Dmod:
__ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -632,6 +718,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Nor:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -672,9 +766,37 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
break;
- case kMips64Dext:
- __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
+ case kMips64Ins:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
+ case kMips64Dext: {
+ int16_t pos = i.InputInt8(1);
+ int16_t size = i.InputInt8(2);
+ if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
+ __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ } else {
+ DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
+ __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
+ break;
+ }
+ case kMips64Dins:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
+ } else {
+ __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ }
break;
case kMips64Dshl:
if (instr->InputAt(1)->IsRegister()) {
@@ -838,15 +960,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputDoubleRegister(1));
break;
case kMips64Float64RoundDown: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ break;
+ }
+ case kMips64Float32RoundDown: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
break;
}
case kMips64Float64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ break;
+ }
+ case kMips64Float32RoundTruncate: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
break;
}
case kMips64Float64RoundUp: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ break;
+ }
+ case kMips64Float32RoundUp: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ break;
+ }
+ case kMips64Float64RoundTiesEven: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ break;
+ }
+ case kMips64Float32RoundTiesEven: {
+ ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
break;
}
case kMips64Float64Max: {
@@ -917,6 +1059,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMips64CvtSW: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ mtc1(i.InputRegister(0), scratch);
+ __ cvt_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
case kMips64CvtSL: {
FPURegister scratch = kScratchDoubleReg;
__ dmtc1(i.InputRegister(0), scratch);
@@ -930,8 +1078,33 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kMips64CvtDUw: {
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64CvtDUl: {
+ __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64CvtSUl: {
+ __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kMips64FloorWD: {
FPURegister scratch = kScratchDoubleReg;
- __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+ __ floor_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64CeilWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64RoundWD: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_d(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
break;
}
case kMips64TruncWD: {
@@ -941,12 +1114,108 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mfc1(i.OutputRegister(), scratch);
break;
}
+ case kMips64FloorWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ floor_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64CeilWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ceil_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64RoundWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ round_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncWS: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ trunc_w_s(scratch, i.InputDoubleRegister(0));
+ __ mfc1(i.OutputRegister(), scratch);
+ break;
+ }
+ case kMips64TruncLS: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register tmp_fcsr = kScratchReg;
+ Register result = kScratchReg2;
+
+ bool load_status = instr->OutputCount() > 1;
+ if (load_status) {
+ // Save FCSR.
+ __ cfc1(tmp_fcsr, FCSR);
+ // Clear FPU flags.
+ __ ctc1(zero_reg, FCSR);
+ }
+ // Other arches use round to zero here, so we follow.
+ __ trunc_l_s(scratch, i.InputDoubleRegister(0));
+ __ dmfc1(i.OutputRegister(), scratch);
+ if (load_status) {
+ __ cfc1(result, FCSR);
+ // Check for overflow and NaNs.
+ __ andi(result, result,
+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
+ __ Slt(result, zero_reg, result);
+ __ xori(result, result, 1);
+ __ mov(i.OutputRegister(1), result);
+ // Restore FCSR
+ __ ctc1(tmp_fcsr, FCSR);
+ }
+ break;
+ }
+ case kMips64TruncLD: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register tmp_fcsr = kScratchReg;
+ Register result = kScratchReg2;
+
+ bool load_status = instr->OutputCount() > 1;
+ if (load_status) {
+ // Save FCSR.
+ __ cfc1(tmp_fcsr, FCSR);
+ // Clear FPU flags.
+ __ ctc1(zero_reg, FCSR);
+ }
+ // Other arches use round to zero here, so we follow.
+ __ trunc_l_d(scratch, i.InputDoubleRegister(0));
+ __ dmfc1(i.OutputRegister(0), scratch);
+ if (load_status) {
+ __ cfc1(result, FCSR);
+ // Check for overflow and NaNs.
+ __ andi(result, result,
+ (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
+ __ Slt(result, zero_reg, result);
+ __ xori(result, result, 1);
+ __ mov(i.OutputRegister(1), result);
+ // Restore FCSR
+ __ ctc1(tmp_fcsr, FCSR);
+ }
+ break;
+ }
case kMips64TruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMips64TruncUlS: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function.
+ __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch,
+ result);
+ break;
+ }
+ case kMips64TruncUlD: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function.
+ __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch,
+ result);
+ break;
+ }
case kMips64BitcastDL:
__ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -1017,12 +1286,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
break;
}
case kMips64StoreToStackSlot: {
@@ -1134,6 +1406,34 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
+ } else if (instr->arch_opcode() == kMips64DaddOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+ break;
+ }
+ } else if (instr->arch_opcode() == kMips64DsubOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1147,7 +1447,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ BranchF32(tlabel, NULL, cc, left, right);
+ __ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
if (!convertCondition(branch->condition, cc)) {
UNSUPPORTED_COND(kMips64CmpD, branch->condition);
@@ -1158,7 +1458,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
!__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ BranchF64(tlabel, NULL, cc, left, right);
+ __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -1208,6 +1508,28 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (cc == eq) // Toggle result for not overflow.
__ xori(result, result, 1);
return;
+ } else if (instr->arch_opcode() == kMips64DaddOvf ||
+ instr->arch_opcode() == kMips64DsubOvf) {
+ Label flabel, tlabel;
+ switch (instr->arch_opcode()) {
+ case kMips64DaddOvf:
+ __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+
+ break;
+ case kMips64DsubOvf:
+ __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ li(result, 1);
+ __ Branch(&tlabel);
+ __ bind(&flabel);
+ __ li(result, 0);
+ __ bind(&tlabel);
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
@@ -1336,9 +1658,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
__ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
// Ensure that dd-ed labels use 8 byte aligned addresses.
- if ((masm()->pc_offset() & 7) != 0) {
- __ nop();
- }
+ __ Align(8);
__ bal(&here);
__ dsll(at, input, 3); // Branch delay slot.
__ bind(&here);
@@ -1362,17 +1682,17 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1432,10 +1752,10 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopFPU(saves_fpu);
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(sp, fp);
__ Pop(ra, fp);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ Branch(&return_label_);
@@ -1457,7 +1777,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1563,7 +1883,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- MipsOperandConverter g(this, NULL);
+ MipsOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index e64a0fa812..778c6add0f 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -14,10 +14,13 @@ namespace compiler {
#define TARGET_ARCH_OPCODE_LIST(V) \
V(Mips64Add) \
V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
V(Mips64Sub) \
V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
V(Mips64Mul) \
V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
V(Mips64MulHighU) \
V(Mips64Dmul) \
V(Mips64Div) \
@@ -30,13 +33,16 @@ namespace compiler {
V(Mips64DmodU) \
V(Mips64And) \
V(Mips64Or) \
+ V(Mips64Nor) \
V(Mips64Xor) \
V(Mips64Clz) \
V(Mips64Shl) \
V(Mips64Shr) \
V(Mips64Sar) \
V(Mips64Ext) \
+ V(Mips64Ins) \
V(Mips64Dext) \
+ V(Mips64Dins) \
V(Mips64Dclz) \
V(Mips64Dshl) \
V(Mips64Dshr) \
@@ -69,14 +75,33 @@ namespace compiler {
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
V(Mips64CvtSD) \
V(Mips64CvtDS) \
V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
V(Mips64TruncUwD) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
V(Mips64CvtDW) \
V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUl) \
V(Mips64CvtDL) \
V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
V(Mips64Lb) \
V(Mips64Lbu) \
V(Mips64Sb) \
diff --git a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
new file mode 100644
index 0000000000..af86a87ad7
--- /dev/null
+++ b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index c62d0ef372..1b12bd9aec 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -132,35 +132,34 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kMips64Lwc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMips64Ldc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
break;
- case kRepWord16:
- opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kMips64Lw;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -185,13 +184,13 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
// TODO(mips): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -220,29 +219,29 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kMips64Swc1;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kMips64Sdc1;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kMips64Sb;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kMips64Sh;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kMips64Sw;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kMips64Sd;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -263,11 +262,102 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitWord32And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().Value() & 0x1f;
+
+ // Ext cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Ext with a smaller mask and the remaining bits will be
+ // zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint32_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation32(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and remove constant loading of inverted mask.
+ Emit(kMips64Ins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
void InstructionSelector::VisitWord64And(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
+ // significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+
+ // Dext cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Dext with a smaller mask and the remaining bits will be
+ // zeros.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasValue()) {
+ uint64_t mask = m.right().Value();
+ uint32_t shift = base::bits::CountPopulation64(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and remove constant loading of inverted mask. Dins cannot insert bits
+ // past word size, so shifts smaller than 32 are covered.
+ Emit(kMips64Dins, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64And);
}
@@ -283,21 +373,105 @@ void InstructionSelector::VisitWord64Or(Node* node) {
void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasValue()) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
VisitBinop(this, node, kMips64Xor);
}
void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Mips64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
+ // contiguous, and the shift immediate non-zero.
+ if (mleft.right().HasValue()) {
+ uint32_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation32(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Shl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Shl, node);
}
void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x1f;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation32(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kMips64Ext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Shr, node);
}
@@ -319,11 +493,56 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
g.UseImmediate(m.right().node()));
return;
}
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
+ // contiguous, and the shift immediate non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint64_t mask = mleft.right().Value();
+ uint32_t mask_width = base::bits::CountPopulation64(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().Value();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
VisitRRO(this, kMips64Dshl, node);
}
void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasValue()) {
+ uint32_t lsb = m.right().Value() & 0x3f;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
+ // shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation64(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Mips64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kMips64Dext, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
VisitRRO(this, kMips64Dshr, node);
}
@@ -419,6 +638,21 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
}
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Dmul high.
+ Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMips64Mul, node);
}
@@ -429,12 +663,7 @@ void InstructionSelector::VisitInt32MulHigh(Node* node) {
void InstructionSelector::VisitUint32MulHigh(Node* node) {
- Mips64OperandGenerator g(this);
- InstructionOperand const dmul_operand = g.TempRegister();
- Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)));
- Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
- g.TempImmediate(32));
+ VisitRRR(this, kMips64MulHighU, node);
}
@@ -477,7 +706,22 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
void InstructionSelector::VisitInt32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Ddiv.
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -485,7 +729,7 @@ void InstructionSelector::VisitInt32Div(Node* node) {
void InstructionSelector::VisitUint32Div(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
- Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -493,6 +737,21 @@ void InstructionSelector::VisitUint32Div(Node* node) {
void InstructionSelector::VisitInt32Mod(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Dmod.
+ Emit(kMips64Dmod, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -509,7 +768,7 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitInt64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -517,7 +776,7 @@ void InstructionSelector::VisitInt64Div(Node* node) {
void InstructionSelector::VisitUint64Div(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
- Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()));
}
@@ -554,6 +813,65 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
+ // which does rounding and conversion to integer format.
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kFloat64RoundDown:
+ Emit(kMips64FloorWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundUp:
+ Emit(kMips64CeilWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTiesEven:
+ Emit(kMips64RoundWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ case IrOpcode::kFloat64RoundTruncate:
+ Emit(kMips64TruncWD, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ default:
+ break;
+ }
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (CanCover(value, next)) {
+ // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
+ switch (next->opcode()) {
+ case IrOpcode::kFloat32RoundDown:
+ Emit(kMips64FloorWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundUp:
+ Emit(kMips64CeilWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTiesEven:
+ Emit(kMips64RoundWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ case IrOpcode::kFloat32RoundTruncate:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(next->InputAt(0)));
+ return;
+ default:
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ } else {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kMips64TruncWS, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
VisitRR(this, kMips64TruncWD, node);
}
@@ -563,6 +881,71 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
+}
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
@@ -587,7 +970,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
if (m.right().IsInRange(32, 63)) {
// After smi untagging no need for truncate. Combine sequence.
Emit(kMips64Dsar, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.TempImmediate(kSmiShift));
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
return;
}
break;
@@ -602,6 +986,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
+ // instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kMips64CvtSW, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
VisitRR(this, kMips64CvtSD, node);
}
@@ -627,6 +1021,16 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSUl, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDUl, node);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
@@ -792,11 +1196,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kMips64Float32RoundDown, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kMips64Float64RoundDown, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kMips64Float32RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kMips64Float64RoundUp, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kMips64Float32RoundTruncate, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kMips64Float64RoundTruncate, node);
}
@@ -807,9 +1231,19 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float32RoundTiesEven, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kMips64Float64RoundTiesEven, node);
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
Mips64OperandGenerator g(this);
// Prepare for C function call.
@@ -820,8 +1254,8 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
int slot = kCArgSlotCount;
- for (Node* input : (*arguments)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ for (PushParameter input : (*arguments)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -832,8 +1266,9 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
g.TempImmediate(push_count << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
- if (Node* input = (*arguments)[n]) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
@@ -845,33 +1280,34 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -892,33 +1328,35 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -1157,12 +1595,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1170,6 +1608,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DaddOvf, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64DsubOvf, cont);
default:
break;
}
@@ -1280,6 +1724,26 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DaddOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DaddOvf, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64DsubOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64DsubOvf, &cont);
+}
+
+
void InstructionSelector::VisitWord64Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int64BinopMatcher m(node);
@@ -1382,12 +1846,21 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Min |
+ return MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index e99b5e9ebe..bde3f7fe36 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -65,12 +65,12 @@ bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
}
-int FindFirstNonEmptySlot(Instruction* instr) {
+int FindFirstNonEmptySlot(const Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
for (; i <= Instruction::LAST_GAP_POSITION; i++) {
- auto moves = instr->parallel_moves()[i];
+ ParallelMove* moves = instr->parallel_moves()[i];
if (moves == nullptr) continue;
- for (auto move : *moves) {
+ for (MoveOperands* move : *moves) {
if (!move->IsRedundant()) return i;
move->Eliminate();
}
@@ -86,91 +86,106 @@ MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
: local_zone_(local_zone),
code_(code),
to_finalize_(local_zone),
- temp_vector_0_(local_zone),
- temp_vector_1_(local_zone) {}
+ local_vector_(local_zone) {}
void MoveOptimizer::Run() {
- for (auto* block : code()->instruction_blocks()) {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
CompressBlock(block);
}
- for (auto block : code()->instruction_blocks()) {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
if (block->PredecessorCount() <= 1) continue;
- bool has_only_deferred = true;
- for (RpoNumber pred_id : block->predecessors()) {
- if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
- has_only_deferred = false;
- break;
+ if (!block->IsDeferred()) {
+ bool has_only_deferred = true;
+ for (RpoNumber& pred_id : block->predecessors()) {
+ if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
+ has_only_deferred = false;
+ break;
+ }
}
+ // This would pull down common moves. If the moves occur in deferred
+ // blocks, and the closest common successor is not deferred, we lose the
+ // optimization of just spilling/filling in deferred blocks, when the
+ // current block is not deferred.
+ if (has_only_deferred) continue;
}
- // This would pull down common moves. If the moves occur in deferred blocks,
- // and the closest common successor is not deferred, we lose the
- // optimization of just spilling/filling in deferred blocks.
- if (has_only_deferred) continue;
OptimizeMerge(block);
}
- for (auto gap : to_finalize_) {
+ for (Instruction* gap : to_finalize_) {
FinalizeMoves(gap);
}
}
-void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
- ParallelMove* right) {
- DCHECK(eliminated->empty());
+void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+ if (right == nullptr) return;
+
+ MoveOpVector& eliminated = local_vector();
+ DCHECK(eliminated.empty());
+
if (!left->empty()) {
// Modify the right moves in place and collect moves that will be killed by
// merging the two gaps.
- for (auto move : *right) {
+ for (MoveOperands* move : *right) {
if (move->IsRedundant()) continue;
- auto to_eliminate = left->PrepareInsertAfter(move);
- if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
+ MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
+ if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
}
// Eliminate dead moves.
- for (auto to_eliminate : *eliminated) {
+ for (MoveOperands* to_eliminate : eliminated) {
to_eliminate->Eliminate();
}
- eliminated->clear();
+ eliminated.clear();
}
// Add all possibly modified moves from right side.
- for (auto move : *right) {
+ for (MoveOperands* move : *right) {
if (move->IsRedundant()) continue;
left->push_back(move);
}
// Nuke right.
right->clear();
+ DCHECK(eliminated.empty());
}
// Smash all consecutive moves into the left most move slot and accumulate them
// as much as possible across instructions.
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
- auto temp_vector = temp_vector_0();
- DCHECK(temp_vector.empty());
Instruction* prev_instr = nullptr;
for (int index = block->code_start(); index < block->code_end(); ++index) {
- auto instr = code()->instructions()[index];
+ Instruction* instr = code()->instructions()[index];
int i = FindFirstNonEmptySlot(instr);
- if (i <= Instruction::LAST_GAP_POSITION) {
- // Move the first non-empty gap to position 0.
- std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]);
- auto left = instr->parallel_moves()[0];
- // Compress everything into position 0.
- for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) {
- auto move = instr->parallel_moves()[i];
- if (move == nullptr) continue;
- CompressMoves(&temp_vector, left, move);
- }
- if (prev_instr != nullptr) {
- // Smash left into prev_instr, killing left.
- auto pred_moves = prev_instr->parallel_moves()[0];
- CompressMoves(&temp_vector, pred_moves, left);
- }
+ bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+
+ if (i == Instruction::LAST_GAP_POSITION) {
+ std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+ } else if (i == Instruction::FIRST_GAP_POSITION) {
+ CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
}
+ // We either have no moves, or, after swapping or compressing, we have
+ // all the moves in the first gap position, and none in the second/end gap
+ // position.
+ ParallelMove* first =
+ instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+ ParallelMove* last =
+ instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
+ USE(last);
+
+ DCHECK(!has_moves ||
+ (first != nullptr && (last == nullptr || last->empty())));
+
if (prev_instr != nullptr) {
+ if (has_moves) {
+ // Smash first into prev_instr, killing left.
+ ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
+ CompressMoves(pred_moves, first);
+ }
// Slide prev_instr down so we always know where to look for it.
std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
}
+
prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
if (GapsCanMoveOver(instr, local_zone())) continue;
if (prev_instr != nullptr) {
@@ -184,7 +199,8 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
}
-Instruction* MoveOptimizer::LastInstruction(InstructionBlock* block) {
+const Instruction* MoveOptimizer::LastInstruction(
+ const InstructionBlock* block) const {
return code()->instructions()[block->last_instruction_index()];
}
@@ -193,14 +209,15 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
DCHECK(block->PredecessorCount() > 1);
// Ensure that the last instruction in all incoming blocks don't contain
// things that would prevent moving gap moves across them.
- for (auto pred_index : block->predecessors()) {
- auto pred = code()->InstructionBlockAt(pred_index);
- auto last_instr = code()->instructions()[pred->last_instruction_index()];
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ const Instruction* last_instr =
+ code()->instructions()[pred->last_instruction_index()];
if (last_instr->IsCall()) return;
if (last_instr->TempCount() != 0) return;
if (last_instr->OutputCount() != 0) return;
for (size_t i = 0; i < last_instr->InputCount(); ++i) {
- auto op = last_instr->InputAt(i);
+ const InstructionOperand* op = last_instr->InputAt(i);
if (!op->IsConstant() && !op->IsImmediate()) return;
}
}
@@ -208,17 +225,17 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
MoveMap move_map(local_zone());
size_t correct_counts = 0;
// Accumulate set of shared moves.
- for (auto pred_index : block->predecessors()) {
- auto pred = code()->InstructionBlockAt(pred_index);
- auto instr = LastInstruction(pred);
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ const Instruction* instr = LastInstruction(pred);
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->empty()) {
return;
}
- for (auto move : *instr->parallel_moves()[0]) {
+ for (const MoveOperands* move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
- auto src = move->source();
- auto dst = move->destination();
+ InstructionOperand src = move->source();
+ InstructionOperand dst = move->destination();
MoveKey key = {src, dst};
auto res = move_map.insert(std::make_pair(key, 1));
if (!res.second) {
@@ -238,7 +255,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
break;
}
- DCHECK(instr != nullptr);
+ DCHECK_NOT_NULL(instr);
bool gap_initialized = true;
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->empty()) {
@@ -248,13 +265,13 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
gap_initialized = false;
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
- auto moves = instr->GetOrCreateParallelMove(
+ ParallelMove* moves = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(0), code_zone());
// Delete relevant entries in predecessors and move everything to block.
bool first_iteration = true;
- for (auto pred_index : block->predecessors()) {
- auto pred = code()->InstructionBlockAt(pred_index);
- for (auto move : *LastInstruction(pred)->parallel_moves()[0]) {
+ for (RpoNumber& pred_index : block->predecessors()) {
+ const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+ for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
MoveKey key = {move->source(), move->destination()};
auto it = move_map.find(key);
@@ -269,8 +286,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
// Compress.
if (!gap_initialized) {
- CompressMoves(&temp_vector_0(), instr->parallel_moves()[0],
- instr->parallel_moves()[1]);
+ CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
}
@@ -297,10 +313,11 @@ bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
- auto loads = temp_vector_0();
+ MoveOpVector& loads = local_vector();
DCHECK(loads.empty());
+
// Find all the loads.
- for (auto move : *instr->parallel_moves()[0]) {
+ for (MoveOperands* move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
if (move->source().IsConstant() || IsSlot(move->source())) {
loads.push_back(move);
@@ -311,7 +328,7 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
// beginning of the group.
std::sort(loads.begin(), loads.end(), LoadCompare);
MoveOperands* group_begin = nullptr;
- for (auto load : loads) {
+ for (MoveOperands* load : loads) {
// New group.
if (group_begin == nullptr ||
!load->source().EqualsCanonicalized(group_begin->source())) {
@@ -321,7 +338,7 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
// Nothing to be gained from splitting here.
if (IsSlot(group_begin->destination())) continue;
// Insert new move into slot 1.
- auto slot_1 = instr->GetOrCreateParallelMove(
+ ParallelMove* slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
slot_1->AddMove(group_begin->destination(), load->destination());
load->Eliminate();
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index 2fdecf31e9..c9a3289d6b 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -24,21 +24,18 @@ class MoveOptimizer final {
InstructionSequence* code() const { return code_; }
Zone* local_zone() const { return local_zone_; }
Zone* code_zone() const { return code()->zone(); }
- MoveOpVector& temp_vector_0() { return temp_vector_0_; }
- MoveOpVector& temp_vector_1() { return temp_vector_1_; }
+ MoveOpVector& local_vector() { return local_vector_; }
void CompressBlock(InstructionBlock* blocke);
- void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
- ParallelMove* right);
- Instruction* LastInstruction(InstructionBlock* block);
+ void CompressMoves(ParallelMove* left, ParallelMove* right);
+ const Instruction* LastInstruction(const InstructionBlock* block) const;
void OptimizeMerge(InstructionBlock* block);
void FinalizeMoves(Instruction* instr);
Zone* const local_zone_;
InstructionSequence* const code_;
Instructions to_finalize_;
- MoveOpVector temp_vector_0_;
- MoveOpVector temp_vector_1_;
+ MoveOpVector local_vector_;
DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
};
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index adddb67eff..a8f9071af0 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -35,7 +35,8 @@ class NodeCache final {
// Search for node associated with {key} and return a pointer to a memory
// location in this cache that stores an entry for the key. If the location
- // returned by this method contains a non-NULL node, the caller can use that
+ // returned by this method contains a non-nullptr node, the caller can use
+ // that
// node. Otherwise it is the responsibility of the caller to fill the entry
// with a new node.
// Note that a previous cache entry may be overwritten if the cache becomes
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index bafe3daa68..37d0e1a561 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -384,19 +384,19 @@ template <class AddMatcher>
struct BaseWithIndexAndDisplacementMatcher {
BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
: matches_(false),
- index_(NULL),
+ index_(nullptr),
scale_(0),
- base_(NULL),
- displacement_(NULL) {
+ base_(nullptr),
+ displacement_(nullptr) {
Initialize(node, allow_input_swap);
}
explicit BaseWithIndexAndDisplacementMatcher(Node* node)
: matches_(false),
- index_(NULL),
+ index_(nullptr),
scale_(0),
- base_(NULL),
- displacement_(NULL) {
+ base_(nullptr),
+ displacement_(nullptr) {
Initialize(node, node->op()->HasProperty(Operator::kCommutative));
}
@@ -434,10 +434,10 @@ struct BaseWithIndexAndDisplacementMatcher {
AddMatcher m(node, allow_input_swap);
Node* left = m.left().node();
Node* right = m.right().node();
- Node* displacement = NULL;
- Node* base = NULL;
- Node* index = NULL;
- Node* scale_expression = NULL;
+ Node* displacement = nullptr;
+ Node* base = nullptr;
+ Node* index = nullptr;
+ Node* scale_expression = nullptr;
bool power_of_two_plus_one = false;
int scale = 0;
if (m.HasIndexInput() && left->OwnedBy(node)) {
@@ -519,7 +519,7 @@ struct BaseWithIndexAndDisplacementMatcher {
}
}
int64_t value = 0;
- if (displacement != NULL) {
+ if (displacement != nullptr) {
switch (displacement->opcode()) {
case IrOpcode::kInt32Constant: {
value = OpParameter<int32_t>(displacement);
@@ -534,11 +534,11 @@ struct BaseWithIndexAndDisplacementMatcher {
break;
}
if (value == 0) {
- displacement = NULL;
+ displacement = nullptr;
}
}
if (power_of_two_plus_one) {
- if (base != NULL) {
+ if (base != nullptr) {
// If the scale requires explicitly using the index as the base, but a
// base is already part of the match, then the (1 << N + 1) scale factor
// can't be folded into the match and the entire index * scale
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 4fb4aa487c..cb6c3c43d8 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -4,6 +4,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/verifier.h"
@@ -138,6 +139,17 @@ void NodeProperties::ReplaceValueInput(Node* node, Node* value, int index) {
// static
+void NodeProperties::ReplaceValueInputs(Node* node, Node* value) {
+ int value_input_count = node->op()->ValueInputCount();
+ DCHECK_LE(1, value_input_count);
+ node->ReplaceInput(0, value);
+ while (--value_input_count > 0) {
+ node->RemoveInput(value_input_count);
+ }
+}
+
+
+// static
void NodeProperties::ReplaceContextInput(Node* node, Node* context) {
node->ReplaceInput(FirstContextIndex(node), context);
}
@@ -177,6 +189,15 @@ void NodeProperties::RemoveNonValueInputs(Node* node) {
}
+// static
+void NodeProperties::RemoveValueInputs(Node* node) {
+ int value_input_count = node->op()->ValueInputCount();
+ while (--value_input_count >= 0) {
+ node->RemoveInput(value_input_count);
+ }
+}
+
+
void NodeProperties::MergeControlToEnd(Graph* graph,
CommonOperatorBuilder* common,
Node* node) {
@@ -284,6 +305,90 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
// static
+MaybeHandle<Context> NodeProperties::GetSpecializationContext(
+ Node* node, MaybeHandle<Context> context) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return context;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return MaybeHandle<Context>();
+}
+
+
+// static
+MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
+ Node* node, MaybeHandle<Context> native_context) {
+ while (true) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreateBlockContext:
+ case IrOpcode::kJSCreateCatchContext:
+ case IrOpcode::kJSCreateFunctionContext:
+ case IrOpcode::kJSCreateModuleContext:
+ case IrOpcode::kJSCreateScriptContext:
+ case IrOpcode::kJSCreateWithContext: {
+ // Skip over the intermediate contexts, we're only interested in the
+ // very last context in the context chain anyway.
+ node = NodeProperties::GetContextInput(node);
+ break;
+ }
+ case IrOpcode::kHeapConstant: {
+ // Extract the native context from the actual {context}.
+ Handle<Context> context =
+ Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ return handle(context->native_context());
+ }
+ case IrOpcode::kOsrValue: {
+ int const index = OpParameter<int>(node);
+ if (index == Linkage::kOsrContextSpillSlotIndex) {
+ return native_context;
+ }
+ return MaybeHandle<Context>();
+ }
+ case IrOpcode::kParameter: {
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function,
+ // and {Parameter} indices start at -1, so value outputs of {Start}
+ // look like this: closure, receiver, param0, ..., paramN, context.
+ if (index == start->op()->ValueOutputCount() - 2) {
+ return native_context;
+ }
+ return MaybeHandle<Context>();
+ }
+ default:
+ return MaybeHandle<Context>();
+ }
+ }
+}
+
+
+// static
+MaybeHandle<JSGlobalObject> NodeProperties::GetSpecializationGlobalObject(
+ Node* node, MaybeHandle<Context> native_context) {
+ Handle<Context> context;
+ if (GetSpecializationNativeContext(node, native_context).ToHandle(&context)) {
+ return handle(context->global_object());
+ }
+ return MaybeHandle<JSGlobalObject>();
+}
+
+
+// static
Type* NodeProperties::GetTypeOrAny(Node* node) {
return IsTyped(node) ? node->type() : Type::Any();
}
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index ca8d228ae4..58005a7153 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -86,6 +86,10 @@ class NodeProperties final {
static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
static void RemoveFrameStateInput(Node* node, int index);
static void RemoveNonValueInputs(Node* node);
+ static void RemoveValueInputs(Node* node);
+
+ // Replaces all value inputs of {node} with the single input {value}.
+ static void ReplaceValueInputs(Node* node, Node* value);
// Merge the control node {node} into the end of the graph, introducing a
// merge node or expanding an existing merge node if necessary.
@@ -93,7 +97,7 @@ class NodeProperties final {
Node* node);
// Replace all uses of {node} with the given replacement nodes. All occurring
- // use kinds need to be replaced, {NULL} is only valid if a use kind is
+ // use kinds need to be replaced, {nullptr} is only valid if a use kind is
// guaranteed not to exist.
static void ReplaceUses(Node* node, Node* value, Node* effect = nullptr,
Node* success = nullptr, Node* exception = nullptr);
@@ -115,6 +119,27 @@ class NodeProperties final {
static void CollectControlProjections(Node* node, Node** proj, size_t count);
// ---------------------------------------------------------------------------
+ // Context.
+
+ // Try to retrieve the specialization context from the given {node},
+ // optionally utilizing the knowledge about the (outermost) function
+ // {context}.
+ static MaybeHandle<Context> GetSpecializationContext(
+ Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
+
+ // Try to retrieve the specialization native context from the given
+ // {node}, optionally utilizing the knowledge about the (outermost)
+ // {native_context}.
+ static MaybeHandle<Context> GetSpecializationNativeContext(
+ Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
+
+ // Try to retrieve the specialization global object from the given
+ // {node}, optionally utilizing the knowledge about the (outermost)
+ // {native_context}.
+ static MaybeHandle<JSGlobalObject> GetSpecializationGlobalObject(
+ Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
+
+ // ---------------------------------------------------------------------------
// Type.
static bool IsTyped(Node* node) { return node->type() != nullptr; }
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 022c44db2d..198c353084 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -56,6 +56,16 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
Node* node;
bool is_inline;
+#if DEBUG
+ // Verify that none of the inputs are {nullptr}.
+ for (int i = 0; i < input_count; i++) {
+ if (inputs[i] == nullptr) {
+ V8_Fatal(__FILE__, __LINE__, "Node::New() Error: #%d:%s[%d] is nullptr",
+ static_cast<int>(id), op->mnemonic(), i);
+ }
+ }
+#endif
+
if (input_count > kMaxInlineCapacity) {
// Allocate out-of-line inputs.
int capacity =
@@ -271,6 +281,12 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
}
+void Node::Print() const {
+ OFStream os(stdout);
+ os << *this << std::endl;
+}
+
+
Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
: op_(op),
type_(nullptr),
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index d6a9b39a56..c73482fa69 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -186,6 +186,7 @@ class Node final {
// Returns true if {owner1} and {owner2} are the only users of {this} node.
bool OwnedBy(Node const* owner1, Node const* owner2) const;
+ void Print() const;
private:
struct Use;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index d94c60d468..a97fdfa54b 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -50,6 +50,7 @@
V(FrameState) \
V(StateValues) \
V(TypedStateValues) \
+ V(ObjectState) \
V(Call) \
V(Parameter) \
V(OsrValue) \
@@ -91,8 +92,6 @@
JS_BITWISE_BINOP_LIST(V) \
JS_ARITH_BINOP_LIST(V)
-#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
-
#define JS_CONVERSION_UNOP_LIST(V) \
V(JSToBoolean) \
V(JSToNumber) \
@@ -104,24 +103,26 @@
V(JSTypeOf)
#define JS_SIMPLE_UNOP_LIST(V) \
- JS_LOGIC_UNOP_LIST(V) \
JS_CONVERSION_UNOP_LIST(V) \
JS_OTHER_UNOP_LIST(V)
-#define JS_OBJECT_OP_LIST(V) \
- V(JSCreate) \
- V(JSCreateArguments) \
- V(JSCreateClosure) \
- V(JSCreateLiteralArray) \
- V(JSCreateLiteralObject) \
- V(JSLoadProperty) \
- V(JSLoadNamed) \
- V(JSLoadGlobal) \
- V(JSStoreProperty) \
- V(JSStoreNamed) \
- V(JSStoreGlobal) \
- V(JSDeleteProperty) \
- V(JSHasProperty) \
+#define JS_OBJECT_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateClosure) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateLiteralRegExp) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSLoadGlobal) \
+ V(JSStoreProperty) \
+ V(JSStoreNamed) \
+ V(JSStoreGlobal) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
V(JSInstanceOf)
#define JS_CONTEXT_OP_LIST(V) \
@@ -183,6 +184,7 @@
V(NumberShiftRightLogical) \
V(NumberToInt32) \
V(NumberToUint32) \
+ V(NumberIsHoleNaN) \
V(PlainPrimitiveToNumber) \
V(ChangeTaggedToInt32) \
V(ChangeTaggedToUint32) \
@@ -257,7 +259,9 @@
V(Uint32Mod) \
V(Uint32MulHigh) \
V(Int64Add) \
+ V(Int64AddWithOverflow) \
V(Int64Sub) \
+ V(Int64SubWithOverflow) \
V(Int64Mul) \
V(Int64Div) \
V(Int64Mod) \
@@ -266,6 +270,10 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(TryTruncateFloat32ToInt64) \
+ V(TryTruncateFloat64ToInt64) \
+ V(TryTruncateFloat32ToUint64) \
+ V(TryTruncateFloat64ToUint64) \
V(ChangeInt32ToFloat64) \
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
@@ -273,8 +281,10 @@
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
- V(RoundInt64ToFloat64) \
V(RoundInt64ToFloat32) \
+ V(RoundInt64ToFloat64) \
+ V(RoundUint64ToFloat32) \
+ V(RoundUint64ToFloat64) \
V(BitcastFloat32ToInt32) \
V(BitcastFloat64ToInt64) \
V(BitcastInt32ToFloat32) \
@@ -287,6 +297,7 @@
V(Float32Min) \
V(Float32Abs) \
V(Float32Sqrt) \
+ V(Float32RoundDown) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Mul) \
@@ -297,8 +308,13 @@
V(Float64Abs) \
V(Float64Sqrt) \
V(Float64RoundDown) \
+ V(Float32RoundUp) \
+ V(Float64RoundUp) \
+ V(Float32RoundTruncate) \
V(Float64RoundTruncate) \
V(Float64RoundTiesAway) \
+ V(Float32RoundTiesEven) \
+ V(Float64RoundTiesEven) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
@@ -372,6 +388,11 @@ class IrOpcode {
return kIfTrue <= value && value <= kIfDefault;
}
+ // Returns true if opcode can be inlined.
+ static bool IsInlineeOpcode(Value value) {
+ return value == kJSCallConstruct || value == kJSCallFunction;
+ }
+
// Returns true if opcode for comparison operator.
static bool IsComparisonOpcode(Value value) {
return (kJSEqual <= value && value <= kJSGreaterThanOrEqual) ||
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index d1bea56091..bd704a3650 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -35,13 +35,11 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
return 0;
// We record the frame state immediately before and immediately after every
- // function call.
+ // construct/function call.
+ case IrOpcode::kJSCallConstruct:
case IrOpcode::kJSCallFunction:
return 2;
- // Construct calls
- case IrOpcode::kJSCallConstruct:
-
// Compare operations
case IrOpcode::kJSEqual:
case IrOpcode::kJSNotEqual:
@@ -49,14 +47,16 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSInstanceOf:
// Object operations
+ case IrOpcode::kJSCreate:
case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
// Context operations
case IrOpcode::kJSLoadDynamic:
case IrOpcode::kJSCreateScriptContext:
- case IrOpcode::kJSCreateWithContext:
// Conversions
case IrOpcode::kJSToName:
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index eba430f927..fa85d599cd 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -30,7 +30,7 @@ namespace compiler {
// meaningful to the operator itself.
class Operator : public ZoneObject {
public:
- typedef uint8_t Opcode;
+ typedef uint16_t Opcode;
// Properties inform the operator-independent optimizer about legal
// transformations for nodes that have this operator.
@@ -136,10 +136,19 @@ DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
std::ostream& operator<<(std::ostream& os, const Operator& op);
+// Default equality function for below Operator1<*> class.
+template <typename T>
+struct OpEqualTo : public std::equal_to<T> {};
+
+
+// Default hashing function for below Operator1<*> class.
+template <typename T>
+struct OpHash : public base::hash<T> {};
+
+
// A templatized implementation of Operator that has one static parameter of
-// type {T}.
-template <typename T, typename Pred = std::equal_to<T>,
- typename Hash = base::hash<T>>
+// type {T} with the proper default equality and hashing functions.
+template <typename T, typename Pred = OpEqualTo<T>, typename Hash = OpHash<T>>
class Operator1 : public Operator {
public:
Operator1(Opcode opcode, Properties properties, const char* mnemonic,
@@ -183,46 +192,38 @@ class Operator1 : public Operator {
// Helper to extract parameters from Operator1<*> operator.
template <typename T>
inline T const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<T>*>(op)->parameter();
+ return reinterpret_cast<const Operator1<T, OpEqualTo<T>, OpHash<T>>*>(op)
+ ->parameter();
}
+
// NOTE: We have to be careful to use the right equal/hash functions below, for
// float/double we always use the ones operating on the bit level, for Handle<>
// we always use the ones operating on the location level.
template <>
-inline float const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<float, base::bit_equal_to<float>,
- base::bit_hash<float>>*>(op)
- ->parameter();
-}
+struct OpEqualTo<float> : public base::bit_equal_to<float> {};
+template <>
+struct OpHash<float> : public base::bit_hash<float> {};
template <>
-inline double const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<double, base::bit_equal_to<double>,
- base::bit_hash<double>>*>(op)
- ->parameter();
-}
+struct OpEqualTo<double> : public base::bit_equal_to<double> {};
+template <>
+struct OpHash<double> : public base::bit_hash<double> {};
template <>
-inline Handle<HeapObject> const& OpParameter(const Operator* op) {
- return reinterpret_cast<
- const Operator1<Handle<HeapObject>, Handle<HeapObject>::equal_to,
- Handle<HeapObject>::hash>*>(op)->parameter();
-}
+struct OpEqualTo<Handle<HeapObject>> : public Handle<HeapObject>::equal_to {};
+template <>
+struct OpHash<Handle<HeapObject>> : public Handle<HeapObject>::hash {};
template <>
-inline Handle<String> const& OpParameter(const Operator* op) {
- return reinterpret_cast<const Operator1<
- Handle<String>, Handle<String>::equal_to, Handle<String>::hash>*>(op)
- ->parameter();
-}
+struct OpEqualTo<Handle<String>> : public Handle<String>::equal_to {};
+template <>
+struct OpHash<Handle<String>> : public Handle<String>::hash {};
template <>
-inline Handle<ScopeInfo> const& OpParameter(const Operator* op) {
- return reinterpret_cast<
- const Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
- Handle<ScopeInfo>::hash>*>(op)->parameter();
-}
+struct OpEqualTo<Handle<ScopeInfo>> : public Handle<ScopeInfo>::equal_to {};
+template <>
+struct OpHash<Handle<ScopeInfo>> : public Handle<ScopeInfo>::hash {};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 77eea3ce2c..55431c201c 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
@@ -17,7 +18,6 @@
#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index ba705ba1d8..b98f837ee9 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -36,7 +36,7 @@ void PipelineStatistics::CommonStats::End(
diff->max_allocated_bytes_ + allocated_bytes_at_start_;
diff->total_allocated_bytes_ =
outer_zone_diff + scope_->GetTotalAllocatedBytes();
- scope_.Reset(NULL);
+ scope_.Reset(nullptr);
timer_.Stop();
}
@@ -48,8 +48,8 @@ PipelineStatistics::PipelineStatistics(CompilationInfo* info,
zone_pool_(zone_pool),
compilation_stats_(isolate_->GetTurboStatistics()),
source_size_(0),
- phase_kind_name_(NULL),
- phase_name_(NULL) {
+ phase_kind_name_(nullptr),
+ phase_name_(nullptr) {
if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
base::SmartArrayPointer<char> name =
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 988327d1bb..2b6563da40 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -76,10 +76,10 @@ class PhaseScope {
public:
PhaseScope(PipelineStatistics* pipeline_stats, const char* name)
: pipeline_stats_(pipeline_stats) {
- if (pipeline_stats_ != NULL) pipeline_stats_->BeginPhase(name);
+ if (pipeline_stats_ != nullptr) pipeline_stats_->BeginPhase(name);
}
~PhaseScope() {
- if (pipeline_stats_ != NULL) pipeline_stats_->EndPhase();
+ if (pipeline_stats_ != nullptr) pipeline_stats_->EndPhase();
}
private:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index ceff8d660b..4d6aacd78a 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -12,7 +12,6 @@
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
-#include "src/compiler/binary-operator-reducer.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/change-lowering.h"
@@ -20,6 +19,8 @@
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
@@ -28,6 +29,7 @@
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-relaxation.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-frame-specialization.h"
@@ -55,6 +57,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/tail-call-optimization.h"
+#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
@@ -102,7 +105,7 @@ class PipelineData {
source_positions_.Reset(new SourcePositionTable(graph_));
simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
- graph_zone_, kMachPtr,
+ graph_zone_, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags());
common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
@@ -197,6 +200,12 @@ class PipelineData {
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
+ MaybeHandle<Context> native_context() const {
+ if (info()->is_native_context_specializing()) {
+ return handle(info()->native_context(), isolate());
+ }
+ return MaybeHandle<Context>();
+ }
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
@@ -204,6 +213,12 @@ class PipelineData {
loop_assignment_ = loop_assignment;
}
+ TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
+ void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
+ DCHECK_NULL(type_hint_analysis_);
+ type_hint_analysis_ = type_hint_analysis;
+ }
+
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
DCHECK(!schedule_);
@@ -228,6 +243,7 @@ class PipelineData {
graph_zone_ = nullptr;
graph_ = nullptr;
loop_assignment_ = nullptr;
+ type_hint_analysis_ = nullptr;
simplified_ = nullptr;
machine_ = nullptr;
common_ = nullptr;
@@ -267,12 +283,12 @@ class PipelineData {
DCHECK(register_allocation_data_ == nullptr);
int fixed_frame_size = 0;
if (descriptor != nullptr) {
- fixed_frame_size = (descriptor->kind() == CallDescriptor::kCallAddress)
+ fixed_frame_size = (descriptor->IsCFunctionCall())
? StandardFrameConstants::kFixedSlotCountAboveFp +
StandardFrameConstants::kCPSlotCount
: StandardFrameConstants::kFixedSlotCount;
}
- frame_ = new (instruction_zone()) Frame(fixed_frame_size);
+ frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
sequence(), debug_name);
@@ -288,13 +304,14 @@ class PipelineData {
Handle<Code> code_;
// All objects in the following group of fields are allocated in graph_zone_.
- // They are all set to NULL when the graph_zone_ is destroyed.
+ // They are all set to nullptr when the graph_zone_ is destroyed.
ZonePool::Scope graph_zone_scope_;
Zone* graph_zone_;
Graph* graph_;
// TODO(dcarney): make this into a ZoneObject.
base::SmartPointer<SourcePositionTable> source_positions_;
LoopAssignmentAnalysis* loop_assignment_;
+ TypeHintAnalysis* type_hint_analysis_ = nullptr;
SimplifiedOperatorBuilder* simplified_;
MachineOperatorBuilder* machine_;
CommonOperatorBuilder* common_;
@@ -303,7 +320,8 @@ class PipelineData {
Schedule* schedule_;
// All objects in the following group of fields are allocated in
- // instruction_zone_. They are all set to NULL when the instruction_zone_ is
+ // instruction_zone_. They are all set to nullptr when the instruction_zone_
+ // is
// destroyed.
ZonePool::Scope instruction_zone_scope_;
Zone* instruction_zone_;
@@ -311,7 +329,7 @@ class PipelineData {
Frame* frame_;
// All objects in the following group of fields are allocated in
- // register_allocation_zone_. They are all set to NULL when the zone is
+ // register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZonePool::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
@@ -332,7 +350,7 @@ struct TurboCfgFile : public std::ofstream {
void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
+ FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
@@ -357,8 +375,10 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment,
+ TypeHintAnalysis* type_hint_analysis,
SourcePositionTable* source_positions)
- : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
+ : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
+ type_hint_analysis),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position()) {}
@@ -470,6 +490,18 @@ struct LoopAssignmentAnalysisPhase {
};
+struct TypeHintAnalysisPhase {
+ static const char* phase_name() { return "type hint analysis"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ TypeHintAnalyzer analyzer(data->graph_zone());
+ Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
+ TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
+ data->set_type_hint_analysis(type_hint_analysis);
+ }
+};
+
+
struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
@@ -484,7 +516,7 @@ struct GraphBuilderPhase {
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->source_positions());
+ data->type_hint_analysis(), data->source_positions());
succeeded = graph_builder.CreateGraph(stack_check);
}
@@ -495,8 +527,8 @@ struct GraphBuilderPhase {
};
-struct NativeContextSpecializationPhase {
- static const char* phase_name() { return "native context specialization"; }
+struct InliningPhase {
+ static const char* phase_name() { return "inlining"; }
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
@@ -504,46 +536,30 @@ struct NativeContextSpecializationPhase {
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ JSCallReducer call_reducer(data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSCallReducer::kDeoptimizationEnabled
+ : JSCallReducer::kNoFlags,
+ data->native_context());
+ JSContextSpecialization context_specialization(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_function_context_specializing()
+ ? data->info()->context()
+ : MaybeHandle<Context>());
+ JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
+ data->jsgraph());
JSGlobalObjectSpecialization global_object_specialization(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
? JSGlobalObjectSpecialization::kDeoptimizationEnabled
: JSGlobalObjectSpecialization::kNoFlags,
- handle(data->info()->global_object(), data->isolate()),
- data->info()->dependencies());
+ data->native_context(), data->info()->dependencies());
JSNativeContextSpecialization native_context_specialization(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
? JSNativeContextSpecialization::kDeoptimizationEnabled
: JSNativeContextSpecialization::kNoFlags,
- handle(data->info()->global_object()->native_context(),
- data->isolate()),
- data->info()->dependencies(), temp_zone);
- AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &global_object_specialization);
- AddReducer(data, &graph_reducer, &native_context_specialization);
- graph_reducer.ReduceGraph();
- }
-};
-
-
-struct InliningPhase {
- static const char* phase_name() { return "inlining"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
- JSContextSpecialization context_specialization(
- &graph_reducer, data->jsgraph(),
- data->info()->is_function_context_specializing()
- ? data->info()->context()
- : MaybeHandle<Context>());
- JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
- data->jsgraph());
+ data->native_context(), data->info()->dependencies(), temp_zone);
JSInliningHeuristic inlining(&graph_reducer,
data->info()->is_inlining_enabled()
? JSInliningHeuristic::kGeneralInlining
@@ -554,7 +570,10 @@ struct InliningPhase {
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
+ AddReducer(data, &graph_reducer, &global_object_specialization);
+ AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
+ AddReducer(data, &graph_reducer, &call_reducer);
AddReducer(data, &graph_reducer, &inlining);
graph_reducer.ReduceGraph();
}
@@ -591,11 +610,16 @@ struct TypedLoweringPhase {
data->common());
LoadElimination load_elimination(&graph_reducer);
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+ JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
+ if (data->info()->is_deoptimization_enabled()) {
+ typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
+ }
+ if (data->info()->shared_info()->HasBytecodeArray()) {
+ typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
+ }
JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
- data->info()->is_deoptimization_enabled()
- ? JSTypedLowering::kDeoptimizationEnabled
- : JSTypedLowering::kNoFlags,
- data->jsgraph(), temp_zone);
+ typed_lowering_flags, data->jsgraph(),
+ temp_zone);
JSIntrinsicLowering intrinsic_lowering(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
@@ -630,6 +654,22 @@ struct BranchEliminationPhase {
};
+struct EscapeAnalysisPhase {
+ static const char* phase_name() { return "escape analysis"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
+ temp_zone);
+ escape_analysis.Run();
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
+ &escape_analysis, temp_zone);
+ AddReducer(data, &graph_reducer, &escape_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "simplified lowering"; }
@@ -645,14 +685,11 @@ struct SimplifiedLoweringPhase {
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
- BinaryOperatorReducer binary_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &binary_reducer);
graph_reducer.ReduceGraph();
}
};
@@ -722,7 +759,7 @@ struct StressLoopPeelingPhase {
// Peel the first outer loop for testing.
// TODO(titzer): peel all loops? the N'th loop? Innermost loops?
LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
- if (loop_tree != NULL && loop_tree->outer_loops().size() > 0) {
+ if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
loop_tree->outer_loops()[0], temp_zone);
}
@@ -969,16 +1006,8 @@ struct PrintGraphPhase {
CompilationInfo* info = data->info();
Graph* graph = data->graph();
- { // Print dot.
- FILE* dot_file = OpenVisualizerLogFile(info, phase, "dot", "w+");
- if (dot_file == nullptr) return;
- OFStream dot_of(dot_file);
- dot_of << AsDOT(*graph);
- fclose(dot_file);
- }
-
{ // Print JSON.
- FILE* json_file = OpenVisualizerLogFile(info, NULL, "json", "a+");
+ FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
if (json_file == nullptr) return;
OFStream json_of(json_file);
json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
@@ -1007,7 +1036,7 @@ struct VerifyGraphPhase {
void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
- if (data_->pipeline_statistics() != NULL) {
+ if (data_->pipeline_statistics() != nullptr) {
data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
}
}
@@ -1040,7 +1069,7 @@ Handle<Code> Pipeline::GenerateCode() {
}
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "w+");
+ FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
if (json_file != nullptr) {
OFStream json_of(json_file);
Handle<Script> script = info()->script();
@@ -1084,6 +1113,10 @@ Handle<Code> Pipeline::GenerateCode() {
Run<LoopAssignmentAnalysisPhase>();
}
+ if (info()->is_typing_enabled()) {
+ Run<TypeHintAnalysisPhase>();
+ }
+
Run<GraphBuilderPhase>();
if (data.compilation_failed()) return Handle<Code>::null();
RunPrintAndVerify("Initial untyped", true);
@@ -1094,12 +1127,6 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("OSR deconstruction", true);
}
- // Perform native context specialization (if enabled).
- if (info()->is_native_context_specializing()) {
- Run<NativeContextSpecializationPhase>();
- RunPrintAndVerify("Native context specialized", true);
- }
-
// Perform function context specialization and inlining (if enabled).
Run<InliningPhase>();
RunPrintAndVerify("Inlined", true);
@@ -1120,7 +1147,7 @@ Handle<Code> Pipeline::GenerateCode() {
info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
- info()->dependencies(), info()->function_type()));
+ info()->dependencies()));
Run<TyperPhase>(typer.get());
RunPrintAndVerify("Typed");
}
@@ -1137,6 +1164,11 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("Loop peeled");
}
+ if (FLAG_turbo_escape) {
+ Run<EscapeAnalysisPhase>();
+ RunPrintAndVerify("Escape Analysed");
+ }
+
// Lower simplified operators and insert changes.
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
@@ -1177,10 +1209,13 @@ Handle<Code> Pipeline::GenerateCode() {
}
-Handle<Code> Pipeline::GenerateCodeForInterpreter(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, const char* bytecode_name) {
- CompilationInfo info(bytecode_name, isolate, graph->zone());
+Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule,
+ Code::Kind kind,
+ const char* debug_name) {
+ CompilationInfo info(debug_name, isolate, graph->zone());
+ info.set_output_code_kind(kind);
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool;
@@ -1188,21 +1223,24 @@ Handle<Code> Pipeline::GenerateCodeForInterpreter(
base::SmartPointer<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats) {
pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
- pipeline_statistics->BeginPhaseKind("interpreter handler codegen");
+ pipeline_statistics->BeginPhaseKind("stub codegen");
}
+
+ Pipeline pipeline(&info);
+ pipeline.data_ = &data;
+ DCHECK_NOT_NULL(data.schedule());
+
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(&info, NULL, "json", "w+");
+ FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
fclose(json_file);
}
+ pipeline.Run<PrintGraphPhase>("Machine");
}
- Pipeline pipeline(&info);
- pipeline.data_ = &data;
- pipeline.RunPrintAndVerify("Machine", true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
@@ -1262,7 +1300,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
- BasicBlockProfiler::Data* profiler_data = NULL;
+ BasicBlockProfiler::Data* profiler_data = nullptr;
if (FLAG_turbo_profiling) {
profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
data->schedule());
@@ -1311,10 +1349,10 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
Run<GenerateCodePhase>(&linkage);
Handle<Code> code = data->code();
- if (profiler_data != NULL) {
+ if (profiler_data != nullptr) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
- code->Disassemble(NULL, os);
+ code->Disassemble(nullptr, os);
profiler_data->SetCode(&os);
#endif
}
@@ -1323,14 +1361,14 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
v8::internal::CodeGenerator::PrintCode(code, info());
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), NULL, "json", "a+");
+ FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
if (json_file != nullptr) {
OFStream json_of(json_file);
json_of
<< "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#if ENABLE_DISASSEMBLER
std::stringstream disassembly_stream;
- code->Disassemble(NULL, disassembly_stream);
+ code->Disassemble(nullptr, disassembly_stream);
std::string disassembly_string(disassembly_stream.str());
for (const auto& c : disassembly_string) {
json_of << AsEscapedUC16ForJSON(c);
@@ -1388,6 +1426,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
}
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
+ CHECK(data->register_allocation_data()
+ ->RangesDefinedInDeferredStayInDeferred());
}
if (FLAG_turbo_preprocess_ranges) {
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index d437c7e585..af94018f07 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -30,11 +30,13 @@ class Pipeline {
// Run the entire pipeline and generate a handle to a code object.
Handle<Code> GenerateCode();
- // Run the pipeline on an interpreter bytecode handler machine graph and
- // generate code.
- static Handle<Code> GenerateCodeForInterpreter(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Schedule* schedule, const char* bytecode_name);
+ // Run the pipeline on a machine graph and generate code. The {schedule} must
+ // be valid, hence the given {graph} does not need to be schedulable.
+ static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ Graph* graph, Schedule* schedule,
+ Code::Kind kind,
+ const char* debug_name);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 65ee21f857..6fe674e4f2 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -4,12 +4,12 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/ppc/macro-assembler-ppc.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -27,6 +27,8 @@ class PPCOperandConverter final : public InstructionOperandConverter {
PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
+ size_t OutputCount() { return instr_->OutputCount(); }
+
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
@@ -99,10 +101,10 @@ class PPCOperandConverter final : public InstructionOperandConverter {
}
MemOperand ToMemOperand(InstructionOperand* op) const {
- DCHECK(op != NULL);
+ DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
@@ -199,7 +201,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
};
-Condition FlagsConditionToCondition(FlagsCondition condition) {
+Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
switch (condition) {
case kEqual:
return eq;
@@ -218,17 +220,42 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnsignedGreaterThan:
return gt;
case kOverflow:
+ // Overflow checked for add/sub only.
+ switch (op) {
#if V8_TARGET_ARCH_PPC64
- return ne;
+ case kPPC_Add:
+ case kPPC_Sub:
+ return lt;
+#endif
+ case kPPC_AddWithOverflow32:
+ case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+ return ne;
#else
- return lt;
+ return lt;
#endif
+ default:
+ break;
+ }
+ break;
case kNotOverflow:
+ switch (op) {
#if V8_TARGET_ARCH_PPC64
- return eq;
+ case kPPC_Add:
+ case kPPC_Sub:
+ return ge;
+#endif
+ case kPPC_AddWithOverflow32:
+ case kPPC_SubWithOverflow32:
+#if V8_TARGET_ARCH_PPC64
+ return eq;
#else
- return ge;
+ return ge;
#endif
+ default:
+ break;
+ }
+ break;
default:
break;
}
@@ -288,13 +315,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-#if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_ADD_WITH_OVERFLOW() \
- do { \
- ASSEMBLE_BINOP(add, addi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
- } while (0)
-#else
#define ASSEMBLE_ADD_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -305,16 +325,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
i.InputInt32(1), kScratchReg, r0); \
} \
} while (0)
-#endif
-#if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_SUB_WITH_OVERFLOW() \
- do { \
- ASSEMBLE_BINOP(sub, subi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
- } while (0)
-#else
#define ASSEMBLE_SUB_WITH_OVERFLOW() \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -325,6 +337,24 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
-i.InputInt32(1), kScratchReg, r0); \
} \
} while (0)
+
+
+#if V8_TARGET_ARCH_PPC64
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(add, addi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+
+
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_BINOP(sub, subi); \
+ __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+ } while (0)
+#else
+#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
+#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
@@ -605,12 +635,31 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ LeaveFrame(StackFrame::MANUAL);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadP(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ }
+ __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ mtlr(r0);
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -634,10 +683,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -650,6 +701,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RelocInfo::CODE_TARGET);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -668,6 +720,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(ip);
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -679,10 +732,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -695,8 +750,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -706,6 +766,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -721,13 +783,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -866,31 +931,47 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
#endif
case kPPC_Add:
- if (HasRegisterInput(instr, 1)) {
- __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
- LeaveOE, i.OutputRCBit());
+#if V8_TARGET_ARCH_PPC64
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_ADD_WITH_OVERFLOW();
} else {
- __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
+#endif
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+#if V8_TARGET_ARCH_PPC64
}
+#endif
break;
case kPPC_AddWithOverflow32:
- ASSEMBLE_ADD_WITH_OVERFLOW();
+ ASSEMBLE_ADD_WITH_OVERFLOW32();
break;
case kPPC_AddDouble:
ASSEMBLE_FLOAT_BINOP_RC(fadd);
break;
case kPPC_Sub:
- if (HasRegisterInput(instr, 1)) {
- __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
- LeaveOE, i.OutputRCBit());
+#if V8_TARGET_ARCH_PPC64
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_SUB_WITH_OVERFLOW();
} else {
- __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
+#endif
+ if (HasRegisterInput(instr, 1)) {
+ __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+#if V8_TARGET_ARCH_PPC64
}
+#endif
break;
case kPPC_SubWithOverflow32:
- ASSEMBLE_SUB_WITH_OVERFLOW();
+ ASSEMBLE_SUB_WITH_OVERFLOW32();
break;
case kPPC_SubDouble:
ASSEMBLE_FLOAT_BINOP_RC(fsub);
@@ -1045,8 +1126,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
@@ -1089,8 +1172,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Int64ToInt32:
- // TODO(mbrandy): sign extend?
- __ Move(i.OutputRegister(), i.InputRegister(0));
+ __ extsw(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_Int64ToFloat32:
@@ -1101,6 +1183,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kPPC_Uint64ToFloat32:
+ __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Uint64ToDouble:
+ __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
#endif
case kPPC_Int32ToDouble:
__ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
@@ -1113,13 +1205,52 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kPPC_DoubleToInt32:
case kPPC_DoubleToUint32:
+ case kPPC_DoubleToInt64: {
+#if V8_TARGET_ARCH_PPC64
+ bool check_conversion =
+ (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
+ if (check_conversion) {
+ __ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ }
+#endif
__ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_PPC64
kScratchReg,
#endif
- i.OutputRegister(), kScratchDoubleReg);
+ i.OutputRegister(0), kScratchDoubleReg);
+#if V8_TARGET_ARCH_PPC64
+ if (check_conversion) {
+ // Set 2nd output to zero if conversion fails.
+ CRBit crbit = static_cast<CRBit>(VXCVI % CRWIDTH);
+ __ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1),
+ v8::internal::Assembler::encode_crbit(cr7, crbit));
+ }
+#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_DoubleToUint64: {
+ bool check_conversion = (i.OutputCount() > 1);
+ if (check_conversion) {
+ __ mtfsb0(VXCVI); // clear FPSCR:VXCVI bit
+ }
+ __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
+ i.OutputRegister(0), kScratchDoubleReg);
+ if (check_conversion) {
+ // Set 2nd output to zero if conversion fails.
+ CRBit crbit = static_cast<CRBit>(VXCVI % CRWIDTH);
+ __ mcrfs(cr7, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1),
+ v8::internal::Assembler::encode_crbit(cr7, crbit));
+ }
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ }
+#endif
case kPPC_DoubleToFloat32:
ASSEMBLE_FLOAT_UNOP_RC(frsp);
break;
@@ -1282,11 +1413,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
FlagsCondition condition = branch->condition;
CRegister cr = cr0;
- // Overflow checked for add/sub only.
- DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
-
- Condition cond = FlagsConditionToCondition(condition);
+ Condition cond = FlagsConditionToCondition(condition, op);
if (op == kPPC_CmpDouble) {
// check for unordered if necessary
if (cond == le) {
@@ -1313,57 +1440,53 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
PPCOperandConverter i(this, instr);
Label done;
ArchOpcode op = instr->arch_opcode();
+ bool check_unordered = (op == kPPC_CmpDouble);
CRegister cr = cr0;
- int reg_value = -1;
-
- // Overflow checked for add/sub only.
- DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cond = FlagsConditionToCondition(condition);
- if (op == kPPC_CmpDouble) {
- // check for unordered if necessary
- if (cond == le) {
- reg_value = 0;
+ Condition cond = FlagsConditionToCondition(condition, op);
+ switch (cond) {
+ case eq:
+ case lt:
__ li(reg, Operand::Zero());
- __ bunordered(&done, cr);
- } else if (cond == gt) {
- reg_value = 1;
+ __ li(kScratchReg, Operand(1));
+ __ isel(cond, reg, kScratchReg, reg, cr);
+ break;
+ case ne:
+ case ge:
__ li(reg, Operand(1));
- __ bunordered(&done, cr);
- }
- // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
- }
-
- if (CpuFeatures::IsSupported(ISELECT)) {
- switch (cond) {
- case eq:
- case lt:
- case gt:
- if (reg_value != 1) __ li(reg, Operand(1));
+ __ isel(NegateCondition(cond), reg, r0, reg, cr);
+ break;
+ case gt:
+ if (check_unordered) {
+ __ li(reg, Operand(1));
__ li(kScratchReg, Operand::Zero());
+ __ bunordered(&done, cr);
__ isel(cond, reg, reg, kScratchReg, cr);
- break;
- case ne:
- case ge:
- case le:
- if (reg_value != 1) __ li(reg, Operand(1));
- // r0 implies logical zero in this form
+ } else {
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ isel(cond, reg, kScratchReg, reg, cr);
+ }
+ break;
+ case le:
+ if (check_unordered) {
+ __ li(reg, Operand::Zero());
+ __ li(kScratchReg, Operand(1));
+ __ bunordered(&done, cr);
+ __ isel(NegateCondition(cond), reg, r0, kScratchReg, cr);
+ } else {
+ __ li(reg, Operand(1));
__ isel(NegateCondition(cond), reg, r0, reg, cr);
- break;
+ }
+ break;
default:
UNREACHABLE();
break;
- }
- } else {
- if (reg_value != 0) __ li(reg, Operand::Zero());
- __ b(NegateCondition(cond), &done, cr);
- __ li(reg, Operand(1));
}
__ bind(&done);
}
@@ -1408,8 +1531,7 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ function_descriptor();
__ mflr(r0);
if (FLAG_enable_embedded_constant_pool) {
@@ -1421,13 +1543,18 @@ void CodeGenerator::AssemblePrologue() {
__ mr(fp, sp);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
- __ StubPrologue();
+ __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+ } else if (frame()->needs_frame()) {
+ if (!ABI_CALL_VIA_IP && info()->output_code_kind() == Code::WASM_FUNCTION) {
+ // TODO(mbrandy): Restrict only to the wasm wrapper case.
+ __ StubPrologue();
+ } else {
+ __ StubPrologue(ip);
+ }
} else {
frame()->SetElidedFrameSizeInSlots(0);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1497,9 +1624,9 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopDoubles(double_saves);
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ b(&return_label_);
@@ -1517,7 +1644,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- PPCOperandConverter g(this, NULL);
+ PPCOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1619,7 +1746,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- PPCOperandConverter g(this, NULL);
+ PPCOperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 64a14ae09d..a3bf80e503 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -80,11 +80,15 @@ namespace compiler {
V(PPC_Int64ToInt32) \
V(PPC_Int64ToFloat32) \
V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
V(PPC_Int32ToDouble) \
V(PPC_Uint32ToDouble) \
V(PPC_Float32ToDouble) \
V(PPC_DoubleToInt32) \
V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
V(PPC_DoubleToFloat32) \
V(PPC_DoubleExtractLowWord32) \
V(PPC_DoubleExtractHighWord32) \
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
new file mode 100644
index 0000000000..fc90cdd628
--- /dev/null
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -0,0 +1,143 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kPPC_And:
+ case kPPC_AndComplement:
+ case kPPC_Or:
+ case kPPC_OrComplement:
+ case kPPC_Xor:
+ case kPPC_ShiftLeft32:
+ case kPPC_ShiftLeft64:
+ case kPPC_ShiftRight32:
+ case kPPC_ShiftRight64:
+ case kPPC_ShiftRightAlg32:
+ case kPPC_ShiftRightAlg64:
+ case kPPC_RotRight32:
+ case kPPC_RotRight64:
+ case kPPC_Not:
+ case kPPC_RotLeftAndMask32:
+ case kPPC_RotLeftAndClear64:
+ case kPPC_RotLeftAndClearLeft64:
+ case kPPC_RotLeftAndClearRight64:
+ case kPPC_Add:
+ case kPPC_AddWithOverflow32:
+ case kPPC_AddDouble:
+ case kPPC_Sub:
+ case kPPC_SubWithOverflow32:
+ case kPPC_SubDouble:
+ case kPPC_Mul32:
+ case kPPC_Mul64:
+ case kPPC_MulHigh32:
+ case kPPC_MulHighU32:
+ case kPPC_MulDouble:
+ case kPPC_Div32:
+ case kPPC_Div64:
+ case kPPC_DivU32:
+ case kPPC_DivU64:
+ case kPPC_DivDouble:
+ case kPPC_Mod32:
+ case kPPC_Mod64:
+ case kPPC_ModU32:
+ case kPPC_ModU64:
+ case kPPC_ModDouble:
+ case kPPC_Neg:
+ case kPPC_NegDouble:
+ case kPPC_SqrtDouble:
+ case kPPC_FloorDouble:
+ case kPPC_CeilDouble:
+ case kPPC_TruncateDouble:
+ case kPPC_RoundDouble:
+ case kPPC_MaxDouble:
+ case kPPC_MinDouble:
+ case kPPC_AbsDouble:
+ case kPPC_Cntlz32:
+ case kPPC_Cntlz64:
+ case kPPC_Popcnt32:
+ case kPPC_Popcnt64:
+ case kPPC_Cmp32:
+ case kPPC_Cmp64:
+ case kPPC_CmpDouble:
+ case kPPC_Tst32:
+ case kPPC_Tst64:
+ case kPPC_ExtendSignWord8:
+ case kPPC_ExtendSignWord16:
+ case kPPC_ExtendSignWord32:
+ case kPPC_Uint32ToUint64:
+ case kPPC_Int64ToInt32:
+ case kPPC_Int64ToFloat32:
+ case kPPC_Int64ToDouble:
+ case kPPC_Uint64ToFloat32:
+ case kPPC_Uint64ToDouble:
+ case kPPC_Int32ToDouble:
+ case kPPC_Uint32ToDouble:
+ case kPPC_Float32ToDouble:
+ case kPPC_DoubleToInt32:
+ case kPPC_DoubleToUint32:
+ case kPPC_DoubleToInt64:
+ case kPPC_DoubleToUint64:
+ case kPPC_DoubleToFloat32:
+ case kPPC_DoubleExtractLowWord32:
+ case kPPC_DoubleExtractHighWord32:
+ case kPPC_DoubleInsertLowWord32:
+ case kPPC_DoubleInsertHighWord32:
+ case kPPC_DoubleConstruct:
+ case kPPC_BitcastInt32ToFloat32:
+ case kPPC_BitcastFloat32ToInt32:
+ case kPPC_BitcastInt64ToDouble:
+ case kPPC_BitcastDoubleToInt64:
+ return kNoOpcodeFlags;
+
+ case kPPC_LoadWordS8:
+ case kPPC_LoadWordU8:
+ case kPPC_LoadWordS16:
+ case kPPC_LoadWordU16:
+ case kPPC_LoadWordS32:
+ case kPPC_LoadWord64:
+ case kPPC_LoadFloat32:
+ case kPPC_LoadDouble:
+ return kIsLoadOperation;
+
+ case kPPC_StoreWord8:
+ case kPPC_StoreWord16:
+ case kPPC_StoreWord32:
+ case kPPC_StoreWord64:
+ case kPPC_StoreFloat32:
+ case kPPC_StoreDouble:
+ case kPPC_Push:
+ case kPPC_PushFrame:
+ case kPPC_StoreToStackSlot:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index f2a00044c2..f6ebbdf5d6 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -95,6 +95,25 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
}
+#if V8_TARGET_ARCH_PPC64
+void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ PPCOperandGenerator g(selector);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ selector->Emit(opcode, output_count, outputs, 1, inputs);
+}
+#endif
+
+
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
@@ -142,32 +161,30 @@ void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
-
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
- switch (rep) {
- case kRepFloat32:
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kPPC_LoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kPPC_LoadDouble;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = (typ == kTypeInt32) ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kPPC_LoadWordS8 : kPPC_LoadWordU8;
break;
- case kRepWord16:
- opcode = (typ == kTypeInt32) ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
break;
#if !V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
#endif
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kPPC_LoadWordS32;
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): this applies to signed loads only (lwa)
@@ -175,13 +192,15 @@ void InstructionSelector::VisitLoad(Node* node) {
#endif
break;
#if V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
mode = kInt16Imm_4ByteAligned;
break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
#endif
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -204,13 +223,13 @@ void InstructionSelector::VisitStore(Node* node) {
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
// TODO(ppc): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
@@ -239,36 +258,38 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
ImmediateMode mode = kInt16Imm;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kPPC_StoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kPPC_StoreDouble;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kPPC_StoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kPPC_StoreWord16;
break;
#if !V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
#endif
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kPPC_StoreWord32;
break;
#if V8_TARGET_ARCH_PPC64
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kPPC_StoreWord64;
mode = kInt16Imm_4ByteAligned;
break;
+#else
+ case MachineRepresentation::kWord64: // Fall through.
#endif
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -287,33 +308,39 @@ void InstructionSelector::VisitStore(Node* node) {
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+#endif
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -325,33 +352,40 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
PPCOperandGenerator g(this);
Node* const base = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+#endif
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kWord64: // Fall through.
+#endif
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -927,6 +961,26 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToInt64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ VisitTryTruncateDouble(this, kPPC_DoubleToUint64, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_ExtendSignWord32, node);
@@ -971,6 +1025,16 @@ void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
VisitRR(this, kPPC_Int64ToDouble, node);
}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Uint64ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_Uint64ToDouble, node);
+}
#endif
@@ -1108,11 +1172,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kPPC_FloorDouble, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, kPPC_FloorDouble, node);
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kPPC_CeilDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kPPC_TruncateDouble, node);
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, kPPC_TruncateDouble, node);
}
@@ -1123,6 +1207,16 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ UNREACHABLE();
+}
+
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont(kOverflow, ovf);
@@ -1147,6 +1241,30 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
}
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+}
+
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate,
+ &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate, &cont);
+}
+#endif
+
+
static bool CompareLogical(FlagsContinuation* cont) {
switch (cont->condition()) {
case kUnsignedLessThan:
@@ -1313,12 +1431,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1329,6 +1447,16 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitBinop<Int32BinopMatcher>(selector, node,
kPPC_SubWithOverflow32,
kInt16Imm_Negate, cont);
+#if V8_TARGET_ARCH_PPC64
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+ kInt16Imm, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Sub,
+ kInt16Imm_Negate, cont);
+#endif
default:
break;
}
@@ -1535,9 +1663,9 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
PPCOperandGenerator g(this);
// Prepare for C function call.
@@ -1548,8 +1676,8 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
- for (Node* node : (*arguments)) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ for (PushParameter input : (*arguments)) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot));
++slot;
}
@@ -1557,15 +1685,15 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Push any stack arguments.
int num_slots = static_cast<int>(descriptor->StackParameterCount());
int slot = 0;
- for (Node* input : (*arguments)) {
+ for (PushParameter input : (*arguments)) {
if (slot == 0) {
- DCHECK(input);
- Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input),
+ DCHECK(input.node());
+ Emit(kPPC_PushFrame, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(num_slots));
} else {
// Skip any alignment holes in pushed nodes.
- if (input) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input),
+ if (input.node()) {
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
g.TempImmediate(slot));
}
}
@@ -1627,7 +1755,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64RoundDown |
+ return MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index d658c294d9..4df2bde448 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -5,6 +5,7 @@
#include "src/compiler/raw-machine-assembler.h"
#include "src/code-factory.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
@@ -14,7 +15,7 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
CallDescriptor* call_descriptor,
- MachineType word,
+ MachineRepresentation word,
MachineOperatorBuilder::Flags flags)
: isolate_(isolate),
graph_(graph),
@@ -31,6 +32,7 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
parameters_[i] =
AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
+ graph->SetEnd(graph->NewNode(common_.End(0)));
}
@@ -51,15 +53,15 @@ Node* RawMachineAssembler::Parameter(size_t index) {
}
-void RawMachineAssembler::Goto(Label* label) {
+void RawMachineAssembler::Goto(RawMachineLabel* label) {
DCHECK(current_block_ != schedule()->end());
schedule()->AddGoto(CurrentBlock(), Use(label));
current_block_ = nullptr;
}
-void RawMachineAssembler::Branch(Node* condition, Label* true_val,
- Label* false_val) {
+void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
+ RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
Node* branch = AddNode(common()->Branch(), condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
@@ -67,8 +69,9 @@ void RawMachineAssembler::Branch(Node* condition, Label* true_val,
}
-void RawMachineAssembler::Switch(Node* index, Label* default_label,
- int32_t* case_values, Label** case_labels,
+void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
+ int32_t* case_values,
+ RawMachineLabel** case_labels,
size_t case_count) {
DCHECK_NE(schedule()->end(), current_block_);
size_t succ_count = case_count + 1;
@@ -93,6 +96,7 @@ void RawMachineAssembler::Switch(Node* index, Label* default_label,
void RawMachineAssembler::Return(Node* value) {
Node* ret = MakeNode(common()->Return(), 1, &value);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -101,6 +105,7 @@ void RawMachineAssembler::Return(Node* value) {
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {v1, v2};
Node* ret = MakeNode(common()->Return(2), 2, values);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -109,6 +114,7 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 3, values);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -118,15 +124,13 @@ Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
Node** args) {
int param_count =
static_cast<int>(desc->GetMachineSignature()->parameter_count());
- int input_count = param_count + 3;
+ int input_count = param_count + 1;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
buffer[index++] = function;
for (int i = 0; i < param_count; i++) {
buffer[index++] = args[i];
}
- buffer[index++] = graph()->start();
- buffer[index++] = graph()->start();
return AddNode(common()->Call(desc), input_count, buffer);
}
@@ -137,7 +141,7 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
DCHECK(desc->NeedsFrameState());
int param_count =
static_cast<int>(desc->GetMachineSignature()->parameter_count());
- int input_count = param_count + 4;
+ int input_count = param_count + 2;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
buffer[index++] = function;
@@ -145,59 +149,38 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
buffer[index++] = args[i];
}
buffer[index++] = frame_state;
- buffer[index++] = graph()->start();
- buffer[index++] = graph()->start();
return AddNode(common()->Call(desc), input_count, buffer);
}
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
- Node** args) {
- int param_count =
- static_cast<int>(desc->GetMachineSignature()->parameter_count());
- int input_count = param_count + 3;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- buffer[index++] = graph()->start();
- buffer[index++] = graph()->start();
- Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-
Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
Node* arg1, Node* context) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 1, Operator::kNoProperties, false);
+ zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
- Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
Node* arity = Int32Constant(1);
- return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context,
- graph()->start(), graph()->start());
+ return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
}
Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
Node* arg1, Node* arg2, Node* context) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 2, Operator::kNoProperties, false);
+ zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
- Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
Node* arity = Int32Constant(2);
return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
- context, graph()->start(), graph()->start());
+ context);
}
@@ -205,15 +188,82 @@ Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
Node* arg1, Node* arg2, Node* arg3,
Node* arg4, Node* context) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 4, Operator::kNoProperties, false);
+ zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
- Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
Node* arity = Int32Constant(4);
return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
- ref, arity, context, graph()->start(), graph()->start());
+ ref, arity, context);
+}
+
+
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
+ Node** args) {
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int input_count = param_count + 1;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+
+Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
+ Node* arg1, Node* context) {
+ const int kArity = 1;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+
+Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
+ Node* arg1, Node* arg2,
+ Node* context) {
+ const int kArity = 2;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
}
@@ -224,8 +274,7 @@ Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, graph()->start(),
- graph()->start());
+ return AddNode(common()->Call(descriptor), function);
}
@@ -238,8 +287,7 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, graph()->start(),
- graph()->start());
+ return AddNode(common()->Call(descriptor), function, arg0);
}
@@ -254,8 +302,7 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- return AddNode(common()->Call(descriptor), function, arg0, arg1,
- graph()->start(), graph()->start());
+ return AddNode(common()->Call(descriptor), function, arg0, arg1);
}
@@ -275,24 +322,14 @@ Node* RawMachineAssembler::CallCFunction8(
builder.AddParam(arg5_type);
builder.AddParam(arg6_type);
builder.AddParam(arg7_type);
- Node* args[] = {function,
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5,
- arg6,
- arg7,
- graph()->start(),
- graph()->start()};
+ Node* args[] = {function, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->Call(descriptor), arraysize(args), args);
}
-void RawMachineAssembler::Bind(Label* label) {
+void RawMachineAssembler::Bind(RawMachineLabel* label) {
DCHECK(current_block_ == nullptr);
DCHECK(!label->bound_);
label->bound_ = true;
@@ -300,13 +337,13 @@ void RawMachineAssembler::Bind(Label* label) {
}
-BasicBlock* RawMachineAssembler::Use(Label* label) {
+BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
label->used_ = true;
return EnsureBlock(label);
}
-BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
+BasicBlock* RawMachineAssembler::EnsureBlock(RawMachineLabel* label) {
if (label->block_ == nullptr) label->block_ = schedule()->NewBasicBlock();
return label->block_;
}
@@ -321,7 +358,7 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
Node** inputs) {
DCHECK_NOT_NULL(schedule_);
- DCHECK(current_block_ != nullptr);
+ DCHECK_NOT_NULL(current_block_);
Node* node = MakeNode(op, input_count, inputs);
schedule()->AddNode(CurrentBlock(), node);
return node;
@@ -335,6 +372,13 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
return graph()->NewNodeUnchecked(op, input_count, inputs);
}
+
+RawMachineLabel::RawMachineLabel()
+ : block_(nullptr), used_(false), bound_(false) {}
+
+
+RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index d4b8e93d10..5c232ed1d1 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -19,8 +19,10 @@ namespace internal {
namespace compiler {
class BasicBlock;
+class RawMachineLabel;
class Schedule;
+
// The RawMachineAssembler produces a low-level IR graph. All nodes are wired
// into a graph and also placed into a schedule immediately, hence subsequent
// code generation can happen without the need for scheduling.
@@ -34,37 +36,19 @@ class Schedule;
// non-schedulable due to missing control and effect dependencies.
class RawMachineAssembler {
public:
- class Label {
- public:
- Label() : block_(NULL), used_(false), bound_(false) {}
- ~Label() { DCHECK(bound_ || !used_); }
-
- private:
- BasicBlock* block_;
- bool used_;
- bool bound_;
- friend class RawMachineAssembler;
- DISALLOW_COPY_AND_ASSIGN(Label);
- };
-
- RawMachineAssembler(Isolate* isolate, Graph* graph,
- CallDescriptor* call_descriptor,
- MachineType word = kMachPtr,
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::Flag::kNoFlags);
+ RawMachineAssembler(
+ Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
+ MachineRepresentation word = MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::Flag::kNoFlags);
~RawMachineAssembler() {}
Isolate* isolate() const { return isolate_; }
Graph* graph() const { return graph_; }
- Schedule* schedule() { return schedule_; }
Zone* zone() const { return graph()->zone(); }
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- size_t parameter_count() const { return machine_sig()->parameter_count(); }
- const MachineSignature* machine_sig() const {
- return call_descriptor_->GetMachineSignature();
- }
// Finalizes the schedule and exports it to be used for code generation. Note
// that this RawMachineAssembler becomes invalid after export.
@@ -127,17 +111,16 @@ class RawMachineAssembler {
return Load(rep, base, IntPtrConstant(0));
}
Node* Load(MachineType rep, Node* base, Node* index) {
- return AddNode(machine()->Load(rep), base, index, graph()->start(),
- graph()->start());
+ return AddNode(machine()->Load(rep), base, index);
}
- Node* Store(MachineType rep, Node* base, Node* value,
+ Node* Store(MachineRepresentation rep, Node* base, Node* value,
WriteBarrierKind write_barrier) {
return Store(rep, base, IntPtrConstant(0), value, write_barrier);
}
- Node* Store(MachineType rep, Node* base, Node* index, Node* value,
+ Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
WriteBarrierKind write_barrier) {
return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
- base, index, value, graph()->start(), graph()->start());
+ base, index, value);
}
// Arithmetic Operations.
@@ -262,10 +245,10 @@ class RawMachineAssembler {
return AddNode(machine()->Int32MulHigh(), a, b);
}
Node* Int32Div(Node* a, Node* b) {
- return AddNode(machine()->Int32Div(), a, b, graph()->start());
+ return AddNode(machine()->Int32Div(), a, b);
}
Node* Int32Mod(Node* a, Node* b) {
- return AddNode(machine()->Int32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Int32Mod(), a, b);
}
Node* Int32LessThan(Node* a, Node* b) {
return AddNode(machine()->Int32LessThan(), a, b);
@@ -274,7 +257,7 @@ class RawMachineAssembler {
return AddNode(machine()->Int32LessThanOrEqual(), a, b);
}
Node* Uint32Div(Node* a, Node* b) {
- return AddNode(machine()->Uint32Div(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Div(), a, b);
}
Node* Uint32LessThan(Node* a, Node* b) {
return AddNode(machine()->Uint32LessThan(), a, b);
@@ -283,7 +266,7 @@ class RawMachineAssembler {
return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
}
Node* Uint32Mod(Node* a, Node* b) {
- return AddNode(machine()->Uint32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Mod(), a, b);
}
Node* Uint32MulHigh(Node* a, Node* b) {
return AddNode(machine()->Uint32MulHigh(), a, b);
@@ -297,9 +280,15 @@ class RawMachineAssembler {
Node* Int64Add(Node* a, Node* b) {
return AddNode(machine()->Int64Add(), a, b);
}
+ Node* Int64AddWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int64AddWithOverflow(), a, b);
+ }
Node* Int64Sub(Node* a, Node* b) {
return AddNode(machine()->Int64Sub(), a, b);
}
+ Node* Int64SubWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int64SubWithOverflow(), a, b);
+ }
Node* Int64Mul(Node* a, Node* b) {
return AddNode(machine()->Int64Mul(), a, b);
}
@@ -443,6 +432,38 @@ class RawMachineAssembler {
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
+ Node* TruncateFloat32ToInt64(Node* a) {
+ // TODO(ahaas): Remove this function as soon as it is not used anymore in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ }
+ Node* TryTruncateFloat32ToInt64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ }
+ Node* TruncateFloat64ToInt64(Node* a) {
+ // TODO(ahaas): Remove this function as soon as it is not used anymore in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
+ }
+ Node* TryTruncateFloat64ToInt64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
+ }
+ Node* TruncateFloat32ToUint64(Node* a) {
+ // TODO(ahaas): Remove this function as soon as it is not used anymore in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
+ }
+ Node* TryTruncateFloat32ToUint64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
+ }
+ Node* TruncateFloat64ToUint64(Node* a) {
+ // TODO(ahaas): Remove this function as soon as it is not used anymore in
+ // WebAssembly.
+ return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
+ }
+ Node* TryTruncateFloat64ToUint64(Node* a) {
+ return AddNode(machine()->TryTruncateFloat64ToUint64(), a);
+ }
Node* ChangeInt32ToInt64(Node* a) {
return AddNode(machine()->ChangeInt32ToInt64(), a);
}
@@ -464,6 +485,12 @@ class RawMachineAssembler {
Node* RoundInt64ToFloat64(Node* a) {
return AddNode(machine()->RoundInt64ToFloat64(), a);
}
+ Node* RoundUint64ToFloat32(Node* a) {
+ return AddNode(machine()->RoundUint64ToFloat32(), a);
+ }
+ Node* RoundUint64ToFloat64(Node* a) {
+ return AddNode(machine()->RoundUint64ToFloat64(), a);
+ }
Node* BitcastFloat32ToInt32(Node* a) {
return AddNode(machine()->BitcastFloat32ToInt32(), a);
}
@@ -476,15 +503,33 @@ class RawMachineAssembler {
Node* BitcastInt64ToFloat64(Node* a) {
return AddNode(machine()->BitcastInt64ToFloat64(), a);
}
+ Node* Float32RoundDown(Node* a) {
+ return AddNode(machine()->Float32RoundDown().op(), a);
+ }
Node* Float64RoundDown(Node* a) {
return AddNode(machine()->Float64RoundDown().op(), a);
}
+ Node* Float32RoundUp(Node* a) {
+ return AddNode(machine()->Float32RoundUp().op(), a);
+ }
+ Node* Float64RoundUp(Node* a) {
+ return AddNode(machine()->Float64RoundUp().op(), a);
+ }
+ Node* Float32RoundTruncate(Node* a) {
+ return AddNode(machine()->Float32RoundTruncate().op(), a);
+ }
Node* Float64RoundTruncate(Node* a) {
return AddNode(machine()->Float64RoundTruncate().op(), a);
}
Node* Float64RoundTiesAway(Node* a) {
return AddNode(machine()->Float64RoundTiesAway().op(), a);
}
+ Node* Float32RoundTiesEven(Node* a) {
+ return AddNode(machine()->Float32RoundTiesEven().op(), a);
+ }
+ Node* Float64RoundTiesEven(Node* a) {
+ return AddNode(machine()->Float64RoundTiesEven().op(), a);
+ }
// Float64 bit operations.
Node* Float64ExtractLowWord32(Node* a) {
@@ -511,7 +556,7 @@ class RawMachineAssembler {
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return Load(rep, PointerConstant(address), Int32Constant(offset));
}
- Node* StoreToPointer(void* address, MachineType rep, Node* node) {
+ Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
}
Node* StringConstant(const char* string) {
@@ -523,8 +568,6 @@ class RawMachineAssembler {
// Call a given call descriptor and the given arguments and frame-state.
Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
Node* frame_state);
- // Tail call the given call descriptor and the given arguments.
- Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
// Call to a runtime function with one arguments.
Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
// Call to a runtime function with two arguments.
@@ -551,30 +594,41 @@ class RawMachineAssembler {
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
+ // Tail call the given call descriptor and the given arguments.
+ Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
+ // Tail call to a runtime function with one argument.
+ Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
+ Node* context);
+ // Tail call to a runtime function with two arguments.
+ Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* context);
+
+
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
// the current basic block or create new basic blocks for labels.
// Control flow.
- void Goto(Label* label);
- void Branch(Node* condition, Label* true_val, Label* false_val);
- void Switch(Node* index, Label* default_label, int32_t* case_values,
- Label** case_labels, size_t case_count);
+ void Goto(RawMachineLabel* label);
+ void Branch(Node* condition, RawMachineLabel* true_val,
+ RawMachineLabel* false_val);
+ void Switch(Node* index, RawMachineLabel* default_label, int32_t* case_values,
+ RawMachineLabel** case_labels, size_t case_count);
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
- void Bind(Label* label);
+ void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
// Variables.
- Node* Phi(MachineType type, Node* n1, Node* n2) {
- return AddNode(common()->Phi(type, 2), n1, n2);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
+ return AddNode(common()->Phi(rep, 2), n1, n2);
}
- Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
- return AddNode(common()->Phi(type, 3), n1, n2, n3);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
+ return AddNode(common()->Phi(rep, 3), n1, n2, n3);
}
- Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
- return AddNode(common()->Phi(type, 4), n1, n2, n3, n4);
+ Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
+ return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4);
}
// ===========================================================================
@@ -596,10 +650,16 @@ class RawMachineAssembler {
private:
Node* MakeNode(const Operator* op, int input_count, Node** inputs);
- BasicBlock* Use(Label* label);
- BasicBlock* EnsureBlock(Label* label);
+ BasicBlock* Use(RawMachineLabel* label);
+ BasicBlock* EnsureBlock(RawMachineLabel* label);
BasicBlock* CurrentBlock();
+ Schedule* schedule() { return schedule_; }
+ size_t parameter_count() const { return machine_sig()->parameter_count(); }
+ const MachineSignature* machine_sig() const {
+ return call_descriptor_->GetMachineSignature();
+ }
+
Isolate* isolate_;
Graph* graph_;
Schedule* schedule_;
@@ -612,6 +672,20 @@ class RawMachineAssembler {
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
+
+class RawMachineLabel final {
+ public:
+ RawMachineLabel();
+ ~RawMachineLabel();
+
+ private:
+ BasicBlock* block_;
+ bool used_;
+ bool bound_;
+ friend class RawMachineAssembler;
+ DISALLOW_COPY_AND_ASSIGN(RawMachineLabel);
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 68862add46..463795ecf2 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -172,7 +172,12 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
}
break;
case UnallocatedOperand::FIXED_REGISTER:
- constraint->type_ = kFixedRegister;
+ if (unallocated->HasSecondaryStorage()) {
+ constraint->type_ = kRegisterAndSlot;
+ constraint->spilled_slot_ = unallocated->GetSecondaryStorage();
+ } else {
+ constraint->type_ = kFixedRegister;
+ }
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
@@ -225,6 +230,7 @@ void RegisterAllocatorVerifier::CheckConstraint(
CHECK(op->IsExplicit());
return;
case kFixedRegister:
+ case kRegisterAndSlot:
CHECK(op->IsRegister());
CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
constraint->value_);
@@ -386,11 +392,13 @@ class OperandMap : public ZoneObject {
}
}
- void Define(Zone* zone, const InstructionOperand* op, int virtual_register) {
+ MapValue* Define(Zone* zone, const InstructionOperand* op,
+ int virtual_register) {
auto value = new (zone) MapValue();
value->define_vreg = virtual_register;
auto res = map().insert(std::make_pair(op, value));
if (!res.second) res.first->second = value;
+ return value;
}
void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
@@ -704,7 +712,20 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
int virtual_register = op_constraints[count].virtual_register_;
- current->Define(zone(), instr->OutputAt(i), virtual_register);
+ OperandMap::MapValue* value =
+ current->Define(zone(), instr->OutputAt(i), virtual_register);
+ if (op_constraints[count].type_ == kRegisterAndSlot) {
+ const AllocatedOperand* reg_op =
+ AllocatedOperand::cast(instr->OutputAt(i));
+ MachineRepresentation rep = reg_op->representation();
+ const AllocatedOperand* stack_op = AllocatedOperand::New(
+ zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
+             op_constraints[count].spilled_slot_);
+ auto insert_result =
+ current->map().insert(std::make_pair(stack_op, value));
+ DCHECK(insert_result.second);
+ USE(insert_result);
+ }
}
}
}
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 87b5cfbb7a..f3ab54f018 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -36,12 +36,14 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kNone,
kNoneDouble,
kExplicit,
- kSameAsFirst
+ kSameAsFirst,
+ kRegisterAndSlot
};
struct OperandConstraint {
ConstraintType type_;
int value_; // subkind index when relevant
+ int spilled_slot_;
int virtual_register_;
};
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 0dc76000f7..232ad9fec1 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -49,7 +49,7 @@ const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
- auto index = block->loop_header();
+ RpoNumber index = block->loop_header();
if (!index.IsValid()) return nullptr;
return sequence->InstructionBlockAt(index);
}
@@ -69,7 +69,7 @@ Instruction* GetLastInstruction(InstructionSequence* code,
bool IsOutputRegisterOf(Instruction* instr, Register reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
+ InstructionOperand* output = instr->OutputAt(i);
if (output->IsRegister() &&
LocationOperand::cast(output)->GetRegister().is(reg)) {
return true;
@@ -81,7 +81,7 @@ bool IsOutputRegisterOf(Instruction* instr, Register reg) {
bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
+ InstructionOperand* output = instr->OutputAt(i);
if (output->IsDoubleRegister() &&
LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
return true;
@@ -92,23 +92,23 @@ bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
// TODO(dcarney): fix frame to allow frame accesses to half size location.
-int GetByteWidth(MachineType machine_type) {
- DCHECK_EQ(RepresentationOf(machine_type), machine_type);
- switch (machine_type) {
- case kRepBit:
- case kRepWord8:
- case kRepWord16:
- case kRepWord32:
- case kRepTagged:
+int GetByteWidth(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTagged:
return kPointerSize;
- case kRepFloat32:
- case kRepWord64:
- case kRepFloat64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat64:
return 8;
- default:
- UNREACHABLE();
- return 0;
+ case MachineRepresentation::kNone:
+ break;
}
+ UNREACHABLE();
+ return 0;
}
} // namespace
@@ -151,14 +151,15 @@ bool UsePosition::HintRegister(int* register_code) const {
case UsePositionHintType::kUnresolved:
return false;
case UsePositionHintType::kUsePos: {
- auto use_pos = reinterpret_cast<UsePosition*>(hint_);
+ UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
if (assigned_register == kUnassignedRegister) return false;
*register_code = assigned_register;
return true;
}
case UsePositionHintType::kOperand: {
- auto operand = reinterpret_cast<InstructionOperand*>(hint_);
+ InstructionOperand* operand =
+ reinterpret_cast<InstructionOperand*>(hint_);
int assigned_register =
operand->IsRegister()
? LocationOperand::cast(operand)->GetRegister().code()
@@ -167,7 +168,8 @@ bool UsePosition::HintRegister(int* register_code) const {
return true;
}
case UsePositionHintType::kPhi: {
- auto phi = reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
+ RegisterAllocationData::PhiMapValue* phi =
+ reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_);
int assigned_register = phi->assigned_register();
if (assigned_register == kUnassignedRegister) return false;
*register_code = assigned_register;
@@ -223,7 +225,7 @@ void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
DCHECK(Contains(pos) && pos != start());
- auto after = new (zone) UseInterval(pos, end_);
+ UseInterval* after = new (zone) UseInterval(pos, end_);
after->next_ = next_;
next_ = nullptr;
end_ = pos;
@@ -231,6 +233,12 @@ UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
}
+void LifetimePosition::Print() const {
+ OFStream os(stdout);
+ os << *this << std::endl;
+}
+
+
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
os << '@' << pos.ToInstructionIndex();
if (pos.IsGapPosition()) {
@@ -251,7 +259,7 @@ const float LiveRange::kInvalidWeight = -1;
const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-LiveRange::LiveRange(int relative_id, MachineType machine_type,
+LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level)
: relative_id_(relative_id),
bits_(0),
@@ -267,27 +275,39 @@ LiveRange::LiveRange(int relative_id, MachineType machine_type,
size_(kInvalidSize),
weight_(kInvalidWeight),
group_(nullptr) {
- DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
+ DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
- MachineTypeField::encode(machine_type);
+ RepresentationField::encode(rep);
}
-void LiveRange::Verify() const {
+void LiveRange::VerifyPositions() const {
// Walk the positions, verifying that each is in an interval.
- auto interval = first_interval_;
- for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) {
+ UseInterval* interval = first_interval_;
+ for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
CHECK(Start() <= pos->pos());
CHECK(pos->pos() <= End());
- CHECK(interval != nullptr);
+ CHECK_NOT_NULL(interval);
while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
interval = interval->next();
- CHECK(interval != nullptr);
+ CHECK_NOT_NULL(interval);
}
}
}
+void LiveRange::VerifyIntervals() const {
+ DCHECK(first_interval()->start() == Start());
+ LifetimePosition last_end = first_interval()->end();
+ for (UseInterval* interval = first_interval()->next(); interval != nullptr;
+ interval = interval->next()) {
+ DCHECK(last_end <= interval->start());
+ last_end = interval->end();
+ }
+ DCHECK(last_end == End());
+}
+
+
void LiveRange::set_assigned_register(int reg) {
DCHECK(!HasRegisterAssigned() && !spilled());
bits_ = AssignedRegisterField::update(bits_, reg);
@@ -309,19 +329,13 @@ void LiveRange::Spill() {
RegisterKind LiveRange::kind() const {
- switch (RepresentationOf(machine_type())) {
- case kRepFloat32:
- case kRepFloat64:
- return DOUBLE_REGISTERS;
- default:
- break;
- }
- return GENERAL_REGISTERS;
+ return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
+ : GENERAL_REGISTERS;
}
UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
- for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
if (pos->HintRegister(register_index)) return pos;
}
return nullptr;
@@ -353,7 +367,7 @@ UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
LifetimePosition start) const {
- auto pos = first_pos();
+ UsePosition* pos = first_pos();
UsePosition* prev = nullptr;
while (pos != nullptr && pos->pos() < start) {
if (pos->RegisterIsBeneficial()) prev = pos;
@@ -385,7 +399,7 @@ UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
- auto use_pos = NextRegisterPosition(pos);
+ UsePosition* use_pos = NextRegisterPosition(pos);
if (use_pos == nullptr) return true;
return use_pos->pos() > pos.NextStart().End();
}
@@ -397,13 +411,13 @@ bool LiveRange::IsTopLevel() const { return top_level_ == this; }
InstructionOperand LiveRange::GetAssignedOperand() const {
if (HasRegisterAssigned()) {
DCHECK(!spilled());
- return AllocatedOperand(LocationOperand::REGISTER, machine_type(),
+ return AllocatedOperand(LocationOperand::REGISTER, representation(),
assigned_register());
}
DCHECK(spilled());
DCHECK(!HasRegisterAssigned());
if (TopLevel()->HasSpillOperand()) {
- auto op = TopLevel()->GetSpillOperand();
+ InstructionOperand* op = TopLevel()->GetSpillOperand();
DCHECK(!op->IsUnallocated());
return *op;
}
@@ -426,8 +440,9 @@ void LiveRange::AdvanceLastProcessedMarker(
UseInterval* to_start_of, LifetimePosition but_not_past) const {
if (to_start_of == nullptr) return;
if (to_start_of->start() > but_not_past) return;
- auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
- : current_interval_->start();
+ LifetimePosition start = current_interval_ == nullptr
+ ? LifetimePosition::Invalid()
+ : current_interval_->start();
if (to_start_of->start() > start) {
current_interval_ = to_start_of;
}
@@ -436,15 +451,12 @@ void LiveRange::AdvanceLastProcessedMarker(
LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
int new_id = TopLevel()->GetNextChildId();
- LiveRange* child = new (zone) LiveRange(new_id, machine_type(), TopLevel());
+ LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
DetachAt(position, child, zone);
child->top_level_ = TopLevel();
child->next_ = next_;
next_ = child;
- if (child->next() == nullptr) {
- TopLevel()->set_last_child(child);
- }
return child;
}
@@ -457,7 +469,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
- auto current = FirstSearchIntervalForPosition(position);
+ UseInterval* current = FirstSearchIntervalForPosition(position);
// If the split position coincides with the beginning of a use interval
// we need to split use positons in a special way.
@@ -474,7 +486,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
after = current->SplitAt(position, zone);
break;
}
- auto next = current->next();
+ UseInterval* next = current->next();
if (next->start() >= position) {
split_at_start = (next->start() == position);
after = next;
@@ -486,7 +498,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
DCHECK(nullptr != after);
// Partition original use intervals to the two live ranges.
- auto before = current;
+ UseInterval* before = current;
result->last_interval_ =
(last_interval_ == before)
? after // Only interval in the range after split.
@@ -496,7 +508,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
// Find the last use position before the split and the first use
// position after it.
- auto use_after =
+ UsePosition* use_after =
splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
? first_pos()
: splitting_pointer_;
@@ -534,22 +546,13 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
size_ = kInvalidSize;
weight_ = kInvalidWeight;
#ifdef DEBUG
- Verify();
- result->Verify();
+ VerifyChildStructure();
+ result->VerifyChildStructure();
#endif
return use_before;
}
-void LiveRange::AppendAsChild(TopLevelLiveRange* other) {
- next_ = other;
-
- other->UpdateParentForAllChildren(TopLevel());
- TopLevel()->UpdateSpillRangePostMerge(other);
- TopLevel()->set_last_child(other->last_child());
-}
-
-
void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
LiveRange* child = this;
for (; child != nullptr; child = child->next()) {
@@ -560,7 +563,7 @@ void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
const InstructionOperand& spill_op) {
- for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
DCHECK(Start() <= pos->pos() && pos->pos() <= End());
if (!pos->HasOperand()) continue;
switch (pos->type()) {
@@ -599,7 +602,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
void LiveRange::SetUseHints(int register_index) {
- for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
@@ -621,8 +624,8 @@ bool LiveRange::CanCover(LifetimePosition position) const {
bool LiveRange::Covers(LifetimePosition position) const {
if (!CanCover(position)) return false;
- auto start_search = FirstSearchIntervalForPosition(position);
- for (auto interval = start_search; interval != nullptr;
+ UseInterval* start_search = FirstSearchIntervalForPosition(position);
+ for (UseInterval* interval = start_search; interval != nullptr;
interval = interval->next()) {
DCHECK(interval->next() == nullptr ||
interval->next()->start() >= interval->start());
@@ -635,14 +638,14 @@ bool LiveRange::Covers(LifetimePosition position) const {
LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
- auto b = other->first_interval();
+ UseInterval* b = other->first_interval();
if (b == nullptr) return LifetimePosition::Invalid();
- auto advance_last_processed_up_to = b->start();
- auto a = FirstSearchIntervalForPosition(b->start());
+ LifetimePosition advance_last_processed_up_to = b->start();
+ UseInterval* a = FirstSearchIntervalForPosition(b->start());
while (a != nullptr && b != nullptr) {
if (a->start() > other->End()) break;
if (b->start() > End()) break;
- auto cur_intersection = a->Intersect(b);
+ LifetimePosition cur_intersection = a->Intersect(b);
if (cur_intersection.IsValid()) {
return cur_intersection;
}
@@ -661,7 +664,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
unsigned LiveRange::GetSize() {
if (size_ == kInvalidSize) {
size_ = 0;
- for (auto interval = first_interval(); interval != nullptr;
+ for (const UseInterval* interval = first_interval(); interval != nullptr;
interval = interval->next()) {
size_ += (interval->end().value() - interval->start().value());
}
@@ -671,28 +674,48 @@ unsigned LiveRange::GetSize() {
}
-struct TopLevelLiveRange::SpillAtDefinitionList : ZoneObject {
- SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
- SpillAtDefinitionList* next)
+void LiveRange::Print(const RegisterConfiguration* config,
+ bool with_children) const {
+ OFStream os(stdout);
+ PrintableLiveRange wrapper;
+ wrapper.register_configuration_ = config;
+ for (const LiveRange* i = this; i != nullptr; i = i->next()) {
+ wrapper.range_ = i;
+ os << wrapper << std::endl;
+ if (!with_children) break;
+ }
+}
+
+
+void LiveRange::Print(bool with_children) const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ Print(config, with_children);
+}
+
+
+struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
+ SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
+ SpillMoveInsertionList* next)
: gap_index(gap_index), operand(operand), next(next) {}
const int gap_index;
InstructionOperand* const operand;
- SpillAtDefinitionList* const next;
+ SpillMoveInsertionList* const next;
};
-TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type)
- : LiveRange(0, machine_type, this),
+TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
+ : LiveRange(0, rep, this),
vreg_(vreg),
last_child_id_(0),
splintered_from_(nullptr),
spill_operand_(nullptr),
- spills_at_definition_(nullptr),
+ spill_move_insertion_locations_(nullptr),
spilled_in_deferred_blocks_(false),
spill_start_index_(kMaxInt),
- last_child_(this),
last_pos_(nullptr),
- splinter_(nullptr) {
+ splinter_(nullptr),
+ has_preassigned_slot_(false) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
@@ -704,57 +727,11 @@ int TopLevelLiveRange::debug_virt_reg() const {
#endif
-void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand) {
+void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
+ InstructionOperand* operand) {
DCHECK(HasNoSpillType());
- spills_at_definition_ = new (zone)
- SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
-}
-
-
-void TopLevelLiveRange::MarkSpilledInDeferredBlock(
- const InstructionSequence* code) {
- if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
- !HasSpillRange()) {
- return;
- }
-
- int count = 0;
- for (const LiveRange* child = this; child != nullptr; child = child->next()) {
- int first_instr = child->Start().ToInstructionIndex();
-
- // If the range starts at instruction end, the first instruction index is
- // the next one.
- if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
- ++first_instr;
- }
-
- // We only look at where the range starts. It doesn't matter where it ends:
- // if it ends past this block, then either there is a phi there already,
- // or ResolveControlFlow will adapt the last instruction gap of this block
- // as if there were a phi. In either case, data flow will be correct.
- const InstructionBlock* block = code->GetInstructionBlock(first_instr);
-
- // If we have slot uses in a subrange, bail out, because we need the value
- // on the stack before that use.
- bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
- if (!block->IsDeferred()) {
- if (child->spilled() || has_slot_use) {
- TRACE(
- "Live Range %d must be spilled at definition: found a "
- "slot-requiring non-deferred child range %d.\n",
- TopLevel()->vreg(), child->relative_id());
- return;
- }
- } else {
- if (child->spilled() || has_slot_use) ++count;
- }
- }
- if (count == 0) return;
-
- spill_start_index_ = -1;
- spilled_in_deferred_blocks_ = true;
- spills_at_definition_ = nullptr;
+ spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList(
+ gap_index, operand, spill_move_insertion_locations_);
}
@@ -768,14 +745,15 @@ bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
if (!child->spilled() &&
child->NextSlotPosition(child->Start()) != nullptr) {
- auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
+ Instruction* instr =
+ code->InstructionAt(child->Start().ToInstructionIndex());
// Insert spill at the end to let live range connections happen at START.
- auto move =
+ ParallelMove* move =
instr->GetOrCreateParallelMove(Instruction::END, code->zone());
InstructionOperand assigned = child->GetAssignedOperand();
if (TopLevel()->has_slot_use()) {
bool found = false;
- for (auto move_op : *move) {
+ for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(assigned) &&
move_op->destination().Equals(spill_operand)) {
@@ -794,31 +772,35 @@ bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
}
-void TopLevelLiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
- const InstructionOperand& op,
- bool might_be_duplicated) {
- DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
- auto zone = sequence->zone();
+void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
+ const InstructionOperand& op,
+ bool might_be_duplicated) {
+ DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+ Zone* zone = sequence->zone();
- for (auto to_spill = spills_at_definition_; to_spill != nullptr;
- to_spill = to_spill->next) {
- auto instr = sequence->InstructionAt(to_spill->gap_index);
- auto move = instr->GetOrCreateParallelMove(Instruction::START, zone);
+ for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+ to_spill != nullptr; to_spill = to_spill->next) {
+ Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
+ ParallelMove* move =
+ instr->GetOrCreateParallelMove(Instruction::START, zone);
// Skip insertion if it's possible that the move exists already as a
// constraint move from a fixed output register to a slot.
- if (might_be_duplicated) {
+ if (might_be_duplicated || has_preassigned_slot()) {
bool found = false;
- for (auto move_op : *move) {
+ for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(*to_spill->operand) &&
move_op->destination().Equals(op)) {
found = true;
+ if (has_preassigned_slot()) move_op->Eliminate();
break;
}
}
if (found) continue;
}
- move->AddMove(*to_spill->operand, op);
+ if (!has_preassigned_slot()) {
+ move->AddMove(*to_spill->operand, op);
+ }
}
}
@@ -839,9 +821,9 @@ void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
- auto spill_range = GetSpillRange();
+ SpillRange* spill_range = GetSpillRange();
int index = spill_range->assigned_slot();
- return AllocatedOperand(LocationOperand::STACK_SLOT, machine_type(), index);
+ return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
}
@@ -850,19 +832,14 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
DCHECK(start != Start() || end != End());
DCHECK(start < end);
- TopLevelLiveRange splinter_temp(-1, machine_type());
+ TopLevelLiveRange splinter_temp(-1, representation());
UsePosition* last_in_splinter = nullptr;
- if (start <= Start()) {
- // TODO(mtrofin): here, the TopLevel part is in the deferred range, so we
- // may want to continue processing the splinter. However, if the value is
- // defined in a cold block, and then used in a hot block, it follows that
- // it should terminate on the RHS of a phi, defined on the hot path. We
- // should check this, however, this may not be the place, because we don't
- // have access to the instruction sequence.
- DCHECK(end < End());
- DetachAt(end, &splinter_temp, zone);
- next_ = nullptr;
- } else if (end >= End()) {
+ // Live ranges defined in deferred blocks stay in deferred blocks, so we
+ // don't need to splinter them. That means that start should always be
+ // after the beginning of the range.
+ DCHECK(start > Start());
+
+ if (end >= End()) {
DCHECK(start > Start());
DetachAt(start, &splinter_temp, zone);
next_ = nullptr;
@@ -873,7 +850,7 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
UsePosition* last = DetachAt(start, &splinter_temp, zone);
- LiveRange end_part(kInvalidId, this->machine_type(), nullptr);
+ LiveRange end_part(kInvalidId, this->representation(), nullptr);
last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
next_ = end_part.next_;
@@ -1000,6 +977,24 @@ void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
}
+void TopLevelLiveRange::VerifyChildrenInOrder() const {
+ LifetimePosition last_end = End();
+ for (const LiveRange* child = this->next(); child != nullptr;
+ child = child->next()) {
+ DCHECK(last_end <= child->Start());
+ last_end = child->End();
+ }
+}
+
+
+void TopLevelLiveRange::Verify() const {
+ VerifyChildrenInOrder();
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ VerifyChildStructure();
+ }
+}
+
+
void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
DCHECK(first_interval_ != nullptr);
@@ -1013,7 +1008,7 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
LifetimePosition end, Zone* zone) {
TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
end.value());
- auto new_end = end;
+ LifetimePosition new_end = end;
while (first_interval_ != nullptr && first_interval_->start() <= end) {
if (first_interval_->end() > end) {
new_end = first_interval_->end();
@@ -1021,7 +1016,7 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
first_interval_ = first_interval_->next();
}
- auto new_interval = new (zone) UseInterval(start, new_end);
+ UseInterval* new_interval = new (zone) UseInterval(start, new_end);
new_interval->set_next(first_interval_);
first_interval_ = new_interval;
if (new_interval->next() == nullptr) {
@@ -1035,14 +1030,14 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
end.value());
if (first_interval_ == nullptr) {
- auto interval = new (zone) UseInterval(start, end);
+ UseInterval* interval = new (zone) UseInterval(start, end);
first_interval_ = interval;
last_interval_ = interval;
} else {
if (end == first_interval_->start()) {
first_interval_->set_start(start);
} else if (end < first_interval_->start()) {
- auto interval = new (zone) UseInterval(start, end);
+ UseInterval* interval = new (zone) UseInterval(start, end);
interval->set_next(first_interval_);
first_interval_ = interval;
} else {
@@ -1058,11 +1053,11 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
- auto pos = use_pos->pos();
+ LifetimePosition pos = use_pos->pos();
TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
- auto current = first_pos_;
+ UsePosition* current = first_pos_;
while (current != nullptr && current->pos() < pos) {
prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
@@ -1111,8 +1106,8 @@ std::ostream& operator<<(std::ostream& os,
if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
os << "{" << std::endl;
- auto interval = range->first_interval();
- auto use_pos = range->first_pos();
+ UseInterval* interval = range->first_interval();
+ UsePosition* use_pos = range->first_pos();
PrintableInstructionOperand pio;
pio.register_configuration_ = printable_range.register_configuration_;
while (use_pos != nullptr) {
@@ -1137,7 +1132,7 @@ std::ostream& operator<<(std::ostream& os,
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
- byte_width_(GetByteWidth(parent->machine_type())),
+ byte_width_(GetByteWidth(parent->representation())),
kind_(parent->kind()) {
// Spill ranges are created for top level, non-splintered ranges. This is so
// that, when merging decisions are made, we consider the full extent of the
@@ -1147,9 +1142,9 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
UseInterval* node = nullptr;
// Copy the intervals for all ranges.
for (LiveRange* range = parent; range != nullptr; range = range->next()) {
- auto src = range->first_interval();
+ UseInterval* src = range->first_interval();
while (src != nullptr) {
- auto new_node = new (zone) UseInterval(src->start(), src->end());
+ UseInterval* new_node = new (zone) UseInterval(src->start(), src->end());
if (result == nullptr) {
result = new_node;
} else {
@@ -1167,7 +1162,7 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
int SpillRange::ByteWidth() const {
- return GetByteWidth(live_ranges_[0]->machine_type());
+ return GetByteWidth(live_ranges_[0]->representation());
}
@@ -1182,13 +1177,14 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
bool SpillRange::TryMerge(SpillRange* other) {
+ if (HasSlot() || other->HasSlot()) return false;
// TODO(dcarney): byte widths should be compared here not kinds.
if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
IsIntersectingWith(other)) {
return false;
}
- auto max = LifetimePosition::MaxPosition();
+ LifetimePosition max = LifetimePosition::MaxPosition();
if (End() < other->End() && other->End() != max) {
end_position_ = other->End();
}
@@ -1197,7 +1193,7 @@ bool SpillRange::TryMerge(SpillRange* other) {
MergeDisjointIntervals(other->use_interval_);
other->use_interval_ = nullptr;
- for (auto range : other->live_ranges()) {
+ for (TopLevelLiveRange* range : other->live_ranges()) {
DCHECK(range->GetSpillRange() == other);
range->SetSpillRange(this);
}
@@ -1212,7 +1208,7 @@ bool SpillRange::TryMerge(SpillRange* other) {
void SpillRange::MergeDisjointIntervals(UseInterval* other) {
UseInterval* tail = nullptr;
- auto current = use_interval_;
+ UseInterval* current = use_interval_;
while (other != nullptr) {
// Make sure the 'current' list starts first
if (current == nullptr || current->start() > other->start()) {
@@ -1233,6 +1229,21 @@ void SpillRange::MergeDisjointIntervals(UseInterval* other) {
}
+void SpillRange::Print() const {
+ OFStream os(stdout);
+ os << "{" << std::endl;
+ for (TopLevelLiveRange* range : live_ranges()) {
+ os << range->vreg() << " ";
+ }
+ os << std::endl;
+
+ for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
+ os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
+ }
+ os << "}" << std::endl;
+}
+
+
RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi,
const InstructionBlock* block,
Zone* zone)
@@ -1252,7 +1263,7 @@ void RegisterAllocationData::PhiMapValue::AddOperand(
void RegisterAllocationData::PhiMapValue::CommitAssignment(
const InstructionOperand& assigned) {
- for (auto operand : incoming_operands_) {
+ for (InstructionOperand* operand : incoming_operands_) {
InstructionOperand::ReplaceWith(operand, &assigned);
}
}
@@ -1283,7 +1294,8 @@ RegisterAllocationData::RegisterAllocationData(
delayed_references_(allocation_zone()),
assigned_registers_(nullptr),
assigned_double_registers_(nullptr),
- virtual_register_count_(code->VirtualRegisterCount()) {
+ virtual_register_count_(code->VirtualRegisterCount()),
+ preassigned_slot_ranges_(zone) {
DCHECK(this->config()->num_general_registers() <=
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK(this->config()->num_double_registers() <=
@@ -1300,13 +1312,14 @@ RegisterAllocationData::RegisterAllocationData(
MoveOperands* RegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
- auto instr = code()->InstructionAt(index);
- auto moves = instr->GetOrCreateParallelMove(position, code_zone());
+ Instruction* instr = code()->InstructionAt(index);
+ ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
return moves->AddMove(from, to);
}
-MachineType RegisterAllocationData::MachineTypeFor(int virtual_register) {
+MachineRepresentation RegisterAllocationData::RepresentationFor(
+ int virtual_register) {
DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
return code()->GetRepresentation(virtual_register);
}
@@ -1316,9 +1329,9 @@ TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
- auto result = live_ranges()[index];
+ TopLevelLiveRange* result = live_ranges()[index];
if (result == nullptr) {
- result = NewLiveRange(index, MachineTypeFor(index));
+ result = NewLiveRange(index, RepresentationFor(index));
live_ranges()[index] = result;
}
return result;
@@ -1326,8 +1339,8 @@ TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
- int index, MachineType machine_type) {
- return new (allocation_zone()) TopLevelLiveRange(index, machine_type);
+ int index, MachineRepresentation rep) {
+ return new (allocation_zone()) TopLevelLiveRange(index, rep);
}
@@ -1341,16 +1354,16 @@ int RegisterAllocationData::GetNextLiveRangeId() {
TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
- MachineType machine_type) {
+ MachineRepresentation rep) {
int vreg = GetNextLiveRangeId();
- TopLevelLiveRange* ret = NewLiveRange(vreg, machine_type);
+ TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
return ret;
}
RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap(
const InstructionBlock* block, PhiInstruction* phi) {
- auto map_value = new (allocation_zone())
+ RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone())
RegisterAllocationData::PhiMapValue(phi, block, allocation_zone());
auto res =
phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
@@ -1395,6 +1408,37 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
}
+// If a range is defined in a deferred block, we can expect all the range
+// to only cover positions in deferred blocks. Otherwise, a block on the
+// hot path would be dominated by a deferred block, meaning it is unreachable
+// without passing through the deferred block, which is contradictory.
+// In particular, when such a range contributes a result back on the hot
+// path, it will be as one of the inputs of a phi. In that case, the value
+// will be transferred via a move in the Gap::END's of the last instruction
+// of a deferred block.
+bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
+ for (const TopLevelLiveRange* range : live_ranges()) {
+ if (range == nullptr || range->IsEmpty() ||
+ !code()
+ ->GetInstructionBlock(range->Start().ToInstructionIndex())
+ ->IsDeferred()) {
+ continue;
+ }
+ for (const UseInterval* i = range->first_interval(); i != nullptr;
+ i = i->next()) {
+ int first = i->FirstGapIndex();
+ int last = i->LastGapIndex();
+ for (int instr = first; instr <= last;) {
+ const InstructionBlock* block = code()->GetInstructionBlock(instr);
+ if (!block->IsDeferred()) return false;
+ instr = block->last_instruction_index() + 1;
+ }
+ }
+ }
+ return true;
+}
+
+
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
@@ -1419,7 +1463,7 @@ SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
TopLevelLiveRange* range) {
DCHECK(!range->HasSpillOperand());
DCHECK(!range->IsSplinter());
- auto spill_range =
+ SpillRange* spill_range =
new (allocation_zone()) SpillRange(range, allocation_zone());
return spill_range;
}
@@ -1442,74 +1486,6 @@ bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
}
-void RegisterAllocationData::Print(
- const InstructionSequence* instructionSequence) {
- OFStream os(stdout);
- PrintableInstructionSequence wrapper;
- wrapper.register_configuration_ = config();
- wrapper.sequence_ = instructionSequence;
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const Instruction* instruction) {
- OFStream os(stdout);
- PrintableInstruction wrapper;
- wrapper.instr_ = instruction;
- wrapper.register_configuration_ = config();
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const LiveRange* range, bool with_children) {
- OFStream os(stdout);
- PrintableLiveRange wrapper;
- wrapper.register_configuration_ = config();
- for (const LiveRange* i = range; i != nullptr; i = i->next()) {
- wrapper.range_ = i;
- os << wrapper << std::endl;
- if (!with_children) break;
- }
-}
-
-
-void RegisterAllocationData::Print(const InstructionOperand& op) {
- OFStream os(stdout);
- PrintableInstructionOperand wrapper;
- wrapper.register_configuration_ = config();
- wrapper.op_ = op;
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const MoveOperands* move) {
- OFStream os(stdout);
- PrintableInstructionOperand wrapper;
- wrapper.register_configuration_ = config();
- wrapper.op_ = move->destination();
- os << wrapper << " = ";
- wrapper.op_ = move->source();
- os << wrapper << std::endl;
-}
-
-
-void RegisterAllocationData::Print(const SpillRange* spill_range) {
- OFStream os(stdout);
- os << "{" << std::endl;
- for (TopLevelLiveRange* range : spill_range->live_ranges()) {
- os << range->vreg() << " ";
- }
- os << std::endl;
-
- for (UseInterval* interval = spill_range->interval(); interval != nullptr;
- interval = interval->next()) {
- os << '[' << interval->start() << ", " << interval->end() << ')'
- << std::endl;
- }
- os << "}" << std::endl;
-}
-
-
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
@@ -1519,22 +1495,22 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
DCHECK(operand->HasFixedPolicy());
InstructionOperand allocated;
- MachineType machine_type = InstructionSequence::DefaultRepresentation();
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
int virtual_register = operand->virtual_register();
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
- machine_type = data()->MachineTypeFor(virtual_register);
+ rep = data()->RepresentationFor(virtual_register);
}
if (operand->HasFixedSlotPolicy()) {
- allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, machine_type,
+ allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
- DCHECK(!IsFloatingPoint(machine_type));
- allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
+ DCHECK(!IsFloatingPoint(rep));
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
} else if (operand->HasFixedDoubleRegisterPolicy()) {
- DCHECK(IsFloatingPoint(machine_type));
+ DCHECK(IsFloatingPoint(rep));
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
- allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
+ allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
} else {
UNREACHABLE();
@@ -1542,7 +1518,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
InstructionOperand::ReplaceWith(operand, &allocated);
if (is_tagged) {
TRACE("Fixed reg is tagged at %d\n", pos);
- auto instr = code()->InstructionAt(pos);
+ Instruction* instr = code()->InstructionAt(pos);
if (instr->HasReferenceMap()) {
instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
}
@@ -1552,7 +1528,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
void ConstraintBuilder::MeetRegisterConstraints() {
- for (auto block : code()->instruction_blocks()) {
+ for (InstructionBlock* block : code()->instruction_blocks()) {
MeetRegisterConstraints(block);
}
}
@@ -1574,13 +1550,13 @@ void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block) {
int end = block->last_instruction_index();
- auto last_instruction = code()->InstructionAt(end);
+ Instruction* last_instruction = code()->InstructionAt(end);
for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
- auto output_operand = last_instruction->OutputAt(i);
+ InstructionOperand* output_operand = last_instruction->OutputAt(i);
DCHECK(!output_operand->IsConstant());
- auto output = UnallocatedOperand::cast(output_operand);
+ UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
int output_vreg = output->virtual_register();
- auto range = data()->GetOrCreateLiveRangeFor(output_vreg);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
bool assigned = false;
if (output->HasFixedPolicy()) {
AllocateFixed(output, -1, false);
@@ -1593,7 +1569,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
assigned = true;
}
- for (auto succ : block->successors()) {
+ for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
int gap_index = successor->first_instruction_index();
@@ -1605,11 +1581,11 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
}
if (!assigned) {
- for (auto succ : block->successors()) {
+ for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK(successor->PredecessorCount() == 1);
int gap_index = successor->first_instruction_index();
- range->SpillAtDefinition(allocation_zone(), gap_index, output);
+ range->RecordSpillLocation(allocation_zone(), gap_index, output);
range->SetSpillStartIndex(gap_index);
}
}
@@ -1618,10 +1594,10 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
- auto first = code()->InstructionAt(instr_index);
+ Instruction* first = code()->InstructionAt(instr_index);
// Handle fixed temporaries.
for (size_t i = 0; i < first->TempCount(); i++) {
- auto temp = UnallocatedOperand::cast(first->TempAt(i));
+ UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
}
// Handle constant/fixed output operands.
@@ -1629,19 +1605,24 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
InstructionOperand* output = first->OutputAt(i);
if (output->IsConstant()) {
int output_vreg = ConstantOperand::cast(output)->virtual_register();
- auto range = data()->GetOrCreateLiveRangeFor(output_vreg);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
range->SetSpillStartIndex(instr_index + 1);
range->SetSpillOperand(output);
continue;
}
- auto first_output = UnallocatedOperand::cast(output);
- auto range =
+ UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+ TopLevelLiveRange* range =
data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
int output_vreg = first_output->virtual_register();
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
bool is_tagged = code()->IsReference(output_vreg);
+ if (first_output->HasSecondaryStorage()) {
+ range->MarkHasPreassignedSlot();
+ data()->preassigned_slot_ranges().push_back(
+ std::make_pair(range, first_output->GetSecondaryStorage()));
+ }
AllocateFixed(first_output, instr_index, is_tagged);
// This value is produced on the stack, we never need to spill it.
@@ -1658,8 +1639,8 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
// Make sure we add a gap move for spilling (if we have not done
// so already).
if (!assigned) {
- range->SpillAtDefinition(allocation_zone(), instr_index + 1,
- first_output);
+ range->RecordSpillLocation(allocation_zone(), instr_index + 1,
+ first_output);
range->SetSpillStartIndex(instr_index + 1);
}
}
@@ -1667,14 +1648,14 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
- auto second = code()->InstructionAt(instr_index);
+ Instruction* second = code()->InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
- auto input = second->InputAt(i);
+ InstructionOperand* input = second->InputAt(i);
if (input->IsImmediate() || input->IsExplicit()) {
continue; // Ignore immediates and explicitly reserved registers.
}
- auto cur_input = UnallocatedOperand::cast(input);
+ UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
@@ -1685,9 +1666,9 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
}
// Handle "output same as input" for second instruction.
for (size_t i = 0; i < second->OutputCount(); i++) {
- auto output = second->OutputAt(i);
+ InstructionOperand* output = second->OutputAt(i);
if (!output->IsUnallocated()) continue;
- auto second_output = UnallocatedOperand::cast(output);
+ UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (!second_output->HasSameAsInputPolicy()) continue;
DCHECK(i == 0); // Only valid for first output.
UnallocatedOperand* cur_input =
@@ -1696,8 +1677,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
cur_input->set_virtual_register(second_output->virtual_register());
- auto gap_move = data()->AddGapMove(instr_index, Instruction::END,
- input_copy, *cur_input);
+ MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
+ input_copy, *cur_input);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
RegisterAllocationData::DelayedReference delayed_reference = {
@@ -1726,25 +1707,26 @@ void ConstraintBuilder::ResolvePhis() {
void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
- for (auto phi : block->phis()) {
+ for (PhiInstruction* phi : block->phis()) {
int phi_vreg = phi->virtual_register();
- auto map_value = data()->InitializePhiMap(block, phi);
- auto& output = phi->output();
+ RegisterAllocationData::PhiMapValue* map_value =
+ data()->InitializePhiMap(block, phi);
+ InstructionOperand& output = phi->output();
// Map the destination operands, so the commitment phase can find them.
for (size_t i = 0; i < phi->operands().size(); ++i) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
- auto move = data()->AddGapMove(cur_block->last_instruction_index(),
- Instruction::END, input, output);
+ MoveOperands* move = data()->AddGapMove(
+ cur_block->last_instruction_index(), Instruction::END, input, output);
map_value->AddOperand(&move->destination());
DCHECK(!code()
->InstructionAt(cur_block->last_instruction_index())
->HasReferenceMap());
}
- auto live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
+ TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
int gap_index = block->first_instruction_index();
- live_range->SpillAtDefinition(allocation_zone(), gap_index, &output);
+ live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
live_range->SetSpillStartIndex(gap_index);
// We use the phi-ness of some nodes in some later heuristics.
live_range->set_is_phi(true);
@@ -1779,7 +1761,7 @@ BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
// All phi input operands corresponding to this successor edge are live
// out from this block.
- auto successor = code->InstructionBlockAt(succ);
+ const InstructionBlock* successor = code->InstructionBlockAt(succ);
size_t index = successor->PredecessorIndexOf(block->rpo_number());
DCHECK(index < successor->PredecessorCount());
for (PhiInstruction* phi : successor->phis()) {
@@ -1796,14 +1778,15 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
BitVector* live_out) {
// Add an interval that includes the entire block to the live range for
// each live_out value.
- auto start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- auto end = LifetimePosition::InstructionFromInstructionIndex(
- block->last_instruction_index()).NextStart();
+ LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
+ block->last_instruction_index())
+ .NextStart();
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
- auto range = data()->GetOrCreateLiveRangeFor(operand_index);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->AddUseInterval(start, end, allocation_zone());
iterator.Advance();
}
@@ -1817,7 +1800,7 @@ int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
- auto result = data()->fixed_live_ranges()[index];
+ TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedLiveRangeID(index),
InstructionSequence::DefaultRepresentation());
@@ -1832,9 +1815,10 @@ TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
DCHECK(index < config()->num_double_registers());
- auto result = data()->fixed_double_live_ranges()[index];
+ TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
- result = data()->NewLiveRange(FixedDoubleLiveRangeID(index), kRepFloat64);
+ result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
+ MachineRepresentation::kFloat64);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(DOUBLE_REGISTERS, index);
@@ -1874,7 +1858,7 @@ UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type) {
- auto range = LiveRangeFor(operand);
+ TopLevelLiveRange* range = LiveRangeFor(operand);
if (range == nullptr) return nullptr;
if (range->IsEmpty() || range->Start() > position) {
@@ -1885,8 +1869,9 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
range->ShortenTo(position);
}
if (!operand->IsUnallocated()) return nullptr;
- auto unalloc_operand = UnallocatedOperand::cast(operand);
- auto use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
+ UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+ UsePosition* use_pos =
+ NewUsePosition(position, unalloc_operand, hint, hint_type);
range->AddUsePosition(use_pos);
return use_pos;
}
@@ -1896,7 +1881,7 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type) {
- auto range = LiveRangeFor(operand);
+ TopLevelLiveRange* range = LiveRangeFor(operand);
if (range == nullptr) return nullptr;
UsePosition* use_pos = nullptr;
if (operand->IsUnallocated()) {
@@ -1912,19 +1897,19 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
BitVector* live) {
int block_start = block->first_instruction_index();
- auto block_start_position =
+ LifetimePosition block_start_position =
LifetimePosition::GapFromInstructionIndex(block_start);
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
- auto curr_position =
+ LifetimePosition curr_position =
LifetimePosition::InstructionFromInstructionIndex(index);
- auto instr = code()->InstructionAt(index);
+ Instruction* instr = code()->InstructionAt(index);
DCHECK(instr != nullptr);
DCHECK(curr_position.IsInstructionPosition());
// Process output, inputs, and temps of this instruction.
for (size_t i = 0; i < instr->OutputCount(); i++) {
- auto output = instr->OutputAt(i);
+ InstructionOperand* output = instr->OutputAt(i);
if (output->IsUnallocated()) {
// Unsupported.
DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
@@ -1934,7 +1919,10 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int out_vreg = ConstantOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
}
- if (block->IsHandler() && index == block_start) {
+ if (block->IsHandler() && index == block_start && output->IsAllocated() &&
+ output->IsRegister() &&
+ AllocatedOperand::cast(output)->GetRegister().is(
+ v8::internal::kReturnRegister0)) {
// The register defined here is blocked from gap start - it is the
// exception value.
// TODO(mtrofin): should we explore an explicit opcode for
@@ -1949,7 +1937,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
int code = config()->GetAllocatableGeneralCode(i);
if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
- auto range = FixedLiveRangeFor(code);
+ TopLevelLiveRange* range = FixedLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
@@ -1961,7 +1949,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
++i) {
int code = config()->GetAllocatableDoubleCode(i);
if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
- auto range = FixedDoubleLiveRangeFor(code);
+ TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
@@ -1969,7 +1957,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
for (size_t i = 0; i < instr->InputCount(); i++) {
- auto input = instr->InputAt(i);
+ InstructionOperand* input = instr->InputAt(i);
if (input->IsImmediate() || input->IsExplicit()) {
continue; // Ignore immediates and explicitly reserved registers.
}
@@ -1993,7 +1981,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
for (size_t i = 0; i < instr->TempCount(); i++) {
- auto temp = instr->TempAt(i);
+ InstructionOperand* temp = instr->TempAt(i);
// Unsupported.
DCHECK_IMPLIES(temp->IsUnallocated(),
!UnallocatedOperand::cast(temp)->HasSlotPolicy());
@@ -2015,24 +2003,25 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
Instruction::START};
curr_position = curr_position.PrevStart();
DCHECK(curr_position.IsGapPosition());
- for (auto position : kPositions) {
- auto move = instr->GetParallelMove(position);
+ for (const Instruction::GapPosition& position : kPositions) {
+ ParallelMove* move = instr->GetParallelMove(position);
if (move == nullptr) continue;
if (position == Instruction::END) {
curr_position = curr_position.End();
} else {
curr_position = curr_position.Start();
}
- for (auto cur : *move) {
- auto& from = cur->source();
- auto& to = cur->destination();
+ for (MoveOperands* cur : *move) {
+ InstructionOperand& from = cur->source();
+ InstructionOperand& to = cur->destination();
void* hint = &to;
UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
UsePosition* to_use = nullptr;
int phi_vreg = -1;
if (to.IsUnallocated()) {
int to_vreg = UnallocatedOperand::cast(to).virtual_register();
- auto to_range = data()->GetOrCreateLiveRangeFor(to_vreg);
+ TopLevelLiveRange* to_range =
+ data()->GetOrCreateLiveRangeFor(to_vreg);
if (to_range->is_phi()) {
phi_vreg = to_vreg;
if (to_range->is_non_loop_phi()) {
@@ -2056,7 +2045,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
} else {
Define(curr_position, &to);
}
- auto from_use =
+ UsePosition* from_use =
Use(block_start_position, curr_position, &from, hint, hint_type);
// Mark range live.
if (from.IsUnallocated()) {
@@ -2079,16 +2068,16 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
BitVector* live) {
- for (auto phi : block->phis()) {
+ for (PhiInstruction* phi : block->phis()) {
// The live range interval already ends at the first instruction of the
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
InstructionOperand* hint = nullptr;
- auto instr = GetLastInstruction(
+ Instruction* instr = GetLastInstruction(
code(), code()->InstructionBlockAt(block->predecessors()[0]));
- for (auto move : *instr->GetParallelMove(Instruction::END)) {
- auto& to = move->destination();
+ for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
+ InstructionOperand& to = move->destination();
if (to.IsUnallocated() &&
UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
hint = &move->source();
@@ -2096,10 +2085,10 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
}
}
DCHECK(hint != nullptr);
- auto block_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- auto use_pos = Define(block_start, &phi->output(), hint,
- UsePosition::HintTypeForOperand(*hint));
+ UsePosition* use_pos = Define(block_start, &phi->output(), hint,
+ UsePosition::HintTypeForOperand(*hint));
MapPhiHint(hint, use_pos);
}
}
@@ -2111,10 +2100,11 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
// Add a live range stretching from the first loop instruction to the last
// for each value live on entry to the header.
BitVector::Iterator iterator(live);
- auto start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- auto end = LifetimePosition::GapFromInstructionIndex(
- code()->LastLoopInstructionIndex(block)).NextFullStart();
+ LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
+ code()->LastLoopInstructionIndex(block))
+ .NextFullStart();
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
@@ -2133,8 +2123,9 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
- auto block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
- auto live = ComputeLiveOut(block, data());
+ InstructionBlock* block =
+ code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
+ BitVector* live = ComputeLiveOut(block, data());
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
AddInitialIntervals(block, live);
@@ -2149,7 +2140,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
live_in_sets()[block_id] = live;
}
// Postprocess the ranges.
- for (auto range : data()->live_ranges()) {
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
// Give slots to all ranges with a non fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
@@ -2160,7 +2151,8 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Without this hack, all uses with "any" policy would get the constant
// operand assigned.
if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
- for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
+ for (UsePosition* pos = range->first_pos(); pos != nullptr;
+ pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot) continue;
UsePositionType new_type = UsePositionType::kAny;
// Can't mark phis as needing a register.
@@ -2171,6 +2163,14 @@ void LiveRangeBuilder::BuildLiveRanges() {
}
}
}
+ for (auto preassigned : data()->preassigned_slot_ranges()) {
+ TopLevelLiveRange* range = preassigned.first;
+ int slot_id = preassigned.second;
+ SpillRange* spill = range->HasSpillRange()
+ ? range->GetSpillRange()
+ : data()->AssignSpillRangeToLiveRange(range);
+ spill->set_assigned_slot(slot_id);
+ }
#ifdef DEBUG
Verify();
#endif
@@ -2199,8 +2199,8 @@ void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
}
- for (LiveRange* current : data()->live_ranges()) {
- if (current != nullptr) current->Verify();
+ for (TopLevelLiveRange* current : data()->live_ranges()) {
+ if (current != nullptr && !current->IsEmpty()) current->Verify();
}
}
@@ -2295,7 +2295,7 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
range->TopLevel()->vreg(), range->relative_id(), start.value(),
end.value());
- auto split_pos = FindOptimalSplitPos(start, end);
+ LifetimePosition split_pos = FindOptimalSplitPos(start, end);
DCHECK(split_pos >= start);
return SplitRangeAt(range, split_pos);
}
@@ -2310,8 +2310,8 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
// We have no choice
if (start_instr == end_instr) return end;
- auto start_block = GetInstructionBlock(code(), start);
- auto end_block = GetInstructionBlock(code(), end);
+ const InstructionBlock* start_block = GetInstructionBlock(code(), start);
+ const InstructionBlock* end_block = GetInstructionBlock(code(), end);
if (end_block == start_block) {
// The interval is split in the same basic block. Split at the latest
@@ -2319,7 +2319,7 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
return end;
}
- auto block = end_block;
+ const InstructionBlock* block = end_block;
// Find header of outermost loop.
// TODO(titzer): fix redundancy below.
while (GetContainingLoop(code(), block) != nullptr &&
@@ -2339,19 +2339,20 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* range, LifetimePosition pos) {
- auto block = GetInstructionBlock(code(), pos.Start());
- auto loop_header =
+ const InstructionBlock* block = GetInstructionBlock(code(), pos.Start());
+ const InstructionBlock* loop_header =
block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
if (loop_header == nullptr) return pos;
- auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+ const UsePosition* prev_use =
+ range->PreviousUsePositionRegisterIsBeneficial(pos);
while (loop_header != nullptr) {
// We are going to spill live range inside the loop.
// If possible try to move spilling position backwards to loop header.
// This will reduce number of memory moves on the back edge.
- auto loop_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
loop_header->first_instruction_index());
if (range->Covers(loop_start)) {
@@ -2419,7 +2420,8 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
- SplitAndSpillRangesDefinedByMemoryOperand(false);
+ SplitAndSpillRangesDefinedByMemoryOperand(code()->VirtualRegisterCount() <=
+ num_allocatable_registers());
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (!CanProcessRange(range)) continue;
@@ -2434,7 +2436,7 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(UnhandledIsSorted());
auto& fixed_ranges = GetFixedRegisters();
- for (auto current : fixed_ranges) {
+ for (TopLevelLiveRange* current : fixed_ranges) {
if (current != nullptr) {
DCHECK_EQ(mode(), current->kind());
AddToInactive(current);
@@ -2443,10 +2445,10 @@ void LinearScanAllocator::AllocateRegisters() {
while (!unhandled_live_ranges().empty()) {
DCHECK(UnhandledIsSorted());
- auto current = unhandled_live_ranges().back();
+ LiveRange* current = unhandled_live_ranges().back();
unhandled_live_ranges().pop_back();
DCHECK(UnhandledIsSorted());
- auto position = current->Start();
+ LifetimePosition position = current->Start();
#ifdef DEBUG
allocation_finger_ = position;
#endif
@@ -2457,7 +2459,7 @@ void LinearScanAllocator::AllocateRegisters() {
continue;
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- auto cur_active = active_live_ranges()[i];
+ LiveRange* cur_active = active_live_ranges()[i];
if (cur_active->End() <= position) {
ActiveToHandled(cur_active);
--i; // The live range was removed from the list of active live ranges.
@@ -2468,7 +2470,7 @@ void LinearScanAllocator::AllocateRegisters() {
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- auto cur_inactive = inactive_live_ranges()[i];
+ LiveRange* cur_inactive = inactive_live_ranges()[i];
if (cur_inactive->End() <= position) {
InactiveToHandled(cur_inactive);
--i; // Live range was removed from the list of inactive live ranges.
@@ -2520,7 +2522,7 @@ void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
DCHECK(allocation_finger_ <= range->Start());
for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
--i) {
- auto cur_range = unhandled_live_ranges().at(i);
+ LiveRange* cur_range = unhandled_live_ranges().at(i);
if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
TRACE("Add live range %d:%d to unhandled at %d\n",
range->TopLevel()->vreg(), range->relative_id(), i + 1);
@@ -2566,8 +2568,8 @@ void LinearScanAllocator::SortUnhandled() {
bool LinearScanAllocator::UnhandledIsSorted() {
size_t len = unhandled_live_ranges().size();
for (size_t i = 1; i < len; i++) {
- auto a = unhandled_live_ranges().at(i - 1);
- auto b = unhandled_live_ranges().at(i);
+ LiveRange* a = unhandled_live_ranges().at(i - 1);
+ LiveRange* b = unhandled_live_ranges().at(i);
if (a->Start() < b->Start()) return false;
}
return true;
@@ -2611,7 +2613,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
- for (auto cur_active : active_live_ranges()) {
+ for (LiveRange* cur_active : active_live_ranges()) {
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1)\n",
@@ -2619,9 +2621,10 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition::GapFromInstructionIndex(0).value());
}
- for (auto cur_inactive : inactive_live_ranges()) {
+ for (LiveRange* cur_inactive : inactive_live_ranges()) {
DCHECK(cur_inactive->End() > current->Start());
- auto next_intersection = cur_inactive->FirstIntersection(current);
+ LifetimePosition next_intersection =
+ cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
@@ -2656,7 +2659,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
}
}
- auto pos = free_until_pos[reg];
+ LifetimePosition pos = free_until_pos[reg];
if (pos <= current->Start()) {
// All registers are blocked.
@@ -2666,7 +2669,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
if (pos < current->End()) {
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
- auto tail = SplitRangeAt(current, pos);
+ LiveRange* tail = SplitRangeAt(current, pos);
AddToUnhandledSorted(tail);
}
@@ -2682,7 +2685,7 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
- auto register_use = current->NextRegisterPosition(current->Start());
+ UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == nullptr) {
// There is no use in the current live range that requires a register.
// We can just spill it.
@@ -2697,14 +2700,14 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
- for (auto range : active_live_ranges()) {
+ for (LiveRange* range : active_live_ranges()) {
int cur_reg = range->assigned_register();
if (range->TopLevel()->IsFixed() ||
!range->CanBeSpilled(current->Start())) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
- auto next_use =
+ UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[cur_reg] = range->End();
@@ -2714,9 +2717,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- for (auto range : inactive_live_ranges()) {
+ for (LiveRange* range : inactive_live_ranges()) {
DCHECK(range->End() > current->Start());
- auto next_intersection = range->FirstIntersection(current);
+ LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
if (range->TopLevel()->IsFixed()) {
@@ -2735,7 +2738,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- auto pos = use_pos[reg];
+ LifetimePosition pos = use_pos[reg];
if (pos < register_use->pos()) {
// All registers are blocked before the first use that requires a register.
@@ -2768,12 +2771,12 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
DCHECK(current->HasRegisterAssigned());
int reg = current->assigned_register();
- auto split_pos = current->Start();
+ LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
- auto range = active_live_ranges()[i];
+ LiveRange* range = active_live_ranges()[i];
if (range->assigned_register() == reg) {
- auto next_pos = range->NextRegisterPosition(current->Start());
- auto spill_pos = FindOptimalSpillingPos(range, split_pos);
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
if (next_pos == nullptr) {
SpillAfter(range, spill_pos);
} else {
@@ -2793,7 +2796,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
- auto range = inactive_live_ranges()[i];
+ LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
LifetimePosition next_intersection = range->FirstIntersection(current);
@@ -2817,9 +2820,10 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
if (!range->is_phi()) return false;
DCHECK(!range->HasSpillOperand());
- auto phi_map_value = data()->GetPhiMapValueFor(range);
- auto phi = phi_map_value->phi();
- auto block = phi_map_value->block();
+ RegisterAllocationData::PhiMapValue* phi_map_value =
+ data()->GetPhiMapValueFor(range);
+ const PhiInstruction* phi = phi_map_value->phi();
+ const InstructionBlock* block = phi_map_value->block();
// Count the number of spilled operands.
size_t spilled_count = 0;
LiveRange* first_op = nullptr;
@@ -2827,9 +2831,11 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
int op = phi->operands()[i];
LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
if (!op_range->TopLevel()->HasSpillRange()) continue;
- auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
+ const InstructionBlock* pred =
+ code()->InstructionBlockAt(block->predecessors()[i]);
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
while (op_range != nullptr && !op_range->CanCover(pred_end)) {
op_range = op_range->next();
}
@@ -2849,13 +2855,13 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
// Try to merge the spilled operands and count the number of merged spilled
// operands.
DCHECK(first_op != nullptr);
- auto first_op_spill = first_op->TopLevel()->GetSpillRange();
+ SpillRange* first_op_spill = first_op->TopLevel()->GetSpillRange();
size_t num_merged = 1;
for (size_t i = 1; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
- auto op_range = data()->GetOrCreateLiveRangeFor(op);
+ TopLevelLiveRange* op_range = data()->live_ranges()[op];
if (!op_range->HasSpillRange()) continue;
- auto op_spill = op_range->GetSpillRange();
+ SpillRange* op_spill = op_range->GetSpillRange();
if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
num_merged++;
}
@@ -2871,11 +2877,11 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
// If the range does not need register soon, spill it to the merged
// spill range.
- auto next_pos = range->Start();
+ LifetimePosition next_pos = range->Start();
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
- auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- auto spill_range =
+ SpillRange* spill_range =
range->TopLevel()->HasSpillRange()
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
@@ -2884,7 +2890,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
Spill(range);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
- auto spill_range =
+ SpillRange* spill_range =
range->TopLevel()->HasSpillRange()
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
@@ -2899,7 +2905,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
void LinearScanAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
- auto second_part = SplitRangeAt(range, pos);
+ LiveRange* second_part = SplitRangeAt(range, pos);
Spill(second_part);
}
@@ -2915,17 +2921,17 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LifetimePosition until,
LifetimePosition end) {
CHECK(start < end);
- auto second_part = SplitRangeAt(range, start);
+ LiveRange* second_part = SplitRangeAt(range, start);
if (second_part->Start() < end) {
// The split result intersects with [start, end[.
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
- auto third_part_end = end.PrevStart().End();
+ LifetimePosition third_part_end = end.PrevStart().End();
if (data()->IsBlockBoundary(end.Start())) {
third_part_end = end.Start();
}
- auto third_part = SplitBetween(
+ LiveRange* third_part = SplitBetween(
second_part, Max(second_part->Start().End(), until), third_part_end);
DCHECK(third_part != second_part);
@@ -2945,12 +2951,11 @@ SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
void SpillSlotLocator::LocateSpillSlots() {
- auto code = data()->code();
+ const InstructionSequence* code = data()->code();
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
if (!range->HasSpillRange()) continue;
- range->MarkSpilledInDeferredBlock(data()->code());
if (range->IsSpilledOnlyInDeferredBlocks()) {
for (LiveRange* child = range; child != nullptr; child = child->next()) {
if (child->spilled()) {
@@ -2959,7 +2964,8 @@ void SpillSlotLocator::LocateSpillSlots() {
}
}
} else {
- auto spills = range->spills_at_definition();
+ TopLevelLiveRange::SpillMoveInsertionList* spills =
+ range->spill_move_insertion_locations();
DCHECK_NOT_NULL(spills);
for (; spills != nullptr; spills = spills->next) {
code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
@@ -2990,9 +2996,11 @@ void OperandAssigner::AssignSpillSlots() {
for (SpillRange* range : spill_ranges) {
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
- int byte_width = range->ByteWidth();
- int index = data()->frame()->AllocateSpillSlot(byte_width);
- range->set_assigned_slot(index);
+ if (!range->HasSlot()) {
+ int byte_width = range->ByteWidth();
+ int index = data()->frame()->AllocateSpillSlot(byte_width);
+ range->set_assigned_slot(index);
+ }
}
}
@@ -3012,7 +3020,7 @@ void OperandAssigner::CommitAssignment() {
}
for (LiveRange* range = top_range; range != nullptr;
range = range->next()) {
- auto assigned = range->GetAssignedOperand();
+ InstructionOperand assigned = range->GetAssignedOperand();
range->ConvertUsesToOperand(assigned, spill_operand);
}
@@ -3032,7 +3040,7 @@ void OperandAssigner::CommitAssignment() {
spill_operand)) {
// Spill at definition if the range isn't spilled only in deferred
// blocks.
- top_range->CommitSpillsAtDefinition(
+ top_range->CommitSpillMoves(
data()->code(), spill_operand,
top_range->has_slot_use() || top_range->spilled());
}
@@ -3047,7 +3055,7 @@ ReferenceMapPopulator::ReferenceMapPopulator(RegisterAllocationData* data)
bool ReferenceMapPopulator::SafePointsAreInOrder() const {
int safe_point = 0;
- for (auto map : *data()->code()->reference_maps()) {
+ for (ReferenceMap* map : *data()->code()->reference_maps()) {
if (safe_point > map->instruction_position()) return false;
safe_point = map->instruction_position();
}
@@ -3058,14 +3066,15 @@ bool ReferenceMapPopulator::SafePointsAreInOrder() const {
void ReferenceMapPopulator::PopulateReferenceMaps() {
DCHECK(SafePointsAreInOrder());
// Map all delayed references.
- for (auto& delayed_reference : data()->delayed_references()) {
+ for (RegisterAllocationData::DelayedReference& delayed_reference :
+ data()->delayed_references()) {
delayed_reference.map->RecordReference(
AllocatedOperand::cast(*delayed_reference.operand));
}
// Iterate over all safe point positions and record a pointer
// for all spilled live ranges at this point.
int last_range_start = 0;
- auto reference_maps = data()->code()->reference_maps();
+ const ReferenceMapDeque* reference_maps = data()->code()->reference_maps();
ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
@@ -3073,12 +3082,13 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
if (!data()->IsReference(range)) continue;
// Skip empty live ranges.
if (range->IsEmpty()) continue;
+ if (range->has_preassigned_slot()) continue;
// Find the extent of the range and its children.
int start = range->Start().ToInstructionIndex();
int end = 0;
for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
- auto this_end = cur->End();
+ LifetimePosition this_end = cur->End();
if (this_end.ToInstructionIndex() > end)
end = this_end.ToInstructionIndex();
DCHECK(cur->Start().ToInstructionIndex() >= start);
@@ -3092,7 +3102,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Step across all the safe points that are before the start of this range,
// recording how far we step in order to save doing this for the next range.
for (; first_it != reference_maps->end(); ++first_it) {
- auto map = *first_it;
+ ReferenceMap* map = *first_it;
if (map->instruction_position() >= start) break;
}
@@ -3106,13 +3116,14 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
spill_operand = range->GetSpillRangeOperand();
}
DCHECK(spill_operand.IsStackSlot());
- DCHECK_EQ(kRepTagged,
- AllocatedOperand::cast(spill_operand).machine_type());
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ AllocatedOperand::cast(spill_operand).representation());
}
+ LiveRange* cur = range;
// Step through the safe points to see whether they are in the range.
for (auto it = first_it; it != reference_maps->end(); ++it) {
- auto map = *it;
+ ReferenceMap* map = *it;
int safe_point = map->instruction_position();
// The safe points are sorted so we can stop searching here.
@@ -3120,13 +3131,33 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Advance to the next active range that covers the current
// safe point position.
- auto safe_point_pos =
+ LifetimePosition safe_point_pos =
LifetimePosition::InstructionFromInstructionIndex(safe_point);
- LiveRange* cur = range;
- while (cur != nullptr && !cur->Covers(safe_point_pos)) {
- cur = cur->next();
+
+ // Search for the child range (cur) that covers safe_point_pos. If we
+ // don't find it before the children pass safe_point_pos, keep cur at
+ // the last child, because the next safe_point_pos may be covered by cur.
+ // This may happen if cur has more than one interval, and the current
+ // safe_point_pos is in between intervals.
+ // For that reason, cur may be at most the last child.
+ DCHECK_NOT_NULL(cur);
+ DCHECK(safe_point_pos >= cur->Start() || range == cur);
+ bool found = false;
+ while (!found) {
+ if (cur->Covers(safe_point_pos)) {
+ found = true;
+ } else {
+ LiveRange* next = cur->next();
+ if (next == nullptr || next->Start() > safe_point_pos) {
+ break;
+ }
+ cur = next;
+ }
+ }
+
+ if (!found) {
+ continue;
}
- if (cur == nullptr) continue;
// Check if the live range is spilled and the safe point is after
// the spill position.
@@ -3146,9 +3177,10 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
"at safe point %d\n",
range->vreg(), cur->relative_id(), cur->Start().value(),
safe_point);
- auto operand = cur->GetAssignedOperand();
+ InstructionOperand operand = cur->GetAssignedOperand();
DCHECK(!operand.IsStackSlot());
- DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ AllocatedOperand::cast(operand).representation());
map->RecordReference(AllocatedOperand::cast(operand));
}
}
@@ -3160,8 +3192,8 @@ namespace {
class LiveRangeBound {
public:
- explicit LiveRangeBound(const LiveRange* range)
- : range_(range), start_(range->Start()), end_(range->End()) {
+ explicit LiveRangeBound(const LiveRange* range, bool skip)
+ : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
@@ -3172,6 +3204,7 @@ class LiveRangeBound {
const LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
+ const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
@@ -3190,14 +3223,17 @@ class LiveRangeBoundArray {
bool ShouldInitialize() { return start_ == nullptr; }
- void Initialize(Zone* zone, const LiveRange* const range) {
- size_t length = 0;
- for (auto i = range; i != nullptr; i = i->next()) length++;
- start_ = zone->NewArray<LiveRangeBound>(length);
- length_ = length;
- auto curr = start_;
- for (auto i = range; i != nullptr; i = i->next(), ++curr) {
- new (curr) LiveRangeBound(i);
+ void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
+ length_ = range->GetChildCount();
+
+ start_ = zone->NewArray<LiveRangeBound>(length_);
+ LiveRangeBound* curr = start_;
+ // Normally, spilled ranges do not need connecting moves, because the spill
+ // location has been assigned at definition. For ranges spilled in deferred
+ // blocks, that is not the case, so we need to connect the spilled children.
+ bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
+ for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+ new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
}
}
@@ -3207,7 +3243,7 @@ class LiveRangeBoundArray {
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
- auto bound = &start_[current_index];
+ LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
@@ -3219,32 +3255,41 @@ class LiveRangeBoundArray {
}
LiveRangeBound* FindPred(const InstructionBlock* pred) {
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
- auto succ_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
}
- void Find(const InstructionBlock* block, const InstructionBlock* pred,
- FindResult* result) const {
- auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- auto bound = Find(pred_end);
+ bool FindConnectableSubranges(const InstructionBlock* block,
+ const InstructionBlock* pred,
+ FindResult* result) const {
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
- auto cur_start = LifetimePosition::GapFromInstructionIndex(
+ LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
- // Common case.
+
if (bound->CanCover(cur_start)) {
- result->cur_cover_ = bound->range_;
- return;
+ // Both blocks are covered by the same range, so there is nothing to
+ // connect.
+ return false;
+ }
+ bound = Find(cur_start);
+ if (bound->skip_) {
+ return false;
}
- result->cur_cover_ = Find(cur_start)->range_;
+ result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+ return (result->cur_cover_ != result->pred_cover_);
}
private:
@@ -3269,9 +3314,9 @@ class LiveRangeFinder {
LiveRangeBoundArray* ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
- auto range = data_->live_ranges()[operand_index];
+ TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
- auto array = &bounds_[operand_index];
+ LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
@@ -3322,25 +3367,28 @@ bool LiveRangeConnector::CanEagerlyResolveControlFlow(
void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
// Lazily linearize live ranges in memory for fast lookup.
LiveRangeFinder finder(data(), local_zone);
- auto& live_in_sets = data()->live_in_sets();
- for (auto block : code()->instruction_blocks()) {
+ ZoneVector<BitVector*>& live_in_sets = data()->live_in_sets();
+ for (const InstructionBlock* block : code()->instruction_blocks()) {
if (CanEagerlyResolveControlFlow(block)) continue;
- auto live = live_in_sets[block->rpo_number().ToInt()];
+ BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
- auto* array = finder.ArrayFor(iterator.Current());
- for (auto pred : block->predecessors()) {
+ LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+ for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
- const auto* pred_block = code()->InstructionBlockAt(pred);
- array->Find(block, pred_block, &result);
- if (result.cur_cover_ == result.pred_cover_ ||
- (!result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
- result.cur_cover_->spilled()))
+ const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+ if (!array->FindConnectableSubranges(block, pred_block, &result)) {
continue;
- auto pred_op = result.pred_cover_->GetAssignedOperand();
- auto cur_op = result.cur_cover_->GetAssignedOperand();
+ }
+ InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
+ InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
if (pred_op.Equals(cur_op)) continue;
- ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
+ USE(move_loc);
+ DCHECK_IMPLIES(
+ result.cur_cover_->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+ !(pred_op.IsAnyRegister() && cur_op.IsAnyRegister()),
+ code()->GetInstructionBlock(move_loc)->IsDeferred());
}
iterator.Advance();
}
@@ -3348,10 +3396,10 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
}
-void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
- const InstructionOperand& cur_op,
- const InstructionBlock* pred,
- const InstructionOperand& pred_op) {
+int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
+ const InstructionBlock* pred,
+ const InstructionOperand& pred_op) {
DCHECK(!pred_op.Equals(cur_op));
int gap_index;
Instruction::GapPosition position;
@@ -3367,6 +3415,7 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
position = Instruction::END;
}
data()->AddGapMove(gap_index, position, pred_op, cur_op);
+ return gap_index;
}
@@ -3378,7 +3427,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
LiveRange* first_range = top_range;
for (LiveRange *second_range = first_range->next(); second_range != nullptr;
first_range = second_range, second_range = second_range->next()) {
- auto pos = second_range->Start();
+ LifetimePosition pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
// boundary.
if (!connect_spilled && second_range->spilled()) continue;
@@ -3387,8 +3436,8 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
continue;
}
- auto prev_operand = first_range->GetAssignedOperand();
- auto cur_operand = second_range->GetAssignedOperand();
+ InstructionOperand prev_operand = first_range->GetAssignedOperand();
+ InstructionOperand cur_operand = second_range->GetAssignedOperand();
if (prev_operand.Equals(cur_operand)) continue;
bool delay_insertion = false;
Instruction::GapPosition gap_pos;
@@ -3403,8 +3452,16 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
gap_pos = delay_insertion ? Instruction::END : Instruction::START;
}
- auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
- gap_pos, code_zone());
+ // Fills or spills for spilled in deferred blocks ranges must happen
+ // only in deferred blocks.
+ DCHECK_IMPLIES(
+ connect_spilled &&
+ !(prev_operand.IsAnyRegister() && cur_operand.IsAnyRegister()),
+ code()->GetInstructionBlock(gap_index)->IsDeferred());
+
+ ParallelMove* move =
+ code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
+ gap_pos, code_zone());
if (!delay_insertion) {
move->AddMove(prev_operand, cur_operand);
} else {
@@ -3419,15 +3476,15 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
ZoneVector<MoveOperands*> to_eliminate(local_zone);
to_insert.reserve(4);
to_eliminate.reserve(4);
- auto moves = delayed_insertion_map.begin()->first.first;
+ ParallelMove* moves = delayed_insertion_map.begin()->first.first;
for (auto it = delayed_insertion_map.begin();; ++it) {
bool done = it == delayed_insertion_map.end();
if (done || it->first.first != moves) {
// Commit the MoveOperands for current ParallelMove.
- for (auto move : to_eliminate) {
+ for (MoveOperands* move : to_eliminate) {
move->Eliminate();
}
- for (auto move : to_insert) {
+ for (MoveOperands* move : to_insert) {
moves->push_back(move);
}
if (done) break;
@@ -3437,8 +3494,9 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
moves = it->first.first;
}
// Gather all MoveOperands for a single ParallelMove.
- auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
- auto eliminate = moves->PrepareInsertAfter(move);
+ MoveOperands* move =
+ new (code_zone()) MoveOperands(it->first.second, it->second);
+ MoveOperands* eliminate = moves->PrepareInsertAfter(move);
to_insert.push_back(move);
if (eliminate != nullptr) to_eliminate.push_back(eliminate);
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 443232abb1..b96a43ccec 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -135,6 +135,8 @@ class LifetimePosition final {
return this->value_ >= that.value_;
}
+ void Print() const;
+
static inline LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition MaxPosition() {
@@ -194,6 +196,24 @@ class UseInterval final : public ZoneObject {
return start_ <= point && point < end_;
}
+ // Returns the index of the first gap covered by this interval.
+ int FirstGapIndex() const {
+ int ret = start_.ToInstructionIndex();
+ if (start_.IsInstructionPosition()) {
+ ++ret;
+ }
+ return ret;
+ }
+
+ // Returns the index of the last gap covered by this interval.
+ int LastGapIndex() const {
+ int ret = end_.ToInstructionIndex();
+ if (end_.IsGapPosition() && end_.IsStart()) {
+ --ret;
+ }
+ return ret;
+ }
+
private:
LifetimePosition start_;
LifetimePosition end_;
@@ -299,7 +319,9 @@ class LiveRange : public ZoneObject {
InstructionOperand GetAssignedOperand() const;
- MachineType machine_type() const { return MachineTypeField::decode(bits_); }
+ MachineRepresentation representation() const {
+ return RepresentationField::decode(bits_);
+ }
int assigned_register() const { return AssignedRegisterField::decode(bits_); }
bool HasRegisterAssigned() const {
@@ -378,7 +400,10 @@ class LiveRange : public ZoneObject {
bool Covers(LifetimePosition position) const;
LifetimePosition FirstIntersection(LiveRange* other) const;
- void Verify() const;
+ void VerifyChildStructure() const {
+ VerifyIntervals();
+ VerifyPositions();
+ }
void ConvertUsesToOperand(const InstructionOperand& op,
const InstructionOperand& spill_op);
@@ -391,6 +416,8 @@ class LiveRange : public ZoneObject {
void set_weight(float weight) { weight_ = weight; }
LiveRangeGroup* group() const { return group_; }
void set_group(LiveRangeGroup* group) { group_ = group; }
+ void Print(const RegisterConfiguration* config, bool with_children) const;
+ void Print(bool with_children) const;
static const int kInvalidSize = -1;
static const float kInvalidWeight;
@@ -398,10 +425,9 @@ class LiveRange : public ZoneObject {
private:
friend class TopLevelLiveRange;
- explicit LiveRange(int relative_id, MachineType machine_type,
+ explicit LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level);
- void AppendAsChild(TopLevelLiveRange* other);
void UpdateParentForAllChildren(TopLevelLiveRange* new_top_level);
void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
@@ -410,9 +436,12 @@ class LiveRange : public ZoneObject {
void AdvanceLastProcessedMarker(UseInterval* to_start_of,
LifetimePosition but_not_past) const;
+ void VerifyPositions() const;
+ void VerifyIntervals() const;
+
typedef BitField<bool, 0, 1> SpilledField;
typedef BitField<int32_t, 6, 6> AssignedRegisterField;
- typedef BitField<MachineType, 12, 15> MachineTypeField;
+ typedef BitField<MachineRepresentation, 12, 8> RepresentationField;
// Unique among children and splinters of the same virtual register.
int relative_id_;
@@ -465,7 +494,7 @@ class LiveRangeGroup final : public ZoneObject {
class TopLevelLiveRange final : public LiveRange {
public:
- explicit TopLevelLiveRange(int vreg, MachineType machine_type);
+ explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
int spill_start_index() const { return spill_start_index_; }
bool IsFixed() const { return vreg_ < 0; }
@@ -532,16 +561,16 @@ class TopLevelLiveRange final : public LiveRange {
AllocatedOperand GetSpillRangeOperand() const;
- void SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand);
+ void RecordSpillLocation(Zone* zone, int gap_index,
+ InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
}
- void CommitSpillsAtDefinition(InstructionSequence* sequence,
- const InstructionOperand& operand,
- bool might_be_duplicated);
+ void CommitSpillMoves(InstructionSequence* sequence,
+ const InstructionOperand& operand,
+ bool might_be_duplicated);
// If all the children of this range are spilled in deferred blocks, and if
// for any non-spilled child with a use position requiring a slot, that range
@@ -550,7 +579,12 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
- void MarkSpilledInDeferredBlock(const InstructionSequence* code);
+ void MarkSpilledInDeferredBlock() {
+ spill_start_index_ = -1;
+ spilled_in_deferred_blocks_ = true;
+ spill_move_insertion_locations_ = nullptr;
+ }
+
bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
const InstructionOperand& spill_operand);
@@ -567,22 +601,25 @@ class TopLevelLiveRange final : public LiveRange {
int debug_virt_reg() const;
#endif
+ void Verify() const;
+ void VerifyChildrenInOrder() const;
+
int GetNextChildId() {
return IsSplinter() ? splintered_from()->GetNextChildId()
: ++last_child_id_;
}
+ int GetChildCount() const { return last_child_id_ + 1; }
+
bool IsSpilledOnlyInDeferredBlocks() const {
return spilled_in_deferred_blocks_;
}
- struct SpillAtDefinitionList;
+ struct SpillMoveInsertionList;
- SpillAtDefinitionList* spills_at_definition() const {
- return spills_at_definition_;
+ SpillMoveInsertionList* spill_move_insertion_locations() const {
+ return spill_move_insertion_locations_;
}
- void set_last_child(LiveRange* range) { last_child_ = range; }
- LiveRange* last_child() const { return last_child_; }
TopLevelLiveRange* splinter() const { return splinter_; }
void SetSplinter(TopLevelLiveRange* splinter) {
DCHECK_NULL(splinter_);
@@ -594,6 +631,9 @@ class TopLevelLiveRange final : public LiveRange {
splinter->SetSplinteredFrom(this);
}
+ void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
+ bool has_preassigned_slot() const { return has_preassigned_slot_; }
+
private:
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
@@ -610,14 +650,14 @@ class TopLevelLiveRange final : public LiveRange {
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
};
- SpillAtDefinitionList* spills_at_definition_;
+ SpillMoveInsertionList* spill_move_insertion_locations_;
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
bool spilled_in_deferred_blocks_;
int spill_start_index_;
- LiveRange* last_child_;
UsePosition* last_pos_;
TopLevelLiveRange* splinter_;
+ bool has_preassigned_slot_;
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
@@ -643,6 +683,7 @@ class SpillRange final : public ZoneObject {
int ByteWidth() const;
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
+ bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
void set_assigned_slot(int index) {
DCHECK_EQ(kUnassignedSlot, assigned_slot_);
@@ -658,6 +699,7 @@ class SpillRange final : public ZoneObject {
ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
int byte_width() const { return byte_width_; }
RegisterKind kind() const { return kind_; }
+ void Print() const;
private:
LifetimePosition End() const { return end_position_; }
@@ -709,6 +751,8 @@ class RegisterAllocationData final : public ZoneObject {
InstructionOperand* operand;
};
typedef ZoneVector<DelayedReference> DelayedReferences;
+ typedef ZoneVector<std::pair<TopLevelLiveRange*, int>>
+ RangesWithPreassignedSlots;
RegisterAllocationData(const RegisterConfiguration* config,
Zone* allocation_zone, Frame* frame,
@@ -746,12 +790,12 @@ class RegisterAllocationData final : public ZoneObject {
const char* debug_name() const { return debug_name_; }
const RegisterConfiguration* config() const { return config_; }
- MachineType MachineTypeFor(int virtual_register);
+ MachineRepresentation RepresentationFor(int virtual_register);
TopLevelLiveRange* GetOrCreateLiveRangeFor(int index);
// Creates a new live range.
- TopLevelLiveRange* NewLiveRange(int index, MachineType machine_type);
- TopLevelLiveRange* NextLiveRange(MachineType machine_type);
+ TopLevelLiveRange* NewLiveRange(int index, MachineRepresentation rep);
+ TopLevelLiveRange* NextLiveRange(MachineRepresentation rep);
SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range);
SpillRange* CreateSpillRangeForLiveRange(TopLevelLiveRange* range);
@@ -765,6 +809,7 @@ class RegisterAllocationData final : public ZoneObject {
}
bool ExistsUseWithoutDefinition();
+ bool RangesDefinedInDeferredStayInDeferred();
void MarkAllocated(RegisterKind kind, int index);
@@ -774,12 +819,9 @@ class RegisterAllocationData final : public ZoneObject {
PhiMapValue* GetPhiMapValueFor(int virtual_register);
bool IsBlockBoundary(LifetimePosition pos) const;
- void Print(const InstructionSequence* instructionSequence);
- void Print(const Instruction* instruction);
- void Print(const LiveRange* range, bool with_children = false);
- void Print(const InstructionOperand& op);
- void Print(const MoveOperands* move);
- void Print(const SpillRange* spill_range);
+ RangesWithPreassignedSlots& preassigned_slot_ranges() {
+ return preassigned_slot_ranges_;
+ }
private:
int GetNextLiveRangeId();
@@ -802,6 +844,7 @@ class RegisterAllocationData final : public ZoneObject {
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
int virtual_register_count_;
+ RangesWithPreassignedSlots preassigned_slot_ranges_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -1109,10 +1152,10 @@ class LiveRangeConnector final : public ZoneObject {
bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
- void ResolveControlFlow(const InstructionBlock* block,
- const InstructionOperand& cur_op,
- const InstructionBlock* pred,
- const InstructionOperand& pred_op);
+ int ResolveControlFlow(const InstructionBlock* block,
+ const InstructionOperand& cur_op,
+ const InstructionBlock* pred,
+ const InstructionOperand& pred_op);
RegisterAllocationData* const data_;
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
new file mode 100644
index 0000000000..5dab60f6a3
--- /dev/null
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -0,0 +1,537 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/representation-change.h"
+
+#include <sstream>
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const char* Truncation::description() const {
+ switch (kind()) {
+ case TruncationKind::kNone:
+ return "no-value-use";
+ case TruncationKind::kBool:
+ return "truncate-to-bool";
+ case TruncationKind::kWord32:
+ return "truncate-to-word32";
+ case TruncationKind::kWord64:
+ return "truncate-to-word64";
+ case TruncationKind::kFloat32:
+ return "truncate-to-float32";
+ case TruncationKind::kFloat64:
+ return "truncate-to-float64";
+ case TruncationKind::kAny:
+ return "no-truncation";
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+// Partial order for truncations:
+//
+// kWord64 kAny
+// ^ ^
+// \ |
+// \ kFloat64 <--+
+// \ ^ ^ |
+// \ / | |
+// kWord32 kFloat32 kBool
+// ^ ^ ^
+// \ | /
+// \ | /
+// \ | /
+// \ | /
+// \ | /
+// kNone
+
+// static
+Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
+ TruncationKind rep2) {
+ if (LessGeneral(rep1, rep2)) return rep2;
+ if (LessGeneral(rep2, rep1)) return rep1;
+ // Handle the generalization of float64-representable values.
+ if (LessGeneral(rep1, TruncationKind::kFloat64) &&
+ LessGeneral(rep2, TruncationKind::kFloat64)) {
+ return TruncationKind::kFloat64;
+ }
+ // All other combinations are illegal.
+ FATAL("Tried to combine incompatible truncations");
+ return TruncationKind::kNone;
+}
+
+
+// static
+bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
+ switch (rep1) {
+ case TruncationKind::kNone:
+ return true;
+ case TruncationKind::kBool:
+ return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord32:
+ return rep2 == TruncationKind::kWord32 ||
+ rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord64:
+ return rep2 == TruncationKind::kWord64;
+ case TruncationKind::kFloat32:
+ return rep2 == TruncationKind::kFloat32 ||
+ rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kFloat64:
+ return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ case TruncationKind::kAny:
+ return rep2 == TruncationKind::kAny;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+namespace {
+
+// TODO(titzer): should Word64 also be implicitly convertable to others?
+bool IsWord(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kWord16 ||
+ rep == MachineRepresentation::kWord32;
+}
+
+} // namespace
+
+
+// Changes representation from {output_rep} to {use_rep}. The {truncation}
+// parameter is only used for sanity checking - if the changer cannot figure
+// out signedness for the word32->float64 conversion, then we check that the
+// uses truncate to word32 (so they do not care about signedness).
+Node* RepresentationChanger::GetRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ MachineRepresentation use_rep, Truncation truncation) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // The output representation should be set.
+ return TypeError(node, output_rep, output_type, use_rep);
+ }
+ if (use_rep == output_rep) {
+ // Representations are the same. That's a no-op.
+ return node;
+ }
+ if (IsWord(use_rep) && IsWord(output_rep)) {
+ // Both are words less than or equal to 32-bits.
+ // Since loads of integers from memory implicitly sign or zero extend the
+ // value to the full machine word size and stores implicitly truncate,
+ // no representation change is necessary.
+ return node;
+ }
+ switch (use_rep) {
+ case MachineRepresentation::kTagged:
+ return GetTaggedRepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kFloat32:
+ return GetFloat32RepresentationFor(node, output_rep, output_type,
+ truncation);
+ case MachineRepresentation::kFloat64:
+ return GetFloat64RepresentationFor(node, output_rep, output_type,
+ truncation);
+ case MachineRepresentation::kBit:
+ return GetBitRepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return GetWord32RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kWord64:
+ return GetWord64RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kNone:
+ return node;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+Node* RepresentationChanger::GetTaggedRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kHeapConstant:
+ return node; // No change necessary.
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Constant(value);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Constant(static_cast<double>(value));
+ } else if (output_rep == MachineRepresentation::kBit) {
+ return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+ : jsgraph()->TrueConstant();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ case IrOpcode::kFloat64Constant:
+ return jsgraph()->Constant(OpParameter<double>(node));
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Tagged operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ op = simplified()->ChangeBitToBool();
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeUint32ToTagged();
+ } else if (output_type->Is(Type::Signed32())) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ } else if (output_rep ==
+ MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTagged();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ op = simplified()->ChangeFloat64ToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetFloat32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float32Constant(
+ DoubleToFloat32(OpParameter<double>(node)));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Unsigned32())) {
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ } else {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float32Constant(static_cast<float>(value));
+ }
+ case IrOpcode::kFloat32Constant:
+ return node; // No change necessary.
+ default:
+ break;
+ }
+ // Select the correct X -> Float32 operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat32);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToFloat64();
+ } else {
+ // Either the output is int32 or the uses only care about the
+ // low 32 bits (so we can pick int32 safely).
+ DCHECK(output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32());
+ op = machine()->ChangeUint32ToFloat64();
+ }
+ // int32 -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ node = jsgraph()->graph()->NewNode(op, node);
+ op = machine()->TruncateFloat64ToFloat32();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ op = machine()->TruncateFloat64ToFloat32();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetFloat64RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Truncation truncation) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float64Constant(OpParameter<double>(node));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float64Constant(value);
+ } else {
+ DCHECK(output_type->Is(Type::Unsigned32()));
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float64Constant(static_cast<double>(value));
+ }
+ case IrOpcode::kFloat64Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Float64Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Float64 operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kBit) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat64);
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeInt32ToFloat64();
+ } else {
+ // Either the output is int32 or the uses only care about the
+ // low 32 bits (so we can pick int32 safely).
+ DCHECK(output_type->Is(Type::Unsigned32()) ||
+ truncation.TruncatesToWord32());
+ op = machine()->ChangeUint32ToFloat64();
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToFloat64();
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ op = machine()->ChangeFloat32ToFloat64();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kFloat64);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
+ return jsgraph()->Int32Constant(DoubleToInt32(value));
+}
+
+
+Node* RepresentationChanger::GetWord32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return MakeTruncatedInt32Constant(OpParameter<float>(node));
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat64Constant:
+ return MakeTruncatedInt32Constant(OpParameter<double>(node));
+ default:
+ break;
+ }
+ // Select the correct X -> Word32 operator.
+ const Operator* op;
+ Type* type = NodeProperties::GetType(node);
+
+ if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word32
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ // TODO(jarin) Use only output_type here, once we intersect it with the
+ // type inferred by the typer.
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else {
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else {
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ if (output_type->Is(Type::Unsigned32()) || type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else if (output_type->Is(Type::Signed32()) ||
+ type->Is(Type::Signed32())) {
+ op = simplified()->ChangeTaggedToInt32();
+ } else {
+ node = InsertChangeTaggedToFloat64(node);
+ op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetBitRepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ // Eagerly fold representation changes for constants.
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ DCHECK(value.is_identical_to(factory()->true_value()) ||
+ value.is_identical_to(factory()->false_value()));
+ return jsgraph()->Int32Constant(
+ value.is_identical_to(factory()->true_value()) ? 1 : 0);
+ }
+ default:
+ break;
+ }
+ // Select the correct X -> Bit operator.
+ const Operator* op;
+ if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeBoolToBit();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kBit);
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
+
+
+Node* RepresentationChanger::GetWord64RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type) {
+ if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word64
+ }
+ // Can't really convert Word64 to anything else. Purported to be internal.
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord64);
+}
+
+
+const Operator* RepresentationChanger::Int32OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Int32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Int32Mod();
+ case IrOpcode::kNumberBitwiseOr:
+ return machine()->Word32Or();
+ case IrOpcode::kNumberBitwiseXor:
+ return machine()->Word32Xor();
+ case IrOpcode::kNumberBitwiseAnd:
+ return machine()->Word32And();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Int32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Int32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+const Operator* RepresentationChanger::Uint32OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Int32Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Int32Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Int32Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Uint32Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Uint32Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Word32Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Uint32LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Uint32LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+const Operator* RepresentationChanger::Float64OperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kNumberAdd:
+ return machine()->Float64Add();
+ case IrOpcode::kNumberSubtract:
+ return machine()->Float64Sub();
+ case IrOpcode::kNumberMultiply:
+ return machine()->Float64Mul();
+ case IrOpcode::kNumberDivide:
+ return machine()->Float64Div();
+ case IrOpcode::kNumberModulus:
+ return machine()->Float64Mod();
+ case IrOpcode::kNumberEqual:
+ return machine()->Float64Equal();
+ case IrOpcode::kNumberLessThan:
+ return machine()->Float64LessThan();
+ case IrOpcode::kNumberLessThanOrEqual:
+ return machine()->Float64LessThanOrEqual();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
+Node* RepresentationChanger::TypeError(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type,
+ MachineRepresentation use) {
+ type_error_ = true;
+ if (!testing_type_errors_) {
+ std::ostringstream out_str;
+ out_str << output_rep << " (";
+ output_type->PrintTo(out_str, Type::SEMANTIC_DIM);
+ out_str << ")";
+
+ std::ostringstream use_str;
+ use_str << use;
+
+ V8_Fatal(__FILE__, __LINE__,
+ "RepresentationChangerError: node #%d:%s of "
+ "%s cannot be changed to %s",
+ node->id(), node->op()->mnemonic(), out_str.str().c_str(),
+ use_str.str().c_str());
+ }
+ return node;
+}
+
+
+Node* RepresentationChanger::InsertChangeFloat32ToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
+}
+
+
+Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+ node);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 98de04d3a5..62ea3b4684 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -5,414 +5,102 @@
#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
#define V8_COMPILER_REPRESENTATION_CHANGE_H_
-#include <sstream>
-
-#include "src/base/bits.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Contains logic related to changing the representation of values for constants
-// and other nodes, as well as lowering Simplified->Machine operators.
-// Eagerly folds any representation changes for constants.
-class RepresentationChanger {
+class Truncation final {
public:
- RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
- : jsgraph_(jsgraph),
- isolate_(isolate),
- testing_type_errors_(false),
- type_error_(false) {}
+ // Constructors.
+ static Truncation None() { return Truncation(TruncationKind::kNone); }
+ static Truncation Bool() { return Truncation(TruncationKind::kBool); }
+ static Truncation Word32() { return Truncation(TruncationKind::kWord32); }
+ static Truncation Word64() { return Truncation(TruncationKind::kWord64); }
+ static Truncation Float32() { return Truncation(TruncationKind::kFloat32); }
+ static Truncation Float64() { return Truncation(TruncationKind::kFloat64); }
+ static Truncation Any() { return Truncation(TruncationKind::kAny); }
- // TODO(titzer): should Word64 also be implicitly convertable to others?
- static bool IsWord(MachineTypeUnion type) {
- return (type & (kRepWord8 | kRepWord16 | kRepWord32)) != 0;
+ static Truncation Generalize(Truncation t1, Truncation t2) {
+ return Truncation(Generalize(t1.kind(), t2.kind()));
}
- Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
- MachineTypeUnion use_type) {
- if (!base::bits::IsPowerOfTwo32(output_type & kRepMask)) {
- // There should be only one output representation.
- return TypeError(node, output_type, use_type);
- }
- if ((use_type & kRepMask) == (output_type & kRepMask)) {
- // Representations are the same. That's a no-op.
- return node;
- }
- if (IsWord(use_type) && IsWord(output_type)) {
- // Both are words less than or equal to 32-bits.
- // Since loads of integers from memory implicitly sign or zero extend the
- // value to the full machine word size and stores implicitly truncate,
- // no representation change is necessary.
- return node;
- }
- if (use_type & kRepTagged) {
- return GetTaggedRepresentationFor(node, output_type);
- } else if (use_type & kRepFloat32) {
- return GetFloat32RepresentationFor(node, output_type);
- } else if (use_type & kRepFloat64) {
- return GetFloat64RepresentationFor(node, output_type);
- } else if (use_type & kRepBit) {
- return GetBitRepresentationFor(node, output_type);
- } else if (IsWord(use_type)) {
- return GetWord32RepresentationFor(node, output_type,
- use_type & kTypeUint32);
- } else if (use_type & kRepWord64) {
- return GetWord64RepresentationFor(node, output_type);
- } else {
- return node;
- }
+ // Queries.
+ bool TruncatesToWord32() const {
+ return LessGeneral(kind_, TruncationKind::kWord32);
}
-
- Node* GetTaggedRepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- case IrOpcode::kHeapConstant:
- return node; // No change necessary.
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Constant(static_cast<double>(value));
- } else if (output_type & kTypeInt32) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Constant(value);
- } else if (output_type & kRepBit) {
- return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
- : jsgraph()->TrueConstant();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- case IrOpcode::kFloat64Constant:
- return jsgraph()->Constant(OpParameter<double>(node));
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Constant(OpParameter<float>(node));
- default:
- break;
- }
- // Select the correct X -> Tagged operator.
- const Operator* op;
- if (output_type & kRepBit) {
- op = simplified()->ChangeBitToBool();
- } else if (IsWord(output_type)) {
- if (output_type & kTypeUint32) {
- op = simplified()->ChangeUint32ToTagged();
- } else if (output_type & kTypeInt32) {
- op = simplified()->ChangeInt32ToTagged();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- } else if (output_type & kRepFloat32) { // float32 -> float64 -> tagged
- node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTagged();
- } else if (output_type & kRepFloat64) {
- op = simplified()->ChangeFloat64ToTagged();
- } else {
- return TypeError(node, output_type, kRepTagged);
- }
- return jsgraph()->graph()->NewNode(op, node);
+ bool TruncatesNaNToZero() {
+ return LessGeneral(kind_, TruncationKind::kWord32) ||
+ LessGeneral(kind_, TruncationKind::kBool);
}
-
- Node* GetFloat32RepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float32Constant(
- DoubleToFloat32(OpParameter<double>(node)));
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float32Constant(static_cast<float>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float32Constant(static_cast<float>(value));
- }
- case IrOpcode::kFloat32Constant:
- return node; // No change necessary.
- default:
- break;
- }
- // Select the correct X -> Float32 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return TypeError(node, output_type, kRepFloat32);
- } else if (IsWord(output_type)) {
- if (output_type & kTypeUint32) {
- op = machine()->ChangeUint32ToFloat64();
- } else {
- op = machine()->ChangeInt32ToFloat64();
- }
- // int32 -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
- } else if (output_type & kRepTagged) {
- op = simplified()
- ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
- node = jsgraph()->graph()->NewNode(op, node);
- op = machine()->TruncateFloat64ToFloat32();
- } else if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToFloat32();
- } else {
- return TypeError(node, output_type, kRepFloat32);
- }
- return jsgraph()->graph()->NewNode(op, node);
+ bool TruncatesUndefinedToZeroOrNaN() {
+ return LessGeneral(kind_, TruncationKind::kFloat64) ||
+ LessGeneral(kind_, TruncationKind::kWord64);
}
- Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node));
- case IrOpcode::kInt32Constant:
- if (output_type & kTypeUint32) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float64Constant(static_cast<double>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float64Constant(value);
- }
- case IrOpcode::kFloat64Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Float64Constant(OpParameter<float>(node));
- default:
- break;
- }
- // Select the correct X -> Float64 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return TypeError(node, output_type, kRepFloat64);
- } else if (IsWord(output_type)) {
- if (output_type & kTypeUint32) {
- op = machine()->ChangeUint32ToFloat64();
- } else {
- op = machine()->ChangeInt32ToFloat64();
- }
- } else if (output_type & kRepTagged) {
- op = simplified()->ChangeTaggedToFloat64();
- } else if (output_type & kRepFloat32) {
- op = machine()->ChangeFloat32ToFloat64();
- } else {
- return TypeError(node, output_type, kRepFloat64);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* MakeInt32Constant(double value) {
- if (value < 0) {
- DCHECK(IsInt32Double(value));
- int32_t iv = static_cast<int32_t>(value);
- return jsgraph()->Int32Constant(iv);
- } else {
- DCHECK(IsUint32Double(value));
- int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
- return jsgraph()->Int32Constant(iv);
- }
- }
-
- Node* GetTruncatedWord32For(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold truncations for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Int32Constant(
- DoubleToInt32(OpParameter<float>(node)));
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return jsgraph()->Int32Constant(
- DoubleToInt32(OpParameter<double>(node)));
- default:
- break;
- }
- // Select the correct X -> Word32 truncation operator.
- const Operator* op = NULL;
- if (output_type & kRepFloat64) {
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
- } else if (output_type & kRepFloat32) {
- node = InsertChangeFloat32ToFloat64(node);
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
- } else if (output_type & kRepTagged) {
- node = InsertChangeTaggedToFloat64(node);
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
- } else {
- return TypeError(node, output_type, kRepWord32);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
+ // Operators.
+ bool operator==(Truncation other) const { return kind() == other.kind(); }
+ bool operator!=(Truncation other) const { return !(*this == other); }
- Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
- bool use_unsigned) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return MakeInt32Constant(OpParameter<float>(node));
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return MakeInt32Constant(OpParameter<double>(node));
- default:
- break;
- }
- // Select the correct X -> Word32 operator.
- const Operator* op;
- if (output_type & kRepBit) {
- return node; // Sloppy comparison -> word32
- } else if (output_type & kRepFloat64) {
- if (output_type & kTypeUint32 || use_unsigned) {
- op = machine()->ChangeFloat64ToUint32();
- } else {
- op = machine()->ChangeFloat64ToInt32();
- }
- } else if (output_type & kRepFloat32) {
- node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
- if (output_type & kTypeUint32 || use_unsigned) {
- op = machine()->ChangeFloat64ToUint32();
- } else {
- op = machine()->ChangeFloat64ToInt32();
- }
- } else if (output_type & kRepTagged) {
- if (output_type & kTypeUint32 || use_unsigned) {
- op = simplified()->ChangeTaggedToUint32();
- } else {
- op = simplified()->ChangeTaggedToInt32();
- }
- } else {
- return TypeError(node, output_type, kRepWord32);
- }
- return jsgraph()->graph()->NewNode(op, node);
+ // Debug utilities.
+ const char* description() const;
+ bool IsLessGeneralThan(Truncation other) {
+ return LessGeneral(kind(), other.kind());
}
- Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
- // Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- DCHECK(value.is_identical_to(factory()->true_value()) ||
- value.is_identical_to(factory()->false_value()));
- return jsgraph()->Int32Constant(
- value.is_identical_to(factory()->true_value()) ? 1 : 0);
- }
- default:
- break;
- }
- // Select the correct X -> Bit operator.
- const Operator* op;
- if (output_type & kRepTagged) {
- op = simplified()->ChangeBoolToBit();
- } else {
- return TypeError(node, output_type, kRepBit);
- }
- return jsgraph()->graph()->NewNode(op, node);
- }
-
- Node* GetWord64RepresentationFor(Node* node, MachineTypeUnion output_type) {
- if (output_type & kRepBit) {
- return node; // Sloppy comparison -> word64
- }
- // Can't really convert Word64 to anything else. Purported to be internal.
- return TypeError(node, output_type, kRepWord64);
- }
+ private:
+ enum class TruncationKind : uint8_t {
+ kNone,
+ kBool,
+ kWord32,
+ kWord64,
+ kFloat32,
+ kFloat64,
+ kAny
+ };
+
+ explicit Truncation(TruncationKind kind) : kind_(kind) {}
+ TruncationKind kind() const { return kind_; }
+
+ TruncationKind kind_;
+
+ static TruncationKind Generalize(TruncationKind rep1, TruncationKind rep2);
+ static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
+};
- const Operator* Int32OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Int32Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Int32Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Int32Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Int32Div();
- case IrOpcode::kNumberModulus:
- return machine()->Int32Mod();
- case IrOpcode::kNumberBitwiseOr:
- return machine()->Word32Or();
- case IrOpcode::kNumberBitwiseXor:
- return machine()->Word32Xor();
- case IrOpcode::kNumberBitwiseAnd:
- return machine()->Word32And();
- case IrOpcode::kNumberEqual:
- return machine()->Word32Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Int32LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Int32LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- const Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Int32Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Int32Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Int32Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Uint32Div();
- case IrOpcode::kNumberModulus:
- return machine()->Uint32Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Word32Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Uint32LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Uint32LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
+// Contains logic related to changing the representation of values for constants
+// and other nodes, as well as lowering Simplified->Machine operators.
+// Eagerly folds any representation changes for constants.
+class RepresentationChanger final {
+ public:
+ RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
+ : jsgraph_(jsgraph),
+ isolate_(isolate),
+ testing_type_errors_(false),
+ type_error_(false) {}
- const Operator* Float64OperatorFor(IrOpcode::Value opcode) {
- switch (opcode) {
- case IrOpcode::kNumberAdd:
- return machine()->Float64Add();
- case IrOpcode::kNumberSubtract:
- return machine()->Float64Sub();
- case IrOpcode::kNumberMultiply:
- return machine()->Float64Mul();
- case IrOpcode::kNumberDivide:
- return machine()->Float64Div();
- case IrOpcode::kNumberModulus:
- return machine()->Float64Mod();
- case IrOpcode::kNumberEqual:
- return machine()->Float64Equal();
- case IrOpcode::kNumberLessThan:
- return machine()->Float64LessThan();
- case IrOpcode::kNumberLessThanOrEqual:
- return machine()->Float64LessThanOrEqual();
- default:
- UNREACHABLE();
- return NULL;
- }
- }
+ // Changes representation from {output_type} to {use_rep}. The {truncation}
+ // parameter is only used for sanity checking - if the changer cannot figure
+ // out signedness for the word32->float64 conversion, then we check that the
+ // uses truncate to word32 (so they do not care about signedness).
+ Node* GetRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type, MachineRepresentation use_rep,
+ Truncation truncation = Truncation::None());
+ const Operator* Int32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Float64OperatorFor(IrOpcode::Value opcode);
MachineType TypeForBasePointer(const FieldAccess& access) {
- return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+ return access.tag() != 0 ? MachineType::AnyTagged()
+ : MachineType::Pointer();
}
MachineType TypeForBasePointer(const ElementAccess& access) {
- return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
- }
-
- MachineType TypeFromUpperBound(Type* type) {
- if (type->Is(Type::None()))
- return kTypeAny; // TODO(titzer): should be an error
- if (type->Is(Type::Signed32())) return kTypeInt32;
- if (type->Is(Type::Unsigned32())) return kTypeUint32;
- if (type->Is(Type::Number())) return kTypeNumber;
- if (type->Is(Type::Boolean())) return kTypeBool;
- return kTypeAny;
+ return access.tag() != 0 ? MachineType::AnyTagged()
+ : MachineType::Pointer();
}
private:
@@ -424,34 +112,25 @@ class RepresentationChanger {
bool testing_type_errors_; // If {true}, don't abort on a type error.
bool type_error_; // Set when a type error is detected.
- Node* TypeError(Node* node, MachineTypeUnion output_type,
- MachineTypeUnion use) {
- type_error_ = true;
- if (!testing_type_errors_) {
- std::ostringstream out_str;
- out_str << static_cast<MachineType>(output_type);
-
- std::ostringstream use_str;
- use_str << static_cast<MachineType>(use);
-
- V8_Fatal(__FILE__, __LINE__,
- "RepresentationChangerError: node #%d:%s of "
- "%s cannot be changed to %s",
- node->id(), node->op()->mnemonic(), out_str.str().c_str(),
- use_str.str().c_str());
- }
- return node;
- }
-
- Node* InsertChangeFloat32ToFloat64(Node* node) {
- return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(),
- node);
- }
-
- Node* InsertChangeTaggedToFloat64(Node* node) {
- return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- node);
- }
+ Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetFloat32RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Truncation truncation);
+ Node* GetFloat64RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Truncation truncation);
+ Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
+ Type* output_type);
+ Node* TypeError(Node* node, MachineRepresentation output_rep,
+ Type* output_type, MachineRepresentation use);
+ Node* MakeTruncatedInt32Constant(double value);
+ Node* InsertChangeFloat32ToFloat64(Node* node);
+ Node* InsertChangeTaggedToFloat64(Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 63f148d926..455fcd120e 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -34,7 +34,7 @@ bool BasicBlock::LoopContains(BasicBlock* block) const {
// RPO numbers must be initialized.
DCHECK(rpo_number_ >= 0);
DCHECK(block->rpo_number_ >= 0);
- if (loop_end_ == NULL) return false; // This is not a loop.
+ if (loop_end_ == nullptr) return false; // This is not a loop.
return block->rpo_number_ >= rpo_number_ &&
block->rpo_number_ < loop_end_->rpo_number_;
}
@@ -140,13 +140,13 @@ BasicBlock* Schedule::block(Node* node) const {
if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
return nodeid_to_block_[node->id()];
}
- return NULL;
+ return nullptr;
}
bool Schedule::IsScheduled(Node* node) {
if (node->id() >= nodeid_to_block_.size()) return false;
- return nodeid_to_block_[node->id()] != NULL;
+ return nodeid_to_block_[node->id()] != nullptr;
}
@@ -158,7 +158,7 @@ BasicBlock* Schedule::GetBlockById(BasicBlock::Id block_id) {
bool Schedule::SameBasicBlock(Node* a, Node* b) const {
BasicBlock* block = this->block(a);
- return block != NULL && block == this->block(b);
+ return block != nullptr && block == this->block(b);
}
@@ -176,7 +176,7 @@ void Schedule::PlanNode(BasicBlock* block, Node* node) {
os << "Planning #" << node->id() << ":" << node->op()->mnemonic()
<< " for future add to B" << block->id() << "\n";
}
- DCHECK(this->block(node) == NULL);
+ DCHECK(this->block(node) == nullptr);
SetBlockForNode(block, node);
}
@@ -187,7 +187,7 @@ void Schedule::AddNode(BasicBlock* block, Node* node) {
os << "Adding #" << node->id() << ":" << node->op()->mnemonic() << " to B"
<< block->id() << "\n";
}
- DCHECK(this->block(node) == NULL || this->block(node) == block);
+ DCHECK(this->block(node) == nullptr || this->block(node) == block);
block->AddNode(node);
SetBlockForNode(block, node);
}
@@ -354,7 +354,7 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
BasicBlock::Control control = block->control();
if (control != BasicBlock::kNone) {
os << " ";
- if (block->control_input() != NULL) {
+ if (block->control_input() != nullptr) {
os << *block->control_input();
} else {
os << "Goto";
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 37ce76299e..9624ff5a4f 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -138,7 +138,7 @@ class BasicBlock final : public ZoneObject {
void set_rpo_number(int32_t rpo_number);
// Loop membership helpers.
- inline bool IsLoopHeader() const { return loop_end_ != NULL; }
+ inline bool IsLoopHeader() const { return loop_end_ != nullptr; }
bool LoopContains(BasicBlock* block) const;
// Computes the immediate common dominator of {b1} and {b2}. The worst time
@@ -153,8 +153,8 @@ class BasicBlock final : public ZoneObject {
BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* rpo_next_; // Link to next block in special RPO order.
BasicBlock* loop_header_; // Pointer to dominating loop header basic block,
- // NULL if none. For loop headers, this points to
- // enclosing loop header.
+ // nullptr if none. For loop headers, this points to
+ // enclosing loop header.
BasicBlock* loop_end_; // end of the loop, if this block is a loop header.
int32_t loop_depth_; // loop nesting, 0 is top-level
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 2c53acf1e5..80ce8b1711 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -221,9 +221,9 @@ class CFGBuilder : public ZoneObject {
queued_(scheduler->graph_, 2),
queue_(zone),
control_(zone),
- component_entry_(NULL),
- component_start_(NULL),
- component_end_(NULL) {}
+ component_entry_(nullptr),
+ component_start_(nullptr),
+ component_end_(nullptr) {}
// Run the control flow graph construction algorithm by walking the graph
// backwards from end through control edges, building and connecting the
@@ -253,7 +253,7 @@ class CFGBuilder : public ZoneObject {
ResetDataStructures();
Queue(exit);
- component_entry_ = NULL;
+ component_entry_ = nullptr;
component_start_ = block;
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
@@ -377,7 +377,7 @@ class CFGBuilder : public ZoneObject {
BasicBlock* BuildBlockForNode(Node* node) {
BasicBlock* block = schedule_->block(node);
- if (block == NULL) {
+ if (block == nullptr) {
block = schedule_->NewBasicBlock();
TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
node->op()->mnemonic());
@@ -501,34 +501,34 @@ class CFGBuilder : public ZoneObject {
void ConnectTailCall(Node* call) {
Node* call_control = NodeProperties::GetControlInput(call);
BasicBlock* call_block = FindPredecessorBlock(call_control);
- TraceConnect(call, call_block, NULL);
+ TraceConnect(call, call_block, nullptr);
schedule_->AddTailCall(call_block, call);
}
void ConnectReturn(Node* ret) {
Node* return_control = NodeProperties::GetControlInput(ret);
BasicBlock* return_block = FindPredecessorBlock(return_control);
- TraceConnect(ret, return_block, NULL);
+ TraceConnect(ret, return_block, nullptr);
schedule_->AddReturn(return_block, ret);
}
void ConnectDeoptimize(Node* deopt) {
Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
- TraceConnect(deopt, deoptimize_block, NULL);
+ TraceConnect(deopt, deoptimize_block, nullptr);
schedule_->AddDeoptimize(deoptimize_block, deopt);
}
void ConnectThrow(Node* thr) {
Node* throw_control = NodeProperties::GetControlInput(thr);
BasicBlock* throw_block = FindPredecessorBlock(throw_control);
- TraceConnect(thr, throw_block, NULL);
+ TraceConnect(thr, throw_block, nullptr);
schedule_->AddThrow(throw_block, thr);
}
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
DCHECK_NOT_NULL(block);
- if (succ == NULL) {
+ if (succ == nullptr) {
TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
node->op()->mnemonic(), block->id().ToInt());
} else {
@@ -602,8 +602,8 @@ class SpecialRPONumberer : public ZoneObject {
SpecialRPONumberer(Zone* zone, Schedule* schedule)
: zone_(zone),
schedule_(schedule),
- order_(NULL),
- beyond_end_(NULL),
+ order_(nullptr),
+ beyond_end_(nullptr),
loops_(zone),
backedges_(zone),
stack_(zone),
@@ -630,7 +630,7 @@ class SpecialRPONumberer : public ZoneObject {
// numbering for basic blocks into the final schedule.
void SerializeRPOIntoSchedule() {
int32_t number = 0;
- for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
+ for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
b->set_rpo_number(number++);
schedule_->rpo_order()->push_back(b);
}
@@ -677,7 +677,7 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* start;
void AddOutgoing(Zone* zone, BasicBlock* block) {
- if (outgoing == NULL) {
+ if (outgoing == nullptr) {
outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
ZoneVector<BasicBlock*>(zone);
}
@@ -713,7 +713,7 @@ class SpecialRPONumberer : public ZoneObject {
// use the schedule's end block in actual control flow (e.g. with end having
// successors). Once this has been cleaned up we can use the end block here.
BasicBlock* BeyondEndSentinel() {
- if (beyond_end_ == NULL) {
+ if (beyond_end_ == nullptr) {
BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
}
@@ -777,7 +777,7 @@ class SpecialRPONumberer : public ZoneObject {
// Initialize the "loop stack". Note the entry could be a loop header.
LoopInfo* loop =
- HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+ HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
order = insertion_point;
// Perform an iterative post-order traversal, visiting loop bodies before
@@ -788,7 +788,7 @@ class SpecialRPONumberer : public ZoneObject {
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
- BasicBlock* succ = NULL;
+ BasicBlock* succ = nullptr;
if (block != end && frame->index < block->SuccessorCount()) {
// Process the next normal successor.
@@ -798,7 +798,7 @@ class SpecialRPONumberer : public ZoneObject {
if (block->rpo_number() == kBlockOnStack) {
// Finish the loop body the first time the header is left on the
// stack.
- DCHECK(loop != NULL && loop->header == block);
+ DCHECK(loop != nullptr && loop->header == block);
loop->start = PushFront(order, block);
order = loop->end;
block->set_rpo_number(kBlockVisited2);
@@ -813,19 +813,19 @@ class SpecialRPONumberer : public ZoneObject {
size_t outgoing_index = frame->index - block->SuccessorCount();
LoopInfo* info = &loops_[GetLoopNumber(block)];
DCHECK(loop != info);
- if (block != entry && info->outgoing != NULL &&
+ if (block != entry && info->outgoing != nullptr &&
outgoing_index < info->outgoing->size()) {
succ = info->outgoing->at(outgoing_index);
frame->index++;
}
}
- if (succ != NULL) {
+ if (succ != nullptr) {
// Process the next successor.
if (succ->rpo_number() == kBlockOnStack) continue;
if (succ->rpo_number() == kBlockVisited2) continue;
DCHECK(succ->rpo_number() == kBlockUnvisited2);
- if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
+ if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
// The successor is not in the current loop or any nested loop.
// Add it to the outgoing edges of this loop and visit it later.
loop->AddOutgoing(zone_, succ);
@@ -865,10 +865,10 @@ class SpecialRPONumberer : public ZoneObject {
}
// Publish new order the first time.
- if (order_ == NULL) order_ = order;
+ if (order_ == nullptr) order_ = order;
// Compute the correct loop headers and set the correct loop ends.
- LoopInfo* current_loop = NULL;
+ LoopInfo* current_loop = nullptr;
BasicBlock* current_header = entry->loop_header();
int32_t loop_depth = entry->loop_depth();
if (entry->IsLoopHeader()) --loop_depth; // Entry might be a loop header.
@@ -879,11 +879,13 @@ class SpecialRPONumberer : public ZoneObject {
current->set_rpo_number(kBlockUnvisited1);
// Finish the previous loop(s) if we just exited them.
- while (current_header != NULL && current == current_header->loop_end()) {
+ while (current_header != nullptr &&
+ current == current_header->loop_end()) {
DCHECK(current_header->IsLoopHeader());
- DCHECK(current_loop != NULL);
+ DCHECK_NOT_NULL(current_loop);
current_loop = current_loop->prev;
- current_header = current_loop == NULL ? NULL : current_loop->header;
+ current_header =
+ current_loop == nullptr ? nullptr : current_loop->header;
--loop_depth;
}
current->set_loop_header(current_header);
@@ -893,7 +895,7 @@ class SpecialRPONumberer : public ZoneObject {
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
BasicBlock* end = current_loop->end;
- current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
+ current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
current_header = current_loop->header;
TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -901,7 +903,7 @@ class SpecialRPONumberer : public ZoneObject {
current->set_loop_depth(loop_depth);
- if (current->loop_header() == NULL) {
+ if (current->loop_header() == nullptr) {
TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
current->loop_depth());
} else {
@@ -932,7 +934,7 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* member = backedges->at(i).first;
BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
size_t loop_num = GetLoopNumber(header);
- if (loops_[loop_num].header == NULL) {
+ if (loops_[loop_num].header == nullptr) {
loops_[loop_num].header = header;
loops_[loop_num].members = new (zone_)
BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
@@ -979,7 +981,8 @@ class SpecialRPONumberer : public ZoneObject {
}
os << ":\n";
- for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
+ for (BasicBlock* block = order_; block != nullptr;
+ block = block->rpo_next()) {
os << std::setw(5) << "B" << block->rpo_number() << ":";
for (size_t i = 0; i < loops_.size(); i++) {
bool range = loops_[i].header->LoopContains(block);
@@ -988,11 +991,11 @@ class SpecialRPONumberer : public ZoneObject {
os << (range ? "x" : " ");
}
os << " id:" << block->id() << ": ";
- if (block->loop_end() != NULL) {
+ if (block->loop_end() != nullptr) {
os << " range: [B" << block->rpo_number() << ", B"
<< block->loop_end()->rpo_number() << ")";
}
- if (block->loop_header() != NULL) {
+ if (block->loop_header() != nullptr) {
os << " header: id:" << block->loop_header()->id();
}
if (block->loop_depth() > 0) {
@@ -1012,10 +1015,10 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* header = loop->header;
BasicBlock* end = header->loop_end();
- DCHECK(header != NULL);
+ DCHECK_NOT_NULL(header);
DCHECK(header->rpo_number() >= 0);
DCHECK(header->rpo_number() < static_cast<int>(order->size()));
- DCHECK(end != NULL);
+ DCHECK_NOT_NULL(end);
DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
DCHECK(end->rpo_number() > header->rpo_number());
DCHECK(header->loop_header() != header);
@@ -1026,7 +1029,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK_EQ(header, block);
bool end_found;
while (true) {
- if (block == NULL || block == loop->end) {
+ if (block == nullptr || block == loop->end) {
end_found = (loop->end == block);
break;
}
@@ -1042,7 +1045,7 @@ class SpecialRPONumberer : public ZoneObject {
// Check loop depth of the header.
int loop_depth = 0;
- for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
+ for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
loop_depth++;
}
DCHECK_EQ(loop_depth, header->loop_depth());
@@ -1096,7 +1099,7 @@ void Scheduler::ComputeSpecialRPONumbering() {
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
- for (/*nop*/; block != NULL; block = block->rpo_next()) {
+ for (/*nop*/; block != nullptr; block = block->rpo_next()) {
auto pred = block->predecessors().begin();
auto end = block->predecessors().end();
DCHECK(pred != end); // All blocks except start have predecessors.
@@ -1153,7 +1156,7 @@ class PrepareUsesVisitor {
opcode == IrOpcode::kParameter
? schedule_->start()
: schedule_->block(NodeProperties::GetControlInput(node));
- DCHECK(block != NULL);
+ DCHECK_NOT_NULL(block);
schedule_->AddNode(block, node);
}
}
@@ -1243,7 +1246,7 @@ class ScheduleEarlyNodeVisitor {
if (data->minimum_block_ == schedule_->start()) return;
// Propagate schedule early position.
- DCHECK(data->minimum_block_ != NULL);
+ DCHECK_NOT_NULL(data->minimum_block_);
for (auto use : node->uses()) {
PropagateMinimumPositionToNode(data->minimum_block_, use);
}
@@ -1521,10 +1524,11 @@ class ScheduleLateNodeVisitor {
BasicBlock* block = nullptr;
for (Edge edge : node->use_edges()) {
BasicBlock* use_block = GetBlockForUse(edge);
- block = block == NULL ? use_block : use_block == NULL
- ? block
- : BasicBlock::GetCommonDominator(
- block, use_block);
+ block = block == nullptr
+ ? use_block
+ : use_block == nullptr
+ ? block
+ : BasicBlock::GetCommonDominator(block, use_block);
}
return block;
}
@@ -1564,7 +1568,7 @@ class ScheduleLateNodeVisitor {
}
}
BasicBlock* result = schedule_->block(use);
- if (result == NULL) return NULL;
+ if (result == nullptr) return nullptr;
TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
use->op()->mnemonic(), result->id().ToInt());
return result;
@@ -1685,9 +1689,9 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// Iterate on phase 2: Compute special RPO and dominator tree.
special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
// TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
- for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
+ for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
b->set_dominator_depth(-1);
- b->set_dominator(NULL);
+ b->set_dominator(nullptr);
}
PropagateImmediateDominators(block->rpo_next());
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index 28a5d922b7..0e8b36fa73 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -55,7 +55,7 @@ Reduction SelectLowering::Reduce(Node* node) {
node->ReplaceInput(0, vthen);
node->ReplaceInput(1, velse);
node->ReplaceInput(2, merge);
- NodeProperties::ChangeOp(node, common()->Phi(p.type(), 2));
+ NodeProperties::ChangeOp(node, common()->Phi(p.representation(), 2));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 80c7ff5a94..653fea80ea 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -6,7 +6,6 @@
#include <limits>
-#include "src/address-map.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
@@ -19,6 +18,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
#include "src/objects.h"
+#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -57,14 +57,279 @@ enum Phase {
};
+namespace {
+
+// The {UseInfo} class is used to describe a use of an input of a node.
+//
+// This information is used in two different ways, based on the phase:
+//
+// 1. During propagation, the use info is used to inform the input node
+// about what part of the input is used (we call this truncation) and what
+// is the preferred representation.
+//
+// 2. During lowering, the use info is used to properly convert the input
+// to the preferred representation. The preferred representation might be
+// insufficient to do the conversion (e.g. word32->float64 conv), so we also
+// need the signedness information to produce the correct value.
+class UseInfo {
+ public:
+ UseInfo(MachineRepresentation preferred, Truncation truncation)
+ : preferred_(preferred), truncation_(truncation) {}
+ static UseInfo TruncatingWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
+ }
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo Bool() {
+ return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
+ }
+ static UseInfo Float32() {
+ return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+ }
+ static UseInfo Float64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
+ }
+ static UseInfo PointerInt() {
+ return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+ }
+ static UseInfo AnyTagged() {
+ return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+ }
+
+ // Undetermined representation.
+ static UseInfo Any() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Any());
+ }
+ static UseInfo None() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::None());
+ }
+
+ // Truncation to a representation that is smaller than the preferred
+ // one.
+ static UseInfo Float64TruncatingToWord32() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Word32());
+ }
+ static UseInfo Word64TruncatingToWord32() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word32());
+ }
+ static UseInfo AnyTruncatingToBool() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+ }
+
+ MachineRepresentation preferred() const { return preferred_; }
+ Truncation truncation() const { return truncation_; }
+
+ private:
+ MachineRepresentation preferred_;
+ Truncation truncation_;
+};
+
+
+UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kTagged:
+ return UseInfo::AnyTagged();
+ case MachineRepresentation::kFloat64:
+ return UseInfo::Float64();
+ case MachineRepresentation::kFloat32:
+ return UseInfo::Float32();
+ case MachineRepresentation::kWord64:
+ return UseInfo::TruncatingWord64();
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return UseInfo::TruncatingWord32();
+ case MachineRepresentation::kBit:
+ return UseInfo::Bool();
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return UseInfo::None();
+}
+
+
+UseInfo UseInfoForBasePointer(const FieldAccess& access) {
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+}
+
+
+UseInfo UseInfoForBasePointer(const ElementAccess& access) {
+ return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
+}
+
+
+#ifdef DEBUG
+// Helpers for monotonicity checking.
+bool MachineRepresentationIsSubtype(MachineRepresentation r1,
+ MachineRepresentation r2) {
+ switch (r1) {
+ case MachineRepresentation::kNone:
+ return true;
+ case MachineRepresentation::kBit:
+ return r2 == MachineRepresentation::kBit ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord8:
+ return r2 == MachineRepresentation::kWord8 ||
+ r2 == MachineRepresentation::kWord16 ||
+ r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord16:
+ return r2 == MachineRepresentation::kWord16 ||
+ r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord32:
+ return r2 == MachineRepresentation::kWord32 ||
+ r2 == MachineRepresentation::kWord64 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kWord64:
+ return r2 == MachineRepresentation::kWord64;
+ case MachineRepresentation::kFloat32:
+ return r2 == MachineRepresentation::kFloat32 ||
+ r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kFloat64:
+ return r2 == MachineRepresentation::kFloat64 ||
+ r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kTagged:
+ return r2 == MachineRepresentation::kTagged;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+class InputUseInfos {
+ public:
+ explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
+
+ void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
+ if (input_use_infos_.empty()) {
+ input_use_infos_.resize(node->InputCount(), UseInfo::None());
+ }
+ // Check that the new use informatin is a super-type of the old
+ // one.
+ CHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
+ input_use_infos_[index] = use_info;
+ }
+
+ private:
+ ZoneVector<UseInfo> input_use_infos_;
+
+ static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
+ return MachineRepresentationIsSubtype(use1.preferred(), use2.preferred()) &&
+ use1.truncation().IsLessGeneralThan(use2.truncation());
+ }
+};
+
+#endif // DEBUG
+
+} // namespace
+
+
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
- struct NodeInfo {
- MachineTypeUnion use : 15; // Union of all usages for the node.
- bool queued : 1; // Bookkeeping for the traversal.
- bool visited : 1; // Bookkeeping for the traversal.
- MachineTypeUnion output : 15; // Output type of the node.
+ class NodeOutputInfo {
+ public:
+ NodeOutputInfo(MachineRepresentation representation, Type* type)
+ : type_(type), representation_(representation) {}
+ NodeOutputInfo()
+ : type_(Type::None()), representation_(MachineRepresentation::kNone) {}
+
+ MachineRepresentation representation() const { return representation_; }
+ Type* type() const { return type_; }
+
+ static NodeOutputInfo None() {
+ return NodeOutputInfo(MachineRepresentation::kNone, Type::None());
+ }
+
+ static NodeOutputInfo Float32() {
+ return NodeOutputInfo(MachineRepresentation::kFloat32, Type::Number());
+ }
+
+ static NodeOutputInfo Float64() {
+ return NodeOutputInfo(MachineRepresentation::kFloat64, Type::Number());
+ }
+
+ static NodeOutputInfo NumberTruncatedToWord32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Number());
+ }
+
+ static NodeOutputInfo Int32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Signed32());
+ }
+
+ static NodeOutputInfo Uint32() {
+ return NodeOutputInfo(MachineRepresentation::kWord32, Type::Unsigned32());
+ }
+
+ static NodeOutputInfo Bool() {
+ return NodeOutputInfo(MachineRepresentation::kBit, Type::Boolean());
+ }
+
+ static NodeOutputInfo Int64() {
+ // TODO(jarin) Fix once we have a real int64 type.
+ return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
+ }
+
+ static NodeOutputInfo Uint64() {
+ // TODO(jarin) Fix once we have a real uint64 type.
+ return NodeOutputInfo(MachineRepresentation::kWord64, Type::Internal());
+ }
+
+ static NodeOutputInfo AnyTagged() {
+ return NodeOutputInfo(MachineRepresentation::kTagged, Type::Any());
+ }
+
+ static NodeOutputInfo NumberTagged() {
+ return NodeOutputInfo(MachineRepresentation::kTagged, Type::Number());
+ }
+
+ static NodeOutputInfo Pointer() {
+ return NodeOutputInfo(MachineType::PointerRepresentation(), Type::Any());
+ }
+
+ private:
+ Type* type_;
+ MachineRepresentation representation_;
+ };
+
+ class NodeInfo {
+ public:
+ // Adds new use to the node. Returns true if something has changed
+ // and the node has to be requeued.
+ bool AddUse(UseInfo info) {
+ Truncation old_truncation = truncation_;
+ truncation_ = Truncation::Generalize(truncation_, info.truncation());
+ return truncation_ != old_truncation;
+ }
+
+ void set_queued(bool value) { queued_ = value; }
+ bool queued() const { return queued_; }
+ void set_visited() { visited_ = true; }
+ bool visited() const { return visited_; }
+ Truncation truncation() const { return truncation_; }
+ void set_output_type(NodeOutputInfo output) { output_ = output; }
+
+ Type* output_type() const { return output_.type(); }
+ MachineRepresentation representation() const {
+ return output_.representation();
+ }
+
+ private:
+ bool queued_ = false; // Bookkeeping for the traversal.
+ bool visited_ = false; // Bookkeeping for the traversal.
+ NodeOutputInfo output_; // Output type and representation.
+ Truncation truncation_ = Truncation::None(); // Information about uses.
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
@@ -72,34 +337,34 @@ class RepresentationSelector {
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
count_(jsgraph->graph()->NodeCount()),
- info_(zone->NewArray<NodeInfo>(count_)),
+ info_(count_, zone),
+#ifdef DEBUG
+ node_input_use_infos_(count_, InputUseInfos(zone), zone),
+#endif
nodes_(zone),
replacements_(zone),
phase_(PROPAGATE),
changer_(changer),
queue_(zone),
- source_positions_(source_positions) {
- memset(info_, 0, sizeof(NodeInfo) * count_);
-
- safe_int_additive_range_ =
- Type::Range(-std::pow(2.0, 52.0), std::pow(2.0, 52.0), zone);
+ source_positions_(source_positions),
+ type_cache_(TypeCache::Get()) {
}
void Run(SimplifiedLowering* lowering) {
// Run propagation phase to a fixpoint.
TRACE("--{Propagation phase}--\n");
phase_ = PROPAGATE;
- Enqueue(jsgraph_->graph()->end());
+ EnqueueInitial(jsgraph_->graph()->end());
// Process nodes from the queue until it is empty.
while (!queue_.empty()) {
Node* node = queue_.front();
NodeInfo* info = GetInfo(node);
queue_.pop();
- info->queued = false;
+ info->set_queued(false);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
- VisitNode(node, info->use, NULL);
+ VisitNode(node, info->truncation(), nullptr);
TRACE(" ==> output ");
- PrintInfo(info->output);
+ PrintOutputInfo(info);
TRACE("\n");
}
@@ -109,11 +374,12 @@ class RepresentationSelector {
// Process nodes from the collected {nodes_} vector.
for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
Node* node = *i;
+ NodeInfo* info = GetInfo(node);
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
// Reuse {VisitNode()} so the representation rules are in one place.
SourcePositionTable::Scope scope(
source_positions_, source_positions_->GetSourcePosition(node));
- VisitNode(node, GetUseInfo(node), lowering);
+ VisitNode(node, info->truncation(), lowering);
}
// Perform the final replacements.
@@ -130,105 +396,173 @@ class RepresentationSelector {
}
}
- // Enqueue {node} if the {use} contains new information for that node.
- // Add {node} to {nodes_} if this is the first time it's been visited.
- void Enqueue(Node* node, MachineTypeUnion use = 0) {
+ void EnqueueInitial(Node* node) {
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ info->set_queued(true);
+ nodes_.push_back(node);
+ queue_.push(node);
+ }
+
+ // Enqueue {use_node}'s {index} input if the {use} contains new information
+ // for that input node. Add the input to {nodes_} if this is the first time
+ // it's been visited.
+ void EnqueueInput(Node* use_node, int index,
+ UseInfo use_info = UseInfo::None()) {
+ Node* node = use_node->InputAt(index);
if (phase_ != PROPAGATE) return;
NodeInfo* info = GetInfo(node);
- if (!info->visited) {
+#ifdef DEBUG
+ // Check monotonicity of input requirements.
+ node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
+ use_info);
+#endif // DEBUG
+ if (!info->visited()) {
// First visit of this node.
- info->visited = true;
- info->queued = true;
+ info->set_visited();
+ info->set_queued(true);
nodes_.push_back(node);
queue_.push(node);
TRACE(" initial: ");
- info->use |= use;
- PrintUseInfo(node);
+ info->AddUse(use_info);
+ PrintTruncation(info->truncation());
return;
}
TRACE(" queue?: ");
- PrintUseInfo(node);
- if ((info->use & use) != use) {
+ PrintTruncation(info->truncation());
+ if (info->AddUse(use_info)) {
// New usage information for the node is available.
- if (!info->queued) {
+ if (!info->queued()) {
queue_.push(node);
- info->queued = true;
+ info->set_queued(true);
TRACE(" added: ");
} else {
TRACE(" inqueue: ");
}
- info->use |= use;
- PrintUseInfo(node);
+ PrintTruncation(info->truncation());
}
}
bool lower() { return phase_ == LOWER; }
- void Enqueue(Node* node, MachineType use) {
- Enqueue(node, static_cast<MachineTypeUnion>(use));
+ void EnqueueUses(Node* node) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge)) {
+ Node* const user = edge.from();
+ if (user->id() < count_) {
+ // New type information for the node is available.
+ NodeInfo* info = GetInfo(user);
+ // Enqueue the node only if we are sure it is reachable from
+ // the end and it has not been queued yet.
+ if (info->visited() && !info->queued()) {
+ queue_.push(user);
+ info->set_queued(true);
+ }
+ }
+ }
+ }
+ }
+
+ void SetOutputFromMachineType(Node* node, MachineType machine_type) {
+ Type* type = Type::None();
+ switch (machine_type.semantic()) {
+ case MachineSemantic::kNone:
+ type = Type::None();
+ break;
+ case MachineSemantic::kBool:
+ type = Type::Boolean();
+ break;
+ case MachineSemantic::kInt32:
+ type = Type::Signed32();
+ break;
+ case MachineSemantic::kUint32:
+ type = Type::Unsigned32();
+ break;
+ case MachineSemantic::kInt64:
+ // TODO(jarin) Fix once we have proper int64.
+ type = Type::Internal();
+ break;
+ case MachineSemantic::kUint64:
+ // TODO(jarin) Fix once we have proper uint64.
+ type = Type::Internal();
+ break;
+ case MachineSemantic::kNumber:
+ type = Type::Number();
+ break;
+ case MachineSemantic::kAny:
+ type = Type::Any();
+ break;
+ }
+ return SetOutput(node, NodeOutputInfo(machine_type.representation(), type));
}
- void SetOutput(Node* node, MachineTypeUnion output) {
+ void SetOutput(Node* node, NodeOutputInfo output_info) {
// Every node should have at most one output representation. Note that
// phis can have 0, if they have not been used in a representation-inducing
// instruction.
- DCHECK((output & kRepMask) == 0 ||
- base::bits::IsPowerOfTwo32(output & kRepMask));
- GetInfo(node)->output = output;
+ Type* output_type = output_info.type();
+ if (NodeProperties::IsTyped(node)) {
+ output_type = Type::Intersect(NodeProperties::GetType(node),
+ output_info.type(), jsgraph_->zone());
+ }
+ NodeInfo* info = GetInfo(node);
+ DCHECK(info->output_type()->Is(output_type));
+ DCHECK(MachineRepresentationIsSubtype(info->representation(),
+ output_info.representation()));
+ if (!output_type->Is(info->output_type()) ||
+ output_info.representation() != info->representation()) {
+ EnqueueUses(node);
+ }
+ info->set_output_type(
+ NodeOutputInfo(output_info.representation(), output_type));
+ }
+
+ bool BothInputsAreSigned32(Node* node) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(Type::Signed32()) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(Type::Signed32());
+ }
+
+ bool BothInputsAreUnsigned32(Node* node) {
+ DCHECK_EQ(2, node->InputCount());
+ return GetInfo(node->InputAt(0))->output_type()->Is(Type::Unsigned32()) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(Type::Unsigned32());
}
bool BothInputsAre(Node* node, Type* type) {
DCHECK_EQ(2, node->InputCount());
- return NodeProperties::GetType(node->InputAt(0))->Is(type) &&
- NodeProperties::GetType(node->InputAt(1))->Is(type);
+ return GetInfo(node->InputAt(0))->output_type()->Is(type) &&
+ GetInfo(node->InputAt(1))->output_type()->Is(type);
}
- void ProcessTruncateWord32Input(Node* node, int index, MachineTypeUnion use) {
+ void ConvertInput(Node* node, int index, UseInfo use) {
Node* input = node->InputAt(index);
- if (phase_ == PROPAGATE) {
- // In the propagate phase, propagate the usage information backward.
- Enqueue(input, use);
- } else {
- // In the change phase, insert a change before the use if necessary.
- MachineTypeUnion output = GetInfo(input)->output;
- if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
- // Output representation doesn't match usage.
- TRACE(" truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic());
- TRACE(" from ");
- PrintInfo(output);
- TRACE(" to ");
- PrintInfo(use);
- TRACE("\n");
- Node* n = changer_->GetTruncatedWord32For(input, output);
- node->ReplaceInput(index, n);
- }
+ // In the change phase, insert a change before the use if necessary.
+ if (use.preferred() == MachineRepresentation::kNone)
+ return; // No input requirement on the use.
+ NodeInfo* input_info = GetInfo(input);
+ MachineRepresentation input_rep = input_info->representation();
+ if (input_rep != use.preferred()) {
+ // Output representation doesn't match usage.
+ TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
+ index, input->id(), input->op()->mnemonic());
+ TRACE(" from ");
+ PrintOutputInfo(input_info);
+ TRACE(" to ");
+ PrintUseInfo(use);
+ TRACE("\n");
+ Node* n = changer_->GetRepresentationFor(
+ input, input_info->representation(), input_info->output_type(),
+ use.preferred(), use.truncation());
+ node->ReplaceInput(index, n);
}
}
- void ProcessInput(Node* node, int index, MachineTypeUnion use) {
- Node* input = node->InputAt(index);
+ void ProcessInput(Node* node, int index, UseInfo use) {
if (phase_ == PROPAGATE) {
- // In the propagate phase, propagate the usage information backward.
- Enqueue(input, use);
+ EnqueueInput(node, index, use);
} else {
- // In the change phase, insert a change before the use if necessary.
- if ((use & kRepMask) == 0) return; // No input requirement on the use.
- MachineTypeUnion output = GetInfo(input)->output;
- if ((output & kRepMask & use) == 0) {
- // Output representation doesn't match usage.
- TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic());
- TRACE(" from ");
- PrintInfo(output);
- TRACE(" to ");
- PrintInfo(use);
- TRACE("\n");
- Node* n = changer_->GetRepresentationFor(input, output, use);
- node->ReplaceInput(index, n);
- }
+ ConvertInput(node, index, use);
}
}
@@ -237,11 +571,11 @@ class RepresentationSelector {
DCHECK_GE(index, NodeProperties::PastContextIndex(node));
for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
i < NodeProperties::PastEffectIndex(node); ++i) {
- Enqueue(node->InputAt(i)); // Effect inputs: just visit
+ EnqueueInput(node, i); // Effect inputs: just visit
}
for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
i < NodeProperties::PastControlIndex(node); ++i) {
- Enqueue(node->InputAt(i)); // Control inputs: just visit
+ EnqueueInput(node, i); // Control inputs: just visit
}
}
@@ -254,165 +588,165 @@ class RepresentationSelector {
OperatorProperties::GetContextInputCount(node->op());
// Visit value and context inputs as tagged.
for (int i = 0; i < tagged_count; i++) {
- ProcessInput(node, i, kMachAnyTagged);
+ ProcessInput(node, i, UseInfo::AnyTagged());
}
// Only enqueue other inputs (framestates, effects, control).
for (int i = tagged_count; i < node->InputCount(); i++) {
- Enqueue(node->InputAt(i));
+ EnqueueInput(node, i);
}
- // Assume the output is tagged.
- SetOutput(node, kMachAnyTagged);
}
// Helper for binops of the R x L -> O variety.
- void VisitBinop(Node* node, MachineTypeUnion left_use,
- MachineTypeUnion right_use, MachineTypeUnion output) {
+ void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
+ NodeOutputInfo output) {
DCHECK_EQ(2, node->op()->ValueInputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
for (int i = 2; i < node->InputCount(); i++) {
- Enqueue(node->InputAt(i));
+ EnqueueInput(node, i);
}
SetOutput(node, output);
}
// Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, MachineTypeUnion input_use,
- MachineTypeUnion output) {
+ void VisitBinop(Node* node, UseInfo input_use, NodeOutputInfo output) {
VisitBinop(node, input_use, input_use, output);
}
// Helper for unops of the I -> O variety.
- void VisitUnop(Node* node, MachineTypeUnion input_use,
- MachineTypeUnion output) {
+ void VisitUnop(Node* node, UseInfo input_use, NodeOutputInfo output) {
DCHECK_EQ(1, node->InputCount());
ProcessInput(node, 0, input_use);
SetOutput(node, output);
}
// Helper for leaf nodes.
- void VisitLeaf(Node* node, MachineTypeUnion output) {
+ void VisitLeaf(Node* node, NodeOutputInfo output) {
DCHECK_EQ(0, node->InputCount());
SetOutput(node, output);
}
// Helpers for specific types of binops.
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, kMachFloat64, kMachFloat64);
+ VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
+ }
+ void VisitInt32Binop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ }
+ void VisitWord32TruncatingBinop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::NumberTruncatedToWord32());
}
- void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
void VisitUint32Binop(Node* node) {
- VisitBinop(node, kMachUint32, kMachUint32);
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ }
+ void VisitInt64Binop(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Int64());
}
- void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
void VisitUint64Binop(Node* node) {
- VisitBinop(node, kMachUint64, kMachUint64);
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Uint64());
+ }
+ void VisitFloat64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ }
+ void VisitInt32Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ }
+ void VisitUint32Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Bool());
+ }
+ void VisitInt64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
+ }
+ void VisitUint64Cmp(Node* node) {
+ VisitBinop(node, UseInfo::TruncatingWord64(), NodeOutputInfo::Bool());
}
- void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
- void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
- void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
- void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
- void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
// Infer representation for phi-like nodes.
- MachineType GetRepresentationForPhi(Node* node, MachineTypeUnion use) {
- // Phis adapt to the output representation their uses demand.
- Type* upper = NodeProperties::GetType(node);
- if ((use & kRepMask) == kRepFloat32) {
- // only float32 uses.
- return kRepFloat32;
- } else if ((use & kRepMask) == kRepFloat64) {
- // only float64 uses.
- return kRepFloat64;
- } else if ((use & kRepMask) == kRepTagged) {
- // only tagged uses.
- return kRepTagged;
- } else if (upper->Is(Type::Integral32())) {
- // Integer within [-2^31, 2^32[ range.
- if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
- // multiple uses, but we are within 32 bits range => pick kRepWord32.
- return kRepWord32;
- } else if ((use & kTypeMask) == kTypeInt32 ||
- (use & kTypeMask) == kTypeUint32) {
- // We only use 32 bits or we use the result consistently.
- return kRepWord32;
- } else {
- return kRepFloat64;
+ NodeOutputInfo GetOutputInfoForPhi(Node* node, Truncation use) {
+ // Compute the type.
+ Type* type = GetInfo(node->InputAt(0))->output_type();
+ for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+ type = Type::Union(type, GetInfo(node->InputAt(i))->output_type(),
+ jsgraph_->zone());
+ }
+
+ // Compute the representation.
+ MachineRepresentation rep = MachineRepresentation::kTagged;
+ if (type->Is(Type::None())) {
+ rep = MachineRepresentation::kNone;
+ } else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
+ rep = MachineRepresentation::kWord32;
+ } else if (use.TruncatesToWord32()) {
+ rep = MachineRepresentation::kWord32;
+ } else if (type->Is(Type::Boolean())) {
+ rep = MachineRepresentation::kBit;
+ } else if (type->Is(Type::Number())) {
+ rep = MachineRepresentation::kFloat64;
+ } else if (type->Is(Type::Internal())) {
+ // We mark (u)int64 as Type::Internal.
+ // TODO(jarin) This is a workaround for our lack of (u)int64
+ // types. This can be removed once we can represent (u)int64
+ // unambiguously. (At the moment internal objects, such as the hole,
+ // are also Type::Internal()).
+ bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
+ MachineRepresentation::kWord64;
+#ifdef DEBUG
+ // Check that all the inputs agree on being Word64.
+ for (int i = 1; i < node->op()->ValueInputCount(); i++) {
+ DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
+ MachineRepresentation::kWord64);
}
- } else if (upper->Is(Type::Boolean())) {
- // multiple uses => pick kRepBit.
- return kRepBit;
- } else if (upper->Is(Type::Number())) {
- // multiple uses => pick kRepFloat64.
- return kRepFloat64;
- } else if (upper->Is(Type::Internal())) {
- return kMachPtr;
+#endif
+ rep = is_word64 ? MachineRepresentation::kWord64
+ : MachineRepresentation::kTagged;
}
- return kRepTagged;
+ return NodeOutputInfo(rep, type);
}
// Helper for handling selects.
- void VisitSelect(Node* node, MachineTypeUnion use,
+ void VisitSelect(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- ProcessInput(node, 0, kRepBit);
- MachineType output = GetRepresentationForPhi(node, use);
+ ProcessInput(node, 0, UseInfo::Bool());
- Type* upper = NodeProperties::GetType(node);
- MachineType output_type =
- static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
- SetOutput(node, output_type);
+ NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ SetOutput(node, output);
if (lower()) {
// Update the select operator.
SelectParameters p = SelectParametersOf(node->op());
- MachineType type = static_cast<MachineType>(output_type);
- if (type != p.type()) {
- NodeProperties::ChangeOp(node,
- lowering->common()->Select(type, p.hint()));
+ if (output.representation() != p.representation()) {
+ NodeProperties::ChangeOp(node, lowering->common()->Select(
+ output.representation(), p.hint()));
}
-
- // Convert inputs to the output representation of this select.
- ProcessInput(node, 1, output_type);
- ProcessInput(node, 2, output_type);
- } else {
- // Propagate {use} of the select to value inputs.
- MachineType use_type =
- static_cast<MachineType>((use & kTypeMask) | output);
- ProcessInput(node, 1, use_type);
- ProcessInput(node, 2, use_type);
}
+ // Convert inputs to the output representation of this phi, pass the
+ // truncation truncation along.
+ UseInfo input_use(output.representation(), truncation);
+ ProcessInput(node, 1, input_use);
+ ProcessInput(node, 2, input_use);
}
// Helper for handling phis.
- void VisitPhi(Node* node, MachineTypeUnion use,
+ void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- MachineType output = GetRepresentationForPhi(node, use);
-
- Type* upper = NodeProperties::GetType(node);
- MachineType output_type =
- static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
- SetOutput(node, output_type);
+ NodeOutputInfo output = GetOutputInfoForPhi(node, truncation);
+ SetOutput(node, output);
int values = node->op()->ValueInputCount();
-
if (lower()) {
// Update the phi operator.
- MachineType type = static_cast<MachineType>(output_type);
- if (type != OpParameter<MachineType>(node)) {
- NodeProperties::ChangeOp(node, lowering->common()->Phi(type, values));
+ if (output.representation() != PhiRepresentationOf(node->op())) {
+ NodeProperties::ChangeOp(
+ node, lowering->common()->Phi(output.representation(), values));
}
+ }
- // Convert inputs to the output representation of this phi.
- for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, i < values ? output_type : 0);
- }
- } else {
- // Propagate {use} of the phi to value inputs, and 0 to control.
- MachineType use_type =
- static_cast<MachineType>((use & kTypeMask) | output);
- for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, i < values ? use_type : 0);
- }
+ // Convert inputs to the output representation of this phi, pass the
+ // truncation truncation along.
+ UseInfo input_use(output.representation(), truncation);
+ for (int i = 0; i < node->InputCount(); i++) {
+ ProcessInput(node, i, i < values ? input_use : UseInfo::None());
}
}
@@ -424,25 +758,38 @@ class RepresentationSelector {
for (int i = 0; i < node->InputCount(); i++) {
if (i == 0) {
// The target of the call.
- ProcessInput(node, i, 0);
+ ProcessInput(node, i, UseInfo::None());
} else if ((i - 1) < params) {
- ProcessInput(node, i, sig->GetParam(i - 1));
+ ProcessInput(node, i, TruncatingUseInfoFromRepresentation(
+ sig->GetParam(i - 1).representation()));
} else {
- ProcessInput(node, i, 0);
+ ProcessInput(node, i, UseInfo::None());
}
}
if (sig->return_count() > 0) {
- SetOutput(node, desc->GetMachineSignature()->GetReturn());
+ SetOutputFromMachineType(node, desc->GetMachineSignature()->GetReturn());
} else {
- SetOutput(node, kMachAnyTagged);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
+ }
+ }
+
+ MachineSemantic DeoptValueSemanticOf(Type* type) {
+ CHECK(!type->Is(Type::None()));
+ // We only need signedness to do deopt correctly.
+ if (type->Is(Type::Signed32())) {
+ return MachineSemantic::kInt32;
+ } else if (type->Is(Type::Unsigned32())) {
+ return MachineSemantic::kUint32;
+ } else {
+ return MachineSemantic::kAny;
}
}
void VisitStateValues(Node* node) {
if (phase_ == PROPAGATE) {
for (int i = 0; i < node->InputCount(); i++) {
- Enqueue(node->InputAt(i), kTypeAny);
+ EnqueueInput(node, i, UseInfo::Any());
}
} else {
Zone* zone = jsgraph_->zone();
@@ -450,13 +797,20 @@ class RepresentationSelector {
new (zone->New(sizeof(ZoneVector<MachineType>)))
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
- MachineTypeUnion input_type = GetInfo(node->InputAt(i))->output;
- (*types)[i] = static_cast<MachineType>(input_type);
+ NodeInfo* input_info = GetInfo(node->InputAt(i));
+ MachineType machine_type(
+ input_info->representation(),
+ DeoptValueSemanticOf(input_info->output_type()));
+ DCHECK(machine_type.representation() !=
+ MachineRepresentation::kWord32 ||
+ machine_type.semantic() == MachineSemantic::kInt32 ||
+ machine_type.semantic() == MachineSemantic::kUint32);
+ (*types)[i] = machine_type;
}
NodeProperties::ChangeOp(node,
jsgraph_->common()->TypedStateValues(types));
}
- SetOutput(node, kMachAnyTagged);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
}
const Operator* Int32Op(Node* node) {
@@ -471,34 +825,9 @@ class RepresentationSelector {
return changer_->Float64OperatorFor(node->opcode());
}
- bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Signed32()) &&
- (!CanObserveNonWord32(use) ||
- NodeProperties::GetType(node)->Is(Type::Signed32()));
- }
-
- bool CanLowerToWord32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, safe_int_additive_range_) &&
- !CanObserveNonWord32(use);
- }
-
- bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
- return BothInputsAre(node, Type::Unsigned32()) &&
- (!CanObserveNonWord32(use) ||
- NodeProperties::GetType(node)->Is(Type::Unsigned32()));
- }
-
- bool CanObserveNonWord32(MachineTypeUnion use) {
- return (use & kTypeMask & ~(kTypeInt32 | kTypeUint32)) != 0;
- }
-
- bool CanObserveNaN(MachineTypeUnion use) {
- return (use & (kTypeNumber | kTypeAny)) != 0;
- }
-
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
- void VisitNode(Node* node, MachineTypeUnion use,
+ void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
switch (node->opcode()) {
//------------------------------------------------------------------
@@ -506,41 +835,41 @@ class RepresentationSelector {
//------------------------------------------------------------------
case IrOpcode::kStart:
case IrOpcode::kDead:
- return VisitLeaf(node, 0);
+ return VisitLeaf(node, NodeOutputInfo::None());
case IrOpcode::kParameter: {
// TODO(titzer): use representation from linkage.
- Type* upper = NodeProperties::GetType(node);
- ProcessInput(node, 0, 0);
- SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
+ Type* type = NodeProperties::GetType(node);
+ ProcessInput(node, 0, UseInfo::None());
+ SetOutput(node, NodeOutputInfo(MachineRepresentation::kTagged, type));
return;
}
case IrOpcode::kInt32Constant:
- return VisitLeaf(node, kRepWord32);
+ return VisitLeaf(node, NodeOutputInfo::Int32());
case IrOpcode::kInt64Constant:
- return VisitLeaf(node, kRepWord64);
+ return VisitLeaf(node, NodeOutputInfo::Int64());
case IrOpcode::kFloat32Constant:
- return VisitLeaf(node, kRepFloat32);
+ return VisitLeaf(node, NodeOutputInfo::Float32());
case IrOpcode::kFloat64Constant:
- return VisitLeaf(node, kRepFloat64);
+ return VisitLeaf(node, NodeOutputInfo::Float64());
case IrOpcode::kExternalConstant:
- return VisitLeaf(node, kMachPtr);
+ return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kNumberConstant:
- return VisitLeaf(node, kRepTagged);
+ return VisitLeaf(node, NodeOutputInfo::NumberTagged());
case IrOpcode::kHeapConstant:
- return VisitLeaf(node, kRepTagged);
+ return VisitLeaf(node, NodeOutputInfo::AnyTagged());
case IrOpcode::kBranch:
- ProcessInput(node, 0, kRepBit);
- Enqueue(NodeProperties::GetControlInput(node, 0));
+ ProcessInput(node, 0, UseInfo::Bool());
+ EnqueueInput(node, NodeProperties::FirstControlIndex(node));
break;
case IrOpcode::kSwitch:
- ProcessInput(node, 0, kRepWord32);
- Enqueue(NodeProperties::GetControlInput(node, 0));
+ ProcessInput(node, 0, UseInfo::TruncatingWord32());
+ EnqueueInput(node, NodeProperties::FirstControlIndex(node));
break;
case IrOpcode::kSelect:
- return VisitSelect(node, use, lowering);
+ return VisitSelect(node, truncation, lowering);
case IrOpcode::kPhi:
- return VisitPhi(node, use, lowering);
+ return VisitPhi(node, truncation, lowering);
case IrOpcode::kCall:
return VisitCall(node, lowering);
@@ -556,15 +885,15 @@ class RepresentationSelector {
JS_OP_LIST(DEFINE_JS_CASE)
#undef DEFINE_JS_CASE
VisitInputs(node);
- return SetOutput(node, kRepTagged);
+ return SetOutput(node, NodeOutputInfo::AnyTagged());
//------------------------------------------------------------------
// Simplified operators.
//------------------------------------------------------------------
case IrOpcode::kBooleanNot: {
if (lower()) {
- MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & kRepBit) {
+ NodeInfo* input_info = GetInfo(node->InputAt(0));
+ if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
@@ -575,15 +904,15 @@ class RepresentationSelector {
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, kTypeBool);
- SetOutput(node, kRepBit);
+ ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput(node, NodeOutputInfo::Bool());
}
break;
}
case IrOpcode::kBooleanToNumber: {
if (lower()) {
- MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
- if (input & kRepBit) {
+ NodeInfo* input_info = GetInfo(node->InputAt(0));
+ if (input_info->representation() == MachineRepresentation::kBit) {
// BooleanToNumber(x: kRepBit) => x
DeferReplacement(node, node->InputAt(0));
} else {
@@ -593,8 +922,8 @@ class RepresentationSelector {
}
} else {
// No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, kTypeBool);
- SetOutput(node, kMachInt32);
+ ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
+ SetOutput(node, NodeOutputInfo::Int32());
}
break;
}
@@ -602,11 +931,11 @@ class RepresentationSelector {
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual: {
// Number comparisons reduce to integer comparisons for integer inputs.
- if (BothInputsAre(node, Type::Signed32())) {
+ if (BothInputsAreSigned32(node)) {
// => signed Int32Cmp
VisitInt32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (BothInputsAre(node, Type::Unsigned32())) {
+ } else if (BothInputsAreUnsigned32(node)) {
// => unsigned Int32Cmp
VisitUint32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
@@ -619,21 +948,17 @@ class RepresentationSelector {
}
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
- // Add and subtract reduce to Int32Add/Sub if the inputs
- // are already integers and all uses are truncating.
- if (CanLowerToInt32Binop(node, use)) {
+ if (BothInputsAre(node, Type::Signed32()) &&
+ NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // int32 + int32 = int32
// => signed Int32Add/Sub
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (CanLowerToUint32Binop(node, use)) {
- // => unsigned Int32Add/Sub
- VisitUint32Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- } else if (CanLowerToWord32AdditiveBinop(node, use)) {
- // => signed Int32Add/Sub, truncating inputs
- ProcessTruncateWord32Input(node, 0, kTypeInt32);
- ProcessTruncateWord32Input(node, 1, kTypeInt32);
- SetOutput(node, kMachInt32);
+ } else if (BothInputsAre(node, type_cache_.kAdditiveSafeInteger) &&
+ truncation.TruncatesToWord32()) {
+ // safe-int + safe-int = x (truncated to int32)
+ // => signed Int32Add/Sub (truncated)
+ VisitWord32TruncatingBinop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Add/Sub
@@ -643,14 +968,23 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberMultiply: {
- NumberMatcher right(node->InputAt(1));
- if (right.IsInRange(-1048576, 1048576)) { // must fit double mantissa.
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Mul
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // Multiply reduces to Int32Mul if the inputs and the output
+ // are integers.
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
break;
}
+ if (truncation.TruncatesToWord32() &&
+ NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger)) {
+ // Multiply reduces to Int32Mul if the inputs are integers,
+ // the uses are truncating and the result is in the safe
+ // integer range.
+ VisitWord32TruncatingBinop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ break;
+ }
}
// => Float64Mul
VisitFloat64Binop(node);
@@ -658,15 +992,23 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberDivide: {
- if (CanLowerToInt32Binop(node, use)) {
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Div
VisitInt32Binop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
break;
+ }
+ if (truncation.TruncatesToWord32()) {
+ // => signed Int32Div
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ break;
+ }
}
- if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+ if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Div
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
break;
}
@@ -676,15 +1018,23 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberModulus: {
- if (CanLowerToInt32Binop(node, use)) {
- // => signed Int32Mod
- VisitInt32Binop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ if (BothInputsAreSigned32(node)) {
+ if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // => signed Int32Mod
+ VisitInt32Binop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
+ if (truncation.TruncatesToWord32()) {
+ // => signed Int32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ break;
+ }
}
- if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
+ if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Mod
- VisitUint32Binop(node);
+ VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
break;
}
@@ -701,84 +1051,61 @@ class RepresentationSelector {
break;
}
case IrOpcode::kNumberShiftLeft: {
- VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
- if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shl());
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ }
break;
}
case IrOpcode::kNumberShiftRight: {
- VisitBinop(node, kMachInt32, kMachUint32, kMachInt32);
- if (lower()) lowering->DoShift(node, lowering->machine()->Word32Sar());
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ }
break;
}
case IrOpcode::kNumberShiftRightLogical: {
- VisitBinop(node, kMachUint32, kMachUint32, kMachUint32);
- if (lower()) lowering->DoShift(node, lowering->machine()->Word32Shr());
+ Type* rhs_type = GetInfo(node->InputAt(1))->output_type();
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ }
break;
}
case IrOpcode::kNumberToInt32: {
- MachineTypeUnion use_rep = use & kRepMask;
- Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetType(input);
- MachineTypeUnion in = GetInfo(input)->output;
- if (in_upper->Is(Type::Signed32())) {
- // If the input has type int32, pass through representation.
- VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- in_upper->Is(Type::Unsigned32())) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
- (in & kRepMask) == kRepWord32) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- // Require the input in float64 format and perform truncation.
- // TODO(turbofan): avoid a truncation with a smi check.
- VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
- if (lower()) {
- NodeProperties::ChangeOp(
- node, lowering->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript));
- }
- }
+ // Just change representation if necessary.
+ VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Int32());
+ if (lower()) DeferReplacement(node, node->InputAt(0));
break;
}
case IrOpcode::kNumberToUint32: {
- MachineTypeUnion use_rep = use & kRepMask;
- Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetType(input);
- MachineTypeUnion in = GetInfo(input)->output;
- if (in_upper->Is(Type::Unsigned32())) {
- // If the input has type uint32, pass through representation.
- VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeInt32 ||
- in_upper->Is(Type::Signed32())) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if ((in & kTypeMask) == kTypeUint32 ||
- (in & kRepMask) == kRepWord32) {
- // Just change representation if necessary.
- VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- // Require the input in float64 format and perform truncation.
- // TODO(turbofan): avoid a truncation with a smi check.
- VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
- if (lower()) {
- NodeProperties::ChangeOp(
- node, lowering->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript));
- }
+ // Just change representation if necessary.
+ VisitUnop(node, UseInfo::TruncatingWord32(), NodeOutputInfo::Uint32());
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ break;
+ }
+ case IrOpcode::kNumberIsHoleNaN: {
+ VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Bool());
+ if (lower()) {
+ // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
+ // #HoleNaNLower32)
+ node->ReplaceInput(0,
+ jsgraph_->graph()->NewNode(
+ lowering->machine()->Float64ExtractLowWord32(),
+ node->InputAt(0)));
+ node->AppendInput(jsgraph_->zone(),
+ jsgraph_->Int32Constant(kHoleNanLower32));
+ NodeProperties::ChangeOp(node, jsgraph_->machine()->Word32Equal());
}
break;
}
case IrOpcode::kPlainPrimitiveToNumber: {
- VisitUnop(node, kMachAnyTagged, kTypeNumber | kRepTagged);
+ VisitUnop(node, UseInfo::AnyTagged(), NodeOutputInfo::NumberTagged());
if (lower()) {
// PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
Operator::Properties properties = node->op()->properties();
@@ -795,116 +1122,127 @@ class RepresentationSelector {
break;
}
case IrOpcode::kReferenceEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) {
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
break;
}
case IrOpcode::kStringEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringEqual(node);
break;
}
case IrOpcode::kStringLessThan: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringLessThan(node);
break;
}
case IrOpcode::kStringLessThanOrEqual: {
- VisitBinop(node, kMachAnyTagged, kRepBit);
+ VisitBinop(node, UseInfo::AnyTagged(), NodeOutputInfo::Bool());
if (lower()) lowering->DoStringLessThanOrEqual(node);
break;
}
case IrOpcode::kAllocate: {
- ProcessInput(node, 0, kMachAnyTagged);
+ ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessRemainingInputs(node, 1);
- SetOutput(node, kMachAnyTagged);
- if (lower()) lowering->DoAllocate(node);
+ SetOutput(node, NodeOutputInfo::AnyTagged());
break;
}
case IrOpcode::kLoadField: {
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+ ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessRemainingInputs(node, 1);
- SetOutput(node, access.machine_type);
- if (lower()) lowering->DoLoadField(node);
+ SetOutputFromMachineType(node, access.machine_type);
break;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access));
- ProcessInput(node, 1, access.machine_type);
+ ProcessInput(node, 0, UseInfoForBasePointer(access));
+ ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
+ access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
- SetOutput(node, 0);
- if (lower()) lowering->DoStoreField(node);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kLoadBuffer: {
BufferAccess access = BufferAccessOf(node->op());
- ProcessInput(node, 0, kMachPtr); // buffer
- ProcessInput(node, 1, kMachInt32); // offset
- ProcessInput(node, 2, kMachInt32); // length
+ ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
ProcessRemainingInputs(node, 3);
- // Tagged overrides everything if we have to do a typed array bounds
- // check, because we may need to return undefined then.
- MachineType output_type;
- if (use & kRepTagged) {
- output_type = kMachAnyTagged;
- } else if (use & kRepFloat64) {
- if (access.machine_type() & kRepFloat32) {
- output_type = access.machine_type();
+
+ NodeOutputInfo output_info;
+ if (truncation.TruncatesUndefinedToZeroOrNaN()) {
+ if (truncation.TruncatesNaNToZero()) {
+ // If undefined is truncated to a non-NaN number, we can use
+ // the load's representation.
+ output_info = NodeOutputInfo(access.machine_type().representation(),
+ NodeProperties::GetType(node));
} else {
- output_type = kMachFloat64;
+ // If undefined is truncated to a number, but the use can
+ // observe NaN, we need to output at least the float32
+ // representation.
+ if (access.machine_type().representation() ==
+ MachineRepresentation::kFloat32) {
+ output_info =
+ NodeOutputInfo(access.machine_type().representation(),
+ NodeProperties::GetType(node));
+ } else {
+ output_info = NodeOutputInfo::Float64();
+ }
}
- } else if (use & kRepFloat32) {
- output_type = kMachFloat32;
} else {
- output_type = access.machine_type();
+ // If undefined is not truncated away, we need to have the tagged
+ // representation.
+ output_info = NodeOutputInfo::AnyTagged();
}
- SetOutput(node, output_type);
- if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
+ SetOutput(node, output_info);
+ if (lower())
+ lowering->DoLoadBuffer(node, output_info.representation(), changer_);
break;
}
case IrOpcode::kStoreBuffer: {
BufferAccess access = BufferAccessOf(node->op());
- ProcessInput(node, 0, kMachPtr); // buffer
- ProcessInput(node, 1, kMachInt32); // offset
- ProcessInput(node, 2, kMachInt32); // length
- ProcessInput(node, 3, access.machine_type()); // value
+ ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // length
+ ProcessInput(node, 3,
+ TruncatingUseInfoFromRepresentation(
+ access.machine_type().representation())); // value
ProcessRemainingInputs(node, 4);
- SetOutput(node, 0);
+ SetOutput(node, NodeOutputInfo::None());
if (lower()) lowering->DoStoreBuffer(node);
break;
}
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
- ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, access.machine_type);
- if (lower()) lowering->DoLoadElement(node);
+ SetOutputFromMachineType(node, access.machine_type);
break;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
- ProcessInput(node, 1, kMachInt32); // index
- ProcessInput(node, 2, access.machine_type); // value
+ ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 2,
+ TruncatingUseInfoFromRepresentation(
+ access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
- SetOutput(node, 0);
- if (lower()) lowering->DoStoreElement(node);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kObjectIsNumber: {
- ProcessInput(node, 0, kMachAnyTagged);
- SetOutput(node, kRepBit | kTypeBool);
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
if (lower()) lowering->DoObjectIsNumber(node);
break;
}
case IrOpcode::kObjectIsSmi: {
- ProcessInput(node, 0, kMachAnyTagged);
- SetOutput(node, kRepBit | kTypeBool);
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
if (lower()) lowering->DoObjectIsSmi(node);
break;
}
@@ -913,29 +1251,31 @@ class RepresentationSelector {
// Machine-level operators.
//------------------------------------------------------------------
case IrOpcode::kLoad: {
- // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- MachineTypeUnion tBase = kRepTagged | kMachPtr;
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachIntPtr); // index
+ // TODO(jarin) Eventually, we should get rid of all machine stores
+ // from the high-level phases, then this becomes UNREACHABLE.
+ LoadRepresentation rep = LoadRepresentationOf(node->op());
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, rep);
+ SetOutputFromMachineType(node, rep);
break;
}
case IrOpcode::kStore: {
- // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
- MachineTypeUnion tBase = kRepTagged | kMachPtr;
- StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
- ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachIntPtr); // index
- ProcessInput(node, 2, rep.machine_type());
+ // TODO(jarin) Eventually, we should get rid of all machine stores
+ // from the high-level phases, then this becomes UNREACHABLE.
+ StoreRepresentation rep = StoreRepresentationOf(node->op());
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
+ ProcessInput(node, 1, UseInfo::PointerInt()); // index
+ ProcessInput(node, 2,
+ TruncatingUseInfoFromRepresentation(rep.representation()));
ProcessRemainingInputs(node, 3);
- SetOutput(node, 0);
+ SetOutput(node, NodeOutputInfo::None());
break;
}
case IrOpcode::kWord32Shr:
// We output unsigned int32 for shift right because JavaScript.
- return VisitBinop(node, kMachUint32, kMachUint32);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -944,12 +1284,15 @@ class RepresentationSelector {
// We use signed int32 as the output type for these word32 operations,
// though the machine bits are the same for either signed or unsigned,
// because JavaScript considers the result from these operations signed.
- return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kWord32Equal:
- return VisitBinop(node, kRepWord32, kRepBit);
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Bool());
case IrOpcode::kWord32Clz:
- return VisitUnop(node, kMachUint32, kMachUint32);
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
@@ -993,42 +1336,45 @@ class RepresentationSelector {
case IrOpcode::kWord64Shl:
case IrOpcode::kWord64Shr:
case IrOpcode::kWord64Sar:
- return VisitBinop(node, kRepWord64, kRepWord64);
+ return VisitBinop(node, UseInfo::TruncatingWord64(),
+ NodeOutputInfo::Int64());
case IrOpcode::kWord64Equal:
- return VisitBinop(node, kRepWord64, kRepBit);
+ return VisitBinop(node, UseInfo::TruncatingWord64(),
+ NodeOutputInfo::Bool());
case IrOpcode::kChangeInt32ToInt64:
- return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepWord64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kWord64, Type::Signed32()));
case IrOpcode::kChangeUint32ToUint64:
- return VisitUnop(node, kTypeUint32 | kRepWord32,
- kTypeUint32 | kRepWord64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kWord64, Type::Unsigned32()));
case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, kTypeNumber | kRepFloat64,
- kTypeNumber | kRepFloat32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float32());
case IrOpcode::kTruncateFloat64ToInt32:
- return VisitUnop(node, kTypeNumber | kRepFloat64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
case IrOpcode::kTruncateInt64ToInt32:
// TODO(titzer): Is kTypeInt32 correct here?
- return VisitUnop(node, kTypeInt32 | kRepWord64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Word64TruncatingToWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kChangeFloat32ToFloat64:
- return VisitUnop(node, kTypeNumber | kRepFloat32,
- kTypeNumber | kRepFloat64);
+ return VisitUnop(node, UseInfo::Float32(), NodeOutputInfo::Float64());
case IrOpcode::kChangeInt32ToFloat64:
- return VisitUnop(node, kTypeInt32 | kRepWord32,
- kTypeInt32 | kRepFloat64);
+ return VisitUnop(
+ node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kFloat64, Type::Signed32()));
case IrOpcode::kChangeUint32ToFloat64:
- return VisitUnop(node, kTypeUint32 | kRepWord32,
- kTypeUint32 | kRepFloat64);
+ return VisitUnop(node, UseInfo::TruncatingWord32(),
+ NodeOutputInfo(MachineRepresentation::kFloat64,
+ Type::Unsigned32()));
case IrOpcode::kChangeFloat64ToInt32:
- return VisitUnop(node, kTypeInt32 | kRepFloat64,
- kTypeInt32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
+ NodeOutputInfo::Int32());
case IrOpcode::kChangeFloat64ToUint32:
- return VisitUnop(node, kTypeUint32 | kRepFloat64,
- kTypeUint32 | kRepWord32);
+ return VisitUnop(node, UseInfo::Float64TruncatingToWord32(),
+ NodeOutputInfo::Uint32());
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
@@ -1042,25 +1388,28 @@ class RepresentationSelector {
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
- return VisitUnop(node, kMachFloat64, kMachFloat64);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64Cmp(node);
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
- return VisitUnop(node, kMachFloat64, kMachInt32);
+ return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Int32());
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
- return VisitBinop(node, kMachFloat64, kMachInt32, kMachFloat64);
+ return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
+ NodeOutputInfo::Float64());
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
- return VisitLeaf(node, kMachPtr);
+ return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kStateValues:
VisitStateValues(node);
break;
default:
VisitInputs(node);
+ // Assume the output is tagged.
+ SetOutput(node, NodeOutputInfo::AnyTagged());
break;
}
}
@@ -1071,7 +1420,7 @@ class RepresentationSelector {
replacement->op()->mnemonic());
if (replacement->id() < count_ &&
- GetInfo(replacement)->output == GetInfo(node)->output) {
+ GetInfo(node)->output_type()->Is(GetInfo(replacement)->output_type())) {
// Replace with a previously existing node eagerly only if the type is the
// same.
node->ReplaceUses(replacement);
@@ -1086,23 +1435,44 @@ class RepresentationSelector {
node->NullAllInputs(); // Node is now dead.
}
- void PrintUseInfo(Node* node) {
- TRACE("#%d:%-20s ", node->id(), node->op()->mnemonic());
- PrintInfo(GetUseInfo(node));
- TRACE("\n");
+ void PrintOutputInfo(NodeInfo* info) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << info->representation() << " (";
+ info->output_type()->PrintTo(os, Type::SEMANTIC_DIM);
+ os << ")";
+ }
}
- void PrintInfo(MachineTypeUnion info) {
+ void PrintRepresentation(MachineRepresentation rep) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << static_cast<MachineType>(info);
+ os << rep;
+ }
+ }
+
+ void PrintTruncation(Truncation truncation) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << truncation.description();
+ }
+ }
+
+ void PrintUseInfo(UseInfo info) {
+ if (FLAG_trace_representation) {
+ OFStream os(stdout);
+ os << info.preferred() << ":" << info.truncation().description();
}
}
private:
JSGraph* jsgraph_;
size_t const count_; // number of nodes in the graph
- NodeInfo* info_; // node id -> usage information
+ ZoneVector<NodeInfo> info_; // node id -> usage information
+#ifdef DEBUG
+ ZoneVector<InputUseInfos> node_input_use_infos_; // Debug information about
+ // requirements on inputs.
+#endif // DEBUG
NodeVector nodes_; // collected nodes
NodeVector replacements_; // replacements to be done after lowering
Phase phase_; // current phase of algorithm
@@ -1114,15 +1484,13 @@ class RepresentationSelector {
// lowering. Once this phase becomes a vanilla reducer, it should get source
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Type* safe_int_additive_range_;
+ TypeCache const& type_cache_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() >= 0);
DCHECK(node->id() < count_);
return &info_[node->id()];
}
-
- MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
};
@@ -1130,7 +1498,7 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
zone_(zone),
- zero_thirtyone_range_(Type::Range(0, 31, zone)),
+ type_cache_(TypeCache::Get()),
source_positions_(source_positions) {}
@@ -1142,134 +1510,13 @@ void SimplifiedLowering::LowerAllNodes() {
}
-namespace {
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineType representation,
- Type* field_type, Type* input_type) {
- if (field_type->Is(Type::TaggedSigned()) ||
- input_type->Is(Type::TaggedSigned())) {
- // Write barriers are only for writes of heap objects.
- return kNoWriteBarrier;
- }
- if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
- // Write barriers are not necessary when storing true, false, null or
- // undefined, because these special oddballs are always in the root set.
- return kNoWriteBarrier;
- }
- if (base_is_tagged == kTaggedBase &&
- RepresentationOf(representation) == kRepTagged) {
- if (input_type->IsConstant() &&
- input_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<HeapObject> input =
- Handle<HeapObject>::cast(input_type->AsConstant()->Value());
- if (input->IsMap()) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
- }
- Isolate* const isolate = input->GetIsolate();
- RootIndexMap root_index_map(isolate);
- int root_index = root_index_map.Lookup(*input);
- if (root_index != RootIndexMap::kInvalidRootIndex &&
- isolate->heap()->RootIsImmortalImmovable(root_index)) {
- // Write barriers are unnecessary for immortal immovable roots.
- return kNoWriteBarrier;
- }
- }
- if (field_type->Is(Type::TaggedPointer()) ||
- input_type->Is(Type::TaggedPointer())) {
- // Write barriers for heap objects don't need a Smi check.
- return kPointerWriteBarrier;
- }
- // Write barriers are only for writes into heap objects (i.e. tagged base).
- return kFullWriteBarrier;
- }
- return kNoWriteBarrier;
-}
-
-} // namespace
-
-
-void SimplifiedLowering::DoAllocate(Node* node) {
- PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
- if (pretenure == NOT_TENURED) {
- Callable callable = CodeFactory::AllocateInNewSpace(isolate());
- Node* target = jsgraph()->HeapConstant(callable.code());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- const Operator* op = common()->Call(descriptor);
- node->InsertInput(graph()->zone(), 0, target);
- node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, op);
- } else {
- DCHECK_EQ(TENURED, pretenure);
- AllocationSpace space = OLD_SPACE;
- Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
- Operator::Properties props = node->op()->properties();
- CallDescriptor* desc =
- Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
- ExternalReference ref(f, jsgraph()->isolate());
- int32_t flags = AllocateTargetSpace::encode(space);
- node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
- node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
- node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
- node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
- node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- }
-}
-
-
-void SimplifiedLowering::DoLoadField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
-}
-
-
-void SimplifiedLowering::DoStoreField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(1));
- WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.machine_type, access.type, type);
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(access.machine_type, kind)));
-}
-
-
-Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
- Node* const key) {
- Node* index = key;
- const int element_size_shift = ElementSizeLog2Of(access.machine_type);
- if (element_size_shift) {
- index = graph()->NewNode(machine()->Word32Shl(), index,
- jsgraph()->Int32Constant(element_size_shift));
- }
- const int fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->Int32Add(), index,
- jsgraph()->Int32Constant(fixed_offset));
- }
- if (machine()->Is64()) {
- // TODO(turbofan): This is probably only correct for typed arrays, and only
- // if the typed arrays are at most 2GiB in size, which happens to match
- // exactly our current situation.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
- }
- return index;
-}
-
-
-void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
+void SimplifiedLowering::DoLoadBuffer(Node* node,
+ MachineRepresentation output_rep,
RepresentationChanger* changer) {
DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
- DCHECK_NE(kMachNone, RepresentationOf(output_type));
- MachineType const type = BufferAccessOf(node->op()).machine_type();
- if (output_type != type) {
+ DCHECK_NE(MachineRepresentation::kNone, output_rep);
+ MachineType const access_type = BufferAccessOf(node->op()).machine_type();
+ if (output_rep != access_type.representation()) {
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
@@ -1285,19 +1532,21 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue =
- graph()->NewNode(machine()->Load(type), buffer, index, effect, if_true);
- Node* vtrue = changer->GetRepresentationFor(etrue, type, output_type);
+ Node* etrue = graph()->NewNode(machine()->Load(access_type), buffer, index,
+ effect, if_true);
+ Node* vtrue = changer->GetRepresentationFor(
+ etrue, access_type.representation(), NodeProperties::GetType(node),
+ output_rep, Truncation::None());
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
Node* vfalse;
- if (output_type & kRepTagged) {
+ if (output_rep == MachineRepresentation::kTagged) {
vfalse = jsgraph()->UndefinedConstant();
- } else if (output_type & kRepFloat64) {
+ } else if (output_rep == MachineRepresentation::kFloat64) {
vfalse =
jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
- } else if (output_type & kRepFloat32) {
+ } else if (output_rep == MachineRepresentation::kFloat32) {
vfalse =
jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
} else {
@@ -1315,37 +1564,18 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
node->ReplaceInput(1, vfalse);
node->ReplaceInput(2, merge);
node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, common()->Phi(output_type, 2));
+ NodeProperties::ChangeOp(node, common()->Phi(output_rep, 2));
} else {
- NodeProperties::ChangeOp(node, machine()->CheckedLoad(type));
+ NodeProperties::ChangeOp(node, machine()->CheckedLoad(access_type));
}
}
void SimplifiedLowering::DoStoreBuffer(Node* node) {
DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
- MachineType const type = BufferAccessOf(node->op()).machine_type();
- NodeProperties::ChangeOp(node, machine()->CheckedStore(type));
-}
-
-
-void SimplifiedLowering::DoLoadElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
-}
-
-
-void SimplifiedLowering::DoStoreElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(2));
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(
- node,
- machine()->Store(StoreRepresentation(
- access.machine_type,
- ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
- access.type, type))));
+ MachineRepresentation const rep =
+ BufferAccessOf(node->op()).machine_type().representation();
+ NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
}
@@ -1364,7 +1594,7 @@ void SimplifiedLowering::DoObjectIsNumber(Node* node) {
Node* vfalse = graph()->NewNode(
machine()->WordEqual(),
graph()->NewNode(
- machine()->Load(kMachAnyTagged), input,
+ machine()->Load(MachineType::AnyTagged()), input,
jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
graph()->start(), if_false),
jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
@@ -1372,7 +1602,7 @@ void SimplifiedLowering::DoObjectIsNumber(Node* node) {
node->ReplaceInput(0, vtrue);
node->AppendInput(graph()->zone(), vfalse);
node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(kMachBool, 2));
+ NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
}
@@ -1430,7 +1660,8 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
@@ -1507,7 +1738,8 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
@@ -1586,7 +1818,7 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
Diamond d(graph(), common(), check, BranchHint::kFalse);
Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
- return d.Phi(kMachUint32, zero, div);
+ return d.Phi(MachineRepresentation::kWord32, zero, div);
}
@@ -1618,7 +1850,8 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
const Operator* const merge_op = common()->Merge(2);
- const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+ const Operator* const phi_op =
+ common()->Phi(MachineRepresentation::kWord32, 2);
Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
graph()->start());
@@ -1649,10 +1882,10 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
}
-void SimplifiedLowering::DoShift(Node* node, Operator const* op) {
+void SimplifiedLowering::DoShift(Node* node, Operator const* op,
+ Type* rhs_type) {
Node* const rhs = NodeProperties::GetValueInput(node, 1);
- Type* const rhs_type = NodeProperties::GetType(rhs);
- if (!rhs_type->Is(zero_thirtyone_range_)) {
+ if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
jsgraph()->Int32Constant(0x1f)));
}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 49662a60c6..f9410f8b41 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -12,6 +12,11 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
+
namespace compiler {
// Forward declarations.
@@ -26,20 +31,14 @@ class SimplifiedLowering final {
void LowerAllNodes();
- // TODO(titzer): These are exposed for direct testing. Use a friend class.
- void DoAllocate(Node* node);
- void DoLoadField(Node* node);
- void DoStoreField(Node* node);
- // TODO(turbofan): The output_type can be removed once the result of the
+ // TODO(turbofan): The representation can be removed once the result of the
// representation analysis is stored in the node bounds.
- void DoLoadBuffer(Node* node, MachineType output_type,
+ void DoLoadBuffer(Node* node, MachineRepresentation rep,
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
- void DoLoadElement(Node* node);
- void DoStoreElement(Node* node);
void DoObjectIsNumber(Node* node);
void DoObjectIsSmi(Node* node);
- void DoShift(Node* node, Operator const* op);
+ void DoShift(Node* node, Operator const* op, Type* rhs_type);
void DoStringEqual(Node* node);
void DoStringLessThan(Node* node);
void DoStringLessThanOrEqual(Node* node);
@@ -47,7 +46,7 @@ class SimplifiedLowering final {
private:
JSGraph* const jsgraph_;
Zone* const zone_;
- Type* const zero_thirtyone_range_;
+ TypeCache const& type_cache_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
@@ -56,7 +55,6 @@ class SimplifiedLowering final {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Node* ComputeIndex(const ElementAccess& access, Node* const key);
Node* StringComparison(Node* node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index acd0f66ef6..120d7926d5 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -89,6 +89,8 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
+ case IrOpcode::kReferenceEqual:
+ return ReduceReferenceEqual(node);
default:
break;
}
@@ -96,6 +98,23 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
+Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
+ DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
+ Node* const left = NodeProperties::GetValueInput(node, 0);
+ Node* const right = NodeProperties::GetValueInput(node, 1);
+ HeapObjectMatcher match_left(left);
+ HeapObjectMatcher match_right(right);
+ if (match_left.HasValue() && match_right.HasValue()) {
+ if (match_left.Value().is_identical_to(match_right.Value())) {
+ return Replace(jsgraph()->TrueConstant());
+ } else {
+ return Replace(jsgraph()->FalseConstant());
+ }
+ }
+ return NoChange();
+}
+
+
Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
Node* a) {
DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 831090ac9f..979a3d0399 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -25,6 +25,8 @@ class SimplifiedOperatorReducer final : public Reducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceReferenceEqual(Node* node);
+
Reduction Change(Node* node, const Operator* op, Node* a);
Reduction ReplaceFloat64(double value);
Reduction ReplaceInt32(int32_t value);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 62dc8df621..1eaa287fee 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -29,24 +29,24 @@ MachineType BufferAccess::machine_type() const {
switch (external_array_type_) {
case kExternalUint8Array:
case kExternalUint8ClampedArray:
- return kMachUint8;
+ return MachineType::Uint8();
case kExternalInt8Array:
- return kMachInt8;
+ return MachineType::Int8();
case kExternalUint16Array:
- return kMachUint16;
+ return MachineType::Uint16();
case kExternalInt16Array:
- return kMachInt16;
+ return MachineType::Int16();
case kExternalUint32Array:
- return kMachUint32;
+ return MachineType::Uint32();
case kExternalInt32Array:
- return kMachInt32;
+ return MachineType::Int32();
case kExternalFloat32Array:
- return kMachFloat32;
+ return MachineType::Float32();
case kExternalFloat64Array:
- return kMachFloat64;
+ return MachineType::Float64();
}
UNREACHABLE();
- return kMachNone;
+ return MachineType::None();
}
@@ -176,6 +176,7 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
V(NumberToInt32, Operator::kNoProperties, 1) \
V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index ee6b8930b9..3821a6de57 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -7,8 +7,8 @@
#include <iosfwd>
-#include "src/compiler/machine-type.h"
#include "src/handles.h"
+#include "src/machine-type.h"
#include "src/objects.h"
namespace v8 {
@@ -148,6 +148,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* NumberShiftRightLogical();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
+ const Operator* NumberIsHoleNaN();
const Operator* PlainPrimitiveToNumber();
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 1c23c8ab88..77cc227038 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -271,7 +271,7 @@ Node* StateValuesAccess::iterator::node() {
MachineType StateValuesAccess::iterator::type() {
Node* state = Top()->node;
if (state->opcode() == IrOpcode::kStateValues) {
- return kMachAnyTagged;
+ return MachineType::AnyTagged();
} else {
DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
const ZoneVector<MachineType>* types =
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
new file mode 100644
index 0000000000..42c4627b67
--- /dev/null
+++ b/deps/v8/src/compiler/type-hint-analyzer.cc
@@ -0,0 +1,98 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/type-hint-analyzer.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/type-hints.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// TODO(bmeurer): This detour via types is ugly.
+BinaryOperationHints::Hint ToHint(Type* type) {
+ if (type->Is(Type::None())) return BinaryOperationHints::kNone;
+ if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
+ if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
+ if (type->Is(Type::Number())) return BinaryOperationHints::kNumber;
+ if (type->Is(Type::String())) return BinaryOperationHints::kString;
+ return BinaryOperationHints::kAny;
+}
+
+} // namespace
+
+
+bool TypeHintAnalysis::GetBinaryOperationHints(
+ TypeFeedbackId id, BinaryOperationHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
+ BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
+ *hints = BinaryOperationHints(ToHint(state.GetLeftType()),
+ ToHint(state.GetRightType()),
+ ToHint(state.GetResultType()));
+ return true;
+}
+
+
+bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
+ ToBooleanHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
+ ToBooleanStub stub(code->GetIsolate(), code->extra_ic_state());
+// TODO(bmeurer): Replace ToBooleanStub::Types with ToBooleanHints.
+#define ASSERT_COMPATIBLE(NAME, Name) \
+ STATIC_ASSERT(1 << ToBooleanStub::NAME == \
+ static_cast<int>(ToBooleanHint::k##Name))
+ ASSERT_COMPATIBLE(UNDEFINED, Undefined);
+ ASSERT_COMPATIBLE(BOOLEAN, Boolean);
+ ASSERT_COMPATIBLE(NULL_TYPE, Null);
+ ASSERT_COMPATIBLE(SMI, SmallInteger);
+ ASSERT_COMPATIBLE(SPEC_OBJECT, Receiver);
+ ASSERT_COMPATIBLE(STRING, String);
+ ASSERT_COMPATIBLE(SYMBOL, Symbol);
+ ASSERT_COMPATIBLE(HEAP_NUMBER, HeapNumber);
+ ASSERT_COMPATIBLE(SIMD_VALUE, SimdValue);
+#undef ASSERT_COMPATIBLE
+ *hints = ToBooleanHints(stub.types().ToIntegral());
+ return true;
+}
+
+
+TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
+ DisallowHeapAllocation no_gc;
+ TypeHintAnalysis::Infos infos(zone());
+ Isolate* const isolate = code->GetIsolate();
+ int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ switch (target->kind()) {
+ case Code::BINARY_OP_IC:
+ case Code::TO_BOOLEAN_IC: {
+ // Add this feedback to the {infos}.
+ TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
+ infos.insert(std::make_pair(id, handle(target, isolate)));
+ break;
+ }
+ default:
+ // Ignore the remaining code objects.
+ break;
+ }
+ }
+ return new (zone()) TypeHintAnalysis(infos);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/type-hint-analyzer.h b/deps/v8/src/compiler/type-hint-analyzer.h
new file mode 100644
index 0000000000..1a79905633
--- /dev/null
+++ b/deps/v8/src/compiler/type-hint-analyzer.h
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
+#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
+
+#include "src/compiler/type-hints.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The result of analyzing type hints.
+class TypeHintAnalysis final : public ZoneObject {
+ public:
+ typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
+
+ explicit TypeHintAnalysis(Infos const& infos) : infos_(infos) {}
+
+ bool GetBinaryOperationHints(TypeFeedbackId id,
+ BinaryOperationHints* hints) const;
+ bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
+
+ private:
+ Infos const infos_;
+};
+
+
+// The class that performs type hint analysis on the fullcodegen code object.
+class TypeHintAnalyzer final {
+ public:
+ explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
+
+ TypeHintAnalysis* Analyze(Handle<Code> code);
+
+ private:
+ Zone* zone() const { return zone_; }
+
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_HINT_ANALYZER_H_
diff --git a/deps/v8/src/compiler/type-hints.cc b/deps/v8/src/compiler/type-hints.cc
new file mode 100644
index 0000000000..06abad6380
--- /dev/null
+++ b/deps/v8/src/compiler/type-hints.cc
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/type-hints.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHints::Hint hint) {
+ switch (hint) {
+ case BinaryOperationHints::kNone:
+ return os << "None";
+ case BinaryOperationHints::kSignedSmall:
+ return os << "SignedSmall";
+ case BinaryOperationHints::kSigned32:
+ return os << "Signed32";
+ case BinaryOperationHints::kNumber:
+ return os << "Number";
+ case BinaryOperationHints::kString:
+ return os << "String";
+ case BinaryOperationHints::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
+ return os << hints.left() << "*" << hints.right() << "->" << hints.result();
+}
+
+
+std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
+ switch (hint) {
+ case ToBooleanHint::kNone:
+ return os << "None";
+ case ToBooleanHint::kUndefined:
+ return os << "Undefined";
+ case ToBooleanHint::kBoolean:
+ return os << "Boolean";
+ case ToBooleanHint::kNull:
+ return os << "Null";
+ case ToBooleanHint::kSmallInteger:
+ return os << "SmallInteger";
+ case ToBooleanHint::kReceiver:
+ return os << "Receiver";
+ case ToBooleanHint::kString:
+ return os << "String";
+ case ToBooleanHint::kSymbol:
+ return os << "Symbol";
+ case ToBooleanHint::kHeapNumber:
+ return os << "HeapNumber";
+ case ToBooleanHint::kSimdValue:
+ return os << "SimdValue";
+ case ToBooleanHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
+ if (hints == ToBooleanHint::kAny) return os << "Any";
+ if (hints == ToBooleanHint::kNone) return os << "None";
+ bool first = true;
+ for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * CHAR_BIT; ++i) {
+ ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
+ if (hints & hint) {
+ if (!first) os << "|";
+ first = false;
+ os << hint;
+ }
+ }
+ return os;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/type-hints.h b/deps/v8/src/compiler/type-hints.h
new file mode 100644
index 0000000000..f1cc64036c
--- /dev/null
+++ b/deps/v8/src/compiler/type-hints.h
@@ -0,0 +1,84 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPE_HINTS_H_
+#define V8_COMPILER_TYPE_HINTS_H_
+
+#include "src/base/flags.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Type hints for an binary operation.
+class BinaryOperationHints final {
+ public:
+ enum Hint { kNone, kSignedSmall, kSigned32, kNumber, kString, kAny };
+
+ BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
+ BinaryOperationHints(Hint left, Hint right, Hint result)
+ : bit_field_(LeftField::encode(left) | RightField::encode(right) |
+ ResultField::encode(result)) {}
+
+ static BinaryOperationHints Any() {
+ return BinaryOperationHints(kAny, kAny, kAny);
+ }
+
+ Hint left() const { return LeftField::decode(bit_field_); }
+ Hint right() const { return RightField::decode(bit_field_); }
+ Hint result() const { return ResultField::decode(bit_field_); }
+
+ bool operator==(BinaryOperationHints const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(BinaryOperationHints const& that) const {
+ return !(*this == that);
+ }
+
+ friend size_t hash_value(BinaryOperationHints const& hints) {
+ return hints.bit_field_;
+ }
+
+ private:
+ typedef BitField<Hint, 0, 3> LeftField;
+ typedef BitField<Hint, 3, 3> RightField;
+ typedef BitField<Hint, 6, 3> ResultField;
+
+ uint32_t bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
+std::ostream& operator<<(std::ostream&, BinaryOperationHints);
+
+
+// Type hints for the ToBoolean type conversion.
+enum class ToBooleanHint : uint16_t {
+ kNone = 0u,
+ kUndefined = 1u << 0,
+ kBoolean = 1u << 1,
+ kNull = 1u << 2,
+ kSmallInteger = 1u << 3,
+ kReceiver = 1u << 4,
+ kString = 1u << 5,
+ kSymbol = 1u << 6,
+ kHeapNumber = 1u << 7,
+ kSimdValue = 1u << 8,
+ kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
+ kSymbol | kHeapNumber | kSimdValue
+};
+
+std::ostream& operator<<(std::ostream&, ToBooleanHint);
+
+typedef base::Flags<ToBooleanHint, uint16_t> ToBooleanHints;
+
+std::ostream& operator<<(std::ostream&, ToBooleanHints);
+
+DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TYPE_HINTS_H_
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index d6a4a58fa0..c1f816d34b 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -54,12 +54,15 @@ Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
singleton_false_ = Type::Constant(factory->false_value(), zone);
singleton_true_ = Type::Constant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
falsish_ = Type::Union(
Type::Undetectable(),
- Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
- Type::NullOrUndefined(), zone),
+ Type::Union(
+ Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
+ Type::NullOrUndefined(), zone),
+ singleton_the_hole_, zone),
zone);
truish_ = Type::Union(
singleton_true_,
@@ -252,7 +255,6 @@ class Typer::Visitor : public Reducer {
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- static Type* JSUnaryNotTyper(Type*, Typer*);
static Type* JSTypeOfTyper(Type*, Typer*);
static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
@@ -634,6 +636,11 @@ Type* Typer::Visitor::TypeStateValues(Node* node) {
}
+Type* Typer::Visitor::TypeObjectState(Node* node) {
+ return Type::Internal(zone());
+}
+
+
Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
return Type::Internal(zone());
}
@@ -697,6 +704,10 @@ Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
return t->singleton_false_;
}
+ if ((lhs->Is(t->singleton_the_hole_) || rhs->Is(t->singleton_the_hole_)) &&
+ !lhs->Maybe(rhs)) {
+ return t->singleton_false_;
+ }
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
@@ -1136,16 +1147,6 @@ Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
// JS unary operators.
-Type* Typer::Visitor::JSUnaryNotTyper(Type* type, Typer* t) {
- return Invert(ToBoolean(type, t), t);
-}
-
-
-Type* Typer::Visitor::TypeJSUnaryNot(Node* node) {
- return TypeUnaryOp(node, JSUnaryNotTyper);
-}
-
-
Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
Factory* const f = t->isolate()->factory();
if (type->Is(Type::Boolean())) {
@@ -1215,13 +1216,23 @@ Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
}
+Type* Typer::Visitor::TypeJSCreateArray(Node* node) {
+ return Type::OtherObject();
+}
+
+
Type* Typer::Visitor::TypeJSCreateClosure(Node* node) {
+ return Type::Function();
+}
+
+
+Type* Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
return Type::OtherObject();
}
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
- return Type::None(), Type::OtherObject();
+ return Type::OtherObject();
}
@@ -1230,6 +1241,11 @@ Type* Typer::Visitor::TypeJSCreateLiteralObject(Node* node) {
}
+Type* Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
+ return Type::OtherObject();
+}
+
+
Type* Typer::Visitor::JSLoadPropertyTyper(Type* object, Type* name, Typer* t) {
// TODO(rossberg): Use range types and sized array types to filter undefined.
if (object->IsArray() && name->Is(Type::Integral32())) {
@@ -1245,7 +1261,37 @@ Type* Typer::Visitor::TypeJSLoadProperty(Node* node) {
}
-Type* Typer::Visitor::TypeJSLoadNamed(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
+ Factory* const f = isolate()->factory();
+ Handle<Name> name = NamedAccessOf(node->op()).name();
+ if (name.is_identical_to(f->prototype_string())) {
+ Type* receiver = Operand(node, 0);
+ if (receiver->Is(Type::None())) return Type::None();
+ if (receiver->IsConstant() &&
+ receiver->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(receiver->AsConstant()->Value());
+ if (function->has_prototype()) {
+ // We need to add a code dependency on the initial map of the {function}
+ // in order to be notified about changes to "prototype" of {function},
+ // so we can only infer a constant type if deoptimization is enabled.
+ if (flags() & kDeoptimizationEnabled) {
+ JSFunction::EnsureHasInitialMap(function);
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ return Type::Constant(handle(initial_map->prototype(), isolate()),
+ zone());
+ }
+ }
+ } else if (receiver->IsClass() &&
+ receiver->AsClass()->Map()->IsJSFunctionMap()) {
+ Handle<Map> map = receiver->AsClass()->Map();
+ return map->has_non_instance_prototype() ? Type::Primitive(zone())
+ : Type::Receiver(zone());
+ }
+ }
+ return Type::Any();
+}
Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) { return Type::Any(); }
@@ -1369,6 +1415,10 @@ Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
+ ContextAccess const& access = ContextAccessOf(node->op());
+ if (access.index() == Context::EXTENSION_INDEX) {
+ return Type::TaggedPointer();
+ }
// Since contexts are mutable, we just return the top.
return Type::Any();
}
@@ -1507,12 +1557,12 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsMinusZero:
case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
+ case Runtime::kInlineIsJSReceiver:
return Type::Boolean(zone());
case Runtime::kInlineDoubleLo:
case Runtime::kInlineDoubleHi:
return Type::Signed32();
case Runtime::kInlineConstructDouble:
- case Runtime::kInlineDateField:
case Runtime::kInlineMathFloor:
case Runtime::kInlineMathSqrt:
case Runtime::kInlineMathAcos:
@@ -1522,8 +1572,11 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return Type::Number();
case Runtime::kInlineMathClz32:
return Type::Range(0, 32, zone());
- case Runtime::kInlineStringGetLength:
- return Type::Range(0, String::kMaxLength, zone());
+ case Runtime::kInlineCreateIterResultObject:
+ case Runtime::kInlineRegExpConstructResult:
+ return Type::OtherObject();
+ case Runtime::kInlineSubString:
+ return Type::String();
case Runtime::kInlineToInteger:
return TypeUnaryOp(node, ToInteger);
case Runtime::kInlineToLength:
@@ -1540,6 +1593,8 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToPrimitive);
case Runtime::kInlineToString:
return TypeUnaryOp(node, ToString);
+ case Runtime::kHasInPrototypeChain:
+ return Type::Boolean();
default:
break;
}
@@ -1677,6 +1732,11 @@ Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
}
+Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
+ return Type::Boolean(zone());
+}
+
+
Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
@@ -2025,9 +2085,19 @@ Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeInt64AddWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
@@ -2075,6 +2145,26 @@ Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
}
+Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat64ToInt64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat32ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
+Type* Typer::Visitor::TypeTryTruncateFloat64ToUint64(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
}
@@ -2120,6 +2210,16 @@ Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
}
+Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeRoundUint64ToFloat64(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat64(), zone());
+}
+
+
Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
return Type::Number();
}
@@ -2223,12 +2323,36 @@ Type* Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
}
+Type* Typer::Visitor::TypeFloat32RoundDown(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat64RoundDown(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Type::Number();
}
+Type* Typer::Visitor::TypeFloat32RoundUp(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundUp(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat32RoundTruncate(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Type::Number();
@@ -2241,6 +2365,18 @@ Type* Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
}
+Type* Typer::Visitor::TypeFloat32RoundTiesEven(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeFloat64RoundTiesEven(Node* node) {
+ // TODO(sigurds): We could have a tighter bound here.
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
return Type::Signed32();
}
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 3b9b31b77f..41770266c8 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -58,6 +58,7 @@ class Typer {
Type* singleton_false_;
Type* singleton_true_;
+ Type* singleton_the_hole_;
Type* signed32ish_;
Type* unsigned32ish_;
Type* falsish_;
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 2b4bb9d092..1a3ef8e783 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -250,12 +250,12 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
default: {
- UNREACHABLE();
+ V8_Fatal(__FILE__, __LINE__, "Switch #%d illegally used by #%d:%s",
+ node->id(), use->id(), use->op()->mnemonic());
break;
}
}
}
- CHECK_LE(1, count_case);
CHECK_EQ(1, count_default);
CHECK_EQ(node->op()->ControlOutputCount(), count_case + count_default);
// Type is empty.
@@ -436,6 +436,7 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(6, input_count);
break;
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
case IrOpcode::kTypedStateValues:
// TODO(jarin): what are the constraints on these?
break;
@@ -456,7 +457,6 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSGreaterThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSUnaryNot:
// Type is Boolean.
CheckUpperIs(node, Type::Boolean());
break;
@@ -511,12 +511,21 @@ void Verifier::Visitor::Check(Node* node) {
// Type is OtherObject.
CheckUpperIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateArray:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
+ CheckUpperIs(node, Type::Function());
+ break;
+ case IrOpcode::kJSCreateIterResultObject:
+ // Type is OtherObject.
CheckUpperIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
CheckUpperIs(node, Type::OtherObject());
break;
@@ -673,6 +682,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kNumberIsHoleNaN:
+ // Number -> Boolean
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Boolean());
+ break;
case IrOpcode::kPlainPrimitiveToNumber:
// PlainPrimitive -> Number
CheckValueInputIs(node, 0, Type::PlainPrimitive());
@@ -848,7 +862,9 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kUint32LessThan:
case IrOpcode::kUint32LessThanOrEqual:
case IrOpcode::kInt64Add:
+ case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt64Sub:
+ case IrOpcode::kInt64SubWithOverflow:
case IrOpcode::kInt64Mul:
case IrOpcode::kInt64Div:
case IrOpcode::kInt64Mod:
@@ -878,15 +894,23 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Abs:
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat32RoundDown:
case IrOpcode::kFloat64RoundDown:
+ case IrOpcode::kFloat32RoundUp:
+ case IrOpcode::kFloat64RoundUp:
+ case IrOpcode::kFloat32RoundTruncate:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
+ case IrOpcode::kFloat32RoundTiesEven:
+ case IrOpcode::kFloat64RoundTiesEven:
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
case IrOpcode::kRoundInt64ToFloat32:
case IrOpcode::kRoundInt64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat64:
+ case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
case IrOpcode::kBitcastFloat32ToInt32:
@@ -900,6 +924,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
@@ -951,7 +979,7 @@ static bool HasDominatingDef(Schedule* schedule, Node* node,
use_pos--;
}
block = block->dominator();
- if (block == NULL) break;
+ if (block == nullptr) break;
use_pos = static_cast<int>(block->NodeCount()) - 1;
if (node == block->control_input()) return true;
}
@@ -962,7 +990,7 @@ static bool HasDominatingDef(Schedule* schedule, Node* node,
static bool Dominates(Schedule* schedule, Node* dominator, Node* dominatee) {
BasicBlock* dom = schedule->block(dominator);
BasicBlock* sub = schedule->block(dominatee);
- while (sub != NULL) {
+ while (sub != nullptr) {
if (sub == dom) {
return true;
}
@@ -1078,7 +1106,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
{
// Verify the dominance relation.
ZoneVector<BitVector*> dominators(zone);
- dominators.resize(count, NULL);
+ dominators.resize(count, nullptr);
// Compute a set of all the nodes that dominate a given node by using
// a forward fixpoint. O(n^2).
@@ -1091,7 +1119,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
queue.pop();
BitVector* block_doms = dominators[block->id().ToSize()];
BasicBlock* idom = block->dominator();
- if (idom != NULL && !block_doms->Contains(idom->id().ToInt())) {
+ if (idom != nullptr && !block_doms->Contains(idom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
block->rpo_number(), idom->rpo_number());
}
@@ -1099,7 +1127,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BasicBlock* succ = block->SuccessorAt(s);
BitVector* succ_doms = dominators[succ->id().ToSize()];
- if (succ_doms == NULL) {
+ if (succ_doms == nullptr) {
// First time visiting the node. S.doms = B U B.doms
succ_doms = new (zone) BitVector(static_cast<int>(count), zone);
succ_doms->CopyFrom(*block_doms);
@@ -1121,7 +1149,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
b != rpo_order->end(); ++b) {
BasicBlock* block = *b;
BasicBlock* idom = block->dominator();
- if (idom == NULL) continue;
+ if (idom == nullptr) continue;
BitVector* block_doms = dominators[block->id().ToSize()];
for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
@@ -1161,7 +1189,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
// Check inputs to control for this block.
Node* control = block->control_input();
- if (control != NULL) {
+ if (control != nullptr) {
CHECK_EQ(block, schedule->block(control));
CheckInputsDominate(schedule, block, control,
static_cast<int>(block->NodeCount()) - 1);
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
new file mode 100644
index 0000000000..17065d61b4
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -0,0 +1,2031 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-compiler.h"
+
+#include "src/isolate-inl.h"
+
+#include "src/base/platform/platform.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/typer.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
+ if (wasm::WasmOpcodes::IsSupported(opcode)) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Unsupported opcode #%d:%s reported as supported", opcode,
+ wasm::WasmOpcodes::OpcodeName(opcode));
+ }
+ V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
+ wasm::WasmOpcodes::OpcodeName(opcode));
+ return nullptr;
+}
+
+
+void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
+ Graph* g = jsgraph->graph();
+ if (g->end()) {
+ NodeProperties::MergeControlToEnd(g, jsgraph->common(), node);
+ } else {
+ g->SetEnd(g->NewNode(jsgraph->common()->End(1), node));
+ }
+}
+
+
+enum TrapReason {
+ kTrapUnreachable,
+ kTrapMemOutOfBounds,
+ kTrapDivByZero,
+ kTrapDivUnrepresentable,
+ kTrapRemByZero,
+ kTrapFloatUnrepresentable,
+ kTrapFuncInvalid,
+ kTrapFuncSigMismatch,
+ kTrapCount
+};
+
+
+static const char* kTrapMessages[] = {
+ "unreachable", "memory access out of bounds",
+ "divide by zero", "divide result unrepresentable",
+ "remainder by zero", "integer result unrepresentable",
+ "invalid function", "function signature mismatch"};
+} // namespace
+
+
+// A helper that handles building graph fragments for trapping.
+// To avoid generating a ton of redundant code that just calls the runtime
+// to trap, we generate a per-trap-reason block of code that all trap sites
+// in this function will branch to.
+class WasmTrapHelper : public ZoneObject {
+ public:
+ explicit WasmTrapHelper(WasmGraphBuilder* builder)
+ : builder_(builder),
+ jsgraph_(builder->jsgraph()),
+ graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
+ for (int i = 0; i < kTrapCount; i++) traps_[i] = nullptr;
+ }
+
+ // Make the current control path trap to unreachable.
+ void Unreachable() { ConnectTrap(kTrapUnreachable); }
+
+ // Add a check that traps if {node} is equal to {val}.
+ Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
+ Int32Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ if (val == 0) {
+ AddTrapIfFalse(reason, node);
+ } else {
+ AddTrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
+ jsgraph()->Int32Constant(val)));
+ }
+ return builder_->Control();
+ }
+
+ // Add a check that traps if {node} is zero.
+ Node* ZeroCheck32(TrapReason reason, Node* node) {
+ return TrapIfEq32(reason, node, 0);
+ }
+
+ // Add a check that traps if {node} is equal to {val}.
+ Node* TrapIfEq64(TrapReason reason, Node* node, int64_t val) {
+ Int64Matcher m(node);
+ if (m.HasValue() && !m.Is(val)) return graph()->start();
+ AddTrapIfTrue(reason,
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
+ jsgraph()->Int64Constant(val)));
+ return builder_->Control();
+ }
+
+ // Add a check that traps if {node} is zero.
+ Node* ZeroCheck64(TrapReason reason, Node* node) {
+ return TrapIfEq64(reason, node, 0);
+ }
+
+ // Add a trap if {cond} is true.
+ void AddTrapIfTrue(TrapReason reason, Node* cond) {
+ AddTrapIf(reason, cond, true);
+ }
+
+ // Add a trap if {cond} is false.
+ void AddTrapIfFalse(TrapReason reason, Node* cond) {
+ AddTrapIf(reason, cond, false);
+ }
+
+ // Add a trap if {cond} is true or false according to {iftrue}.
+ void AddTrapIf(TrapReason reason, Node* cond, bool iftrue) {
+ Node** effect_ptr = builder_->effect_;
+ Node** control_ptr = builder_->control_;
+ Node* before = *effect_ptr;
+ BranchHint hint = iftrue ? BranchHint::kFalse : BranchHint::kTrue;
+ Node* branch = graph()->NewNode(common()->Branch(hint), cond, *control_ptr);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+ *control_ptr = iftrue ? if_true : if_false;
+ ConnectTrap(reason);
+ *control_ptr = iftrue ? if_false : if_true;
+ *effect_ptr = before;
+ }
+
+ private:
+ WasmGraphBuilder* builder_;
+ JSGraph* jsgraph_;
+ Graph* graph_;
+ Node* traps_[kTrapCount];
+ Node* effects_[kTrapCount];
+
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph()->common(); }
+
+ void ConnectTrap(TrapReason reason) {
+ if (traps_[reason] == nullptr) {
+ // Create trap code for the first time this trap is used.
+ return BuildTrapCode(reason);
+ }
+ // Connect the current control and effect to the existing trap code.
+ builder_->AppendToMerge(traps_[reason], builder_->Control());
+ builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
+ }
+
+ void BuildTrapCode(TrapReason reason) {
+ Node* exception = builder_->String(kTrapMessages[reason]);
+ Node* end;
+ Node** control_ptr = builder_->control_;
+ Node** effect_ptr = builder_->effect_;
+ wasm::ModuleEnv* module = builder_->module_;
+ *control_ptr = traps_[reason] =
+ graph()->NewNode(common()->Merge(1), *control_ptr);
+ *effect_ptr = effects_[reason] =
+ graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
+
+ if (module && !module->context.is_null()) {
+ // Use the module context to call the runtime to throw an exception.
+ Runtime::FunctionId f = Runtime::kThrow;
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ Node* inputs[] = {
+ jsgraph()->CEntryStubConstant(fun->result_size), // C entry
+ exception, // exception
+ jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(fun->nargs), // arity
+ jsgraph()->Constant(module->context), // context
+ *effect_ptr,
+ *control_ptr};
+
+ Node* node = graph()->NewNode(
+ common()->Call(desc), static_cast<int>(arraysize(inputs)), inputs);
+ *control_ptr = node;
+ *effect_ptr = node;
+ }
+ if (false) {
+ // End the control flow with a throw
+ Node* thrw =
+ graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
+ *effect_ptr, *control_ptr);
+ end = thrw;
+ } else {
+ // End the control flow with returning 0xdeadbeef
+ Node* ret_value;
+ if (builder_->GetFunctionSignature()->return_count() > 0) {
+ switch (builder_->GetFunctionSignature()->GetReturn()) {
+ case wasm::kAstI32:
+ ret_value = jsgraph()->Int32Constant(0xdeadbeef);
+ break;
+ case wasm::kAstI64:
+ ret_value = jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+ break;
+ case wasm::kAstF32:
+ ret_value = jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+ break;
+ case wasm::kAstF64:
+ ret_value = jsgraph()->Float64Constant(
+ bit_cast<double>(0xdeadbeefdeadbeef));
+ break;
+ default:
+ UNREACHABLE();
+ ret_value = nullptr;
+ }
+ } else {
+ ret_value = jsgraph()->Int32Constant(0xdeadbeef);
+ }
+ end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
+ *effect_ptr, *control_ptr);
+ }
+
+ MergeControlToEnd(jsgraph(), end);
+ }
+};
+
+
+WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
+ wasm::FunctionSig* function_signature)
+ : zone_(zone),
+ jsgraph_(jsgraph),
+ module_(nullptr),
+ mem_buffer_(nullptr),
+ mem_size_(nullptr),
+ function_table_(nullptr),
+ control_(nullptr),
+ effect_(nullptr),
+ cur_buffer_(def_buffer_),
+ cur_bufsize_(kDefaultBufferSize),
+ trap_(new (zone) WasmTrapHelper(this)),
+ function_signature_(function_signature) {
+ DCHECK_NOT_NULL(jsgraph_);
+}
+
+
+Node* WasmGraphBuilder::Error() { return jsgraph()->Dead(); }
+
+
+Node* WasmGraphBuilder::Start(unsigned params) {
+ Node* start = graph()->NewNode(jsgraph()->common()->Start(params));
+ graph()->SetStart(start);
+ return start;
+}
+
+
+Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+ return graph()->NewNode(jsgraph()->common()->Parameter(index),
+ graph()->start());
+}
+
+
+Node* WasmGraphBuilder::Loop(Node* entry) {
+ return graph()->NewNode(jsgraph()->common()->Loop(1), entry);
+}
+
+
+Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
+ Node* terminate =
+ graph()->NewNode(jsgraph()->common()->Terminate(), effect, control);
+ MergeControlToEnd(jsgraph(), terminate);
+ return terminate;
+}
+
+
+unsigned WasmGraphBuilder::InputCount(Node* node) {
+ return static_cast<unsigned>(node->InputCount());
+}
+
+
+bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
+ return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
+ NodeProperties::GetControlInput(phi) == merge;
+}
+
+
+void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ merge->AppendInput(jsgraph()->zone(), from);
+ int new_size = merge->InputCount();
+ NodeProperties::ChangeOp(
+ merge, jsgraph()->common()->ResizeMergeOrPhi(merge->op(), new_size));
+}
+
+
+void WasmGraphBuilder::AppendToPhi(Node* merge, Node* phi, Node* from) {
+ DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ int new_size = phi->InputCount();
+ phi->InsertInput(jsgraph()->zone(), phi->InputCount() - 1, from);
+ NodeProperties::ChangeOp(
+ phi, jsgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
+}
+
+
+Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
+ return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
+}
+
+
+Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+ Node* control) {
+ DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
+ Node** buf = Realloc(vals, count);
+ buf = Realloc(buf, count + 1);
+ buf[count] = control;
+ return graph()->NewNode(jsgraph()->common()->Phi(type, count), count + 1,
+ buf);
+}
+
+
+Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
+ Node* control) {
+ DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
+ Node** buf = Realloc(effects, count);
+ buf = Realloc(buf, count + 1);
+ buf[count] = control;
+ return graph()->NewNode(jsgraph()->common()->EffectPhi(count), count + 1,
+ buf);
+}
+
+
+Node* WasmGraphBuilder::Int32Constant(int32_t value) {
+ return jsgraph()->Int32Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Int64Constant(int64_t value) {
+ return jsgraph()->Int64Constant(value);
+}
+
+
+Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
+ Node* right) {
+ const Operator* op;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ switch (opcode) {
+ case wasm::kExprI32Add:
+ op = m->Int32Add();
+ break;
+ case wasm::kExprI32Sub:
+ op = m->Int32Sub();
+ break;
+ case wasm::kExprI32Mul:
+ op = m->Int32Mul();
+ break;
+ case wasm::kExprI32DivS: {
+ trap_->ZeroCheck32(kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
+ jsgraph()->Int32Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq32(kTrapDivUnrepresentable, left, kMinInt);
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
+ denom_is_not_m1, *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ }
+ case wasm::kExprI32DivU:
+ op = m->Uint32Div();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck32(kTrapDivByZero, right));
+ case wasm::kExprI32RemS: {
+ trap_->ZeroCheck32(kTrapRemByZero, right);
+ Diamond d(graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word32Equal(), right,
+ jsgraph()->Int32Constant(-1)));
+
+ Node* rem = graph()->NewNode(m->Int32Mod(), left, right, d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ rem);
+ }
+ case wasm::kExprI32RemU:
+ op = m->Uint32Mod();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck32(kTrapRemByZero, right));
+ case wasm::kExprI32And:
+ op = m->Word32And();
+ break;
+ case wasm::kExprI32Ior:
+ op = m->Word32Or();
+ break;
+ case wasm::kExprI32Xor:
+ op = m->Word32Xor();
+ break;
+ case wasm::kExprI32Shl:
+ op = m->Word32Shl();
+ break;
+ case wasm::kExprI32ShrU:
+ op = m->Word32Shr();
+ break;
+ case wasm::kExprI32ShrS:
+ op = m->Word32Sar();
+ break;
+ case wasm::kExprI32Eq:
+ op = m->Word32Equal();
+ break;
+ case wasm::kExprI32Ne:
+ return Invert(Binop(wasm::kExprI32Eq, left, right));
+ case wasm::kExprI32LtS:
+ op = m->Int32LessThan();
+ break;
+ case wasm::kExprI32LeS:
+ op = m->Int32LessThanOrEqual();
+ break;
+ case wasm::kExprI32LtU:
+ op = m->Uint32LessThan();
+ break;
+ case wasm::kExprI32LeU:
+ op = m->Uint32LessThanOrEqual();
+ break;
+ case wasm::kExprI32GtS:
+ op = m->Int32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GeS:
+ op = m->Int32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GtU:
+ op = m->Uint32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI32GeU:
+ op = m->Uint32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+#if WASM_64
+ // Opcodes only supported on 64-bit platforms.
+ // TODO(titzer): query the machine operator builder here instead of #ifdef.
+ case wasm::kExprI64Add:
+ op = m->Int64Add();
+ break;
+ case wasm::kExprI64Sub:
+ op = m->Int64Sub();
+ break;
+ case wasm::kExprI64Mul:
+ op = m->Int64Mul();
+ break;
+ case wasm::kExprI64DivS: {
+ trap_->ZeroCheck64(kTrapDivByZero, right);
+ Node* before = *control_;
+ Node* denom_is_m1;
+ Node* denom_is_not_m1;
+ Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
+ *control_ = denom_is_m1;
+ trap_->TrapIfEq64(kTrapDivUnrepresentable, left,
+ std::numeric_limits<int64_t>::min());
+ if (*control_ != denom_is_m1) {
+ *control_ = graph()->NewNode(jsgraph()->common()->Merge(2),
+ denom_is_not_m1, *control_);
+ } else {
+ *control_ = before;
+ }
+ return graph()->NewNode(m->Int64Div(), left, right, *control_);
+ }
+ case wasm::kExprI64DivU:
+ op = m->Uint64Div();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck64(kTrapDivByZero, right));
+ case wasm::kExprI64RemS: {
+ trap_->ZeroCheck64(kTrapRemByZero, right);
+ Diamond d(jsgraph()->graph(), jsgraph()->common(),
+ graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)));
+
+ Node* rem = graph()->NewNode(m->Int64Mod(), left, right, d.if_false);
+
+ return d.Phi(MachineRepresentation::kWord64, jsgraph()->Int64Constant(0),
+ rem);
+ }
+ case wasm::kExprI64RemU:
+ op = m->Uint64Mod();
+ return graph()->NewNode(op, left, right,
+ trap_->ZeroCheck64(kTrapRemByZero, right));
+ case wasm::kExprI64And:
+ op = m->Word64And();
+ break;
+ case wasm::kExprI64Ior:
+ op = m->Word64Or();
+ break;
+ case wasm::kExprI64Xor:
+ op = m->Word64Xor();
+ break;
+ case wasm::kExprI64Shl:
+ op = m->Word64Shl();
+ break;
+ case wasm::kExprI64ShrU:
+ op = m->Word64Shr();
+ break;
+ case wasm::kExprI64ShrS:
+ op = m->Word64Sar();
+ break;
+ case wasm::kExprI64Eq:
+ op = m->Word64Equal();
+ break;
+ case wasm::kExprI64Ne:
+ return Invert(Binop(wasm::kExprI64Eq, left, right));
+ case wasm::kExprI64LtS:
+ op = m->Int64LessThan();
+ break;
+ case wasm::kExprI64LeS:
+ op = m->Int64LessThanOrEqual();
+ break;
+ case wasm::kExprI64LtU:
+ op = m->Uint64LessThan();
+ break;
+ case wasm::kExprI64LeU:
+ op = m->Uint64LessThanOrEqual();
+ break;
+ case wasm::kExprI64GtS:
+ op = m->Int64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GeS:
+ op = m->Int64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GtU:
+ op = m->Uint64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprI64GeU:
+ op = m->Uint64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+#endif
+
+ case wasm::kExprF32CopySign:
+ return BuildF32CopySign(left, right);
+ case wasm::kExprF64CopySign:
+ return BuildF64CopySign(left, right);
+ case wasm::kExprF32Add:
+ op = m->Float32Add();
+ break;
+ case wasm::kExprF32Sub:
+ op = m->Float32Sub();
+ break;
+ case wasm::kExprF32Mul:
+ op = m->Float32Mul();
+ break;
+ case wasm::kExprF32Div:
+ op = m->Float32Div();
+ break;
+ case wasm::kExprF32Eq:
+ op = m->Float32Equal();
+ break;
+ case wasm::kExprF32Ne:
+ return Invert(Binop(wasm::kExprF32Eq, left, right));
+ case wasm::kExprF32Lt:
+ op = m->Float32LessThan();
+ break;
+ case wasm::kExprF32Ge:
+ op = m->Float32LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Gt:
+ op = m->Float32LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Le:
+ op = m->Float32LessThanOrEqual();
+ break;
+ case wasm::kExprF64Add:
+ op = m->Float64Add();
+ break;
+ case wasm::kExprF64Sub:
+ op = m->Float64Sub();
+ break;
+ case wasm::kExprF64Mul:
+ op = m->Float64Mul();
+ break;
+ case wasm::kExprF64Div:
+ op = m->Float64Div();
+ break;
+ case wasm::kExprF64Eq:
+ op = m->Float64Equal();
+ break;
+ case wasm::kExprF64Ne:
+ return Invert(Binop(wasm::kExprF64Eq, left, right));
+ case wasm::kExprF64Lt:
+ op = m->Float64LessThan();
+ break;
+ case wasm::kExprF64Le:
+ op = m->Float64LessThanOrEqual();
+ break;
+ case wasm::kExprF64Gt:
+ op = m->Float64LessThan();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF64Ge:
+ op = m->Float64LessThanOrEqual();
+ std::swap(left, right);
+ break;
+ case wasm::kExprF32Min:
+ return BuildF32Min(left, right);
+ case wasm::kExprF64Min:
+ return BuildF64Min(left, right);
+ case wasm::kExprF32Max:
+ return BuildF32Max(left, right);
+ case wasm::kExprF64Max:
+ return BuildF64Max(left, right);
+ default:
+ op = UnsupportedOpcode(opcode);
+ }
+ return graph()->NewNode(op, left, right);
+}
+
+
// Lowers a WASM unary operator applied to |input| into machine-level graph
// nodes. Most cases select a machine operator and fall through to a single
// NewNode at the bottom; cases that need multi-node expansions or trap
// checks return directly.
Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
  const Operator* op;
  MachineOperatorBuilder* m = jsgraph()->machine();
  switch (opcode) {
    case wasm::kExprBoolNot:
      // Boolean negation: compare against zero.
      op = m->Word32Equal();
      return graph()->NewNode(op, input, jsgraph()->Int32Constant(0));
    case wasm::kExprF32Abs:
      op = m->Float32Abs();
      break;
    case wasm::kExprF32Neg:
      return BuildF32Neg(input);
    case wasm::kExprF32Sqrt:
      op = m->Float32Sqrt();
      break;
    case wasm::kExprF64Abs:
      op = m->Float64Abs();
      break;
    case wasm::kExprF64Neg:
      return BuildF64Neg(input);
    case wasm::kExprF64Sqrt:
      op = m->Float64Sqrt();
      break;
    case wasm::kExprI32SConvertF64:
      // Checked conversion: traps when the truncated value is not
      // representable as i32.
      return BuildI32SConvertF64(input);
    case wasm::kExprI32UConvertF64:
      return BuildI32UConvertF64(input);
    case wasm::kExprF32ConvertF64:
      op = m->TruncateFloat64ToFloat32();
      break;
    case wasm::kExprF64SConvertI32:
      op = m->ChangeInt32ToFloat64();
      break;
    case wasm::kExprF64UConvertI32:
      op = m->ChangeUint32ToFloat64();
      break;
    case wasm::kExprF32SConvertI32:
      op = m->ChangeInt32ToFloat64();  // TODO(titzer): two conversions
      input = graph()->NewNode(op, input);
      op = m->TruncateFloat64ToFloat32();
      break;
    case wasm::kExprF32UConvertI32:
      // Same two-step widening as the signed case above.
      op = m->ChangeUint32ToFloat64();
      input = graph()->NewNode(op, input);
      op = m->TruncateFloat64ToFloat32();
      break;
    case wasm::kExprI32SConvertF32:
      return BuildI32SConvertF32(input);
    case wasm::kExprI32UConvertF32:
      return BuildI32UConvertF32(input);
    case wasm::kExprF64ConvertF32:
      op = m->ChangeFloat32ToFloat64();
      break;
    case wasm::kExprF32ReinterpretI32:
      op = m->BitcastInt32ToFloat32();
      break;
    case wasm::kExprI32ReinterpretF32:
      op = m->BitcastFloat32ToInt32();
      break;
    case wasm::kExprI32Clz:
      op = m->Word32Clz();
      break;
    case wasm::kExprI32Ctz: {
      // Use the hardware instruction when available; otherwise expand to a
      // software bit-twiddling sequence.
      if (m->Word32Ctz().IsSupported()) {
        op = m->Word32Ctz().op();
        break;
      } else {
        return BuildI32Ctz(input);
      }
    }
    case wasm::kExprI32Popcnt: {
      if (m->Word32Popcnt().IsSupported()) {
        op = m->Word32Popcnt().op();
        break;
      } else {
        return BuildI32Popcnt(input);
      }
    }
    // Rounding opcodes have no software fallback here: when the target lacks
    // the instruction, the opcode is reported as unsupported.
    case wasm::kExprF32Floor: {
      if (m->Float32RoundDown().IsSupported()) {
        op = m->Float32RoundDown().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF32Ceil: {
      if (m->Float32RoundUp().IsSupported()) {
        op = m->Float32RoundUp().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF32Trunc: {
      if (m->Float32RoundTruncate().IsSupported()) {
        op = m->Float32RoundTruncate().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF32NearestInt: {
      if (m->Float32RoundTiesEven().IsSupported()) {
        op = m->Float32RoundTiesEven().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF64Floor: {
      if (m->Float64RoundDown().IsSupported()) {
        op = m->Float64RoundDown().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF64Ceil: {
      if (m->Float64RoundUp().IsSupported()) {
        op = m->Float64RoundUp().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF64Trunc: {
      if (m->Float64RoundTruncate().IsSupported()) {
        op = m->Float64RoundTruncate().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }
    case wasm::kExprF64NearestInt: {
      if (m->Float64RoundTiesEven().IsSupported()) {
        op = m->Float64RoundTiesEven().op();
        break;
      } else {
        op = UnsupportedOpcode(opcode);
        break;
      }
    }

#if WASM_64
    // Opcodes only supported on 64-bit platforms.
    // TODO(titzer): query the machine operator builder here instead of #ifdef.
    case wasm::kExprI32ConvertI64:
      op = m->TruncateInt64ToInt32();
      break;
    case wasm::kExprI64SConvertI32:
      op = m->ChangeInt32ToInt64();
      break;
    case wasm::kExprI64UConvertI32:
      op = m->ChangeUint32ToUint64();
      break;
    case wasm::kExprF32SConvertI64:
      op = m->RoundInt64ToFloat32();
      break;
    case wasm::kExprF32UConvertI64:
      op = m->RoundUint64ToFloat32();
      break;
    case wasm::kExprF64SConvertI64:
      op = m->RoundInt64ToFloat64();
      break;
    case wasm::kExprF64UConvertI64:
      op = m->RoundUint64ToFloat64();
      break;
    // Checked float->i64 conversions: the Try* machine operators yield a
    // (value, success) pair via projections; a zero success flag traps.
    case wasm::kExprI64SConvertF32: {
      Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToInt64(), input);
      Node* result =
          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
      Node* overflow =
          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
      return result;
    }
    case wasm::kExprI64SConvertF64: {
      Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToInt64(), input);
      Node* result =
          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
      Node* overflow =
          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
      return result;
    }
    case wasm::kExprI64UConvertF32: {
      Node* trunc = graph()->NewNode(m->TryTruncateFloat32ToUint64(), input);
      Node* result =
          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
      Node* overflow =
          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
      return result;
    }
    case wasm::kExprI64UConvertF64: {
      Node* trunc = graph()->NewNode(m->TryTruncateFloat64ToUint64(), input);
      Node* result =
          graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
      Node* overflow =
          graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
      trap_->ZeroCheck64(kTrapFloatUnrepresentable, overflow);
      return result;
    }
    case wasm::kExprF64ReinterpretI64:
      op = m->BitcastInt64ToFloat64();
      break;
    case wasm::kExprI64ReinterpretF64:
      op = m->BitcastFloat64ToInt64();
      break;
    case wasm::kExprI64Clz:
      op = m->Word64Clz();
      break;
    case wasm::kExprI64Ctz: {
      if (m->Word64Ctz().IsSupported()) {
        op = m->Word64Ctz().op();
        break;
      } else {
        return BuildI64Ctz(input);
      }
    }
    case wasm::kExprI64Popcnt: {
      if (m->Word64Popcnt().IsSupported()) {
        op = m->Word64Popcnt().op();
        break;
      } else {
        return BuildI64Popcnt(input);
      }
    }
#endif
    default:
      op = UnsupportedOpcode(opcode);
  }
  return graph()->NewNode(op, input);
}
+
+
// Returns a (cached) f32 constant node for |value|.
Node* WasmGraphBuilder::Float32Constant(float value) {
  return jsgraph()->Float32Constant(value);
}
+
+
// Returns a (cached) f64 constant node for |value|.
Node* WasmGraphBuilder::Float64Constant(double value) {
  return jsgraph()->Float64Constant(value);
}
+
+
// Returns a (cached) heap-object constant node for |value|.
Node* WasmGraphBuilder::Constant(Handle<Object> value) {
  return jsgraph()->Constant(value);
}
+
+
+Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
+ Node** false_node) {
+ DCHECK_NOT_NULL(cond);
+ DCHECK_NOT_NULL(*control_);
+ Node* branch =
+ graph()->NewNode(jsgraph()->common()->Branch(), cond, *control_);
+ *true_node = graph()->NewNode(jsgraph()->common()->IfTrue(), branch);
+ *false_node = graph()->NewNode(jsgraph()->common()->IfFalse(), branch);
+ return branch;
+}
+
+
+Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
+ return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
+}
+
+
+Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ return graph()->NewNode(jsgraph()->common()->IfValue(value), sw);
+}
+
+
+Node* WasmGraphBuilder::IfDefault(Node* sw) {
+ DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
+ return graph()->NewNode(jsgraph()->common()->IfDefault(), sw);
+}
+
+
+Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
+ DCHECK_NOT_NULL(*control_);
+ DCHECK_NOT_NULL(*effect_);
+
+ if (count == 0) {
+ // Handle a return of void.
+ vals[0] = jsgraph()->Int32Constant(0);
+ count = 1;
+ }
+
+ Node** buf = Realloc(vals, count);
+ buf = Realloc(buf, count + 2);
+ buf[count] = *effect_;
+ buf[count + 1] = *control_;
+ Node* ret = graph()->NewNode(jsgraph()->common()->Return(), count + 2, vals);
+
+ MergeControlToEnd(jsgraph(), ret);
+ return ret;
+}
+
+
// Emits a return of no values (lowered by Return() to a zero word).
Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
+
+
// Lowers the WASM 'unreachable' instruction: emits an unconditional trap.
// Produces no value node.
Node* WasmGraphBuilder::Unreachable() {
  trap_->Unreachable();
  return nullptr;
}
+
+
+Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
+ Node* result =
+ Unop(wasm::kExprF32ReinterpretI32,
+ Binop(wasm::kExprI32Xor, Unop(wasm::kExprI32ReinterpretF32, input),
+ jsgraph()->Int32Constant(0x80000000)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
+#if WASM_64
+ Node* result =
+ Unop(wasm::kExprF64ReinterpretI64,
+ Binop(wasm::kExprI64Xor, Unop(wasm::kExprI64ReinterpretF64, input),
+ jsgraph()->Int64Constant(0x8000000000000000)));
+
+ return result;
+#else
+ MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Node* old_high_word = graph()->NewNode(m->Float64ExtractHighWord32(), input);
+ Node* new_high_word = Binop(wasm::kExprI32Xor, old_high_word,
+ jsgraph()->Int32Constant(0x80000000));
+
+ return graph()->NewNode(m->Float64InsertHighWord32(), input, new_high_word);
+#endif
+}
+
+
+Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
+ Node* result = Unop(
+ wasm::kExprF32ReinterpretI32,
+ Binop(wasm::kExprI32Ior,
+ Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
+ jsgraph()->Int32Constant(0x7fffffff)),
+ Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
+ jsgraph()->Int32Constant(0x80000000))));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
+#if WASM_64
+ Node* result = Unop(
+ wasm::kExprF64ReinterpretI64,
+ Binop(wasm::kExprI64Ior,
+ Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
+ jsgraph()->Int64Constant(0x7fffffffffffffff)),
+ Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
+ jsgraph()->Int64Constant(0x8000000000000000))));
+
+ return result;
+#else
+ MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Node* high_word_left = graph()->NewNode(m->Float64ExtractHighWord32(), left);
+ Node* high_word_right =
+ graph()->NewNode(m->Float64ExtractHighWord32(), right);
+
+ Node* new_high_word =
+ Binop(wasm::kExprI32Ior, Binop(wasm::kExprI32And, high_word_left,
+ jsgraph()->Int32Constant(0x7fffffff)),
+ Binop(wasm::kExprI32And, high_word_right,
+ jsgraph()->Int32Constant(0x80000000)));
+
+ return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
+#endif
+}
+
+
+Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
+ Diamond left_le_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Le, left, right));
+
+ Diamond right_lt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Eq, left, left));
+
+ return left_le_right.Phi(
+ wasm::kAstF32, left,
+ right_lt_left.Phi(wasm::kAstF32, right,
+ left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
+ Diamond left_ge_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Ge, left, right));
+
+ Diamond right_gt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Gt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF32Eq, left, left));
+
+ return left_ge_right.Phi(
+ wasm::kAstF32, left,
+ right_gt_left.Phi(wasm::kAstF32, right,
+ left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
+ Diamond left_le_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Le, left, right));
+
+ Diamond right_lt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Eq, left, left));
+
+ return left_le_right.Phi(
+ wasm::kAstF64, left,
+ right_lt_left.Phi(wasm::kAstF64, right,
+ left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
+ Diamond left_ge_right(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Ge, left, right));
+
+ Diamond right_gt_left(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Lt, right, left));
+
+ Diamond left_is_not_nan(graph(), jsgraph()->common(),
+ Binop(wasm::kExprF64Eq, left, left));
+
+ return left_ge_right.Phi(
+ wasm::kAstF64, left,
+ right_gt_left.Phi(wasm::kAstF64, right,
+ left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+}
+
+
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF32Trunc, input);
+ // TODO(titzer): two conversions
+ Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), f64_trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF64Trunc, input);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF32Trunc, input);
+ // TODO(titzer): two conversions
+ Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), f64_trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // Truncation of the input value is needed for the overflow check later.
+ Node* trunc = Unop(wasm::kExprF64Trunc, input);
+ Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
+
+ // Convert the result back to f64. If we end up at a different value than the
+ // truncated input value, then there has been an overflow and we trap.
+ Node* check = Unop(wasm::kExprF64UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
+ trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
+ //// Implement the following code as TF graph.
+ // value = value | (value << 1);
+ // value = value | (value << 2);
+ // value = value | (value << 4);
+ // value = value | (value << 8);
+ // value = value | (value << 16);
+ // return CountPopulation32(0xffffffff XOR value);
+
+ Node* result =
+ Binop(wasm::kExprI32Ior, input,
+ Binop(wasm::kExprI32Shl, input, jsgraph()->Int32Constant(1)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(2)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(4)));
+
+ result = Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(8)));
+
+ result =
+ Binop(wasm::kExprI32Ior, result,
+ Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(16)));
+
+ result = BuildI32Popcnt(
+ Binop(wasm::kExprI32Xor, jsgraph()->Int32Constant(0xffffffff), result));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
+ //// Implement the following code as TF graph.
+ // value = value | (value << 1);
+ // value = value | (value << 2);
+ // value = value | (value << 4);
+ // value = value | (value << 8);
+ // value = value | (value << 16);
+ // value = value | (value << 32);
+ // return CountPopulation64(0xffffffffffffffff XOR value);
+
+ Node* result =
+ Binop(wasm::kExprI64Ior, input,
+ Binop(wasm::kExprI64Shl, input, jsgraph()->Int64Constant(1)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(2)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(4)));
+
+ result = Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(8)));
+
+ result =
+ Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(16)));
+
+ result =
+ Binop(wasm::kExprI64Ior, result,
+ Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(32)));
+
+ result = BuildI64Popcnt(Binop(
+ wasm::kExprI64Xor, jsgraph()->Int64Constant(0xffffffffffffffff), result));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+ // value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+ // value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+ // value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+ // value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+
+ Node* result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, input, jsgraph()->Int32Constant(1)),
+ jsgraph()->Int32Constant(0x55555555)),
+ Binop(wasm::kExprI32And, input, jsgraph()->Int32Constant(0x55555555)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(2)),
+ jsgraph()->Int32Constant(0x33333333)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x33333333)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(4)),
+ jsgraph()->Int32Constant(0x0f0f0f0f)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0f0f0f0f)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(8)),
+ jsgraph()->Int32Constant(0x00ff00ff)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x00ff00ff)));
+
+ result = Binop(
+ wasm::kExprI32Add,
+ Binop(wasm::kExprI32And,
+ Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(16)),
+ jsgraph()->Int32Constant(0x0000ffff)),
+ Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0000ffff)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
+ //// Implement the following code as a TF graph.
+ // value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
+ // value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
+ // value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
+ // value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
+ // value = ((value >> 16) & 0x0000ffff0000ffff) + (value &
+ // 0x0000ffff0000ffff);
+ // value = ((value >> 32) & 0x00000000ffffffff) + (value &
+ // 0x00000000ffffffff);
+
+ Node* result =
+ Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And,
+ Binop(wasm::kExprI64ShrU, input, jsgraph()->Int64Constant(1)),
+ jsgraph()->Int64Constant(0x5555555555555555)),
+ Binop(wasm::kExprI64And, input,
+ jsgraph()->Int64Constant(0x5555555555555555)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(2)),
+ jsgraph()->Int64Constant(0x3333333333333333)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x3333333333333333)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(4)),
+ jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(8)),
+ jsgraph()->Int64Constant(0x00ff00ff00ff00ff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x00ff00ff00ff00ff)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(16)),
+ jsgraph()->Int64Constant(0x0000ffff0000ffff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x0000ffff0000ffff)));
+
+ result = Binop(wasm::kExprI64Add,
+ Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
+ jsgraph()->Int64Constant(32)),
+ jsgraph()->Int64Constant(0x00000000ffffffff)),
+ Binop(wasm::kExprI64And, result,
+ jsgraph()->Int64Constant(0x00000000ffffffff)));
+
+ return result;
+}
+
+
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
+ const size_t params = sig->parameter_count();
+ const size_t extra = 2; // effect and control inputs.
+ const size_t count = 1 + params + extra;
+
+ // Reallocate the buffer to make space for extra inputs.
+ args = Realloc(args, count);
+
+ // Add effect and control inputs.
+ args[params + 1] = *effect_;
+ args[params + 2] = *control_;
+
+ const Operator* op = jsgraph()->common()->Call(
+ module_->GetWasmCallDescriptor(jsgraph()->zone(), sig));
+ Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+
+ *effect_ = call;
+ return call;
+}
+
+
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
+ DCHECK_NULL(args[0]);
+
+ // Add code object as constant.
+ args[0] = Constant(module_->GetFunctionCode(index));
+ wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
+
+ return BuildWasmCall(sig, args);
+}
+
+
// Emits an indirect call through the module's function table. args[0] holds
// the table index (key); it is bounds-checked, its signature is checked
// against |index|, and the code object is loaded from the table before
// delegating to BuildWasmCall.
Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
  DCHECK_NOT_NULL(args[0]);

  MachineOperatorBuilder* machine = jsgraph()->machine();

  // Compute the code object by loading it from the function table.
  Node* key = args[0];
  Node* table = FunctionTable();

  // Bounds check the index.
  int table_size = static_cast<int>(module_->FunctionTableSize());
  {
    Node* size = Int32Constant(static_cast<int>(table_size));
    // Unsigned comparison also rejects negative keys.
    Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
    trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
  }

  // Load signature from the table and check.
  // The table is a FixedArray; signatures are encoded as SMIs.
  // [sig1, sig2, sig3, ...., code1, code2, code3 ...]
  ElementAccess access = AccessBuilder::ForFixedArrayElement();
  const int fixed_offset = access.header_size - access.tag();
  {
    // NOTE(review): this load is not threaded onto the effect chain
    // (*effect_ is read but not updated) — presumably safe because the
    // table contents are immutable once built; confirm.
    Node* load_sig = graph()->NewNode(
        machine->Load(MachineType::AnyTagged()), table,
        graph()->NewNode(machine->Int32Add(),
                         graph()->NewNode(machine->Word32Shl(), key,
                                          Int32Constant(kPointerSizeLog2)),
                         Int32Constant(fixed_offset)),
        *effect_, *control_);
    Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
                                       jsgraph()->SmiConstant(index));
    trap_->AddTrapIfFalse(kTrapFuncSigMismatch, sig_match);
  }

  // Load code object from the table.
  // Code objects are stored in the second half of the table, after
  // table_size signature slots.
  int offset = fixed_offset + kPointerSize * table_size;
  Node* load_code = graph()->NewNode(
      machine->Load(MachineType::AnyTagged()), table,
      graph()->NewNode(machine->Int32Add(),
                       graph()->NewNode(machine->Word32Shl(), key,
                                        Int32Constant(kPointerSizeLog2)),
                       Int32Constant(offset)),
      *effect_, *control_);

  args[0] = load_code;
  wasm::FunctionSig* sig = module_->GetSignature(index);
  return BuildWasmCall(sig, args);
}
+
+
+Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
+ SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+ switch (type) {
+ case wasm::kAstI32:
+ return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ case wasm::kAstI64:
+ // TODO(titzer): i64->JS has no good solution right now. Using lower 32
+ // bits.
+ node =
+ graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), node);
+ return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ case wasm::kAstF32:
+ node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
+ node);
+ return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ case wasm::kAstF64:
+ return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ case wasm::kAstStmt:
+ return jsgraph()->UndefinedConstant();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+
// Converts a tagged JS value to a WASM value of |type|: performs a JS
// ToNumber, changes the representation to f64, then narrows to the
// requested WASM type.
Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
                               wasm::LocalType type) {
  // Do a JavaScript ToNumber.
  Node* num =
      graph()->NewNode(jsgraph()->javascript()->ToNumber(), node, context,
                       jsgraph()->EmptyFrameState(), *effect_, *control_);
  // The ToNumber call can throw, so it is both the new effect and the new
  // control dependency.
  *control_ = num;
  *effect_ = num;

  // Change representation.
  SimplifiedOperatorBuilder simplified(jsgraph()->zone());
  num = graph()->NewNode(simplified.ChangeTaggedToFloat64(), num);

  switch (type) {
    case wasm::kAstI32: {
      // JavaScript ToInt32 truncation semantics.
      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
                                 TruncationMode::kJavaScript),
                             num);
      break;
    }
    case wasm::kAstI64:
      // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
                                 TruncationMode::kJavaScript),
                             num);
      num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
      break;
    case wasm::kAstF32:
      num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
                             num);
      break;
    case wasm::kAstF64:
      // Already in f64 representation.
      break;
    case wasm::kAstStmt:
      // No value expected; produce a dummy zero.
      num = jsgraph()->Int32Constant(0);
      break;
    default:
      UNREACHABLE();
      return nullptr;
  }
  return num;
}
+
+
// Logical negation of an i32 boolean node.
Node* WasmGraphBuilder::Invert(Node* node) {
  return Unop(wasm::kExprBoolNot, node);
}
+
+
// Builds the graph for a wrapper that lets JavaScript call |wasm_code| with
// signature |sig|: converts each incoming JS argument to a WASM number,
// calls the WASM code, and converts the result back to a JS value.
void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
                                            wasm::FunctionSig* sig) {
  int params = static_cast<int>(sig->parameter_count());
  // Call inputs: code target + params + effect + control.
  int count = params + 3;
  Node** args = Buffer(count);

  // Build the start and the JS parameter nodes.
  Node* start = Start(params + 3);
  *control_ = start;
  *effect_ = start;
  // JS context is the last parameter.
  Node* context = graph()->NewNode(
      jsgraph()->common()->Parameter(params + 1, "context"), start);

  int pos = 0;
  args[pos++] = Constant(wasm_code);

  // Convert JS parameters to WASM numbers.
  for (int i = 0; i < params; i++) {
    Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
    args[pos++] = FromJS(param, context, sig->GetParam(i));
  }

  args[pos++] = *effect_;
  args[pos++] = *control_;

  // Call the WASM code.
  CallDescriptor* desc = module_->GetWasmCallDescriptor(jsgraph()->zone(), sig);
  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
  // A zero-return signature is converted as a statement, yielding
  // 'undefined'.
  Node* jsval =
      ToJS(call, context,
           sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
  Node* ret =
      graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);

  MergeControlToEnd(jsgraph(), ret);
}
+
+
// Builds the graph for a wrapper that lets WASM code call the JS
// |function| with signature |sig|: converts each WASM argument to a JS
// value, invokes the function (directly on exact arity match, otherwise via
// the generic Call builtin), and converts the result back to a WASM number.
void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
                                            wasm::FunctionSig* sig) {
  int js_count = function->shared()->internal_formal_parameter_count();
  int wasm_count = static_cast<int>(sig->parameter_count());

  // Build the start and the parameter nodes.
  Isolate* isolate = jsgraph()->isolate();
  CallDescriptor* desc;
  Node* start = Start(wasm_count + 3);
  *effect_ = start;
  *control_ = start;
  // JS context is the last parameter.
  Node* context = Constant(Handle<Context>(function->context(), isolate));
  // Worst-case input count: stub + function + arg count + receiver + args
  // + new.target + context (plus effect/control appended below).
  Node** args = Buffer(wasm_count + 7);

  // The two calling conventions place the argument count differently and
  // differ on whether an explicit new.target is passed.
  bool arg_count_before_args = false;
  bool add_new_target_undefined = false;

  int pos = 0;
  if (js_count == wasm_count) {
    // exact arity match, just call the function directly.
    desc = Linkage::GetJSCallDescriptor(graph()->zone(), false, wasm_count + 1,
                                        CallDescriptor::kNoFlags);
    arg_count_before_args = false;
    add_new_target_undefined = true;
  } else {
    // Use the Call builtin.
    Callable callable = CodeFactory::Call(isolate);
    args[pos++] = jsgraph()->HeapConstant(callable.code());
    desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
                                          callable.descriptor(), wasm_count + 1,
                                          CallDescriptor::kNoFlags);
    arg_count_before_args = true;
  }

  args[pos++] = jsgraph()->Constant(function);  // JS function.
  if (arg_count_before_args) {
    args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
  }
  // JS receiver.
  Handle<Object> global(function->context()->global_object(), isolate);
  args[pos++] = jsgraph()->Constant(global);

  // Convert WASM numbers to JS values.
  for (int i = 0; i < wasm_count; i++) {
    Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
    args[pos++] = ToJS(param, context, sig->GetParam(i));
  }

  if (add_new_target_undefined) {
    args[pos++] = jsgraph()->UndefinedConstant();  // new target
  }

  if (!arg_count_before_args) {
    args[pos++] = jsgraph()->Int32Constant(wasm_count);  // argument count
  }
  args[pos++] = context;
  args[pos++] = *effect_;
  args[pos++] = *control_;

  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);

  // Convert the return value back.
  // A zero-return signature is converted as a statement.
  Node* val =
      FromJS(call, context,
             sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
  Node* ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);

  MergeControlToEnd(jsgraph(), ret);
}
+
+
+Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+ if (offset == 0) {
+ if (!mem_buffer_)
+ mem_buffer_ = jsgraph()->IntPtrConstant(module_->mem_start);
+ return mem_buffer_;
+ } else {
+ return jsgraph()->IntPtrConstant(module_->mem_start + offset);
+ }
+}
+
+
+Node* WasmGraphBuilder::MemSize(uint32_t offset) {
+ int32_t size = static_cast<int>(module_->mem_end - module_->mem_start);
+ if (offset == 0) {
+ if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
+ return mem_size_;
+ } else {
+ return jsgraph()->Int32Constant(size + offset);
+ }
+}
+
+
+Node* WasmGraphBuilder::FunctionTable() {
+ if (!function_table_) {
+ DCHECK(!module_->function_table.is_null());
+ function_table_ = jsgraph()->Constant(module_->function_table);
+ }
+ return function_table_;
+}
+
+
+Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
+ MachineType mem_type = module_->GetGlobalType(index);
+ Node* addr = jsgraph()->IntPtrConstant(
+ module_->globals_area + module_->module->globals->at(index).offset);
+ const Operator* op = jsgraph()->machine()->Load(mem_type);
+ Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
+ *control_);
+ *effect_ = node;
+ return node;
+}
+
+
+Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
+ MachineType mem_type = module_->GetGlobalType(index);
+ Node* addr = jsgraph()->IntPtrConstant(
+ module_->globals_area + module_->module->globals->at(index).offset);
+ const Operator* op = jsgraph()->machine()->Store(
+ StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
+ Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
+ *effect_, *control_);
+ *effect_ = node;
+ return node;
+}
+
+
+void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+ uint32_t offset) {
+ // TODO(turbofan): fold bounds checks for constant indexes.
+ CHECK_GE(module_->mem_end, module_->mem_start);
+ ptrdiff_t size = module_->mem_end - module_->mem_start;
+ byte memsize = wasm::WasmOpcodes::MemSize(memtype);
+ Node* cond;
+ if (static_cast<ptrdiff_t>(offset) >= size ||
+ static_cast<ptrdiff_t>(offset + memsize) > size) {
+ // The access will always throw.
+ cond = jsgraph()->Int32Constant(0);
+ } else {
+ // Check against the limit.
+ size_t limit = size - offset - memsize;
+ CHECK(limit <= kMaxUInt32);
+ cond = graph()->NewNode(
+ jsgraph()->machine()->Uint32LessThanOrEqual(), index,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
+ }
+
+ trap_->AddTrapIfFalse(kTrapMemOutOfBounds, cond);
+}
+
+
+Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+ Node* index, uint32_t offset) {
+ Node* load;
+
+ if (module_ && module_->asm_js) {
+ // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
+ DCHECK_EQ(0, offset);
+ const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
+ load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
+ *control_);
+ } else {
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset);
+ load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
+ }
+
+ *effect_ = load;
+
+ if (type == wasm::kAstI64 &&
+ ElementSizeLog2Of(memtype.representation()) < 3) {
+ // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
+ if (memtype.IsSigned()) {
+ // sign extend
+ load = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
+ } else {
+ // zero extend
+ load =
+ graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), load);
+ }
+ }
+
+ return load;
+}
+
+
+Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
+ uint32_t offset, Node* val) {
+ Node* store;
+ if (module_ && module_->asm_js) {
+ // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+ DCHECK_EQ(0, offset);
+ const Operator* op =
+ jsgraph()->machine()->CheckedStore(memtype.representation());
+ store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val, *effect_,
+ *control_);
+ } else {
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset);
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store =
+ graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
+ }
+ *effect_ = store;
+ return store;
+}
+
+
+void WasmGraphBuilder::PrintDebugName(Node* node) {
+ PrintF("#%d:%s", node->id(), node->op()->mnemonic());
+}
+
+
+Node* WasmGraphBuilder::String(const char* string) {
+ return jsgraph()->Constant(
+ jsgraph()->isolate()->factory()->NewStringFromAsciiChecked(string));
+}
+
+
+Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
+
+
+Handle<JSFunction> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
+ Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
+ wasm::WasmFunction* func = &module->module->functions->at(index);
+
+ //----------------------------------------------------------------------------
+ // Create the JSFunction object.
+ //----------------------------------------------------------------------------
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
+ int params = static_cast<int>(func->sig->parameter_count());
+ shared->set_length(params);
+ shared->set_internal_formal_parameter_count(1 + params);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->wasm_function_map(), name, MaybeHandle<Code>());
+ function->SetInternalField(0, *module_object);
+ function->set_shared(*shared);
+
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ JSOperatorBuilder javascript(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.set_module(module);
+ builder.BuildJSToWasmWrapper(wasm_code, func->sig);
+
+ //----------------------------------------------------------------------------
+ // Run the compilation pipeline.
+ //----------------------------------------------------------------------------
+ {
+ // Changes lowering requires types.
+ Typer typer(isolate, &graph);
+ NodeVector roots(&zone);
+ jsgraph.GetCachedNodes(&roots);
+ typer.Run(roots);
+
+ // Run generic and change lowering.
+ JSGenericLowering generic(true, &jsgraph);
+ ChangeLowering changes(&jsgraph);
+ GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
+ graph_reducer.AddReducer(&changes);
+ graph_reducer.AddReducer(&generic);
+ graph_reducer.ReduceGraph();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ int params = static_cast<int>(
+ module->GetFunctionSignature(index)->parameter_count());
+ CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
+ &zone, false, params + 1, CallDescriptor::kNoFlags);
+ CompilationInfo info("js-to-wasm", isolate, &zone);
+ // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the wrapper code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+ Vector<char> buffer;
+ const char* name = "";
+ if (func->name_offset > 0) {
+ const byte* ptr = module->module->module_start + func->name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "JS->WASM function wrapper #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ // Set the JSFunction's machine code.
+ function->set_code(*code);
+ }
+ return function;
+}
+
+
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<JSFunction> function,
+ uint32_t index) {
+ wasm::WasmFunction* func = &module->module->functions->at(index);
+
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ JSOperatorBuilder javascript(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.set_module(module);
+ builder.BuildWasmToJSWrapper(function, func->sig);
+
+ Handle<Code> code = Handle<Code>::null();
+ {
+ // Changes lowering requires types.
+ Typer typer(isolate, &graph);
+ NodeVector roots(&zone);
+ jsgraph.GetCachedNodes(&roots);
+ typer.Run(roots);
+
+ // Run generic and change lowering.
+ JSGenericLowering generic(true, &jsgraph);
+ ChangeLowering changes(&jsgraph);
+ GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
+ graph_reducer.AddReducer(&changes);
+ graph_reducer.AddReducer(&generic);
+ graph_reducer.ReduceGraph();
+
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming = module->GetWasmCallDescriptor(&zone, func->sig);
+ CompilationInfo info("wasm-to-js", isolate, &zone);
+ // TODO(titzer): this is technically a WASM wrapper, not a wasm function.
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the wrapper code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+ Vector<char> buffer;
+ const char* name = "";
+ if (func->name_offset > 0) {
+ const byte* ptr = module->module->module_start + func->name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "WASM->JS function wrapper #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ }
+ return code;
+}
+
+
+// Helper function to compile a single function.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction& function,
+ int index) {
+ if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
+ // TODO(titzer): clean me up a bit.
+ OFStream os(stdout);
+ os << "Compiling WASM function #" << index << ":";
+ if (function.name_offset > 0) {
+ os << module_env->module->GetName(function.name_offset);
+ }
+ os << std::endl;
+ }
+ // Initialize the function environment for decoding.
+ wasm::FunctionEnv env;
+ env.module = module_env;
+ env.sig = function.sig;
+ env.local_int32_count = function.local_int32_count;
+ env.local_int64_count = function.local_int64_count;
+ env.local_float32_count = function.local_float32_count;
+ env.local_float64_count = function.local_float64_count;
+ env.SumLocals();
+
+ // Create a TF graph during decoding.
+ Zone zone;
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags());
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
+ wasm::TreeResult result = wasm::BuildTFGraph(
+ &builder, &env, // --
+ module_env->module->module_start, // --
+ module_env->module->module_start + function.code_start_offset, // --
+ module_env->module->module_start + function.code_end_offset); // --
+
+ if (result.failed()) {
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compilation failed: " << result << std::endl;
+ }
+ // Add the function name as additional context for the error.
+ Vector<char> buffer;
+ SNPrintF(buffer, "Compiling WASM function #%d:%s failed:", index,
+ module_env->module->GetName(function.name_offset));
+ thrower.Failed(buffer.start(), result);
+ return Handle<Code>::null();
+ }
+
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor = const_cast<CallDescriptor*>(
+ module_env->GetWasmCallDescriptor(&zone, function.sig));
+ CompilationInfo info("wasm", isolate, &zone);
+ info.set_output_code_kind(Code::WASM_FUNCTION);
+ Handle<Code> code =
+ Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
+
+#ifdef ENABLE_DISASSEMBLER
+ // Disassemble the code for debugging.
+ if (!code.is_null() && FLAG_print_opt_code) {
+ Vector<char> buffer;
+ const char* name = "";
+ if (function.name_offset > 0) {
+ const byte* ptr = module_env->module->module_start + function.name_offset;
+ name = reinterpret_cast<const char*>(ptr);
+ }
+ SNPrintF(buffer, "WASM function #%d:%s", index, name);
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
+#endif
+ return code;
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
new file mode 100644
index 0000000000..1a17a832e4
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -0,0 +1,190 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_WASM_COMPILER_H_
+#define V8_COMPILER_WASM_COMPILER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+// Forward declarations for some compiler data structures.
+class Node;
+class JSGraph;
+class Graph;
+}
+
+namespace wasm {
+// Forward declarations for some WASM data structures.
+struct ModuleEnv;
+struct WasmFunction;
+class ErrorThrower;
+
+// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
+typedef compiler::Node TFNode;
+typedef compiler::JSGraph TFGraph;
+}
+
+namespace compiler {
+// Compiles a single function, producing a code object.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction& function, int index);
+
+// Wraps a JS function, producing a code object that can be called from WASM.
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<JSFunction> function,
+ uint32_t index);
+
+// Wraps a given wasm code object, producing a JSFunction that can be called
+// from JavaScript.
+Handle<JSFunction> CompileJSToWasmWrapper(
+ Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
+ Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
+
+// Abstracts details of building TurboFan graph nodes for WASM to separate
+// the WASM decoder from the internal details of TurboFan.
+class WasmTrapHelper;
+class WasmGraphBuilder {
+ public:
+ WasmGraphBuilder(Zone* z, JSGraph* g, wasm::FunctionSig* function_signature);
+
+ Node** Buffer(size_t count) {
+ if (count > cur_bufsize_) {
+ size_t new_size = count + cur_bufsize_ + 5;
+ cur_buffer_ =
+ reinterpret_cast<Node**>(zone_->New(new_size * sizeof(Node*)));
+ cur_bufsize_ = new_size;
+ }
+ return cur_buffer_;
+ }
+
+ //-----------------------------------------------------------------------
+ // Operations independent of {control} or {effect}.
+ //-----------------------------------------------------------------------
+ Node* Error();
+ Node* Start(unsigned params);
+ Node* Param(unsigned index, wasm::LocalType type);
+ Node* Loop(Node* entry);
+ Node* Terminate(Node* effect, Node* control);
+ Node* Merge(unsigned count, Node** controls);
+ Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+ Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* Float32Constant(float value);
+ Node* Float64Constant(double value);
+ Node* Constant(Handle<Object> value);
+ Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right);
+ Node* Unop(wasm::WasmOpcode opcode, Node* input);
+ unsigned InputCount(Node* node);
+ bool IsPhiWithMerge(Node* phi, Node* merge);
+ void AppendToMerge(Node* merge, Node* from);
+ void AppendToPhi(Node* merge, Node* phi, Node* from);
+
+ //-----------------------------------------------------------------------
+ // Operations that read and/or write {control} and {effect}.
+ //-----------------------------------------------------------------------
+ Node* Branch(Node* cond, Node** true_node, Node** false_node);
+ Node* Switch(unsigned count, Node* key);
+ Node* IfValue(int32_t value, Node* sw);
+ Node* IfDefault(Node* sw);
+ Node* Return(unsigned count, Node** vals);
+ Node* ReturnVoid();
+ Node* Unreachable();
+
+ Node* CallDirect(uint32_t index, Node** args);
+ Node* CallIndirect(uint32_t index, Node** args);
+ void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
+ void BuildWasmToJSWrapper(Handle<JSFunction> function,
+ wasm::FunctionSig* sig);
+ Node* ToJS(Node* node, Node* context, wasm::LocalType type);
+ Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+ Node* Invert(Node* node);
+ Node* FunctionTable();
+
+ //-----------------------------------------------------------------------
+ // Operations that concern the linear memory.
+ //-----------------------------------------------------------------------
+ Node* MemSize(uint32_t offset);
+ Node* LoadGlobal(uint32_t index);
+ Node* StoreGlobal(uint32_t index, Node* val);
+ Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+ uint32_t offset);
+ Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val);
+
+ static void PrintDebugName(Node* node);
+
+ Node* Control() { return *control_; }
+ Node* Effect() { return *effect_; }
+
+ void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
+
+ void set_control_ptr(Node** control) { this->control_ = control; }
+
+ void set_effect_ptr(Node** effect) { this->effect_ = effect; }
+
+ wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+
+ private:
+ static const int kDefaultBufferSize = 16;
+ friend class WasmTrapHelper;
+
+ Zone* zone_;
+ JSGraph* jsgraph_;
+ wasm::ModuleEnv* module_;
+ Node* mem_buffer_;
+ Node* mem_size_;
+ Node* function_table_;
+ Node** control_;
+ Node** effect_;
+ Node** cur_buffer_;
+ size_t cur_bufsize_;
+ Node* def_buffer_[kDefaultBufferSize];
+
+ WasmTrapHelper* trap_;
+ wasm::FunctionSig* function_signature_;
+
+ // Internal helper methods.
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph();
+
+ Node* String(const char* string);
+ Node* MemBuffer(uint32_t offset);
+ void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+ Node* BuildF32Neg(Node* input);
+ Node* BuildF64Neg(Node* input);
+ Node* BuildF32CopySign(Node* left, Node* right);
+ Node* BuildF64CopySign(Node* left, Node* right);
+ Node* BuildF32Min(Node* left, Node* right);
+ Node* BuildF32Max(Node* left, Node* right);
+ Node* BuildF64Min(Node* left, Node* right);
+ Node* BuildF64Max(Node* left, Node* right);
+ Node* BuildI32SConvertF32(Node* input);
+ Node* BuildI32SConvertF64(Node* input);
+ Node* BuildI32UConvertF32(Node* input);
+ Node* BuildI32UConvertF64(Node* input);
+ Node* BuildI32Ctz(Node* input);
+ Node* BuildI32Popcnt(Node* input);
+ Node* BuildI64Ctz(Node* input);
+ Node* BuildI64Popcnt(Node* input);
+
+ Node** Realloc(Node** buffer, size_t count) {
+ Node** buf = Buffer(count);
+ if (buf != buffer) memcpy(buf, buffer, count * sizeof(Node*));
+ return buf;
+ }
+};
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_COMPILER_H_
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
new file mode 100644
index 0000000000..7419a5c31f
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -0,0 +1,282 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/wasm/wasm-module.h"
+
+#include "src/compiler/linkage.h"
+
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+// TODO(titzer): this should not be in the WASM namespace.
+namespace wasm {
+
+using compiler::LocationSignature;
+using compiler::CallDescriptor;
+using compiler::LinkageLocation;
+
+namespace {
+MachineType MachineTypeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return MachineType::Int32();
+ case kAstI64:
+ return MachineType::Int64();
+ case kAstF64:
+ return MachineType::Float64();
+ case kAstF32:
+ return MachineType::Float32();
+ default:
+ UNREACHABLE();
+ return MachineType::AnyTagged();
+ }
+}
+
+
+// Platform-specific configuration for the WASM calling convention.
+LinkageLocation regloc(Register reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+LinkageLocation regloc(DoubleRegister reg) {
+ return LinkageLocation::ForRegister(reg.code());
+}
+
+
+LinkageLocation stackloc(int i) {
+ return LinkageLocation::ForCallerFrameSlot(i);
+}
+
+
+#if V8_TARGET_ARCH_IA32
+// ===========================================================================
+// == ia32 ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_RETURN_REGISTERS eax, edx
+#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
+#define FP_RETURN_REGISTERS xmm1, xmm2
+
+#elif V8_TARGET_ARCH_X64
+// ===========================================================================
+// == x64 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS rax, rdx, rcx, rbx, rsi, rdi
+#define GP_RETURN_REGISTERS rax, rdx
+#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
+#define FP_RETURN_REGISTERS xmm1, xmm2
+
+#elif V8_TARGET_ARCH_X87
+// ===========================================================================
+// == x87 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_RETURN_REGISTERS eax, edx
+#define FP_RETURN_REGISTERS stX_0
+
+#elif V8_TARGET_ARCH_ARM
+// ===========================================================================
+// == arm ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r0, r1, r2, r3
+#define GP_RETURN_REGISTERS r0, r1
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#elif V8_TARGET_ARCH_ARM64
+// ===========================================================================
+// == arm64 ====================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define GP_RETURN_REGISTERS x0, x1
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#elif V8_TARGET_ARCH_MIPS
+// ===========================================================================
+// == mips ===================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS a0, a1, a2, a3
+#define GP_RETURN_REGISTERS v0, v1
+#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
+#define FP_RETURN_REGISTERS f2, f4
+
+#elif V8_TARGET_ARCH_MIPS64
+// ===========================================================================
+// == mips64 =================================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define GP_RETURN_REGISTERS v0, v1
+#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
+#define FP_RETURN_REGISTERS f2, f4
+
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+// ===========================================================================
+// == ppc & ppc64 ============================================================
+// ===========================================================================
+#define GP_PARAM_REGISTERS r3, r4, r5, r6, r7, r8, r9, r10
+#define GP_RETURN_REGISTERS r3, r4
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTERS d0, d1
+
+#else
+// ===========================================================================
+// == unknown ================================================================
+// ===========================================================================
+// Don't define anything. We'll just always use the stack.
+#endif
+
+
+// Helper for allocating either a GP or FP reg, or the next stack slot.
+struct Allocator {
+ Allocator(const Register* gp, int gpc, const DoubleRegister* fp, int fpc)
+ : gp_count(gpc),
+ gp_offset(0),
+ gp_regs(gp),
+ fp_count(fpc),
+ fp_offset(0),
+ fp_regs(fp),
+ stack_offset(0) {}
+
+ int gp_count;
+ int gp_offset;
+ const Register* gp_regs;
+
+ int fp_count;
+ int fp_offset;
+ const DoubleRegister* fp_regs;
+
+ int stack_offset;
+
+ LinkageLocation Next(LocalType type) {
+ if (IsFloatingPoint(type)) {
+ // Allocate a floating point register/stack location.
+ if (fp_offset < fp_count) {
+ return regloc(fp_regs[fp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += Words(type);
+ return stackloc(offset);
+ }
+ } else {
+ // Allocate a general purpose register/stack location.
+ if (gp_offset < gp_count) {
+ return regloc(gp_regs[gp_offset++]);
+ } else {
+ int offset = -1 - stack_offset;
+ stack_offset += Words(type);
+ return stackloc(offset);
+ }
+ }
+ }
+ bool IsFloatingPoint(LocalType type) {
+ return type == kAstF32 || type == kAstF64;
+ }
+ int Words(LocalType type) {
+ // The code generation for pushing parameters on the stack does not
+ // distinguish between float32 and float64. Therefore also float32 needs
+ // two words.
+ if (kPointerSize < 8 &&
+ (type == kAstI64 || type == kAstF64 || type == kAstF32)) {
+ return 2;
+ }
+ return 1;
+ }
+};
+} // namespace
+
+
+// General code uses the above configuration data.
+CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
+ FunctionSig* fsig) {
+ MachineSignature::Builder msig(zone, fsig->return_count(),
+ fsig->parameter_count());
+ LocationSignature::Builder locations(zone, fsig->return_count(),
+ fsig->parameter_count());
+
+#ifdef GP_RETURN_REGISTERS
+ static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
+ static const int kGPReturnRegistersCount =
+ static_cast<int>(arraysize(kGPReturnRegisters));
+#else
+ static const Register* kGPReturnRegisters = nullptr;
+ static const int kGPReturnRegistersCount = 0;
+#endif
+
+#ifdef FP_RETURN_REGISTERS
+ static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
+ static const int kFPReturnRegistersCount =
+ static_cast<int>(arraysize(kFPReturnRegisters));
+#else
+ static const DoubleRegister* kFPReturnRegisters = nullptr;
+ static const int kFPReturnRegistersCount = 0;
+#endif
+
+ Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
+ kFPReturnRegisters, kFPReturnRegistersCount);
+
+ // Add return location(s).
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ LocalType ret = fsig->GetReturn(i);
+ msig.AddReturn(MachineTypeFor(ret));
+ locations.AddReturn(rets.Next(ret));
+ }
+
+#ifdef GP_PARAM_REGISTERS
+ static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
+ static const int kGPParamRegistersCount =
+ static_cast<int>(arraysize(kGPParamRegisters));
+#else
+ static const Register* kGPParamRegisters = nullptr;
+ static const int kGPParamRegistersCount = 0;
+#endif
+
+#ifdef FP_PARAM_REGISTERS
+ static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
+ static const int kFPParamRegistersCount =
+ static_cast<int>(arraysize(kFPParamRegisters));
+#else
+ static const DoubleRegister* kFPParamRegisters = nullptr;
+ static const int kFPParamRegistersCount = 0;
+#endif
+
+ Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
+ kFPParamRegistersCount);
+
+ // Add register and/or stack parameter(s).
+ const int parameter_count = static_cast<int>(fsig->parameter_count());
+ for (int i = 0; i < parameter_count; i++) {
+ LocalType param = fsig->GetParam(i);
+ msig.AddParam(MachineTypeFor(param));
+ locations.AddParam(params.Next(param));
+ }
+
+ const RegList kCalleeSaveRegisters = 0;
+ const RegList kCalleeSaveFPRegisters = 0;
+
+ // The target for WASM calls is always a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig.Build(), // machine_sig
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kUseNativeStack, // flags
+ "c-call");
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index eab8fe3233..be406fbad2 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -49,8 +49,8 @@ class X64OperandConverter : public InstructionOperandConverter {
Operand ToOperand(InstructionOperand* op, int extra = 0) {
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return Operand(offset.from_stack_pointer() ? rsp : rbp,
offset.offset() + extra);
}
@@ -573,13 +573,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} while (false)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ movq(rsp, rbp);
- __ popq(rbp);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ movq(rbp, MemOperand(rbp, 0));
}
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -599,10 +611,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(reg);
}
RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -611,6 +625,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -622,6 +637,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(equal, kWrongFunctionContext);
}
__ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
}
@@ -632,8 +648,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
@@ -642,10 +660,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters);
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
@@ -655,6 +678,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -667,12 +692,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -912,6 +940,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEFloat32ToFloat64:
ASSEMBLE_SSE_UNOP(Cvtss2sd);
break;
+ case kSSEFloat32Round: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ break;
+ }
case kSSEFloat64Cmp:
ASSEMBLE_SSE_BINOP(Ucomisd);
break;
@@ -1011,6 +1046,150 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ AssertZeroExtended(i.OutputRegister());
break;
}
+ case kSSEFloat32ToInt64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ Label done;
+ Label fail;
+ __ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
+ }
+ // If the input is NaN, then the conversion fails.
+ __ j(parity_even, &fail);
+ // If the input is INT64_MIN, then the conversion succeeds.
+ __ j(equal, &done);
+ __ cmpq(i.OutputRegister(0), Immediate(1));
+ // If the conversion results in INT64_MIN, but the input was not
+ // INT64_MIN, then the conversion fails.
+ __ j(no_overflow, &done);
+ __ bind(&fail);
+ __ Set(i.OutputRegister(1), 0);
+ __ bind(&done);
+ }
+ break;
+ case kSSEFloat64ToInt64:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
+ }
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ Label done;
+ Label fail;
+ __ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
+ }
+ // If the input is NaN, then the conversion fails.
+ __ j(parity_even, &fail);
+ // If the input is INT64_MIN, then the conversion succeeds.
+ __ j(equal, &done);
+ __ cmpq(i.OutputRegister(0), Immediate(1));
+ // If the conversion results in INT64_MIN, but the input was not
+ // INT64_MIN, then the conversion fails.
+ __ j(no_overflow, &done);
+ __ bind(&fail);
+ __ Set(i.OutputRegister(1), 0);
+ __ bind(&done);
+ }
+ break;
+ case kSSEFloat32ToUint64: {
+ Label done;
+ Label success;
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 0);
+ }
+ // There does not exist a Float32ToUint64 instruction, so we have to use
+ // the Float32ToInt64 instruction.
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ // Check if the result of the Float32ToInt64 conversion is positive, we
+ // are already done.
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+ // The result of the first conversion was negative, which means that the
+    // input value was not within the positive int64 range. We subtract 2^63
+ // and convert it again to see if it is within the uint64 range.
+ __ Move(kScratchDoubleReg, -9223372036854775808.0f);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ addss(kScratchDoubleReg, i.InputOperand(0));
+ }
+ __ Cvttss2siq(i.OutputRegister(), kScratchDoubleReg);
+ __ testq(i.OutputRegister(), i.OutputRegister());
+    // The only possible negative value here is 0x8000000000000000, which is
+ // used on x64 to indicate an integer overflow.
+ __ j(negative, &done);
+ // The input value is within uint64 range and the second conversion worked
+ // successfully, but we still have to undo the subtraction we did
+ // earlier.
+ __ Set(kScratchRegister, 0x8000000000000000);
+ __ orq(i.OutputRegister(), kScratchRegister);
+ __ bind(&success);
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ }
+ __ bind(&done);
+ break;
+ }
+ case kSSEFloat64ToUint64: {
+ Label done;
+ Label success;
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 0);
+ }
+ // There does not exist a Float64ToUint64 instruction, so we have to use
+ // the Float64ToInt64 instruction.
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ // Check if the result of the Float64ToInt64 conversion is positive, we
+ // are already done.
+ __ testq(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+ // The result of the first conversion was negative, which means that the
+    // input value was not within the positive int64 range. We subtract 2^63
+ // and convert it again to see if it is within the uint64 range.
+ __ Move(kScratchDoubleReg, -9223372036854775808.0);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
+ } else {
+ __ addsd(kScratchDoubleReg, i.InputOperand(0));
+ }
+ __ Cvttsd2siq(i.OutputRegister(), kScratchDoubleReg);
+ __ testq(i.OutputRegister(), i.OutputRegister());
+    // The only possible negative value here is 0x8000000000000000, which is
+ // used on x64 to indicate an integer overflow.
+ __ j(negative, &done);
+ // The input value is within uint64 range and the second conversion worked
+ // successfully, but we still have to undo the subtraction we did
+ // earlier.
+ __ Set(kScratchRegister, 0x8000000000000000);
+ __ orq(i.OutputRegister(), kScratchRegister);
+ __ bind(&success);
+ if (instr->OutputCount() > 1) {
+ __ Set(i.OutputRegister(1), 1);
+ }
+ __ bind(&done);
+ break;
+ }
case kSSEInt32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
__ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
@@ -1032,6 +1211,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Cvtqsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
+ case kSSEUint64ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movq(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqui2ss(i.OutputDoubleRegister(), kScratchRegister,
+ i.TempRegister(0));
+ break;
+ case kSSEUint64ToFloat64:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movq(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqui2sd(i.OutputDoubleRegister(), kScratchRegister,
+ i.TempRegister(0));
+ break;
case kSSEUint32ToFloat64:
if (instr->InputAt(0)->IsRegister()) {
__ movl(kScratchRegister, i.InputRegister(0));
@@ -1357,15 +1554,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Push:
if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else if (instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
__ pushq(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
}
break;
@@ -1604,17 +1805,17 @@ static const int kQuadWordSize = 16;
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1696,10 +1897,10 @@ void CodeGenerator::AssembleReturn() {
__ addp(rsp, Immediate(stack_size));
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ movq(rsp, rbp); // Move stack pointer back to frame pointer.
__ popq(rbp); // Pop caller's frame pointer.
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -1719,7 +1920,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- X64OperandConverter g(this, NULL);
+ X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1840,16 +2041,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- X64OperandConverter g(this, NULL);
+ X64OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
- __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+ Register src = g.ToRegister(source);
+ Register dst = g.ToRegister(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
} else if (source->IsRegister() && destination->IsStackSlot()) {
Register src = g.ToRegister(source);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
Operand dst = g.ToOperand(destination);
- __ xchgq(src, dst);
+ __ movq(src, dst);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() &&
destination->IsDoubleStackSlot())) {
@@ -1858,8 +2068,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
__ movq(tmp, dst);
- __ xchgq(tmp, src);
- __ movq(dst, tmp);
+ __ pushq(src);
+ frame_access_state()->IncreaseSPDelta(1);
+ src = g.ToOperand(source);
+ __ movq(src, tmp);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index a9165cfaca..8e8e7652c3 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -63,6 +63,7 @@ namespace compiler {
V(SSEFloat32Max) \
V(SSEFloat32Min) \
V(SSEFloat32ToFloat64) \
+ V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
@@ -78,9 +79,15 @@ namespace compiler {
V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
V(SSEInt64ToFloat32) \
V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
new file mode 100644
index 0000000000..f8537c879c
--- /dev/null
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -0,0 +1,182 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return true; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ switch (instr->arch_opcode()) {
+ case kX64Add:
+ case kX64Add32:
+ case kX64And:
+ case kX64And32:
+ case kX64Cmp:
+ case kX64Cmp32:
+ case kX64Test:
+ case kX64Test32:
+ case kX64Or:
+ case kX64Or32:
+ case kX64Xor:
+ case kX64Xor32:
+ case kX64Sub:
+ case kX64Sub32:
+ case kX64Imul:
+ case kX64Imul32:
+ case kX64ImulHigh32:
+ case kX64UmulHigh32:
+ case kX64Idiv:
+ case kX64Idiv32:
+ case kX64Udiv:
+ case kX64Udiv32:
+ case kX64Not:
+ case kX64Not32:
+ case kX64Neg:
+ case kX64Neg32:
+ case kX64Shl:
+ case kX64Shl32:
+ case kX64Shr:
+ case kX64Shr32:
+ case kX64Sar:
+ case kX64Sar32:
+ case kX64Ror:
+ case kX64Ror32:
+ case kX64Lzcnt:
+ case kX64Lzcnt32:
+ case kX64Tzcnt:
+ case kX64Tzcnt32:
+ case kX64Popcnt:
+ case kX64Popcnt32:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Mul:
+ case kSSEFloat32Div:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat32Round:
+ case kSSEFloat32Max:
+ case kSSEFloat32Min:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Mul:
+ case kSSEFloat64Div:
+ case kSSEFloat64Mod:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ case kSSEFloat64Sqrt:
+ case kSSEFloat64Round:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ case kSSEFloat64ToInt64:
+ case kSSEFloat32ToInt64:
+ case kSSEFloat64ToUint64:
+ case kSSEFloat32ToUint64:
+ case kSSEInt32ToFloat64:
+ case kSSEInt64ToFloat32:
+ case kSSEInt64ToFloat64:
+ case kSSEUint64ToFloat32:
+ case kSSEUint64ToFloat64:
+ case kSSEUint32ToFloat64:
+ case kSSEFloat64ExtractLowWord32:
+ case kSSEFloat64ExtractHighWord32:
+ case kSSEFloat64InsertLowWord32:
+ case kSSEFloat64InsertHighWord32:
+ case kSSEFloat64LoadLowWord32:
+ case kAVXFloat32Cmp:
+ case kAVXFloat32Add:
+ case kAVXFloat32Sub:
+ case kAVXFloat32Mul:
+ case kAVXFloat32Div:
+ case kAVXFloat32Max:
+ case kAVXFloat32Min:
+ case kAVXFloat64Cmp:
+ case kAVXFloat64Add:
+ case kAVXFloat64Sub:
+ case kAVXFloat64Mul:
+ case kAVXFloat64Div:
+ case kAVXFloat64Max:
+ case kAVXFloat64Min:
+ case kAVXFloat64Abs:
+ case kAVXFloat64Neg:
+ case kAVXFloat32Abs:
+ case kAVXFloat32Neg:
+ case kX64BitcastFI:
+ case kX64BitcastDL:
+ case kX64BitcastIF:
+ case kX64BitcastLD:
+ case kX64Lea32:
+ case kX64Lea:
+ case kX64Dec32:
+ case kX64Inc32:
+ return (instr->addressing_mode() == kMode_None)
+ ? kNoOpcodeFlags
+ : kIsLoadOperation | kHasSideEffect;
+
+ case kX64Movsxbl:
+ case kX64Movzxbl:
+ case kX64Movsxwl:
+ case kX64Movzxwl:
+ case kX64Movsxlq:
+ DCHECK(instr->InputCount() >= 1);
+ return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
+ : kIsLoadOperation;
+
+ case kX64Movb:
+ case kX64Movw:
+ return kHasSideEffect;
+
+ case kX64Movl:
+ if (instr->HasOutput()) {
+ DCHECK(instr->InputCount() >= 1);
+ return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
+ : kIsLoadOperation;
+ } else {
+ return kHasSideEffect;
+ }
+
+ case kX64Movq:
+ case kX64Movsd:
+ case kX64Movss:
+ return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
+
+ case kX64StackCheck:
+ return kIsLoadOperation;
+
+ case kX64Push:
+ case kX64Poke:
+ return kHasSideEffect;
+
+#define CASE(Name) case k##Name:
+ COMMON_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+ // Already covered in architecture independent code.
+ UNREACHABLE();
+ }
+
+ UNREACHABLE();
+ return kNoOpcodeFlags;
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ // TODO(all): Add instruction cost modeling.
+ return 1;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 0f1dc816a1..c47a42eefe 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -41,12 +41,12 @@ class X64OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
- if (displacement != NULL) {
+ if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
@@ -57,7 +57,7 @@ class X64OperandGenerator final : public OperandGenerator {
mode = kMRn_modes[scale_exponent];
}
} else {
- if (displacement == NULL) {
+ if (displacement == nullptr) {
mode = kMode_MR;
} else {
inputs[(*input_count)++] = UseImmediate(displacement);
@@ -65,10 +65,10 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
} else {
- DCHECK(index != NULL);
+ DCHECK_NOT_NULL(index);
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
- if (displacement != NULL) {
+ if (displacement != nullptr) {
inputs[(*input_count)++] = UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
@@ -91,7 +91,7 @@ class X64OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement64Matcher m(operand, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -108,33 +108,32 @@ class X64OperandGenerator final : public OperandGenerator {
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kX64Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX64Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -156,12 +155,12 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -198,29 +197,29 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kX64Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX64Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kX64Movb;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kX64Movw;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
- case kRepTagged: // Fall through.
- case kRepWord64:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
- default:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -233,39 +232,41 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand value_operand =
g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
inputs[input_count++] = value_operand;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedLoadWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -289,33 +290,35 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
X64OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepWord64:
+ case MachineRepresentation::kWord64:
opcode = kCheckedStoreWord64;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -529,8 +532,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitWord32Shift(this, node, kX64Shl32);
@@ -637,7 +640,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
// Try to match the Add to a leal pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
m.displacement());
return;
@@ -653,6 +656,16 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kX64Add, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kX64Add, &cont);
+}
+
+
void InstructionSelector::VisitInt32Sub(Node* node) {
X64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -683,6 +696,16 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Sub, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Sub, &cont);
+}
+
+
namespace {
void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
@@ -741,8 +764,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
return;
}
VisitMul(this, node, kX64Imul32);
@@ -834,6 +857,70 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat32ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat64ToInt64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat32ToUint64, output_count, outputs, 1, inputs);
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kSSEFloat64ToUint64, output_count, outputs, 1, inputs);
+}
+
+
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -971,6 +1058,22 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEUint64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEUint64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1110,11 +1213,31 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
+}
+
+
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
}
@@ -1125,9 +1248,19 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
X64OperandGenerator g(this);
// Prepare for C function call.
@@ -1138,26 +1271,27 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
- if (Node* input = (*arguments)[n]) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : g.UseRegister(input);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(*arguments)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input))
- ? g.UseRegister(input)
- : g.Use(input);
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
Emit(kX64Push, g.NoOutput(), value);
}
}
@@ -1371,12 +1505,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || IsDefined(result)) {
+ if (result == nullptr || IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
@@ -1384,6 +1518,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kInt32SubWithOverflow:
cont.OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(this, node, kX64Sub32, &cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Add, &cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kX64Sub, &cont);
default:
break;
}
@@ -1653,8 +1793,14 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kWord64Popcnt;
}
if (CpuFeatures::IsSupported(SSE4_1)) {
- flags |= MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat64RoundTruncate;
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
}
return flags;
}
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index bda59bb139..a7b7246d3d 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -4,11 +4,11 @@
#include "src/compiler/code-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/scopes.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/frames-x87.h"
#include "src/x87/macro-assembler-x87.h"
@@ -42,12 +42,18 @@ class X87OperandConverter : public InstructionOperandConverter {
return Operand(ToRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
- FrameOffset offset =
- linkage()->GetFrameOffset(AllocatedOperand::cast(op)->index(), frame());
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ AllocatedOperand::cast(op)->index());
return Operand(offset.from_stack_pointer() ? esp : ebp,
offset.offset() + extra);
}
+ Operand ToMaterializableOperand(int materializable_offset) {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(
+ Frame::FPOffsetToSlot(materializable_offset));
+ return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+ }
+
Operand HighOperand(InstructionOperand* op) {
DCHECK(op->IsDoubleStackSlot());
return ToOperand(op, kPointerSize);
@@ -189,6 +195,9 @@ class OutOfLineLoadFloat final : public OutOfLineCode {
void Generate() final {
DCHECK(result_.code() == 0);
USE(result_);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ push(Immediate(0xffffffff));
__ push(Immediate(0x7fffffff));
@@ -330,13 +339,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} while (false)
-void CodeGenerator::AssembleDeconstructActivationRecord() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_slots = frame()->GetSpillSlotCount();
- if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
- __ mov(esp, ebp);
- __ pop(ebp);
+void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta > 0) {
+ __ add(esp, Immediate(sp_slot_delta * kPointerSize));
}
+ frame_access_state()->SetFrameAccessToDefault();
+}
+
+
+void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
+ int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
+ if (sp_slot_delta < 0) {
+ __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
+ frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
+ }
+ if (frame()->needs_frame()) {
+ __ mov(ebp, MemOperand(ebp, 0));
+ }
+ frame_access_state()->SetFrameAccessToSP();
}
@@ -346,6 +367,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
switch (ArchOpcodeField::decode(instr->opcode())) {
case kArchCallCodeObject: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
@@ -369,10 +394,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ fld1();
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObject: {
- AssembleDeconstructActivationRecord();
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
__ jmp(code, RelocInfo::CODE_TARGET);
@@ -381,6 +412,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchCallJSFunction: {
@@ -391,6 +423,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
bool double_result =
@@ -406,6 +442,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
__ fld1();
}
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallJSFunction: {
@@ -415,21 +452,39 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- AssembleDeconstructActivationRecord();
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchLazyBailout: {
EnsureSpaceForLazyDeopt();
RecordCallPosition(instr);
+ // Lazy Bailout entry, need to re-initialize FPU state.
+ __ fninit();
+ __ fld1();
break;
}
case kArchPrepareCallCFunction: {
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
break;
}
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ break;
case kArchCallCFunction: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
@@ -438,6 +493,21 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ bool double_result =
+ instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ if (double_result) {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ }
+ __ fninit();
+ if (double_result) {
+ __ fld_d(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ } else {
+ __ fld1();
+ }
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -450,6 +520,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
break;
case kArchNop:
+ case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
@@ -474,7 +545,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fild_s(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kPointerSize));
- AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ Deoptimizer::BailoutType bailout_type =
+ Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
break;
}
case kArchRet:
@@ -628,7 +701,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
InstructionOperand* source = instr->InputAt(0);
InstructionOperand* destination = instr->Output();
DCHECK(source->IsConstant());
- X87OperandConverter g(this, NULL);
+ X87OperandConverter g(this, nullptr);
Constant src_constant = g.ToConstant(source);
DCHECK_EQ(Constant::kFloat64, src_constant.type());
@@ -655,6 +728,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32Add: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
@@ -667,6 +743,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32Sub: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
@@ -679,6 +758,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32Mul: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
@@ -691,6 +773,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32Div: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
@@ -705,13 +790,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Float32Max: {
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = below;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+
+      // At least one operand is NaN.
+      // Return the right (second) operand if either operand is NaN.
+ __ j(parity_even, &return_right, Label::kNear);
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
@@ -725,12 +816,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fadd(1);
__ jmp(&return_left, Label::kNear);
- __ bind(&check_nan_left);
- __ fld(0);
- __ fld(0);
- __ FCmp(); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
-
__ bind(&return_right);
__ fxch();
@@ -742,13 +827,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Float32Min: {
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = above;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_s(MemOperand(esp, kFloatSize));
__ fld_s(MemOperand(esp, 0));
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
+      // At least one operand is NaN.
+      // Return the right (second) operand if either operand is NaN.
+ __ j(parity_even, &return_right, Label::kNear);
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
@@ -775,11 +865,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ pop(eax); // restore esp
__ jmp(&return_left, Label::kNear);
- __ bind(&check_nan_left);
- __ fld(0);
- __ fld(0);
- __ FCmp(); // NaN check.
- __ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
__ fxch();
@@ -790,6 +875,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32Sqrt: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fsqrt();
@@ -797,13 +885,39 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32Abs: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ fabs();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
+ case kX87Float32Round: {
+ RoundingMode mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+      // Set the correct rounding mode in the x87 control register.
+ __ X87SetRC((mode << 10));
+
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ InstructionOperand* input = instr->InputAt(0);
+ USE(input);
+ DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(i.InputOperand(0));
+ }
+ __ frndint();
+ __ X87SetRC(0x0000);
+ break;
+ }
case kX87Float64Add: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
@@ -816,6 +930,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Sub: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
@@ -827,6 +944,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Mul: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
@@ -838,6 +958,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Div: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
@@ -850,6 +973,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kX87Float64Mod: {
FrameScope frame_scope(&masm_, StackFrame::MANUAL);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ mov(eax, esp);
__ PrepareCallCFunction(4, eax);
__ fstp(0);
@@ -865,6 +991,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Float64Max: {
Label check_zero, return_left, return_right;
Condition condition = below;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fld_d(MemOperand(esp, 0));
@@ -894,6 +1023,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Float64Min: {
Label check_zero, return_left, return_right;
Condition condition = above;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_d(MemOperand(esp, kDoubleSize));
__ fld_d(MemOperand(esp, 0));
@@ -921,6 +1053,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Abs: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
__ fabs();
@@ -930,6 +1065,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Int32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
DCHECK(input->IsRegister() || input->IsStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
if (input->IsRegister()) {
Register input_reg = i.InputRegister(0);
@@ -950,12 +1088,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_s(i.InputOperand(0));
}
break;
}
case kX87Uint32ToFloat64: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ LoadUint32NoSSE2(i.InputRegister(0));
break;
@@ -979,6 +1123,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(esp, Immediate(kDoubleSize));
} else {
DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_d(i.InputOperand(0));
__ sub(esp, Immediate(kDoubleSize));
@@ -1049,6 +1196,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float64Sqrt: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ X87SetFPUCW(0x027F);
__ fstp(0);
__ fld_d(MemOperand(esp, 0));
@@ -1060,16 +1210,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Float64Round: {
RoundingMode mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- if (mode == MiscField::encode(kRoundDown)) {
- __ X87SetRC(0x0400);
- } else {
- __ X87SetRC(0x0c00);
- }
+      // Set the correct rounding mode in the x87 control register.
+ __ X87SetRC((mode << 10));
if (!instr->InputAt(0)->IsDoubleRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
DCHECK(input->IsDoubleStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_d(i.InputOperand(0));
}
@@ -1134,6 +1284,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X87Register output = i.OutputDoubleRegister();
USE(output);
DCHECK(output.code() == 0);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_d(i.MemoryOperand());
} else {
@@ -1148,6 +1301,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
X87Register output = i.OutputDoubleRegister();
USE(output);
DCHECK(output.code() == 0);
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
__ fstp(0);
__ fld_s(i.MemoryOperand());
} else {
@@ -1158,24 +1314,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87BitcastFI: {
- __ fstp(0);
__ mov(i.OutputRegister(), MemOperand(esp, 0));
__ lea(esp, Operand(esp, kFloatSize));
break;
}
case kX87BitcastIF: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
if (instr->InputAt(0)->IsRegister()) {
__ lea(esp, Operand(esp, -kFloatSize));
__ mov(MemOperand(esp, 0), i.InputRegister(0));
- __ fstp(0);
__ fld_s(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kFloatSize));
} else {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ mov(MemOperand(esp, 0), i.InputRegister(0));
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
+ __ fld_s(i.InputOperand(0));
}
break;
}
@@ -1217,30 +1371,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX87Push:
if (instr->InputAt(0)->IsDoubleRegister()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.machine_type() == kRepFloat32) {
+ if (allocated.representation() == MachineRepresentation::kFloat32) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_s(Operand(esp, 0));
} else {
- DCHECK(allocated.machine_type() == kRepFloat64);
+ DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(Operand(esp, 0));
}
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (instr->InputAt(0)->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.machine_type() == kRepFloat32) {
+ if (allocated.representation() == MachineRepresentation::kFloat32) {
__ sub(esp, Immediate(kDoubleSize));
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
} else {
- DCHECK(allocated.machine_type() == kRepFloat64);
+ DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
}
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
+ frame_access_state()->IncreaseSPDelta(1);
} else {
__ push(i.InputOperand(0));
+ frame_access_state()->IncreaseSPDelta(1);
}
break;
case kX87Poke: {
@@ -1633,20 +1791,20 @@ void CodeGenerator::AssembleDeoptimizerCall(
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
// Assemble a prologue similar the to cdecl calling convention.
__ push(ebp);
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
// TODO(turbofan): this prologue is redundant with OSR, but needed for
// code aging.
- CompilationInfo* info = this->info();
- __ Prologue(info->IsCodePreAgingActive());
- } else if (needs_frame_) {
+ __ Prologue(this->info()->GeneratePreagedPrologue());
+ } else if (frame()->needs_frame()) {
__ StubPrologue();
} else {
frame()->SetElidedFrameSizeInSlots(kPCOnStackSize / kPointerSize);
}
+ frame_access_state()->SetFrameAccessToDefault();
int stack_shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
@@ -1689,6 +1847,21 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  // Clear the FPU stack only if there is no return value on the x87 stack.
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ bool clear_stack = true;
+ for (int i = 0; i < descriptor->ReturnCount(); i++) {
+ MachineRepresentation rep = descriptor->GetReturnType(i).representation();
+ LinkageLocation loc = descriptor->GetReturnLocation(i);
+ if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
+ clear_stack = false;
+ break;
+ }
+ }
+ if (clear_stack) __ fstp(0);
+
int pop_count = static_cast<int>(descriptor->StackParameterCount());
const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers.
@@ -1699,10 +1872,10 @@ void CodeGenerator::AssembleReturn() {
}
}
- if (descriptor->kind() == CallDescriptor::kCallAddress) {
+ if (descriptor->IsCFunctionCall()) {
__ mov(esp, ebp); // Move stack pointer back to frame pointer.
__ pop(ebp); // Pop caller's frame pointer.
- } else if (descriptor->IsJSFunctionCall() || needs_frame_) {
+ } else if (frame()->needs_frame()) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
@@ -1723,7 +1896,7 @@ void CodeGenerator::AssembleReturn() {
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
- X87OperandConverter g(this, NULL);
+ X87OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
@@ -1750,11 +1923,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromFrame(src, &offset)) {
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ mov(dst, Operand(ebp, offset));
+ __ mov(dst, g.ToMaterializableOperand(offset));
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- __ push(Operand(ebp, offset));
+ __ push(g.ToMaterializableOperand(offset));
__ pop(dst);
}
} else if (destination->IsRegister()) {
@@ -1817,11 +1990,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fst_s(dst);
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fst_d(dst);
break;
default:
@@ -1834,11 +2007,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsDoubleRegister()) {
// always only push one value into the x87 stack.
__ fstp(0);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(src);
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(src);
break;
default:
@@ -1846,12 +2019,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
Operand dst = g.ToOperand(destination);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(src);
__ fstp_s(dst);
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(src);
__ fstp_d(dst);
break;
@@ -1867,7 +2040,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
- X87OperandConverter g(this, NULL);
+ X87OperandConverter g(this, nullptr);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
@@ -1880,23 +2053,27 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ xchg(g.ToRegister(source), g.ToOperand(destination));
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory.
- Operand src = g.ToOperand(source);
- Operand dst = g.ToOperand(destination);
- __ push(dst);
- __ push(src);
- __ pop(dst);
- __ pop(src);
+ Operand dst1 = g.ToOperand(destination);
+ __ push(dst1);
+ frame_access_state()->IncreaseSPDelta(1);
+ Operand src1 = g.ToOperand(source);
+ __ push(src1);
+ Operand dst2 = g.ToOperand(destination);
+ __ pop(dst2);
+ frame_access_state()->IncreaseSPDelta(-1);
+ Operand src2 = g.ToOperand(source);
+ __ pop(src2);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
UNREACHABLE();
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(g.ToOperand(destination));
__ fxch();
__ fstp_s(g.ToOperand(destination));
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(g.ToOperand(destination));
__ fxch();
__ fstp_d(g.ToOperand(destination));
@@ -1906,14 +2083,14 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.machine_type()) {
- case kRepFloat32:
+ switch (allocated.representation()) {
+ case MachineRepresentation::kFloat32:
__ fld_s(g.ToOperand(source));
__ fld_s(g.ToOperand(destination));
__ fstp_s(g.ToOperand(source));
__ fstp_s(g.ToOperand(destination));
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
__ fld_d(g.ToOperand(source));
__ fld_d(g.ToOperand(destination));
__ fstp_d(g.ToOperand(source));
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
index 24871e94ca..b498d9c59c 100644
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -43,6 +43,7 @@ namespace compiler {
V(X87Float32Min) \
V(X87Float32Abs) \
V(X87Float32Sqrt) \
+ V(X87Float32Round) \
V(X87LoadFloat64Constant) \
V(X87Float64Add) \
V(X87Float64Sub) \
diff --git a/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc b/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc
new file mode 100644
index 0000000000..af86a87ad7
--- /dev/null
+++ b/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNIMPLEMENTED();
+}
+
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNIMPLEMENTED();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index 5c6f10255f..cff4aafb27 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -54,18 +54,18 @@ class X87OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == NULL)
+ int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
- if (base != NULL) {
+ if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
- base = NULL;
+ base = nullptr;
}
}
- if (base != NULL) {
+ if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
- if (index != NULL) {
+ if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
@@ -88,7 +88,7 @@ class X87OperandGenerator final : public OperandGenerator {
}
} else {
DCHECK(scale >= 0 && scale <= 3);
- if (index != NULL) {
+ if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
@@ -113,7 +113,7 @@ class X87OperandGenerator final : public OperandGenerator {
size_t* input_count) {
BaseWithIndexAndDisplacement32Matcher m(node, true);
DCHECK(m.matches());
- if ((m.displacement() == NULL || CanBeImmediate(m.displacement()))) {
+ if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
m.displacement(), inputs, input_count);
} else {
@@ -130,29 +130,29 @@ class X87OperandGenerator final : public OperandGenerator {
void InstructionSelector::VisitLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
- MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case kRepFloat32:
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
opcode = kX87Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX87Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kX87Movsxbl : kX87Movzxbl;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX87Movsxbl : kX87Movzxbl;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kX87Movsxwl : kX87Movzxwl;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -175,12 +175,12 @@ void InstructionSelector::VisitStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineType rep = RepresentationOf(store_rep.machine_type());
+ MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(kRepTagged, rep);
+ DCHECK_EQ(MachineRepresentation::kTagged, rep);
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -217,26 +217,27 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kX87Movss;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kX87Movsd;
break;
- case kRepBit: // Fall through.
- case kRepWord8:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
opcode = kX87Movb;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kX87Movw;
break;
- case kRepTagged: // Fall through.
- case kRepWord32:
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
- default:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -244,7 +245,8 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
- } else if (rep == kRepWord8 || rep == kRepBit) {
+ } else if (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@@ -257,36 +259,39 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(NULL), input_count, inputs);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
}
void InstructionSelector::VisitCheckedLoad(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
- MachineType typ = TypeOf(OpParameter<MachineType>(node));
+ CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
X87OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case kRepWord8:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
break;
- case kRepWord16:
- opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedLoadWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedLoadFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -306,38 +311,42 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+ MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
X87OperandGenerator g(this);
Node* const buffer = node->InputAt(0);
Node* const offset = node->InputAt(1);
Node* const length = node->InputAt(2);
Node* const value = node->InputAt(3);
- ArchOpcode opcode;
+ ArchOpcode opcode = kArchNop;
switch (rep) {
- case kRepWord8:
+ case MachineRepresentation::kWord8:
opcode = kCheckedStoreWord8;
break;
- case kRepWord16:
+ case MachineRepresentation::kWord16:
opcode = kCheckedStoreWord16;
break;
- case kRepWord32:
+ case MachineRepresentation::kWord32:
opcode = kCheckedStoreWord32;
break;
- case kRepFloat32:
+ case MachineRepresentation::kFloat32:
opcode = kCheckedStoreFloat32;
break;
- case kRepFloat64:
+ case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- default:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
InstructionOperand value_operand =
- g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
- : g.UseRegister(value));
+ g.CanBeImmediate(value) ? g.UseImmediate(value)
+ : ((rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)
+ ? g.UseByteRegister(value)
+ : g.UseRegister(value));
InstructionOperand offset_operand = g.UseRegister(offset);
InstructionOperand length_operand =
g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -508,8 +517,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
VisitShift(this, node, kX87Shl);
@@ -552,7 +561,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
// Try to match the Add to a lea pattern
BaseWithIndexAndDisplacement32Matcher m(node);
if (m.matches() &&
- (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
@@ -589,8 +598,8 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32ScaleMatcher m(node, true);
if (m.matches()) {
Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : NULL;
- EmitLea(this, node, index, m.scale(), base, NULL);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, node, index, m.scale(), base, nullptr);
return;
}
X87OperandGenerator g(this);
@@ -699,7 +708,7 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, NULL);
+ Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, nullptr);
}
@@ -713,7 +722,7 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -721,7 +730,7 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -729,7 +738,7 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -737,7 +746,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -745,7 +754,7 @@ void InstructionSelector::VisitFloat32Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -753,7 +762,7 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -761,7 +770,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -769,7 +778,7 @@ void InstructionSelector::VisitFloat64Div(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -786,7 +795,7 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -794,7 +803,7 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -802,7 +811,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
@@ -810,35 +819,42 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, NULL);
+ Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundDown),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
}
@@ -849,6 +865,27 @@ void InstructionSelector::VisitFloat64RoundDown(Node* node) {
}
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundToZero),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
@@ -861,9 +898,23 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
- const CallDescriptor* descriptor,
- Node* node) {
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32Round | MiscField::encode(kRoundToNearest),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64Round | MiscField::encode(kRoundToNearest),
+ g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
+ Node* node) {
X87OperandGenerator g(this);
// Prepare for C function call.
@@ -876,26 +927,27 @@ void InstructionSelector::EmitPrepareArguments(NodeVector* arguments,
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
- if (Node* input = (*arguments)[n]) {
+ PushParameter input = (*arguments)[n];
+ if (input.node()) {
int const slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input)
- ? g.UseImmediate(input)
- : g.UseRegister(input);
+ InstructionOperand value = g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
+ : g.UseRegister(input.node());
Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
} else {
// Push any stack arguments.
- for (Node* input : base::Reversed(*arguments)) {
+ for (PushParameter input : base::Reversed(*arguments)) {
// TODO(titzer): handle pushing double parameters.
- if (input == nullptr) continue;
+ if (input.node() == nullptr) continue;
InstructionOperand value =
- g.CanBeImmediate(input)
- ? g.UseImmediate(input)
+ g.CanBeImmediate(input.node())
+ ? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input))
- ? g.UseRegister(input)
- : g.Use(input);
+ sequence()->IsFloat(GetVirtualRegister(input.node()))
+ ? g.UseRegister(input.node())
+ : g.Use(input.node());
Emit(kX87Push, g.NoOutput(), value);
}
}
@@ -1070,12 +1122,12 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (ProjectionIndexOf(value->op()) == 1u) {
// We cannot combine the <Operation>WithOverflow with this branch
// unless the 0th projection (the use of the actual value of the
- // <Operation> is either NULL, which means there's no use of the
+ // <Operation> is either nullptr, which means there's no use of the
// actual value, or was already defined, which means it is scheduled
// *AFTER* this branch).
Node* const node = value->InputAt(0);
Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == NULL || selector->IsDefined(result)) {
+ if (result == nullptr || selector->IsDefined(result)) {
switch (node->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1276,6 +1328,15 @@ InstructionSelector::SupportedMachineOperatorFlags() {
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt;
}
+
+ flags |= MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kFloat64RoundTiesEven;
return flags;
}
diff --git a/deps/v8/src/compiler/zone-pool.h b/deps/v8/src/compiler/zone-pool.h
index 15866ea358..aaf9daac46 100644
--- a/deps/v8/src/compiler/zone-pool.h
+++ b/deps/v8/src/compiler/zone-pool.h
@@ -19,16 +19,17 @@ class ZonePool final {
public:
class Scope final {
public:
- explicit Scope(ZonePool* zone_pool) : zone_pool_(zone_pool), zone_(NULL) {}
+ explicit Scope(ZonePool* zone_pool)
+ : zone_pool_(zone_pool), zone_(nullptr) {}
~Scope() { Destroy(); }
Zone* zone() {
- if (zone_ == NULL) zone_ = zone_pool_->NewEmptyZone();
+ if (zone_ == nullptr) zone_ = zone_pool_->NewEmptyZone();
return zone_;
}
void Destroy() {
- if (zone_ != NULL) zone_pool_->ReturnZone(zone_);
- zone_ = NULL;
+ if (zone_ != nullptr) zone_pool_->ReturnZone(zone_);
+ zone_ = nullptr;
}
private:
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 437aacf4af..67257ae0d7 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -56,24 +56,28 @@ Context* Context::previous() {
void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
-bool Context::has_extension() { return extension() != nullptr; }
-Object* Context::extension() { return get(EXTENSION_INDEX); }
-void Context::set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+bool Context::has_extension() { return !extension()->IsTheHole(); }
+HeapObject* Context::extension() {
+ return HeapObject::cast(get(EXTENSION_INDEX));
+}
+void Context::set_extension(HeapObject* object) {
+ set(EXTENSION_INDEX, object);
+}
JSModule* Context::module() { return JSModule::cast(get(EXTENSION_INDEX)); }
void Context::set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
-JSGlobalObject* Context::global_object() {
- Object* result = get(GLOBAL_OBJECT_INDEX);
- DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
- return reinterpret_cast<JSGlobalObject*>(result);
+Context* Context::native_context() {
+ Object* result = get(NATIVE_CONTEXT_INDEX);
+ DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result));
+ return reinterpret_cast<Context*>(result);
}
-void Context::set_global_object(JSGlobalObject* object) {
- set(GLOBAL_OBJECT_INDEX, object);
+void Context::set_native_context(Context* context) {
+ set(NATIVE_CONTEXT_INDEX, context);
}
@@ -120,8 +124,8 @@ bool Context::IsScriptContext() {
bool Context::HasSameSecurityTokenAs(Context* that) {
- return this->global_object()->native_context()->security_token() ==
- that->global_object()->native_context()->security_token();
+ return this->native_context()->security_token() ==
+ that->native_context()->security_token();
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 67d19a1eff..79a9e926a5 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -4,10 +4,10 @@
#include "src/contexts.h"
+#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
-#include "src/scopeinfo.h"
namespace v8 {
namespace internal {
@@ -82,8 +82,8 @@ Context* Context::declaration_context() {
JSObject* Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
- Object* object = extension();
- if (object == nullptr) return nullptr;
+ HeapObject* object = extension();
+ if (object->IsTheHole()) return nullptr;
if (IsBlockContext()) {
if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
@@ -103,7 +103,7 @@ JSReceiver* Context::extension_receiver() {
ScopeInfo* Context::scope_info() {
DCHECK(IsModuleContext() || IsScriptContext() || IsBlockContext());
- Object* object = extension();
+ HeapObject* object = extension();
if (object->IsSloppyBlockWithEvalContextExtension()) {
DCHECK(IsBlockContext());
object = SloppyBlockWithEvalContextExtension::cast(object)->scope_info();
@@ -118,6 +118,11 @@ String* Context::catch_name() {
}
+JSGlobalObject* Context::global_object() {
+ return JSGlobalObject::cast(native_context()->extension());
+}
+
+
Context* Context::script_context() {
Context* current = this;
while (!current->IsScriptContext()) {
@@ -127,17 +132,6 @@ Context* Context::script_context() {
}
-Context* Context::native_context() {
- // Fast case: the receiver context is already a native context.
- if (IsNativeContext()) return this;
- // The global object has a direct pointer to the native context. If the
- // following DCHECK fails, the native context is probably being accessed
- // indirectly during bootstrapping. This is unsupported.
- DCHECK(global_object()->IsJSGlobalObject());
- return global_object()->native_context();
-}
-
-
JSObject* Context::global_proxy() {
return native_context()->global_proxy_object();
}
@@ -152,30 +146,24 @@ void Context::set_global_proxy(JSObject* object) {
* Lookups a property in an object environment, taking the unscopables into
* account. This is used For HasBinding spec algorithms for ObjectEnvironment.
*/
-static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
+static Maybe<bool> UnscopableLookup(LookupIterator* it) {
Isolate* isolate = it->isolate();
- Maybe<PropertyAttributes> attrs = JSReceiver::GetPropertyAttributes(it);
- DCHECK(attrs.IsJust() || isolate->has_pending_exception());
- if (!attrs.IsJust() || attrs.FromJust() == ABSENT) return attrs;
+ Maybe<bool> found = JSReceiver::HasProperty(it);
+ if (!found.IsJust() || !found.FromJust()) return found;
- Handle<Symbol> unscopables_symbol = isolate->factory()->unscopables_symbol();
- Handle<Object> receiver = it->GetReceiver();
Handle<Object> unscopables;
- MaybeHandle<Object> maybe_unscopables =
- Object::GetProperty(receiver, unscopables_symbol);
- if (!maybe_unscopables.ToHandle(&unscopables)) {
- return Nothing<PropertyAttributes>();
- }
- if (!unscopables->IsSpecObject()) return attrs;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, unscopables,
+ Object::GetProperty(it->GetReceiver(),
+ isolate->factory()->unscopables_symbol()),
+ Nothing<bool>());
+ if (!unscopables->IsJSReceiver()) return Just(true);
Handle<Object> blacklist;
- MaybeHandle<Object> maybe_blacklist =
- Object::GetProperty(unscopables, it->name());
- if (!maybe_blacklist.ToHandle(&blacklist)) {
- DCHECK(isolate->has_pending_exception());
- return Nothing<PropertyAttributes>();
- }
- return blacklist->BooleanValue() ? Just(ABSENT) : attrs;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, blacklist,
+ Object::GetProperty(unscopables, it->name()),
+ Nothing<bool>());
+ return Just(!blacklist->BooleanValue());
}
static void GetAttributesAndBindingFlags(VariableMode mode,
@@ -295,7 +283,15 @@ Handle<Object> Context::Lookup(Handle<String> name,
maybe = Just(ABSENT);
} else {
LookupIterator it(object, name);
- maybe = UnscopableLookup(&it);
+ Maybe<bool> found = UnscopableLookup(&it);
+ if (found.IsNothing()) {
+ maybe = Nothing<PropertyAttributes>();
+ } else {
+ // Luckily, consumers of |maybe| only care whether the property
+ // was absent or not, so we can return a dummy |NONE| value
+ // for its attributes when it was present.
+ maybe = Just(found.FromJust() ? NONE : ABSENT);
+ }
}
} else {
maybe = JSReceiver::GetPropertyAttributes(object, name);
@@ -557,6 +553,15 @@ bool Context::IsJSBuiltin(Handle<Context> native_context,
#ifdef DEBUG
+
+bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object* object) {
+ // During bootstrapping we allow all objects to pass as global
+ // objects. This is necessary to fix circular dependencies.
+ return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
+ isolate->bootstrapper()->IsActive() || object->IsNativeContext();
+}
+
+
bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
@@ -568,13 +573,6 @@ bool Context::IsBootstrappingOrValidParentContext(
context->IsModuleContext() || !child->IsModuleContext();
}
-
-bool Context::IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object) {
- // During bootstrapping we allow all objects to pass as global
- // objects. This is necessary to fix circular dependencies.
- return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
- isolate->bootstrapper()->IsActive() || object->IsJSGlobalObject();
-}
#endif
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index c0d7a20069..6c9e195075 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -78,24 +78,27 @@ enum BindingFlags {
// Factory::NewContext.
#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
V(CONCAT_ITERABLE_TO_ARRAY_INDEX, JSFunction, concat_iterable_to_array) \
V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(OBJECT_FREEZE, JSFunction, object_freeze) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)
-#define NATIVE_CONTEXT_JS_BUILTINS(V) \
- V(APPLY_PREPARE_BUILTIN_INDEX, JSFunction, apply_prepare_builtin) \
- V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
- concat_iterable_to_array_builtin) \
- V(REFLECT_APPLY_PREPARE_BUILTIN_INDEX, JSFunction, \
- reflect_apply_prepare_builtin) \
- V(REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX, JSFunction, \
- reflect_construct_prepare_builtin)
+#define NATIVE_CONTEXT_JS_BUILTINS(V) \
+ V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
+ concat_iterable_to_array_builtin)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
@@ -107,10 +110,7 @@ enum BindingFlags {
V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
- V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -118,7 +118,6 @@ enum BindingFlags {
V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter) \
V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function) \
V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(MAP_FROM_ARRAY_INDEX, JSFunction, map_from_array) \
V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
@@ -129,12 +128,10 @@ enum BindingFlags {
V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
native_object_notifier_perform_change) \
V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe) \
- V(NO_SIDE_EFFECT_TO_STRING_FUN_INDEX, JSFunction, \
- no_side_effect_to_string_fun) \
+ V(NO_SIDE_EFFECTS_TO_STRING_FUN_INDEX, JSFunction, \
+ no_side_effects_to_string_fun) \
V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(OBJECT_GET_OWN_PROPERTY_DESCROPTOR_INDEX, JSFunction, \
- object_get_own_property_descriptor) \
V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice) \
V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
@@ -142,6 +139,7 @@ enum BindingFlags {
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
promise_has_user_defined_reject_handler) \
V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
@@ -152,13 +150,9 @@ enum BindingFlags {
V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(SET_FROM_ARRAY_INDEX, JSFunction, set_from_array) \
V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
- to_complete_property_descriptor) \
- V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
NATIVE_CONTEXT_JS_BUILTINS(V)
@@ -184,6 +178,7 @@ enum BindingFlags {
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
+ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \
@@ -194,6 +189,9 @@ enum BindingFlags {
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache) \
+ V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
+ V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
+ generator_function_function) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
@@ -205,13 +203,36 @@ enum BindingFlags {
V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
- V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
- V(JS_ARRAY_STRONG_MAPS_INDEX, Object, js_array_strong_maps) \
+ V(JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_smi_elements_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_holey_smi_elements_map_index) \
+ V(JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, Map, js_array_fast_elements_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_holey_elements_map_index) \
+ V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_double_elements_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
+ js_array_fast_holey_double_elements_map_index) \
+ V(JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_smi_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_holey_smi_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_holey_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_double_elements_strong_map_index) \
+ V(JS_ARRAY_FAST_HOLEY_DOUBLE_ELEMENTS_STRONG_MAP_INDEX, Map, \
+ js_array_fast_holey_double_elements_strong_map_index) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
+ V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
+ V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
@@ -222,12 +243,17 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
+ V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
+ V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
+ V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
+ V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map) \
+ V(PROXY_MAP_INDEX, Map, proxy_map) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
- V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
+ V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
@@ -236,6 +262,7 @@ enum BindingFlags {
sloppy_function_without_prototype_map) \
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_with_readonly_prototype_map) \
+ V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
@@ -330,7 +357,7 @@ class ScriptContextTable : public FixedArray {
// function contexts, and non-NULL for 'with' contexts.
// Used to implement the 'with' statement.
//
-// [ extension ] A pointer to an extension JSObject, or NULL. Used to
+// [ extension ] A pointer to an extension JSObject, or "the hole". Used to
// implement 'with' statements and dynamic declarations
// (through 'eval'). The object in a 'with' statement is
// stored in the extension slot of a 'with' context.
@@ -369,12 +396,12 @@ class Context: public FixedArray {
// These slots are in all contexts.
CLOSURE_INDEX,
PREVIOUS_INDEX,
- // The extension slot is used for either the global object (in global
+ // The extension slot is used for either the global object (in native
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts), the serialized
// scope info (block contexts), or the module instance (module contexts).
EXTENSION_INDEX,
- GLOBAL_OBJECT_INDEX,
+ NATIVE_CONTEXT_INDEX,
// These slots are only in native contexts.
#define NATIVE_CONTEXT_SLOT(index, type, name) index,
@@ -391,6 +418,9 @@ class Context: public FixedArray {
// Total number of slots.
NATIVE_CONTEXT_SLOTS,
FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST,
+ FIRST_JS_ARRAY_MAP_SLOT = JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX,
+ FIRST_JS_ARRAY_STRONG_MAP_SLOT =
+ JS_ARRAY_FAST_SMI_ELEMENTS_STRONG_MAP_INDEX,
MIN_CONTEXT_SLOTS = GLOBAL_PROXY_INDEX,
// This slot holds the thrown value in catch contexts.
@@ -408,8 +438,8 @@ class Context: public FixedArray {
inline void set_previous(Context* context);
inline bool has_extension();
- inline Object* extension();
- inline void set_extension(Object* object);
+ inline HeapObject* extension();
+ inline void set_extension(HeapObject* object);
JSObject* extension_object();
JSReceiver* extension_receiver();
ScopeInfo* scope_info();
@@ -423,18 +453,19 @@ class Context: public FixedArray {
Context* declaration_context();
bool is_declaration_context();
- inline JSGlobalObject* global_object();
- inline void set_global_object(JSGlobalObject* object);
-
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
void set_global_proxy(JSObject* global);
+ // Get the JSGlobalObject object.
+ JSGlobalObject* global_object();
+
// Get the script context by traversing the context chain.
Context* script_context();
- // Compute the native context by traversing the context chain.
- Context* native_context();
+ // Compute the native context.
+ inline Context* native_context();
+ inline void set_native_context(Context* context);
// Predicates for context types. IsNativeContext is also defined on Object
// because we frequently have to know if arbitrary objects are natives
@@ -534,6 +565,13 @@ class Context: public FixedArray {
: SLOPPY_FUNCTION_MAP_INDEX;
}
+ static int ArrayMapIndex(ElementsKind elements_kind,
+ Strength strength = Strength::WEAK) {
+ DCHECK(IsFastElementsKind(elements_kind));
+ return elements_kind + (is_strong(strength) ? FIRST_JS_ARRAY_STRONG_MAP_SLOT
+ : FIRST_JS_ARRAY_MAP_SLOT);
+ }
+
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
static const int kNotFound = -1;
@@ -549,8 +587,8 @@ class Context: public FixedArray {
private:
#ifdef DEBUG
// Bootstrapping-aware type checks.
+ static bool IsBootstrappingOrNativeContext(Isolate* isolate, Object* object);
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
- static bool IsBootstrappingOrGlobalObject(Isolate* isolate, Object* object);
#endif
STATIC_ASSERT(kHeaderSize == Internals::kContextHeaderSize);
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index c05c0644da..3e56799bc9 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -20,7 +20,6 @@
#include "src/conversions.h"
#include "src/double.h"
#include "src/objects-inl.h"
-#include "src/scanner.h"
#include "src/strtod.h"
namespace v8 {
@@ -295,7 +294,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
}
-
+// ES6 18.2.5 parseInt(string, radix)
template <class Iterator, class EndMark>
double InternalStringToInt(UnicodeCache* unicode_cache,
Iterator current,
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 2ce1d70fe6..7867719968 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -10,6 +10,7 @@
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
+#include "src/codegen.h"
#include "src/conversions-inl.h"
#include "src/dtoa.h"
#include "src/factory.h"
@@ -440,7 +441,7 @@ char* DoubleToRadixCString(double value, int radix) {
// at least one digit.
int integer_pos = kBufferSize - 2;
do {
- double remainder = std::fmod(integer_part, radix);
+ double remainder = modulo(integer_part, radix);
integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
integer_part -= remainder;
integer_part /= radix;
@@ -483,6 +484,7 @@ char* DoubleToRadixCString(double value, int radix) {
}
+// ES6 18.2.4 parseFloat(string)
double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
int flags, double empty_string_val) {
Handle<String> flattened = String::Flatten(string);
@@ -490,7 +492,6 @@ double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
DisallowHeapAllocation no_gc;
String::FlatContent flat = flattened->GetFlatContent();
DCHECK(flat.IsFlat());
- // ECMA-262 section 15.1.2.3, empty string is NaN
if (flat.IsOneByte()) {
return StringToDouble(unicode_cache, flat.ToOneByteVector(), flags,
empty_string_val);
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 87eca24498..d8a3f091f8 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -714,7 +714,11 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed) \
SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
- SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
+ SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed) \
+ SC(turbo_escape_allocs_replaced, V8.TurboEscapeAllocsReplaced) \
+ SC(crankshaft_escape_allocs_replaced, V8.CrankshaftEscapeAllocsReplaced) \
+ SC(turbo_escape_loads_replaced, V8.TurboEscapeLoadsReplaced) \
+ SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced)
// This file contains all the v8 counters that are in use.
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index 09aece0b20..cd736ecd8f 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -298,13 +298,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -988,7 +981,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1213,14 +1208,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r1);
@@ -1838,14 +1825,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), r0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(r1), instr->index());
- return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2455,13 +2434,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2556,12 +2528,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index e534179ec8..6329f36fb2 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -131,7 +128,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1377,25 +1373,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1895,25 +1872,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2517,19 +2475,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2574,19 +2519,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index c1b155af49..2bd0788232 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -122,7 +122,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
}
@@ -169,7 +169,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(r1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -178,7 +178,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -197,7 +197,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
if (need_write_barrier) {
@@ -874,60 +874,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1864,40 +1810,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(r0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand(stamp));
- __ ldr(scratch, MemOperand(scratch));
- __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch, scratch0());
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2240,7 +2152,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ b(ge, instr->TrueLabel(chunk_));
}
@@ -2592,39 +2504,20 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ b(lt, is_false);
- __ b(eq, is_true);
- __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ b(eq, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(gt, is_false);
+ __ b(eq, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
Register instance_type = ip;
__ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
__ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
- if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
+ if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
@@ -2683,6 +2576,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = ip;
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2698,6 +2592,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ ldrb(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+
__ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, eq);
@@ -2736,7 +2640,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(r0);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
@@ -2813,7 +2717,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ldr(result, ContextOperand(context, instr->slot_index()));
+ __ ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
@@ -2830,7 +2734,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3286,15 +3190,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
__ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ldr(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+ __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3351,7 +3254,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3395,7 +3299,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3415,7 +3319,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize r0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ mov(r0, Operand(arity));
// Invoke function.
@@ -3726,7 +3631,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3797,11 +3702,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r1));
DCHECK(ToRegister(instr->result()).is(r0));
- __ mov(r0, Operand(instr->arity()));
-
// Change context.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ mov(r0, Operand(instr->arity()));
+
// Load the code entry address
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
@@ -3840,19 +3747,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r1));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- __ mov(r0, Operand(instr->arity()));
- // No cell in r2 for construct type feedback in optimized code
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r1));
@@ -3875,7 +3769,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3890,17 +3784,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5319,50 +5213,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // r6 = literals array.
- // r1 = regexp literal.
- // r0 = regexp literal clone.
- // r2-5 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ Move(r6, instr->hydrogen()->literals());
- __ ldr(r1, FieldMemOperand(r6, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r0.
- __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r4, Operand(instr->hydrogen()->pattern()));
- __ mov(r3, Operand(instr->hydrogen()->flags()));
- __ Push(r6, r5, r4, r3);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(r1, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r1, r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r3));
DCHECK(ToRegister(instr->result()).is(r0));
@@ -5443,8 +5293,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
__ b(lt, false_label);
// Check for callable or undetectable objects => false.
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
@@ -5471,30 +5321,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5627,8 +5453,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ SmiTst(r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5642,7 +5468,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
@@ -5770,7 +5596,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index 35887c1bef..24a083ff2f 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -5,12 +5,12 @@
#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
+#include "src/ast/scopes.h"
#include "src/crankshaft/arm/lithium-arm.h"
#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -24,13 +24,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -207,6 +203,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -237,9 +238,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
@@ -292,10 +290,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -317,13 +311,9 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index 635c6dd251..3f43338585 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -82,13 +82,6 @@ void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -1076,15 +1069,6 @@ LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- // The call to CallConstructStub will expect the constructor to be in x1.
- LOperand* constructor = UseFixed(instr->constructor(), x1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The call to ArrayConstructCode will expect the constructor to be in x1.
@@ -1384,13 +1368,6 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), x0);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
return new(zone()) LDebugBreak();
}
@@ -1573,8 +1550,11 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- LOperand* scratch = TempRegister();
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, scratch);
+ LOperand* scratch1 = TempRegister();
+ LOperand* scratch2 = TempRegister();
+ LHasInPrototypeChainAndBranch* result = new (zone())
+ LHasInPrototypeChainAndBranch(object, prototype, scratch1, scratch2);
+ return AssignEnvironment(result);
}
@@ -1587,12 +1567,6 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister(), TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
HCompareMinusZeroAndBranch* instr) {
LOperand* value = UseRegister(instr->value());
@@ -2041,13 +2015,6 @@ LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
HValue* value = instr->value();
DCHECK(value->representation().IsDouble());
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 52c94b4534..1b627d13f8 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -35,7 +35,6 @@ class LCodeGen;
V(Branch) \
V(CallFunction) \
V(CallJSFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -65,7 +64,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -92,7 +90,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsSmiAndBranch) \
V(IsStringAndBranch) \
V(IsUndetectableAndBranch) \
@@ -139,7 +136,6 @@ class LCodeGen;
V(Prologue) \
V(PreparePushArguments) \
V(PushArguments) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -851,25 +847,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -1265,23 +1242,6 @@ class LContext final : public LTemplateInstruction<1, 0, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
@@ -1490,18 +1450,20 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
};
-class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
- LOperand* scratch) {
+ LOperand* scratch1, LOperand* scratch2) {
inputs_[0] = object;
inputs_[1] = prototype;
- temps_[0] = scratch;
+ temps_[0] = scratch1;
+ temps_[1] = scratch2;
}
LOperand* object() const { return inputs_[0]; }
LOperand* prototype() const { return inputs_[1]; }
- LOperand* scratch() const { return temps_[0]; }
+ LOperand* scratch1() const { return temps_[0]; }
+ LOperand* scratch2() const { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
"has-in-prototype-chain-and-branch")
@@ -1583,21 +1545,6 @@ class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 2> {
- public:
- LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -2292,19 +2239,6 @@ class LPushArguments final : public LTemplateResultInstruction<0> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LReturn final : public LTemplateInstruction<0, 3, 0> {
public:
LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 5c90beb68e..571bc154af 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -396,23 +396,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(instr->IsMarkedAsCall());
- DCHECK(ToRegister(instr->constructor()).is(x1));
-
- __ Mov(x0, instr->arity());
- // No cell in x2 for construct type feedback in optimized code.
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-
- DCHECK(ToRegister(instr->result()).is(x0));
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->context()).is(cp));
@@ -437,7 +420,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -451,17 +434,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ B(&done);
__ Bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -637,7 +620,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
}
@@ -668,7 +651,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
__ Push(x1, x10);
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -677,7 +660,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
// Context is returned in x0. It replaces the context passed to us. It's
@@ -900,62 +883,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
-
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
-
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::DeoptimizeBranch(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
BranchType branch_type, Register reg, int bit,
@@ -1555,20 +1482,20 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ Bind(deferred->exit());
if (instr->hydrogen()->MustPrefillWithFiller()) {
- Register filler_count = temp1;
- Register filler = temp2;
- Register untagged_result = ToRegister(instr->temp3());
+ Register start = temp1;
+ Register end = temp2;
+ Register filler = ToRegister(instr->temp3());
+
+ __ Sub(start, result, kHeapObjectTag);
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ Mov(filler_count, size / kPointerSize);
+ __ Add(end, start, size);
} else {
- __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2);
+ __ Add(end, start, ToRegister(instr->size()));
}
-
- __ Sub(untagged_result, result, kHeapObjectTag);
- __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
- __ FillFields(untagged_result, filler_count, filler);
+ __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(start, end, filler);
} else {
DCHECK(instr->temp3() == NULL);
}
@@ -1651,7 +1578,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in argc (receiver) which is x0, as
// expected by InvokeFunction.
ParameterCount actual(argc);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -1902,7 +1830,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
__ B(ge, true_label);
}
@@ -1979,7 +1907,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize x0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
__ Mov(arity_reg, arity);
// Invoke function.
@@ -2047,11 +1976,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->function()).is(x1));
- __ Mov(x0, Operand(instr->arity()));
-
// Change context.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+ __ Mov(x0, instr->arity());
+
// Load the code entry address
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
__ Call(x10);
@@ -2328,27 +2259,13 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
__ JumpIfSmi(input, false_label);
Register map = scratch2;
+ __ CompareObjectType(input, map, scratch1, JS_FUNCTION_TYPE);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- // We expect CompareObjectType to load the object instance type in scratch1.
- __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE);
- __ B(lt, false_label);
- __ B(eq, true_label);
- __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE);
__ B(eq, true_label);
} else {
- __ IsObjectJSObjectType(input, map, scratch1, false_label);
+ __ B(eq, false_label);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
{
UseScratchRegisterScope temps(masm());
@@ -2599,40 +2516,6 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register temp1 = x10;
- Register temp2 = x11;
- Smi* index = instr->index();
-
- DCHECK(object.is(result) && object.Is(x0));
- DCHECK(instr->IsMarkedAsCall());
-
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(temp1, Operand(stamp));
- __ Ldr(temp1, MemOperand(temp1));
- __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(temp1, temp2);
- __ B(ne, &runtime);
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ Bind(&done);
- }
-}
-
-
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
Deoptimizer::BailoutType type = instr->hydrogen()->type();
// TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
@@ -2839,8 +2722,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(object, x1, x1, JS_PROXY_TYPE);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
Label use_cache, call_runtime;
@@ -2853,7 +2736,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
@@ -2972,7 +2855,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
- Register const object_map = ToRegister(instr->scratch());
+ Register const object_map = ToRegister(instr->scratch1());
+ Register const object_instance_type = ToRegister(instr->scratch2());
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2987,6 +2871,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ Bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ Ldrb(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+
__ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ Cmp(object_prototype, prototype);
__ B(eq, instr->TrueLabel(chunk_));
@@ -3020,7 +2914,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(x1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3030,29 +2924,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- // Get the frame pointer for the calling frame.
- __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-
- EmitCompareAndBranch(
- instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -4582,7 +4453,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ Push(x0);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
@@ -4852,7 +4723,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(scratch1, scratch2);
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -5577,48 +5448,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // x7 = literals array.
- // x1 = regexp literal.
- // x0 = regexp literal clone.
- // x10-x12 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ LoadObject(x7, instr->hydrogen()->literals());
- __ Ldr(x1, FieldMemOperand(x7, literal_offset));
- __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
-
- // Create regexp literal using runtime function
- // Result will be in x0.
- __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ Mov(x11, Operand(instr->hydrogen()->pattern()));
- __ Mov(x10, Operand(instr->hydrogen()->flags()));
- __ Push(x7, x12, x11, x10);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ Mov(x1, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x0, Smi::FromInt(size));
- __ Push(x1, x0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ Pop(x1);
-
- __ Bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
-}
-
-
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object = ToRegister(instr->object());
@@ -5774,8 +5603,8 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ JumpIfSmi(value, false_label);
__ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(value, map, scratch, FIRST_SPEC_OBJECT_TYPE,
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
false_label, lt);
// Check for callable or undetectable objects => false.
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
@@ -5848,14 +5677,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
- __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
__ B(ge, &copy_receiver);
Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+ __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
__ B(&done);
__ Bind(&copy_receiver);
@@ -5950,7 +5779,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ Push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index f5eac7c88b..18856da154 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -7,11 +7,11 @@
#include "src/crankshaft/arm64/lithium-arm64.h"
+#include "src/ast/scopes.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -26,13 +26,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -201,9 +197,6 @@ class LCodeGen: public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
- void PopulateDeoptimizationData(Handle<Code> code);
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
MemOperand BuildSeqStringOperand(Register string,
Register temp,
LOperand* index,
@@ -314,6 +307,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -342,13 +340,9 @@ class LCodeGen: public LCodeGenBase {
void EnsureSpaceForLazyDeopt(int space_needed) override;
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table itself is
diff --git a/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
index 6128fbc973..ab3bff2edc 100644
--- a/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-escape-analysis.cc
@@ -142,6 +142,7 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
// necessary.
HValue* HEscapeAnalysisPhase::NewLoadReplacement(
HLoadNamedField* load, HValue* load_value) {
+ isolate()->counters()->crankshaft_escape_loads_replaced()->Increment();
HValue* replacement = load_value;
Representation representation = load->representation();
if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
@@ -319,6 +320,8 @@ void HEscapeAnalysisPhase::Run() {
for (int i = 0; i < max_fixpoint_iteration_count; i++) {
CollectCapturedValues();
if (captured_.is_empty()) break;
+ isolate()->counters()->crankshaft_escape_allocs_replaced()->Increment(
+ captured_.length());
PerformScalarReplacement();
captured_.Rewind(0);
}
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index 1e49202fe0..e2e026fb5f 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -776,7 +776,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kBlockEntry:
case HValue::kBoundsCheckBaseIndexInformation:
case HValue::kCallFunction:
- case HValue::kCallNew:
case HValue::kCallNewArray:
case HValue::kCallStub:
case HValue::kCapturedObject:
@@ -803,8 +802,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kHasInstanceTypeAndBranch:
case HValue::kInnerAllocatedObject:
case HValue::kInstanceOf:
- case HValue::kIsConstructCallAndBranch:
- case HValue::kHasInPrototypeChainAndBranch:
case HValue::kIsSmiAndBranch:
case HValue::kIsStringAndBranch:
case HValue::kIsUndetectableAndBranch:
@@ -819,7 +816,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kParameter:
case HValue::kPhi:
case HValue::kPushArguments:
- case HValue::kRegExpLiteral:
case HValue::kReturn:
case HValue::kSeqStringGetChar:
case HValue::kStoreCodeEntry:
@@ -853,11 +849,11 @@ bool HInstruction::CanDeoptimize() {
case HValue::kCheckSmi:
case HValue::kCheckValue:
case HValue::kClampToUint8:
- case HValue::kDateField:
case HValue::kDeoptimize:
case HValue::kDiv:
case HValue::kForInCacheArray:
case HValue::kForInPrepareMap:
+ case HValue::kHasInPrototypeChainAndBranch:
case HValue::kInvokeFunction:
case HValue::kLoadContextSlot:
case HValue::kLoadFunctionPrototype:
@@ -1526,7 +1522,7 @@ HValue* HChange::Canonicalize() {
HValue* HWrapReceiver::Canonicalize() {
if (HasNoUses()) return NULL;
- if (receiver()->type().IsJSObject()) {
+ if (receiver()->type().IsJSReceiver()) {
return receiver();
}
return this;
@@ -1622,7 +1618,7 @@ HValue* HUnaryMathOperation::Canonicalize() {
HValue* HCheckInstanceType::Canonicalize() {
- if ((check_ == IS_SPEC_OBJECT && value()->type().IsJSObject()) ||
+ if ((check_ == IS_JS_RECEIVER && value()->type().IsJSReceiver()) ||
(check_ == IS_JS_ARRAY && value()->type().IsJSArray()) ||
(check_ == IS_STRING && value()->type().IsString())) {
return value();
@@ -1641,9 +1637,9 @@ void HCheckInstanceType::GetCheckInterval(InstanceType* first,
InstanceType* last) {
DCHECK(is_interval_check());
switch (check_) {
- case IS_SPEC_OBJECT:
- *first = FIRST_SPEC_OBJECT_TYPE;
- *last = LAST_SPEC_OBJECT_TYPE;
+ case IS_JS_RECEIVER:
+ *first = FIRST_JS_RECEIVER_TYPE;
+ *last = LAST_JS_RECEIVER_TYPE;
return;
case IS_JS_ARRAY:
*first = *last = JS_ARRAY_TYPE;
@@ -1718,7 +1714,7 @@ HValue* HCheckValue::Canonicalize() {
const char* HCheckInstanceType::GetCheckName() const {
switch (check_) {
- case IS_SPEC_OBJECT: return "object";
+ case IS_JS_RECEIVER: return "object";
case IS_JS_ARRAY: return "array";
case IS_JS_DATE:
return "date";
@@ -3278,7 +3274,7 @@ bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
value()->type().IsNull() ||
value()->type().IsBoolean() ||
value()->type().IsUndefined() ||
- value()->type().IsJSObject()) {
+ value()->type().IsJSReceiver()) {
*block = SecondSuccessor();
return true;
}
@@ -4081,11 +4077,13 @@ HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
}
switch (op) {
case kMathExp:
- return H_CONSTANT_DOUBLE(fast_exp(d));
+ lazily_initialize_fast_exp(isolate);
+ return H_CONSTANT_DOUBLE(fast_exp(d, isolate));
case kMathLog:
return H_CONSTANT_DOUBLE(std::log(d));
case kMathSqrt:
- return H_CONSTANT_DOUBLE(fast_sqrt(d));
+ lazily_initialize_fast_sqrt(isolate);
+ return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate));
case kMathPowHalf:
return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
case kMathAbs:
@@ -4157,8 +4155,8 @@ HInstruction* HPower::New(Isolate* isolate, Zone* zone, HValue* context,
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
- double result = power_helper(c_left->DoubleValue(),
- c_right->DoubleValue());
+ double result =
+ power_helper(isolate, c_left->DoubleValue(), c_right->DoubleValue());
return H_CONSTANT_DOUBLE(std::isnan(result)
? std::numeric_limits<double>::quiet_NaN()
: result);
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index dfed6e32ce..13ada8c606 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -62,7 +62,6 @@ class LChunkBuilder;
V(CallWithDescriptor) \
V(CallJSFunction) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -86,7 +85,6 @@ class LChunkBuilder;
V(Constant) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -105,7 +103,6 @@ class LChunkBuilder;
V(InnerAllocatedObject) \
V(InstanceOf) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
@@ -131,7 +128,6 @@ class LChunkBuilder;
V(Power) \
V(Prologue) \
V(PushArguments) \
- V(RegExpLiteral) \
V(Return) \
V(Ror) \
V(Sar) \
@@ -778,7 +774,7 @@ class HValue : public ZoneObject {
bool ToStringOrToNumberCanBeObserved() const {
if (type().IsTaggedPrimitive()) return false;
- if (type().IsJSObject()) return true;
+ if (type().IsJSReceiver()) return true;
return !representation().IsSmiOrInteger32() && !representation().IsDouble();
}
@@ -2435,21 +2431,6 @@ class HCallFunction final : public HBinaryCall {
};
-class HCallNew final : public HBinaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
-
- HValue* context() { return first(); }
- HValue* constructor() { return second(); }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew)
-
- private:
- HCallNew(HValue* context, HValue* constructor, int argument_count)
- : HBinaryCall(context, constructor, argument_count) {}
-};
-
-
class HCallNewArray final : public HBinaryCall {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray, HValue*, int,
@@ -2885,7 +2866,7 @@ class HCheckValue final : public HUnaryOperation {
class HCheckInstanceType final : public HUnaryOperation {
public:
enum Check {
- IS_SPEC_OBJECT,
+ IS_JS_RECEIVER,
IS_JS_ARRAY,
IS_JS_DATE,
IS_STRING,
@@ -2903,10 +2884,9 @@ class HCheckInstanceType final : public HUnaryOperation {
HType CalculateInferredType() override {
switch (check_) {
- case IS_SPEC_OBJECT: return HType::JSObject();
+ case IS_JS_RECEIVER: return HType::JSReceiver();
case IS_JS_ARRAY: return HType::JSArray();
- case IS_JS_DATE:
- return HType::JSObject();
+ case IS_JS_DATE: return HType::JSObject();
case IS_STRING: return HType::String();
case IS_INTERNALIZED_STRING: return HType::String();
}
@@ -4565,20 +4545,6 @@ class HStringCompareAndBranch final : public HTemplateControlInstruction<2, 3> {
};
-class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch)
- private:
- HIsConstructCallAndBranch() {}
-};
-
-
class HHasInstanceTypeAndBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(
@@ -5715,15 +5681,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
return false;
}
- // Stores to old space allocations require no write barriers if the value is
- // an old space allocation.
- while (value->IsInnerAllocatedObject()) {
- value = HInnerAllocatedObject::cast(value)->base_object();
- }
- if (value->IsAllocate() &&
- !HAllocate::cast(value)->IsNewSpaceAllocation()) {
- return false;
- }
}
return true;
}
@@ -6972,7 +6929,7 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return FLAG_vector_stores; }
+ bool HasVectorAndSlot() const { return true; }
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
FeedbackVectorSlot slot) {
feedback_vector_ = vector;
@@ -7210,8 +7167,6 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
return feedback_vector_;
}
bool HasVectorAndSlot() const {
- DCHECK(!(FLAG_vector_stores && initialization_state_ != MEGAMORPHIC) ||
- !feedback_vector_.is_null());
return !feedback_vector_.is_null();
}
void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
@@ -7450,75 +7405,6 @@ class HStringCharFromCode final : public HTemplateInstruction<2> {
};
-template <int V>
-class HMaterializedLiteral : public HTemplateInstruction<V> {
- public:
- HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
- : literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
- this->set_representation(Representation::Tagged());
- }
-
- HMaterializedLiteral<V>(int index, int depth)
- : literal_index_(index), depth_(depth),
- allocation_site_mode_(DONT_TRACK_ALLOCATION_SITE) {
- this->set_representation(Representation::Tagged());
- }
-
- int literal_index() const { return literal_index_; }
- int depth() const { return depth_; }
- AllocationSiteMode allocation_site_mode() const {
- return allocation_site_mode_;
- }
-
- private:
- bool IsDeletable() const final { return true; }
-
- int literal_index_;
- int depth_;
- AllocationSiteMode allocation_site_mode_;
-};
-
-
-class HRegExpLiteral final : public HMaterializedLiteral<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
- Handle<FixedArray>,
- Handle<String>,
- Handle<String>,
- int);
-
- HValue* context() { return OperandAt(0); }
- Handle<FixedArray> literals() { return literals_; }
- Handle<String> pattern() { return pattern_; }
- Handle<String> flags() { return flags_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
-
- private:
- HRegExpLiteral(HValue* context,
- Handle<FixedArray> literals,
- Handle<String> pattern,
- Handle<String> flags,
- int literal_index)
- : HMaterializedLiteral<1>(literal_index, 0),
- literals_(literals),
- pattern_(pattern),
- flags_(flags) {
- SetOperandAt(0, context);
- SetAllSideEffects();
- set_type(HType::JSObject());
- }
-
- Handle<FixedArray> literals_;
- Handle<String> pattern_;
- Handle<String> flags_;
-};
-
-
class HTypeof final : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
@@ -7645,28 +7531,6 @@ class HToFastProperties final : public HUnaryOperation {
};
-class HDateField final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
-
- Smi* index() const { return index_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField)
-
- private:
- HDateField(HValue* date, Smi* index)
- : HUnaryOperation(date), index_(index) {
- set_representation(Representation::Tagged());
- }
-
- Smi* index_;
-};
-
-
class HSeqStringGetChar final : public HTemplateInstruction<2> {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.h b/deps/v8/src/crankshaft/hydrogen-osr.h
index 52c94dcad3..e2cfd30428 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.h
+++ b/deps/v8/src/crankshaft/hydrogen-osr.h
@@ -5,7 +5,7 @@
#ifndef V8_CRANKSHAFT_HYDROGEN_OSR_H_
#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/crankshaft/hydrogen.h"
#include "src/zone.h"
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index 8118aead9a..98337be052 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -7,7 +7,8 @@
#include <sstream>
#include "src/allocation-site-scopes.h"
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopeinfo.h"
#include "src/code-factory.h"
#include "src/crankshaft/hydrogen-bce.h"
#include "src/crankshaft/hydrogen-bch.h"
@@ -39,9 +40,8 @@
// GetRootConstructor
#include "src/ic/ic-inl.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
-#include "src/scopeinfo.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-codegen-ia32.h" // NOLINT
@@ -1861,11 +1861,9 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
NOT_TENURED, JS_ARRAY_TYPE);
// Initialize the JSRegExpResult header.
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HValue* native_context = Add<HLoadNamedField>(
- global_object, nullptr, HObjectAccess::ForJSGlobalObjectNativeContext());
+ context(), nullptr,
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
@@ -2084,7 +2082,7 @@ HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
// First check whether {receiver} is already a spec object (fast case).
IfBuilder receiver_is_not_spec_object(this);
receiver_is_not_spec_object.If<HCompareNumericAndBranch>(
- receiver_instance_type, Add<HConstant>(FIRST_SPEC_OBJECT_TYPE),
+ receiver_instance_type, Add<HConstant>(FIRST_JS_RECEIVER_TYPE),
Token::LT);
receiver_is_not_spec_object.Then();
{
@@ -3265,13 +3263,9 @@ void HGraphBuilder::BuildCreateAllocationMemento(
HInstruction* HGraphBuilder::BuildGetNativeContext() {
- // Get the global object, then the native context
- HValue* global_object = Add<HLoadNamedField>(
+ return Add<HLoadNamedField>(
context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- return Add<HLoadNamedField>(global_object, nullptr,
- HObjectAccess::ForObservableJSObjectOffset(
- JSGlobalObject::kNativeContextOffset));
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
}
@@ -3279,12 +3273,9 @@ HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
// Get the global object, then the native context
HInstruction* context = Add<HLoadNamedField>(
closure, nullptr, HObjectAccess::ForFunctionContextPointer());
- HInstruction* global_object = Add<HLoadNamedField>(
+ return Add<HLoadNamedField>(
context, nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- JSGlobalObject::kNativeContextOffset);
- return Add<HLoadNamedField>(global_object, nullptr, access);
+ HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
}
@@ -3436,16 +3427,9 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
? builder()->BuildGetNativeContext(constructor_function_)
: builder()->BuildGetNativeContext();
- HInstruction* index = builder()->Add<HConstant>(
- static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
-
- HInstruction* map_array = builder()->Add<HLoadKeyed>(
- native_context, index, nullptr, nullptr, FAST_ELEMENTS);
-
- HInstruction* kind_index = builder()->Add<HConstant>(kind_);
-
- return builder()->Add<HLoadKeyed>(map_array, kind_index, nullptr, nullptr,
- FAST_ELEMENTS);
+ HObjectAccess access =
+ HObjectAccess::ForContextSlot(Context::ArrayMapIndex(kind_));
+ return builder()->Add<HLoadNamedField>(native_context, nullptr, access);
}
@@ -3556,12 +3540,7 @@ HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- JSGlobalObject::kNativeContextOffset);
- HValue* native_context = Add<HLoadNamedField>(global_object, nullptr, access);
+ HValue* native_context = BuildGetNativeContext();
HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
return Add<HLoadNamedField>(native_context, nullptr, function_access);
}
@@ -4704,6 +4683,12 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
environment()->Bind(scope->arguments(), graph()->GetArgumentsObject());
}
+ int rest_index;
+ Variable* rest = scope->rest_parameter(&rest_index);
+ if (rest) {
+ return Bailout(kRestParameter);
+ }
+
if (scope->this_function_var() != nullptr ||
scope->new_target_var() != nullptr) {
return Bailout(kSuperReference);
@@ -4983,8 +4968,8 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
HValue* receiver = environment()->arguments_environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
New<HHasInstanceTypeAndBranch>(return_value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ FIRST_JS_RECEIVER_TYPE,
+ LAST_JS_RECEIVER_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
@@ -5064,7 +5049,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// Generate a compare and branch.
- CHECK_ALIVE(VisitForValue(clause->label()));
+ CHECK_BAILOUT(VisitForValue(clause->label()));
+ if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
HValue* label_value = Pop();
Type* label_type = clause->label()->bounds().lower;
@@ -5720,8 +5706,8 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
} else {
HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ BuildGetNativeContext(), nullptr,
+ HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
global_object, variable->name(), ast_context()->typeof_mode());
instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
@@ -5780,12 +5766,14 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- Handle<LiteralsArray> literals(closure->literals());
- HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
- expr->pattern(),
- expr->flags(),
- expr->literal_index());
+ Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ HValue* values[] = {
+ context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
+ Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
+ HConstant* stub_value = Add<HConstant>(callable.code());
+ HInstruction* instr = New<HCallWithDescriptor>(
+ stub_value, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)), NORMAL_CALL);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5896,13 +5884,11 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
site_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
- Handle<LiteralsArray> closure_literals(closure->literals(), isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
- Add<HPushArguments>(Add<HConstant>(closure_literals),
- Add<HConstant>(literal_index),
+ Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
Add<HConstant>(constant_properties),
Add<HConstant>(flags));
@@ -6065,10 +6051,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
- Add<HPushArguments>(Add<HConstant>(literals),
- Add<HConstant>(literal_index),
- Add<HConstant>(constants),
- Add<HConstant>(flags));
+ Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
+ Add<HConstant>(constants), Add<HConstant>(flags));
Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
@@ -6080,8 +6064,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
- // The literal index is on the stack, too.
- Push(Add<HConstant>(expr->literal_index()));
HInstruction* elements = NULL;
@@ -6123,7 +6105,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Add<HSimulate>(expr->GetIdForElement(i));
}
- Drop(1); // array literal index
return ast_context()->ReturnValue(Pop());
}
@@ -6939,16 +6920,14 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
} else {
HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
+ BuildGetNativeContext(), nullptr,
+ HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
HStoreNamedGeneric* instr =
Add<HStoreNamedGeneric>(global_object, var->name(), value,
function_language_mode(), PREMONOMORPHIC);
- if (FLAG_vector_stores) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- instr->SetVectorAndSlot(vector, slot);
- }
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ instr->SetVectorAndSlot(vector, slot);
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7065,6 +7044,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
+
VariableProxy* proxy = expr->target()->AsVariableProxy();
Property* prop = expr->target()->AsProperty();
DCHECK(proxy == NULL || prop == NULL);
@@ -7080,11 +7060,11 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
- if (expr->op() != Token::INIT_CONST) {
+ if (expr->op() != Token::INIT) {
return Bailout(kNonInitializerAssignmentToConst);
}
} else if (var->mode() == CONST_LEGACY) {
- if (expr->op() != Token::INIT_CONST_LEGACY) {
+ if (expr->op() != Token::INIT) {
CHECK_ALIVE(VisitForValue(expr->value()));
return ast_context()->ReturnValue(Pop());
}
@@ -7158,14 +7138,13 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
default:
mode = HStoreContextSlot::kNoCheck;
}
- } else if (expr->op() == Token::INIT_VAR ||
- expr->op() == Token::INIT_LET ||
- expr->op() == Token::INIT_CONST) {
- mode = HStoreContextSlot::kNoCheck;
} else {
- DCHECK(expr->op() == Token::INIT_CONST_LEGACY);
-
- mode = HStoreContextSlot::kCheckIgnoreAssignment;
+ DCHECK_EQ(Token::INIT, expr->op());
+ if (var->mode() == CONST_LEGACY) {
+ mode = HStoreContextSlot::kCheckIgnoreAssignment;
+ } else {
+ mode = HStoreContextSlot::kNoCheck;
+ }
}
HValue* context = BuildContextChainWalk(var);
@@ -7278,9 +7257,8 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
result->SetVectorAndSlot(vector, slot);
return result;
} else {
- if (FLAG_vector_stores &&
- current_feedback_vector()->GetKind(slot) ==
- FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ if (current_feedback_vector()->GetKind(slot) ==
+ FeedbackVectorSlotKind::KEYED_STORE_IC) {
// It's possible that a keyed store of a constant string was converted
// to a named store. Here, at the last minute, we need to make sure to
// use a generic Keyed Store if we are using the type vector, because
@@ -7296,11 +7274,9 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
object, name, value, function_language_mode(), PREMONOMORPHIC);
- if (FLAG_vector_stores) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
- }
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
return result;
}
}
@@ -7325,11 +7301,9 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
} else {
HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
object, key, value, function_language_mode(), PREMONOMORPHIC);
- if (FLAG_vector_stores) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
- }
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+ result->SetVectorAndSlot(vector, slot);
return result;
}
}
@@ -7647,7 +7621,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (access_type == LOAD) {
KeyedLoadICNexus nexus(vector, slot);
name = nexus.FindFirstName();
- } else if (FLAG_vector_stores) {
+ } else {
KeyedStoreICNexus nexus(vector, slot);
name = nexus.FindFirstName();
}
@@ -7890,7 +7864,7 @@ void HOptimizedGraphBuilder::PushLoad(Property* expr,
void HOptimizedGraphBuilder::BuildLoad(Property* expr,
BailoutId ast_id) {
HInstruction* instr = NULL;
- if (expr->IsStringAccess()) {
+ if (expr->IsStringAccess() && expr->GetKeyType() == ELEMENT) {
HValue* index = Pop();
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
@@ -7991,16 +7965,15 @@ HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
HValue* fun, HValue* context,
int argument_count, HValue* expected_param_count) {
- ArgumentAdaptorDescriptor descriptor(isolate());
+ HValue* new_target = graph()->GetConstantUndefined();
HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = { context, fun, arity, expected_param_count };
+ HValue* op_vals[] = {context, fun, new_target, arity, expected_param_count};
- Handle<Code> adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- HConstant* adaptor_value = Add<HConstant>(adaptor);
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+ HConstant* stub = Add<HConstant>(callable.code());
- return New<HCallWithDescriptor>(adaptor_value, argument_count, descriptor,
+ return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
Vector<HValue*>(op_vals, arraysize(op_vals)));
}
@@ -8390,6 +8363,14 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TraceInline(target, caller, "target has context-allocated variables");
return false;
}
+
+ int rest_index;
+ Variable* rest = target_info.scope()->rest_parameter(&rest_index);
+ if (rest) {
+ TraceInline(target, caller, "target uses rest parameters");
+ return false;
+ }
+
FunctionLiteral* function = target_info.literal();
// The following conditions must be checked again after re-parsing, because
@@ -8414,6 +8395,13 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
}
+ // Unsupported variable references present.
+ if (function->scope()->this_function_var() != nullptr ||
+ function->scope()->new_target_var() != nullptr) {
+ TraceInline(target, caller, "target uses new target or this function");
+ return false;
+ }
+
// All declarations must be inlineable.
ZoneList<Declaration*>* decls = target_info.scope()->declarations();
int decl_count = decls->length();
@@ -8715,7 +8703,7 @@ bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
// static
bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
Handle<Map> receiver_map) {
- return !receiver_map.is_null() &&
+ return !receiver_map.is_null() && receiver_map->prototype()->IsJSObject() &&
receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
!receiver_map->is_dictionary_map() && !receiver_map->is_observed() &&
@@ -9941,13 +9929,16 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
Handle<JSFunction> constructor = expr->target();
+ DCHECK(
+ constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric) ||
+ constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubApi));
HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
- if (constructor->IsInobjectSlackTrackingInProgress()) {
- constructor->CompleteInobjectSlackTracking();
- }
+ constructor->CompleteInobjectSlackTrackingIfActive();
// Calculate instance size from initial map of constructor.
DCHECK(constructor->has_initial_map());
@@ -10004,18 +9995,21 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
instr = prev_instr;
} while (instr != check);
environment()->SetExpressionStackAt(receiver_index, function);
- HInstruction* call =
- PreProcessCall(New<HCallNew>(function, argument_count));
- return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
if (TryHandleArrayCallNew(expr, function)) return;
-
- HInstruction* call =
- PreProcessCall(New<HCallNew>(function, argument_count));
- return ast_context()->ReturnInstruction(call, expr->id());
}
+
+ HValue* arity = Add<HConstant>(argument_count - 1);
+ HValue* op_vals[] = {context(), function, function, arity};
+ Callable callable = CodeFactory::Construct(isolate());
+ HConstant* stub = Add<HConstant>(callable.code());
+ PushArgumentsFromEnvironment(argument_count);
+ HInstruction* construct =
+ New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+ Vector<HValue*>(op_vals, arraysize(op_vals)));
+ return ast_context()->ReturnInstruction(construct, expr->id());
}
@@ -10044,11 +10038,7 @@ HValue* HGraphBuilder::BuildAllocateEmptyArrayBuffer(HValue* byte_length) {
BuildAllocate(Add<HConstant>(JSArrayBuffer::kSizeWithInternalFields),
HType::JSObject(), JS_ARRAY_BUFFER_TYPE, HAllocationMode());
- HValue* global_object = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HValue* native_context = Add<HLoadNamedField>(
- global_object, nullptr, HObjectAccess::ForJSGlobalObjectNativeContext());
+ HValue* native_context = BuildGetNativeContext();
Add<HStoreNamedField>(
result, HObjectAccess::ForMap(),
Add<HLoadNamedField>(
@@ -11482,7 +11472,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
- if (!constructor->map()->has_non_instance_prototype()) {
+ if (constructor->IsConstructor() &&
+ !constructor->map()->has_non_instance_prototype()) {
JSFunction::EnsureHasInitialMap(constructor);
DCHECK(constructor->has_initial_map());
Handle<Map> initial_map(constructor->initial_map(), isolate());
@@ -11565,7 +11556,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
} else {
BuildCheckHeapObject(operand_to_check);
Add<HCheckInstanceType>(operand_to_check,
- HCheckInstanceType::IS_SPEC_OBJECT);
+ HCheckInstanceType::IS_JS_RECEIVER);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return result;
@@ -11731,6 +11722,11 @@ void HOptimizedGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
}
+HValue* HOptimizedGraphBuilder::AddThisFunction() {
+ return AddInstruction(BuildThisFunction());
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
@@ -12153,6 +12149,12 @@ void HOptimizedGraphBuilder::VisitExportDeclaration(
}
+void HOptimizedGraphBuilder::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ CHECK_ALIVE(Visit(node->expression()));
+}
+
+
// Generators for inline runtime functions.
// Support for types.
void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
@@ -12164,14 +12166,14 @@ void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsJSReceiver(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
New<HHasInstanceTypeAndBranch>(value,
- FIRST_SPEC_OBJECT_TYPE,
- LAST_SPEC_OBJECT_TYPE);
+ FIRST_JS_RECEIVER_TYPE,
+ LAST_JS_RECEIVER_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -12180,8 +12182,8 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE);
+ HHasInstanceTypeAndBranch* result = New<HHasInstanceTypeAndBranch>(
+ value, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@@ -12334,10 +12336,7 @@ void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
HValue* instance_type =
Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
if_proxy.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(FIRST_JS_PROXY_TYPE), Token::GTE);
- if_proxy.And();
- if_proxy.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(LAST_JS_PROXY_TYPE), Token::LTE);
+ instance_type, Add<HConstant>(JS_PROXY_TYPE), Token::EQ);
if_proxy.CaptureContinuation(&continuation);
return ast_context()->ReturnContinuation(&continuation, call->id());
@@ -12373,22 +12372,6 @@ void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
}
-// Support for construct call checks.
-void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 0);
- if (function_state()->outer() != NULL) {
- // We are generating graph for inlined function.
- HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
- ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
- return ast_context()->ReturnValue(value);
- } else {
- return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(),
- call->id());
- }
-}
-
-
// Support for arguments.length and arguments[?].
void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
DCHECK(call->arguments()->length() == 0);
@@ -12480,25 +12463,6 @@ void HOptimizedGraphBuilder::GenerateIsDate(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateThrowNotDateError(CallRuntime* call) {
- DCHECK_EQ(0, call->arguments()->length());
- Add<HDeoptimize>(Deoptimizer::kNotADateObject, Deoptimizer::EAGER);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* date = Pop();
- HDateField* result = New<HDateField>(date, index);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
DCHECK(call->arguments()->length() == 3);
@@ -12627,15 +12591,6 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateStringGetLength(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* string = Pop();
- HInstruction* result = BuildLoadStringLength(string);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
DCHECK_EQ(4, call->arguments()->length());
@@ -12784,18 +12739,6 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateHasInPrototypeChain(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* prototype = Pop();
- HValue* object = Pop();
- HHasInPrototypeChainAndBranch* result =
- New<HHasInPrototypeChainAndBranch>(object, prototype);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 62e55c3efe..40a18347be 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -7,11 +7,11 @@
#include "src/accessors.h"
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/compiler.h"
#include "src/crankshaft/hydrogen-instructions.h"
-#include "src/scopes.h"
#include "src/zone.h"
namespace v8 {
@@ -2199,15 +2199,12 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsTypedArray) \
F(IsRegExp) \
F(IsJSProxy) \
- F(IsConstructCall) \
F(Call) \
F(ArgumentsLength) \
F(Arguments) \
F(ValueOf) \
F(SetValueOf) \
F(IsDate) \
- F(DateField) \
- F(ThrowNotDateError) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
@@ -2219,7 +2216,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(ToLength) \
F(ToNumber) \
F(IsFunction) \
- F(IsSpecObject) \
+ F(IsJSReceiver) \
F(MathPow) \
F(IsMinusZero) \
F(HasCachedArrayIndex) \
@@ -2234,7 +2231,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
- F(HasInPrototypeChain) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
F(DataViewInitialize) \
@@ -2267,8 +2263,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(CreateIterResultObject) \
/* Arrays */ \
F(HasFastPackedElements) \
- /* Strings */ \
- F(StringGetLength) \
/* JSValue */ \
F(JSValueGetValue)
@@ -2827,6 +2821,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* BuildContextChainWalk(Variable* var);
+ HValue* AddThisFunction();
HInstruction* BuildThisFunction();
HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 7b05078a9b..4ec33ab146 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -171,7 +171,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
}
@@ -245,7 +245,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(edi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -254,7 +254,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -880,60 +880,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1729,37 +1675,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(eax));
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2144,7 +2059,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
@@ -2495,29 +2410,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(equal, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2597,6 +2494,15 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, equal);
@@ -2673,7 +2579,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) RestoreCallerDoubles();
if (dynamic_frame_alignment_) {
@@ -3182,16 +3088,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
- const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(receiver, Operand(receiver, global_offset));
- const int proxy_offset = JSGlobalObject::kGlobalProxyOffset;
- __ mov(receiver, FieldOperand(receiver, proxy_offset));
+ __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
__ bind(&receiver_ok);
}
@@ -3232,7 +3136,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3273,7 +3178,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3291,7 +3196,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize eax to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
// Invoke function directly.
@@ -3354,11 +3260,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(eax, instr->arity());
-
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
+ __ mov(eax, instr->arity());
+
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
@@ -3739,7 +3647,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3778,19 +3686,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->constructor()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // No cell in ebx for construct type feedback in optimized code
- __ mov(ebx, isolate()->factory()->undefined_value());
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ Move(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->constructor()).is(edi));
@@ -3814,7 +3709,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3829,17 +3724,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5200,58 +5095,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- Label materialized;
- // Registers will be used as follows:
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- // esi = context.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(ecx, instr->hydrogen()->literals());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated, Label::kNear);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5334,8 +5177,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ JumpIfSmi(input, false_label, false_distance);
__ cmp(input, factory()->null_value());
__ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -5360,32 +5203,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp);
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5514,8 +5331,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ test(eax, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5527,7 +5344,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -5647,7 +5464,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index 03f6a89b35..06a3e10bf2 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -5,13 +5,13 @@
#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
+#include "src/ast/scopes.h"
#include "src/base/logging.h"
#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
#include "src/crankshaft/ia32/lithium-ia32.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -26,15 +26,11 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -181,6 +177,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, argc, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
@@ -216,9 +217,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
@@ -277,10 +275,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -318,15 +312,11 @@ class LCodeGen: public LCodeGenBase {
void MakeSureStackPagesMapped(int offset);
#endif
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
index b90f6bb21e..c3284df882 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
@@ -369,7 +369,9 @@ void LGapResolver::EmitSwap(int index) {
// Register-register.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchg(dst, src);
+ __ push(src);
+ __ mov(src, dst);
+ __ pop(dst);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index b4186ba573..a0cb93975f 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -319,15 +319,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
context()->PrintTo(stream);
@@ -1029,7 +1020,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
LOperand* temp = TempRegister();
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ return AssignEnvironment(result);
}
@@ -1252,14 +1245,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
@@ -1801,14 +1786,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2500,13 +2477,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2614,12 +2584,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index 08c051ad52..ab7a4b5516 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -37,7 +37,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -65,7 +64,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -93,7 +91,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -133,7 +130,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1113,19 +1109,6 @@ class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1367,27 +1350,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index)
- : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- Smi* index() const { return index_; }
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1908,25 +1870,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2521,19 +2464,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index 41f78cd183..5bd1e6a9b8 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -53,8 +53,12 @@ LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
+ deoptimizations_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
- last_lazy_deopt_pc_(0) {}
+ translations_(info->zone()),
+ inlined_function_count_(0),
+ last_lazy_deopt_pc_(0),
+ osr_pc_offset_(-1) {}
bool LCodeGenBase::GenerateBody() {
@@ -280,6 +284,68 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
}
+void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
+ int length = deoptimizations_.length();
+ if (length == 0) return;
+ Handle<DeoptimizationInputData> data =
+ DeoptimizationInputData::New(isolate(), length, TENURED);
+
+ Handle<ByteArray> translations =
+ translations_.CreateByteArray(isolate()->factory());
+ data->SetTranslationByteArray(*translations);
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
+ data->SetWeakCellCache(Smi::FromInt(0));
+
+ Handle<FixedArray> literals =
+ factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+ {
+ AllowDeferredHandleDereference copy_handles;
+ for (int i = 0; i < deoptimization_literals_.length(); i++) {
+ literals->set(i, *deoptimization_literals_[i]);
+ }
+ data->SetLiteralArray(*literals);
+ }
+
+ data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+ // Populate the deoptimization entries.
+ for (int i = 0; i < length; i++) {
+ LEnvironment* env = deoptimizations_[i];
+ data->SetAstId(i, env->ast_id());
+ data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+ data->SetArgumentsStackHeight(i,
+ Smi::FromInt(env->arguments_stack_height()));
+ data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ }
+ code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+ DCHECK_EQ(0, deoptimization_literals_.length());
+ for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(function);
+ }
+ inlined_function_count_ = deoptimization_literals_.length();
+
+ // Define deoptimization literals for all unoptimized code objects of inlined
+ // functions. This ensures unoptimized code is kept alive by optimized code.
+ AllowDeferredHandleDereference allow_shared_function_info_dereference;
+ for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
+ DefineDeoptimizationLiteral(handle(function->code()));
+ }
+}
+
+
Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
diff --git a/deps/v8/src/crankshaft/lithium-codegen.h b/deps/v8/src/crankshaft/lithium-codegen.h
index 97a0722736..b1f7dac2e5 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.h
+++ b/deps/v8/src/crankshaft/lithium-codegen.h
@@ -53,6 +53,9 @@ class LCodeGenBase BASE_EMBEDDED {
Translation* translation);
int DefineDeoptimizationLiteral(Handle<Object> literal);
+ void PopulateDeoptimizationData(Handle<Code> code);
+ void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
// Check that an environment assigned via AssignEnvironment is actually being
// used. Redundant assignments keep things alive longer than necessary, and
// consequently lead to worse code, so it's important to minimize this.
@@ -74,8 +77,12 @@ class LCodeGenBase BASE_EMBEDDED {
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
+ ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ TranslationBuffer translations_;
+ int inlined_function_count_;
int last_lazy_deopt_pc_;
+ int osr_pc_offset_;
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index b4771c0557..82ad6962be 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -4,7 +4,7 @@
#include "src/crankshaft/lithium.h"
-#include "src/scopes.h"
+#include "src/ast/scopes.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
@@ -406,54 +406,6 @@ Representation LChunk::LookupLiteralRepresentation(
}
-static void AddWeakObjectToCodeDependency(Isolate* isolate,
- Handle<HeapObject> object,
- Handle<Code> code) {
- Handle<WeakCell> cell = Code::WeakCellFor(code);
- Heap* heap = isolate->heap();
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
- dep = DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
- heap->AddWeakObjectToCodeDependency(object, dep);
-}
-
-
-void LChunk::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const {
- DCHECK(code->is_optimized_code());
- ZoneList<Handle<Map> > maps(1, zone());
- ZoneList<Handle<HeapObject> > objects(1, zone());
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::CELL &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
- objects.Add(Handle<HeapObject>(it.rinfo()->target_cell()), zone());
- } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
- if (it.rinfo()->target_object()->IsMap()) {
- Handle<Map> map(Map::cast(it.rinfo()->target_object()));
- maps.Add(map, zone());
- } else {
- Handle<HeapObject> object(
- HeapObject::cast(it.rinfo()->target_object()));
- objects.Add(object, zone());
- }
- }
- }
- for (int i = 0; i < maps.length(); i++) {
- if (maps.at(i)->dependent_code()->number_of_entries(
- DependentCode::kWeakCodeGroup) == 0) {
- isolate()->heap()->AddRetainedMap(maps.at(i));
- }
- Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code);
- }
- for (int i = 0; i < objects.length(); i++) {
- AddWeakObjectToCodeDependency(isolate(), objects.at(i), code);
- }
- code->set_can_have_weak_objects(true);
-}
-
-
void LChunk::CommitDependencies(Handle<Code> code) const {
if (!code->is_optimized_code()) return;
HandleScope scope(isolate());
@@ -471,7 +423,6 @@ void LChunk::CommitDependencies(Handle<Code> code) const {
}
info_->dependencies()->Commit(code);
- RegisterWeakObjectsInOptimizedCode(code);
}
@@ -503,7 +454,8 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
Handle<Code> LChunk::Codegen() {
- MacroAssembler assembler(info()->isolate(), NULL, 0);
+ MacroAssembler assembler(info()->isolate(), NULL, 0,
+ CodeObjectRequired::kYes);
LOG_CODE_EVENT(info()->isolate(),
CodeStartLinePosInfoRecordEvent(
assembler.positions_recorder()));
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
index 126517e2eb..10e980e983 100644
--- a/deps/v8/src/crankshaft/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -690,7 +690,6 @@ class LChunk : public ZoneObject {
int spill_slot_count_;
private:
- void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const;
void CommitDependencies(Handle<Code> code) const;
CompilationInfo* info_;
diff --git a/deps/v8/src/crankshaft/mips/OWNERS b/deps/v8/src/crankshaft/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/crankshaft/mips/OWNERS
+++ b/deps/v8/src/crankshaft/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index a82b262dbb..2414f0d61c 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -145,7 +145,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
}
@@ -189,7 +189,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(a1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -198,7 +198,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -217,7 +217,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sw(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
@@ -841,60 +841,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1430,9 +1376,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
switch (constant) {
case -1:
if (overflow) {
- __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
- Operand(zero_reg));
+ Label no_overflow;
+ __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow);
} else {
__ Subu(result, zero_reg, left);
}
@@ -1658,21 +1605,19 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
}
} else { // can_overflow.
- Register overflow = scratch0();
- Register scratch = scratch1();
+ Register scratch = scratch0();
+ Label no_overflow_label;
if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, scratch);
- __ SubuAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
+ __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
+ Operand(right_reg), &no_overflow_label);
} else {
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
+ __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -1690,20 +1635,6 @@ void LCodeGen::DoConstantS(LConstantS* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
DCHECK(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
-#if V8_HOST_ARCH_IA32
- // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
- // builds.
- uint64_t bits = instr->bits();
- if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
- V8_UINT64_C(0x7FF0000000000000)) {
- uint32_t lo = static_cast<uint32_t>(bits);
- uint32_t hi = static_cast<uint32_t>(bits >> 32);
- __ li(at, Operand(lo));
- __ li(scratch0(), Operand(hi));
- __ Move(result, at, scratch0());
- return;
- }
-#endif
double v = instr->value();
__ Move(result, v);
}
@@ -1728,39 +1659,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch, Operand(stamp));
- __ lw(scratch, MemOperand(scratch));
- __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch, Operand(scratch0()));
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ li(a1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1854,21 +1752,19 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
}
} else { // can_overflow.
- Register overflow = scratch0();
Register scratch = scratch1();
+ Label no_overflow_label;
if (right->IsStackSlot()) {
Register right_reg = EmitLoadRegister(right, scratch);
- __ AdduAndCheckForOverflow(ToRegister(result),
- ToRegister(left),
- right_reg,
- overflow); // Reg at also used as scratch.
+ __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
+ Operand(right_reg), &no_overflow_label);
} else {
DCHECK(right->IsRegister() || right->IsConstantOperand());
- __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
- ToOperand(right), overflow, scratch);
+ __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+ &no_overflow_label, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
- Operand(zero_reg));
+ DeoptimizeIf(al, instr);
+ __ bind(&no_overflow_label);
}
}
@@ -2153,7 +2049,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2519,31 +2415,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
-
+ __ GetObjectType(input, temp, temp2);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- __ GetObjectType(input, temp, temp2);
- __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ GetObjectType(input, temp, temp2);
- __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
Register instance_type = scratch1();
DCHECK(!instance_type.is(temp));
@@ -2609,6 +2487,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = scratch1();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2619,10 +2498,25 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ SmiTst(object, at);
EmitFalseBranch(instr, eq, at, Operand(zero_reg));
}
+
// Loop through the {object}s prototype chain looking for the {prototype}.
__ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(object_instance_type, object_instance_type,
+ Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ Operand(zero_reg));
+ // Deoptimize for proxies.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
__ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -2663,7 +2557,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(v0);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
@@ -2741,7 +2635,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ lw(result, ContextOperand(context, instr->slot_index()));
+ __ lw(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2761,7 +2655,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3236,14 +3130,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
__ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ lw(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ lw(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+ __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3302,7 +3195,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3345,7 +3239,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3364,7 +3258,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize a0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
// Invoke function.
@@ -3722,7 +3617,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3772,11 +3667,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(a0, Operand(instr->arity()));
-
// Change context.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ li(a0, Operand(instr->arity()));
+
// Load the code entry address
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -3815,19 +3712,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- // No cell in a2 for construct type feedback in optimized code
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -3850,7 +3734,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3864,17 +3748,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4517,9 +4401,7 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- FPURegister dbl_scratch = double_scratch0();
- __ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
+ __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
}
@@ -4604,8 +4486,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
__ mtc1(src, dbl_scratch);
__ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ __ Cvt_d_uw(dbl_scratch, src, f22);
}
if (FLAG_inline_new) {
@@ -5333,59 +5214,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // t3 = literals array.
- // a1 = regexp literal.
- // a0 = regexp literal clone.
- // a2 and t0-t2 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ li(t3, instr->hydrogen()->literals());
- __ lw(a1, FieldMemOperand(t3, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a1, Operand(at));
-
- // Create regexp literal using runtime function
- // Result will be in v0.
- __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(t1, Operand(instr->hydrogen()->pattern()));
- __ li(t0, Operand(instr->hydrogen()->flags()));
- __ Push(t3, t2, t1, t0);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(a1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(a1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ lw(a3, FieldMemOperand(a1, i));
- __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
- __ sw(a3, FieldMemOperand(v0, i));
- __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
- __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5493,9 +5321,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
@@ -5527,34 +5355,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
-
- EmitBranch(instr, eq, temp1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, temp2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5685,10 +5485,10 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ And(at, object, kSmiTagMask);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
__ GetObjectType(object, a1, a1);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(LAST_JS_PROXY_TYPE));
+ Operand(JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
@@ -5702,7 +5502,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
@@ -5826,7 +5626,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ li(at, scope_info);
__ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index 8fc78a96d4..160ab9a665 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -5,12 +5,12 @@
#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/crankshaft/mips/lithium-mips.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -24,13 +24,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -201,6 +197,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -235,9 +236,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
@@ -310,10 +308,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -346,13 +340,9 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index d65d5582c0..a9978e1068 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -305,13 +305,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -993,7 +986,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1221,14 +1216,6 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1785,14 +1772,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2402,13 +2381,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2503,12 +2475,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index e064edd6b1..880d243312 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -130,7 +127,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1340,25 +1336,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1858,25 +1835,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2476,19 +2434,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2533,19 +2478,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
diff --git a/deps/v8/src/crankshaft/mips64/OWNERS b/deps/v8/src/crankshaft/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/crankshaft/mips64/OWNERS
+++ b/deps/v8/src/crankshaft/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index a615030fae..29d19ee809 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -120,7 +120,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
frame_is_built_ = true;
}
@@ -164,7 +164,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(a1);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -173,7 +173,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -192,7 +192,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sd(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
@@ -830,60 +830,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1810,39 +1756,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch, Operand(stamp));
- __ ld(scratch, MemOperand(scratch));
- __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch, Operand(scratch0()));
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ li(a1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2254,7 +2167,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
- ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+ ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
if (expected.Contains(ToBooleanStub::STRING)) {
@@ -2624,27 +2537,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ GetObjectType(input, temp, temp2);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
- __ GetObjectType(input, temp, temp2);
- __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+ __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ GetObjectType(input, temp, temp2);
- __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2715,6 +2612,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = scratch1();
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2730,6 +2628,19 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(object_instance_type, object_instance_type,
+ Operand(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ Operand(zero_reg));
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
__ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
@@ -2771,7 +2682,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(v0);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
@@ -2849,7 +2760,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ld(result, ContextOperand(context, instr->slot_index()));
+ __ ld(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2869,7 +2780,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3405,14 +3316,13 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ GetObjectType(receiver, scratch, scratch);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
__ bind(&global_object);
__ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ld(result,
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ ld(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+ __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3471,7 +3381,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is a0, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3514,7 +3425,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3533,7 +3444,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize a0 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
__ li(a0, Operand(arity));
// Invoke function.
@@ -3910,7 +3822,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3960,11 +3872,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(a0, Operand(instr->arity()));
-
// Change context.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ li(a0, Operand(instr->arity()));
+
// Load the code entry address
__ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
@@ -4003,19 +3917,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(a1));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- __ li(a0, Operand(instr->arity()));
- // No cell in a2 for construct type feedback in optimized code
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(a1));
@@ -4038,7 +3939,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4052,17 +3953,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4736,7 +4637,7 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
FPURegister dbl_scratch = double_scratch0();
__ mtc1(ToRegister(input), dbl_scratch);
- __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); // TODO(plind): f22?
+ __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
}
@@ -4793,7 +4694,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
__ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
__ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch);
}
if (FLAG_inline_new) {
@@ -5518,59 +5419,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // a7 = literals array.
- // a1 = regexp literal.
- // a0 = regexp literal clone.
- // a2 and a4-a6 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ li(a7, instr->hydrogen()->literals());
- __ ld(a1, FieldMemOperand(a7, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a1, Operand(at));
-
- // Create regexp literal using runtime function
- // Result will be in v0.
- __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a5, Operand(instr->hydrogen()->pattern()));
- __ li(a4, Operand(instr->hydrogen()->flags()));
- __ Push(a7, a6, a5, a4);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(a1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a1, a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(a1);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ld(a3, FieldMemOperand(a1, i));
- __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
- __ sd(a3, FieldMemOperand(v0, i));
- __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
- __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5678,9 +5526,9 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(input, scratch, scratch1());
- __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
@@ -5713,34 +5561,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
-
- EmitBranch(instr, eq, temp1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, temp2,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5872,10 +5692,10 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ And(at, object, kSmiTagMask);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
__ GetObjectType(object, a1, a1);
DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(LAST_JS_PROXY_TYPE));
+ Operand(JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
@@ -5889,7 +5709,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
DCHECK(result.is(v0));
@@ -6011,7 +5831,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ li(at, scope_info);
__ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index 3d9433be48..efadb0f26b 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -5,12 +5,12 @@
#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS_H_
+#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/crankshaft/mips64/lithium-mips64.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -24,13 +24,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -203,6 +199,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
@@ -237,9 +238,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
@@ -313,10 +311,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -349,13 +343,9 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index f0fba39036..129f61587f 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -305,13 +305,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -993,7 +986,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1221,14 +1216,6 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), a1);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), a1);
@@ -1791,14 +1778,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), a0);
- LDateField* result =
- new(zone()) LDateField(object, FixedTemp(a1), instr->index());
- return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2407,13 +2386,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2508,12 +2480,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index fb0e3cba72..01dc234c5a 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -35,7 +35,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -63,7 +62,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -91,7 +89,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -132,7 +129,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1372,25 +1368,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1920,25 +1897,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2522,19 +2480,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2579,19 +2524,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 8f8a0e50f5..936b8a76ef 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -130,9 +130,9 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(prologue_offset);
if (NeedsEagerFrame()) {
if (info()->IsStub()) {
- __ StubPrologue(prologue_offset);
+ __ StubPrologue(ip, prologue_offset);
} else {
- __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
+ __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
}
frame_is_built_ = true;
}
@@ -175,7 +175,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(r4);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -184,7 +184,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(r4);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -203,7 +203,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Load parameter from stack.
__ LoadP(r3, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ StoreP(r3, target, r0);
// Update the write barrier. This clobbers r6 and r3.
if (need_write_barrier) {
@@ -803,61 +803,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- {
- AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1868,41 +1813,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(r3));
- DCHECK(!scratch.is(scratch0()));
- DCHECK(!scratch.is(object));
-
- if (index->value() == 0) {
- __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand(stamp));
- __ LoadP(scratch, MemOperand(scratch));
- __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch, scratch0());
- __ bne(&runtime);
- __ LoadP(result,
- FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ b(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ LoadSmiLiteral(r4, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
String::Encoding encoding) {
if (index->IsConstantOperand()) {
@@ -2277,7 +2187,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ bge(instr->TrueLabel(chunk_));
}
@@ -2665,39 +2575,20 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
__ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
- __ blt(is_false);
- __ beq(is_true);
- __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ beq(is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
- __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ bgt(is_false);
+ __ beq(is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
Register instance_type = ip;
__ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
__ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
- if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
+ if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
__ bne(is_true);
} else {
__ bne(is_false);
@@ -2756,6 +2647,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
Register const object_map = scratch0();
+ Register const object_instance_type = ip;
Register const object_prototype = object_map;
Register const prototype = ToRegister(instr->prototype());
@@ -2771,6 +2663,15 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ lbz(object_instance_type,
+ FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
+ DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+ // Deoptimize for proxies.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
@@ -2822,7 +2723,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(r3);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
@@ -2899,7 +2800,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ LoadP(result, ContextOperand(context, instr->slot_index()));
+ __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2927,7 +2828,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Register scratch = scratch0();
- MemOperand target = ContextOperand(context, instr->slot_index());
+ MemOperand target = ContextMemOperand(context, instr->slot_index());
Label skip_assignment;
@@ -3463,14 +3364,15 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
- __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
__ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(result, FieldMemOperand(result, JSGlobalObject::kGlobalProxyOffset));
+ __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
if (result.is(receiver)) {
__ bind(&result_in_receiver);
} else {
@@ -3527,7 +3429,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// The number of arguments is stored in receiver which is r3, as expected
// by InvokeFunction.
ParameterCount actual(receiver);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3569,7 +3472,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3589,7 +3492,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize r3 to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ mov(r3, Operand(arity));
bool is_self_call = function.is_identical_to(info()->closure());
@@ -3953,7 +3857,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4003,11 +3907,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(ToRegister(instr->result()).is(r3));
- __ mov(r3, Operand(instr->arity()));
-
// Change context.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ mov(r3, Operand(instr->arity()));
+
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
@@ -4057,19 +3963,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->constructor()).is(r4));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- __ mov(r3, Operand(instr->arity()));
- // No cell in r5 for construct type feedback in optimized code
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->constructor()).is(r4));
@@ -4092,7 +3985,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4106,17 +3999,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ElementsKind holey_kind = GetHoleyElementsKind(kind);
ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ b(&done);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5602,50 +5495,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label materialized;
- // Registers will be used as follows:
- // r10 = literals array.
- // r4 = regexp literal.
- // r3 = regexp literal clone.
- // r5 and r7-r9 are used as temporaries.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ Move(r10, instr->hydrogen()->literals());
- __ LoadP(r4, FieldMemOperand(r10, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r4, ip);
- __ bne(&materialized);
-
- // Create regexp literal using runtime function
- // Result will be in r3.
- __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
- __ mov(r8, Operand(instr->hydrogen()->pattern()));
- __ mov(r7, Operand(instr->hydrogen()->flags()));
- __ Push(r10, r9, r8, r7);
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mr(r4, r3);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
-
- __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
- __ b(&allocated);
-
- __ bind(&runtime_allocate);
- __ LoadSmiLiteral(r3, Smi::FromInt(size));
- __ Push(r4, r3);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(r4);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r6));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -5724,8 +5573,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ beq(true_label);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
__ blt(false_label);
// Check for callable or undetectable objects => false.
__ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
@@ -5753,33 +5602,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp1 = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp1, scratch0());
- EmitBranch(instr, eq);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
- DCHECK(!temp1.is(temp2));
- // Get the frame pointer for the calling frame.
- __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&check_frame_marker);
- __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
- __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5908,8 +5730,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ TestIfSmi(r3, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5923,7 +5745,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r3);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
@@ -6044,7 +5866,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index 69653921f7..b0f016d309 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -5,12 +5,12 @@
#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
+#include "src/ast/scopes.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/crankshaft/ppc/lithium-ppc.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -24,13 +24,9 @@ class LCodeGen : public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -183,6 +179,11 @@ class LCodeGen : public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
LInstruction* instr, LOperand* context);
@@ -208,9 +209,6 @@ class LCodeGen : public LCodeGenBase {
LOperand* op, bool is_tagged, bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
@@ -259,10 +257,6 @@ class LCodeGen : public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp1, Register temp2);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
@@ -281,13 +275,9 @@ class LCodeGen : public LCodeGenBase {
template <class T>
void EmitVectorStoreICRegisters(T* instr);
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 67a860ae55..63aead7a3c 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -311,13 +311,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -1002,7 +995,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1222,14 +1217,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* constructor = UseFixed(instr->constructor(), r4);
- LCallNew* result = new (zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* constructor = UseFixed(instr->constructor(), r4);
@@ -1801,14 +1788,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), r3);
- LDateField* result =
- new (zone()) LDateField(object, FixedTemp(r4), instr->index());
- return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2408,13 +2387,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LRegExpLiteral(context), r3),
- instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2509,12 +2481,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new (zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index e4d267ec85..e86edc9afc 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -131,7 +128,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1328,25 +1324,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1827,25 +1804,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2413,17 +2371,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
@@ -2464,17 +2411,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) { temps_[0] = temp; }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index 49bc2c7ded..df50f81167 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -4,11 +4,11 @@
#include "src/crankshaft/typing.h"
+#include "src/ast/scopes.h"
#include "src/frames.h"
#include "src/frames-inl.h"
#include "src/ostreams.h"
-#include "src/parser.h" // for CompileTimeValue; TODO(rossberg): should move
-#include "src/scopes.h"
+#include "src/parsing/parser.h" // for CompileTimeValue; TODO(rossberg): move
#include "src/splay-tree-inl.h"
namespace v8 {
@@ -410,14 +410,9 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->key()->AsLiteral()->value()->IsInternalizedString() &&
prop->emit_store()) {
// Record type feed back for the property.
- TypeFeedbackId id = prop->key()->AsLiteral()->LiteralFeedbackId();
FeedbackVectorSlot slot = prop->GetSlot();
SmallMapList maps;
- if (FLAG_vector_stores) {
- oracle()->CollectReceiverTypes(slot, &maps);
- } else {
- oracle()->CollectReceiverTypes(id, &maps);
- }
+ oracle()->CollectReceiverTypes(slot, &maps);
prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
: Handle<Map>::null());
}
@@ -445,32 +440,20 @@ void AstTyper::VisitAssignment(Assignment* expr) {
// Collect type feedback.
Property* prop = expr->target()->AsProperty();
if (prop != NULL) {
- TypeFeedbackId id = expr->AssignmentFeedbackId();
FeedbackVectorSlot slot = expr->AssignmentSlot();
- expr->set_is_uninitialized(FLAG_vector_stores
- ? oracle()->StoreIsUninitialized(slot)
- : oracle()->StoreIsUninitialized(id));
+ expr->set_is_uninitialized(oracle()->StoreIsUninitialized(slot));
if (!expr->IsUninitialized()) {
SmallMapList* receiver_types = expr->GetReceiverTypes();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
DCHECK(lit_key != NULL && lit_key->value()->IsString());
Handle<String> name = Handle<String>::cast(lit_key->value());
- if (FLAG_vector_stores) {
- oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
- } else {
- oracle()->AssignmentReceiverTypes(id, name, receiver_types);
- }
+ oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
} else {
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
- if (FLAG_vector_stores) {
- oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
- &store_mode, &key_type);
- } else {
- oracle()->KeyedAssignmentReceiverTypes(id, receiver_types,
- &store_mode, &key_type);
- }
+ oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
+ &store_mode, &key_type);
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
}
@@ -629,17 +612,11 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
- TypeFeedbackId store_id = expr->CountStoreFeedbackId();
FeedbackVectorSlot slot = expr->CountSlot();
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
- if (FLAG_vector_stores) {
- oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
- oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
- } else {
- oracle()->GetStoreModeAndKeyType(store_id, &store_mode, &key_type);
- oracle()->CountReceiverTypes(store_id, expr->GetReceiverTypes());
- }
+ oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
+ oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
@@ -793,6 +770,12 @@ void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
+void AstTyper::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
for (int i = 0; i < decls->length(); ++i) {
Declaration* decl = decls->at(i);
diff --git a/deps/v8/src/crankshaft/typing.h b/deps/v8/src/crankshaft/typing.h
index d088b84709..40b538aef3 100644
--- a/deps/v8/src/crankshaft/typing.h
+++ b/deps/v8/src/crankshaft/typing.h
@@ -6,9 +6,9 @@
#define V8_CRANKSHAFT_TYPING_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/effects.h"
-#include "src/scopes.h"
#include "src/type-info.h"
#include "src/types.h"
#include "src/zone.h"
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index d6ad87be1c..3f7e9ba825 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -134,7 +134,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
}
@@ -184,7 +184,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -193,7 +193,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ Push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -808,60 +808,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode, int argc) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -1721,44 +1667,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(rax));
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(object);
- __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
- __ Check(equal, kOperandIsNotADate);
- }
-
- if (index->value() == 0) {
- __ movp(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- Operand stamp_operand = __ ExternalOperand(stamp);
- __ movp(kScratchRegister, stamp_operand);
- __ cmpp(kScratchRegister, FieldOperand(object,
- JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movp(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
- __ movp(arg_reg_1, object);
- __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2193,7 +2101,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
@@ -2552,32 +2460,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(equal, is_false);
}
- // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ GetMapConstructor(temp, temp, kScratchRegister);
@@ -2656,6 +2545,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+
+ // Deoptimize if the object needs to be access checked.
+ __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
__ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmpp(object_prototype, prototype);
EmitTrueBranch(instr, equal);
@@ -2694,7 +2593,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ Push(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
@@ -3260,16 +3159,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
+ __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
- __ movp(receiver,
- Operand(receiver,
- Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(receiver, FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+ __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+ __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
__ bind(&receiver_ok);
}
@@ -3313,7 +3210,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3349,7 +3247,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3368,7 +3266,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize rax to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ Set(rax, arity);
// Invoke function.
@@ -3386,7 +3285,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
- __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
+ generator);
}
}
@@ -3432,11 +3332,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Set(rax, instr->arity());
-
// Change context.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ Set(rax, instr->arity());
+
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3828,7 +3730,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -3867,19 +3769,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->constructor()).is(rdi));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- __ Set(rax, instr->arity());
- // No cell in ebx for construct type feedback in optimized code
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->constructor()).is(rdi));
@@ -3903,7 +3792,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -3918,17 +3807,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5385,57 +5274,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- Label materialized;
- // Registers will be used as follows:
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ Move(rcx, instr->hydrogen()->literals());
- __ movp(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ Push(rcx);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->pattern());
- __ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ movp(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated, Label::kNear);
-
- __ bind(&runtime_allocate);
- __ Push(rbx);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ Pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movp(rdx, FieldOperand(rbx, i));
- __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movp(FieldOperand(rax, i), rdx);
- __ movp(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movp(FieldOperand(rax, size - kPointerSize), rdx);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->value()).is(rbx));
@@ -5533,8 +5371,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ JumpIfSmi(input, false_label, false_distance);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
@@ -5560,32 +5398,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp);
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -5710,8 +5522,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Condition cc = masm()->CheckSmi(rax);
DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -5725,7 +5537,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
@@ -5845,7 +5657,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ Push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index eafdc778ad..6fb918bf84 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -6,13 +6,13 @@
#define V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
+#include "src/ast/scopes.h"
#include "src/base/logging.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
#include "src/crankshaft/x64/lithium-x64.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -26,13 +26,9 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
- osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
@@ -180,6 +176,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, num_arguments, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
@@ -215,9 +216,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
@@ -273,10 +271,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
void EmitPushTaggedOperand(LOperand* operand);
@@ -310,13 +304,9 @@ class LCodeGen: public LCodeGenBase {
void MakeSureStackPagesMapped(int offset);
#endif
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
- int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
diff --git a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index cc65f188ce..3808c377dc 100644
--- a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -244,7 +244,9 @@ void LGapResolver::EmitSwap(int index) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
- __ xchgq(dst, src);
+ __ movp(kScratchRegister, src);
+ __ movp(src, dst);
+ __ movp(dst, kScratchRegister);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 76df55dcef..3c932a24ab 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -311,13 +311,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
@@ -1010,7 +1003,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+ return AssignEnvironment(result);
}
@@ -1231,14 +1226,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* constructor = UseFixed(instr->constructor(), rdi);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* constructor = UseFixed(instr->constructor(), rdi);
@@ -1792,13 +1779,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* object = UseFixed(instr->value(), rax);
- LDateField* result = new(zone()) LDateField(object, instr->index());
- return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2490,13 +2470,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LRegExpLiteral* result = new(zone()) LRegExpLiteral(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2599,12 +2572,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index 74709d3316..ebe1ef9e5d 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -33,7 +33,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -61,7 +60,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -89,7 +87,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -129,7 +126,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1346,23 +1342,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 0> {
- public:
- LDateField(LOperand* date, Smi* index) : index_(index) {
- inputs_[0] = date;
- }
-
- LOperand* date() { return inputs_[0]; }
- Smi* index() const { return index_; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1881,25 +1860,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2486,19 +2446,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
@@ -2543,20 +2490,6 @@ class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCallAndBranch)
-};
-
-
class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 074628b5ef..fe2baa5bb8 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -140,7 +140,7 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsStub()) {
__ StubPrologue();
} else {
- __ Prologue(info()->IsCodePreAgingActive());
+ __ Prologue(info()->GeneratePreagedPrologue());
}
}
@@ -216,7 +216,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
if (info()->scope()->is_script_scope()) {
__ push(edi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -225,7 +225,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
need_write_barrier = false;
} else {
__ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -1171,60 +1171,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
- int length = deoptimizations_.length();
- if (length == 0) return;
- Handle<DeoptimizationInputData> data =
- DeoptimizationInputData::New(isolate(), length, TENURED);
-
- Handle<ByteArray> translations =
- translations_.CreateByteArray(isolate()->factory());
- data->SetTranslationByteArray(*translations);
- data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
- data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
- if (info_->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
- data->SetSharedFunctionInfo(*info_->shared_info());
- } else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
- }
- data->SetWeakCellCache(Smi::FromInt(0));
-
- Handle<FixedArray> literals =
- factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
- { AllowDeferredHandleDereference copy_handles;
- for (int i = 0; i < deoptimization_literals_.length(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
- }
-
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
- data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
- // Populate the deoptimization entries.
- for (int i = 0; i < length; i++) {
- LEnvironment* env = deoptimizations_[i];
- data->SetAstId(i, env->ast_id());
- data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
- data->SetArgumentsStackHeight(i,
- Smi::FromInt(env->arguments_stack_height()));
- data->SetPc(i, Smi::FromInt(env->pc_offset()));
- }
- code->set_deoptimization_data(*data);
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
- DCHECK_EQ(0, deoptimization_literals_.length());
- for (auto function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
- }
- inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -2006,37 +1952,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
}
-void LCodeGen::DoDateField(LDateField* instr) {
- Register object = ToRegister(instr->date());
- Register result = ToRegister(instr->result());
- Register scratch = ToRegister(instr->temp());
- Smi* index = instr->index();
- DCHECK(object.is(result));
- DCHECK(object.is(eax));
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2416,7 +2331,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
@@ -2778,29 +2693,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
- // Assuming the following assertions, we can use the same compares to test
- // for both being a function type and being in the object type range.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
- __ j(below, is_false);
- __ j(equal, is_true);
- __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
- // Faster code path to avoid two compares: subtract lower bound from the
- // actual type and do a signed compare with the width of the type range.
- __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(above, is_false);
+ __ j(equal, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
@@ -2880,6 +2777,15 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
+
+ // Deoptimize if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ // Deoptimize for proxies.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
__ cmp(object_prototype, prototype);
EmitTrueBranch(instr, equal);
@@ -2955,7 +2861,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// safe to write to the context register.
__ push(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
@@ -3453,16 +3359,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
- __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
- const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(receiver, Operand(receiver, global_offset));
- const int proxy_offset = JSGlobalObject::kGlobalProxyOffset;
- __ mov(receiver, FieldOperand(receiver, proxy_offset));
+ __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+ __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
__ bind(&receiver_ok);
}
@@ -3503,7 +3407,8 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(eax);
- __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+ __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+ safepoint_generator);
}
@@ -3544,7 +3449,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
- CallRuntime(Runtime::kDeclareGlobals, 2, instr);
+ CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3562,7 +3467,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Always initialize eax to the number of actual arguments.
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
__ mov(eax, arity);
// Invoke function directly.
@@ -3625,11 +3531,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(eax, instr->arity());
-
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Always initialize new target and number of actual arguments.
+ __ mov(edx, factory()->undefined_value());
+ __ mov(eax, instr->arity());
+
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
@@ -3863,65 +3771,11 @@ void LCodeGen::DoMathFround(LMathFround* instr) {
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- X87Register input = ToX87Register(instr->value());
- X87Register result_reg = ToX87Register(instr->result());
- Register temp_result = ToRegister(instr->temp1());
- Register temp = ToRegister(instr->temp2());
- Label slow, done, smi, finish;
- DCHECK(result_reg.is(input));
-
- // Store input into Heap number and call runtime function kMathExpRT.
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ Move(temp_result, Immediate(0));
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(temp_result, eax);
- }
- __ bind(&done);
- X87LoadForUsage(input);
- __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
-
- {
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(temp_result);
- __ CallRuntimeSaveDoubles(Runtime::kMathSqrt);
- RecordSafepointWithRegisters(instr->pointer_map(), 1,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(temp_result, eax);
- }
- X87PrepareToWrite(result_reg);
- // return value of MathExpRT is Smi or Heap Number.
- __ JumpIfSmi(temp_result, &smi);
- // Heap number(double)
- __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
- __ jmp(&finish);
- // SMI
- __ bind(&smi);
- __ SmiUntag(temp_result);
- __ push(temp_result);
- __ fild_s(MemOperand(esp, 0));
- __ pop(temp_result);
- __ bind(&finish);
- X87CommitWrite(result_reg);
+ X87Register input_reg = ToX87Register(instr->value());
+ __ X87SetFPUCW(0x027F);
+ X87Fxch(input_reg);
+ __ fsqrt();
+ __ X87SetFPUCW(0x037F);
}
@@ -4146,7 +4000,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
- __ InvokeFunction(edi, count, CALL_FUNCTION, generator);
+ __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
@@ -4185,19 +4039,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
}
-void LCodeGen::DoCallNew(LCallNew* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->constructor()).is(edi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- // No cell in ebx for construct type feedback in optimized code
- __ mov(ebx, isolate()->factory()->undefined_value());
- CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ Move(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->constructor()).is(edi));
@@ -4221,7 +4062,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
@@ -4236,17 +4077,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5812,58 +5653,6 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
}
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- Label materialized;
- // Registers will be used as follows:
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
- // esi = context.
- int literal_offset =
- LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
- __ LoadHeapObject(ecx, instr->hydrogen()->literals());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->pattern()));
- __ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated, Label::kNear);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5946,8 +5735,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
__ JumpIfSmi(input, false_label, false_distance);
__ cmp(input, factory()->null_value());
__ j(equal, true_label, true_distance);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
__ j(below, false_label, false_distance);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -5972,32 +5761,6 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
}
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
- Register temp = ToRegister(instr->temp());
-
- EmitIsConstructCall(temp);
- EmitBranch(instr, equal);
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp) {
- // Get the frame pointer for the calling frame.
- __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker, Label::kNear);
- __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
@@ -6128,8 +5891,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ test(eax, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
@@ -6141,7 +5904,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+ CallRuntime(Runtime::kGetPropertyNamesFast, instr);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -6262,7 +6025,7 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, 2, instr);
+ CallRuntime(Runtime::kPushBlockContext, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 821eb822a5..6346344883 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -7,13 +7,13 @@
#include <map>
+#include "src/ast/scopes.h"
#include "src/base/logging.h"
#include "src/crankshaft/lithium-codegen.h"
#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
#include "src/crankshaft/x87/lithium-x87.h"
#include "src/deoptimizer.h"
#include "src/safepoint-table.h"
-#include "src/scopes.h"
#include "src/utils.h"
namespace v8 {
@@ -28,15 +28,11 @@ class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
- deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
- inlined_function_count_(0),
scope_(info->scope()),
- translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
support_aligned_spilled_doubles_(false),
- osr_pc_offset_(-1),
frame_is_built_(false),
x87_stack_(assembler),
safepoints_(info->zone()),
@@ -209,6 +205,11 @@ class LCodeGen: public LCodeGenBase {
CallRuntime(function, argc, instr);
}
+ void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ CallRuntime(function, function->nargs, instr);
+ }
+
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
@@ -244,9 +245,6 @@ class LCodeGen: public LCodeGenBase {
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
- void PopulateDeoptimizationData(Handle<Code> code);
-
- void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
X87Register ToX87Register(int index) const;
@@ -306,10 +304,6 @@ class LCodeGen: public LCodeGenBase {
Label* is_not_string,
SmiCheck check_needed);
- // Emits optimized code for %_IsConstructCall().
- // Caller should branch on equal condition.
- void EmitIsConstructCall(Register temp);
-
// Emits optimized code to deep-copy the contents of statically known
// object graphs (e.g. object literal boilerplate).
void EmitDeepCopy(Handle<JSObject> object,
@@ -353,15 +347,11 @@ class LCodeGen: public LCodeGenBase {
void MakeSureStackPagesMapped(int offset);
#endif
- ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
- int inlined_function_count_;
Scope* const scope_;
- TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
bool support_aligned_spilled_doubles_;
- int osr_pc_offset_;
bool frame_is_built_;
class X87Stack : public ZoneObject {
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index 72b0797ea9..b422e1235b 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -330,15 +330,6 @@ void LInvokeFunction::PrintDataTo(StringStream* stream) {
}
-void LCallNew::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- context()->PrintTo(stream);
- stream->Add(" ");
- constructor()->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
context()->PrintTo(stream);
@@ -1047,7 +1038,9 @@ LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
LOperand* object = UseRegister(instr->object());
LOperand* prototype = UseRegister(instr->prototype());
LOperand* temp = TempRegister();
- return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ LHasInPrototypeChainAndBranch* result =
+ new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+ return AssignEnvironment(result);
}
@@ -1262,14 +1255,6 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
}
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* constructor = UseFixed(instr->constructor(), edi);
- LCallNew* result = new(zone()) LCallNew(context, constructor);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
@@ -1806,14 +1791,6 @@ LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
}
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
- LOperand* date = UseFixed(instr->value(), eax);
- LDateField* result =
- new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
- return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2504,13 +2481,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@@ -2618,12 +2588,6 @@ LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
}
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
- HIsConstructCallAndBranch* instr) {
- return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
return NULL;
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index f0f694ef3d..e033902617 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -37,7 +37,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallWithDescriptor) \
V(CallFunction) \
- V(CallNew) \
V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
@@ -66,7 +65,6 @@ class LCodeGen;
V(ConstantT) \
V(ConstructDouble) \
V(Context) \
- V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
@@ -94,7 +92,6 @@ class LCodeGen;
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCallAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -134,7 +131,6 @@ class LCodeGen;
V(Power) \
V(Prologue) \
V(PushArgument) \
- V(RegExpLiteral) \
V(Return) \
V(SeqStringGetChar) \
V(SeqStringSetChar) \
@@ -1125,19 +1121,6 @@ class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsConstructCallAndBranch final : public LControlInstruction<0, 1> {
- public:
- explicit LIsConstructCallAndBranch(LOperand* temp) {
- temps_[0] = temp;
- }
-
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
- "is-construct-call-and-branch")
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1373,27 +1356,6 @@ class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
};
-class LDateField final : public LTemplateInstruction<1, 1, 1> {
- public:
- LDateField(LOperand* date, LOperand* temp, Smi* index)
- : index_(index) {
- inputs_[0] = date;
- temps_[0] = temp;
- }
-
- LOperand* date() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
- DECLARE_HYDROGEN_ACCESSOR(DateField)
-
- Smi* index() const { return index_; }
-
- private:
- Smi* index_;
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1916,25 +1878,6 @@ class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
};
-class LCallNew final : public LTemplateInstruction<1, 2, 0> {
- public:
- LCallNew(LOperand* context, LOperand* constructor) {
- inputs_[0] = context;
- inputs_[1] = constructor;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* constructor() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
- DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
- void PrintDataTo(StringStream* stream) override;
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
public:
LCallNewArray(LOperand* context, LOperand* constructor) {
@@ -2535,19 +2478,6 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
};
-class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LRegExpLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
- DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index b73ab0bd6a..c58c172640 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -101,6 +101,70 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
};
+#ifndef V8_SHARED
+// Predictable v8::Platform implementation. All background and foreground
+// tasks are run immediately, delayed tasks are not executed at all.
+class PredictablePlatform : public Platform {
+ public:
+ PredictablePlatform() {}
+
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {
+ task->Run();
+ delete task;
+ }
+
+ void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
+ task->Run();
+ delete task;
+ }
+
+ void CallDelayedOnForegroundThread(v8::Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ delete task;
+ }
+
+ void CallIdleOnForegroundThread(v8::Isolate* isolate,
+ IdleTask* task) override {
+ UNREACHABLE();
+ }
+
+ bool IdleTasksEnabled(v8::Isolate* isolate) override { return false; }
+
+ double MonotonicallyIncreasingTime() override {
+ return synthetic_time_in_sec_ += 0.00001;
+ }
+
+ uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
+ const char* name, uint64_t id, uint64_t bind_id,
+ int numArgs, const char** argNames,
+ const uint8_t* argTypes, const uint64_t* argValues,
+ unsigned int flags) override {
+ return 0;
+ }
+
+ void UpdateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name, uint64_t handle) override {}
+
+ const uint8_t* GetCategoryGroupEnabled(const char* name) override {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ const char* GetCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) override {
+ static const char* dummy = "dummy";
+ return dummy;
+ }
+
+ private:
+ double synthetic_time_in_sec_ = 0.0;
+
+ DISALLOW_COPY_AND_ASSIGN(PredictablePlatform);
+};
+#endif // !V8_SHARED
+
+
v8::Platform* g_platform = NULL;
@@ -425,13 +489,11 @@ int PerIsolateData::RealmIndexOrThrow(
#ifndef V8_SHARED
// performance.now() returns a time stamp as double, measured in milliseconds.
-// When FLAG_verify_predictable mode is enabled it returns current value
-// of Heap::allocations_count().
+// When FLAG_verify_predictable mode is enabled it returns result of
+// v8::Platform::MonotonicallyIncreasingTime().
void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (i::FLAG_verify_predictable) {
- Isolate* v8_isolate = args.GetIsolate();
- i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
- args.GetReturnValue().Set(heap->synthetic_time());
+ args.GetReturnValue().Set(g_platform->MonotonicallyIncreasingTime());
} else {
base::TimeDelta delta =
base::TimeTicks::HighResolutionNow() - kInitialTicks;
@@ -594,9 +656,13 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Explicitly catch potential exceptions in toString().
v8::TryCatch try_catch(args.GetIsolate());
+ Local<Value> arg = args[i];
Local<String> str_obj;
- if (!args[i]
- ->ToString(args.GetIsolate()->GetCurrentContext())
+
+ if (arg->IsSymbol()) {
+ arg = Local<Symbol>::Cast(arg)->Name();
+ }
+ if (!arg->ToString(args.GetIsolate()->GetCurrentContext())
.ToLocal(&str_obj)) {
try_catch.ReThrow();
return;
@@ -1046,7 +1112,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
i::JSFunction::cast(*compiled_script)->shared()->script()))
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Script::TYPE_NATIVE);
+ script_object->set_type(i::Script::TYPE_EXTENSION);
}
#endif // !V8_SHARED
@@ -2016,7 +2082,13 @@ void Shell::CollectGarbage(Isolate* isolate) {
void Shell::EmptyMessageQueues(Isolate* isolate) {
- while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+#ifndef V8_SHARED
+ if (!i::FLAG_verify_predictable) {
+#endif
+ while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+#ifndef V8_SHARED
+ }
+#endif
}
@@ -2358,7 +2430,14 @@ int Shell::Main(int argc, char* argv[]) {
#endif // defined(_WIN32) || defined(_WIN64)
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU(options.icu_data_file);
+#ifndef V8_SHARED
+ g_platform = i::FLAG_verify_predictable
+ ? new PredictablePlatform()
+ : v8::platform::CreateDefaultPlatform();
+#else
g_platform = v8::platform::CreateDefaultPlatform();
+#endif // !V8_SHARED
+
v8::V8::InitializePlatform(g_platform);
v8::V8::Initialize();
if (options.natives_blob || options.snapshot_blob) {
@@ -2426,7 +2505,7 @@ int Shell::Main(int argc, char* argv[]) {
result = RunMain(isolate, argc, argv, last_run);
}
printf("======== Full Deoptimization =======\n");
- Testing::DeoptimizeAll();
+ Testing::DeoptimizeAll(isolate);
#if !defined(V8_SHARED)
} else if (i::FLAG_stress_runs > 0) {
options.stress_runs = i::FLAG_stress_runs;
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index 9f806724d5..f249a78856 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -50,10 +50,6 @@
'd8.h',
'd8.cc',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ '<(v8_toolset_for_d8)', ],
@@ -80,6 +76,13 @@
'd8_js2c',
],
}],
+ [ 'v8_postmortem_support=="true"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-Wl,-force_load,<(PRODUCT_DIR)/libv8_base.a'
+ ],
+ },
+ }],
],
}],
['v8_enable_vtunejit==1', {
@@ -98,9 +101,6 @@
'<(icu_gyp_path):icudata',
],
}],
- ['v8_wasm!=0', {
- 'include_dirs': ['../third_party/wasm'],
- }],
],
},
{
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 8d55c788e2..27a0bc39cd 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -8,11 +8,28 @@
// Used by the d8 shell to output results.
var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects
+// Hacky solution to circumvent forcing --allow-natives-syntax for d8
+function isProxy(o) { return false };
+function JSProxyGetTarget(proxy) { };
+function JSProxyGetHandler(proxy) { };
+
+try {
+ isProxy = Function(['object'], 'return %_IsJSProxy(object)');
+ JSProxyGetTarget = Function(['proxy'],
+ 'return %JSProxyGetTarget(proxy)');
+ JSProxyGetHandler = Function(['proxy'],
+ 'return %JSProxyGetHandler(proxy)');
+} catch(e) {};
+
+
function Stringify(x, depth) {
if (depth === undefined)
depth = stringifyDepthLimit;
else if (depth === 0)
- return "*";
+ return "...";
+ if (isProxy(x)) {
+ return StringifyProxy(x, depth);
+ }
switch (typeof x) {
case "undefined":
return "undefined";
@@ -63,3 +80,12 @@ function Stringify(x, depth) {
return "[crazy non-standard value]";
}
}
+
+function StringifyProxy(proxy, depth) {
+ var proxy_type = typeof proxy;
+ var info_object = {
+ target: JSProxyGetTarget(proxy),
+ handler: JSProxyGetHandler(proxy)
+ }
+ return '[' + proxy_type + ' Proxy ' + Stringify(info_object, depth-1) + ']';
+}
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 3106b1622b..f98ad64f1f 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -71,7 +71,7 @@ void DateCache::YearMonthDayFromDays(
*year = 400 * (days / kDaysIn400Years) - kYearsOffset;
days %= kDaysIn400Years;
- DCHECK(DaysFromYearMonth(*year, 0) + days == save_days);
+ DCHECK_EQ(save_days, DaysFromYearMonth(*year, 0) + days);
days--;
int yd1 = days / kDaysIn100Years;
@@ -175,6 +175,20 @@ int DateCache::DaysFromYearMonth(int year, int month) {
}
+void DateCache::BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
+ int* weekday, int* hour, int* min, int* sec,
+ int* ms) {
+ int const days = DaysFromTime(time_ms);
+ int const time_in_day_ms = TimeInDay(time_ms, days);
+ YearMonthDayFromDays(days, year, month, day);
+ *weekday = Weekday(days);
+ *hour = time_in_day_ms / (60 * 60 * 1000);
+ *min = (time_in_day_ms / (60 * 1000)) % 60;
+ *sec = (time_in_day_ms / 1000) % 60;
+ *ms = time_in_day_ms % 1000;
+}
+
+
void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
if (after_->offset_ms == offset_ms &&
after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index 813e5b83c0..0a3e91beb2 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -18,6 +18,7 @@ class DateCache {
static const int kMsPerMin = 60 * 1000;
static const int kSecPerDay = 24 * 60 * 60;
static const int64_t kMsPerDay = kSecPerDay * 1000;
+ static const int64_t kMsPerMonth = kMsPerDay * 30;
// The largest time that can be passed to OS date-time library functions.
static const int kMaxEpochTimeInSec = kMaxInt;
@@ -30,8 +31,7 @@ class DateCache {
// Conservative upper bound on time that can be stored in JSDate
// before UTC conversion.
- static const int64_t kMaxTimeBeforeUTCInMs =
- kMaxTimeInMs + 10 * kMsPerDay;
+ static const int64_t kMaxTimeBeforeUTCInMs = kMaxTimeInMs + kMsPerMonth;
// Sentinel that denotes an invalid local offset.
static const int kInvalidLocalOffsetInMs = kMaxInt;
@@ -190,6 +190,10 @@ class DateCache {
// the first day of the given month in the given year.
int DaysFromYearMonth(int year, int month);
+ // Breaks down the time value.
+ void BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
+ int* weekday, int* hour, int* min, int* sec, int* ms);
+
// Cache stamp is used for invalidating caches in JSDate.
// We increment the stamp each time when the timezone information changes.
// JSDate objects perform stamp check and invalidate their caches if
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index c1afb7d1b6..7e5c4e355e 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -137,17 +137,29 @@ bool DateParser::Parse(Vector<Char> str,
tz.SetSign(token.ascii_sign());
// The following number may be empty.
int n = 0;
+ int length = 0;
if (scanner.Peek().IsNumber()) {
- n = scanner.Next().number();
+ DateToken token = scanner.Next();
+ length = token.length();
+ n = token.number();
}
has_read_number = true;
if (scanner.Peek().IsSymbol(':')) {
tz.SetAbsoluteHour(n);
+ // TODO(littledan): Use minutes as part of timezone?
tz.SetAbsoluteMinute(kNone);
- } else {
+ } else if (length == 2 || length == 1) {
+ // Handle time zones like GMT-8
+ tz.SetAbsoluteHour(n);
+ tz.SetAbsoluteMinute(0);
+ } else if (length == 4 || length == 3) {
+ // Looks like the hhmm format
tz.SetAbsoluteHour(n / 100);
tz.SetAbsoluteMinute(n % 100);
+ } else {
+ // No need to accept time zones like GMT-12345
+ return false;
}
} else if ((token.IsAsciiSign() || token.IsSymbol(')')) &&
has_read_number) {
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/dateparser.cc
index 09dbf1127d..d096a7ec9f 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/dateparser.cc
@@ -100,8 +100,15 @@ bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (sign_ != kNone) {
if (hour_ == kNone) hour_ = 0;
if (minute_ == kNone) minute_ = 0;
- int total_seconds = sign_ * (hour_ * 3600 + minute_ * 60);
- if (!Smi::IsValid(total_seconds)) return false;
+ // Avoid signed integer overflow (undefined behavior) by doing unsigned
+ // arithmetic.
+ unsigned total_seconds_unsigned = hour_ * 3600U + minute_ * 60U;
+ if (total_seconds_unsigned > Smi::kMaxValue) return false;
+ int total_seconds = static_cast<int>(total_seconds_unsigned);
+ if (sign_ < 0) {
+ total_seconds = -total_seconds;
+ }
+ DCHECK(Smi::IsValid(total_seconds));
output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
} else {
output->set_null(UTC_OFFSET);
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 484e1d3eca..533173984c 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -7,7 +7,7 @@
#include "src/allocation.h"
#include "src/char-predicates.h"
-#include "src/scanner.h"
+#include "src/parsing/scanner.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 7f1542e183..2d4cbf13d7 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -24,25 +24,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
Assembler::BlockConstPoolScope block_const_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from
// mov r2, r2
// mov r2, r2
@@ -113,19 +113,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ mov(r1, Operand::Zero());
- __ str(r1, MemOperand(ip, 0));
-
// Load the function pointer off of our current stack frame.
__ ldr(r1, MemOperand(fp,
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
@@ -134,10 +122,16 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// FLAG_enable_embedded_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(r1, no_reg, dummy, dummy);
+
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
// Load context from the function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index 7272fe7bcf..c2b60a9326 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -25,26 +25,26 @@ void EmitDebugBreakSlot(Assembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc),
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ PatchingAssembler patcher(isolate, reinterpret_cast<Instruction*>(pc),
Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(&patcher);
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc),
+ PatchingAssembler patcher(isolate, reinterpret_cast<Instruction*>(pc),
Assembler::kDebugBreakSlotInstructions);
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
// break slot code from
@@ -124,30 +124,25 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, restarter_frame_function_slot);
- __ Str(xzr, MemOperand(scratch));
-
// We do not know our frame height, but set sp based on fp.
__ Sub(masm->StackPointer(), fp, kPointerSize);
__ AssertStackConsistency();
__ Pop(x1, fp, lr); // Function, Frame, Return address.
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(x1, no_reg, dummy, dummy);
+
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.AcquireX();
+
// Load context from the function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index b249561324..e19b93eebe 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -20,10 +20,9 @@ static inline bool IsDebugContext(Isolate* isolate, Context* context) {
}
-MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
- Handle<String> source,
- bool disable_break,
- Handle<Object> context_extension) {
+MaybeHandle<Object> DebugEvaluate::Global(
+ Isolate* isolate, Handle<String> source, bool disable_break,
+ Handle<HeapObject> context_extension) {
// Handle the processing of break.
DisableBreak disable_break_scope(isolate->debug(), disable_break);
@@ -50,7 +49,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
int inlined_jsframe_index,
Handle<String> source,
bool disable_break,
- Handle<Object> context_extension) {
+ Handle<HeapObject> context_extension) {
// Handle the processing of break.
DisableBreak disable_break_scope(isolate->debug(), disable_break);
@@ -65,15 +64,23 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
SaveContext savex(isolate);
isolate->set_context(*(save->context()));
- // Materialize stack locals and the arguments object.
+ // This is not a lot different than DebugEvaluate::Global, except that
+ // variables accessible by the function we are evaluating from are
+ // materialized and included on top of the native context. Changes to
+ // the materialized object are written back afterwards.
+ // Note that the native context is taken from the original context chain,
+ // which may not be the current native context of the isolate.
ContextBuilder context_builder(isolate, frame, inlined_jsframe_index);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
- Handle<Object> receiver(frame->receiver(), isolate);
+ Handle<Context> context = context_builder.native_context();
+ Handle<JSObject> receiver(context->global_proxy());
MaybeHandle<Object> maybe_result = Evaluate(
isolate, context_builder.outer_info(),
context_builder.innermost_context(), context_extension, receiver, source);
- if (!maybe_result.is_null()) context_builder.UpdateValues();
+ if (!maybe_result.is_null() && !FLAG_debug_eval_readonly_locals) {
+ context_builder.UpdateValues();
+ }
return maybe_result;
}
@@ -81,7 +88,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
// Compile and evaluate source for the given context.
MaybeHandle<Object> DebugEvaluate::Evaluate(
Isolate* isolate, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, Handle<Object> context_extension,
+ Handle<Context> context, Handle<HeapObject> context_extension,
Handle<Object> receiver, Handle<String> source) {
if (context_extension->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
@@ -120,42 +127,68 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> function =
+ Handle<JSFunction> local_function =
handle(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<Context> outer_context = handle(function->context(), isolate);
- outer_info_ = handle(function->shared());
+ Handle<Context> outer_context(local_function->context());
+ native_context_ = Handle<Context>(outer_context->native_context());
+ Handle<JSFunction> global_function(native_context_->closure());
+ outer_info_ = handle(global_function->shared());
Handle<Context> inner_context;
bool stop = false;
- for (ScopeIterator it(isolate, &frame_inspector);
+
+ // Iterate the original context chain to create a context chain that reflects
+ // our needs. The original context chain may look like this:
+ // <native context> <outer contexts> <function context> <inner contexts>
+ // In the resulting context chain, we want to materialize the receiver,
+ // the parameters of the current function, the stack locals. We only
+ // materialize context variables that the function already references,
+ // because only for those variables we can be sure that they will be resolved
+ // correctly. Variables that are not referenced by the function may be
+ // context-allocated and thus accessible, but may be shadowed by stack-
+ // allocated variables and the resolution would be incorrect.
+ // The result will look like this:
+ // <native context> <receiver context>
+ // <materialized stack and accessible context vars> <inner contexts>
+ // All contexts use the closure of the native context, since there is no
+ // function context in the chain. Variables that cannot be resolved are
+ // bound to toplevel (script contexts or global object).
+ // Once debug-evaluate has been executed, the changes to the materialized
+ // objects are written back to the original context chain. Any changes to
+ // the original context chain will therefore be overwritten.
+ const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
+ for (ScopeIterator it(isolate, &frame_inspector, option);
!it.Failed() && !it.Done() && !stop; it.Next()) {
ScopeIterator::ScopeType scope_type = it.Type();
-
if (scope_type == ScopeIterator::ScopeTypeLocal) {
- Handle<Context> parent_context =
+ DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
+ it.GetNonLocals(&non_locals_);
+ Handle<Context> local_context =
it.HasContext() ? it.CurrentContext() : outer_context;
// The "this" binding, if any, can't be bound via "with". If we need
// to, add another node onto the outer context to bind "this".
- parent_context = MaterializeReceiver(parent_context, function);
+ Handle<Context> receiver_context =
+ MaterializeReceiver(native_context_, local_context, local_function,
+ global_function, it.ThisIsNonLocal());
Handle<JSObject> materialized_function = NewJSObjectWithNullProto();
-
- frame_inspector.MaterializeStackLocals(materialized_function, function);
-
- MaterializeArgumentsObject(materialized_function, function);
+ frame_inspector.MaterializeStackLocals(materialized_function,
+ local_function);
+ MaterializeArgumentsObject(materialized_function, local_function);
+ MaterializeContextChain(materialized_function, local_context);
Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, parent_context, materialized_function);
+ global_function, receiver_context, materialized_function);
ContextChainElement context_chain_element;
- context_chain_element.original_context = it.CurrentContext();
+ context_chain_element.original_context = local_context;
context_chain_element.materialized_object = materialized_function;
context_chain_element.scope_info = it.CurrentScopeInfo();
context_chain_.Add(context_chain_element);
stop = true;
- RecordContextsInChain(&inner_context, with_context, with_context);
+ RecordContextsInChain(&inner_context, receiver_context, with_context);
} else if (scope_type == ScopeIterator::ScopeTypeCatch ||
scope_type == ScopeIterator::ScopeTypeWith) {
Handle<Context> cloned_context = Handle<Context>::cast(
@@ -175,7 +208,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
Handle<Context> cloned_context = Handle<Context>::cast(
isolate->factory()->CopyFixedArray(it.CurrentContext()));
Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, cloned_context, materialized_object);
+ global_function, cloned_context, materialized_object);
ContextChainElement context_chain_element;
context_chain_element.original_context = it.CurrentContext();
@@ -187,7 +220,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
RecordContextsInChain(&inner_context, cloned_context, with_context);
} else {
Handle<Context> with_context = isolate->factory()->NewWithContext(
- function, outer_context, materialized_object);
+ global_function, outer_context, materialized_object);
ContextChainElement context_chain_element;
context_chain_element.materialized_object = materialized_object;
@@ -208,6 +241,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
void DebugEvaluate::ContextBuilder::UpdateValues() {
+ // TODO(yangguo): remove updating values.
for (int i = 0; i < context_chain_.length(); i++) {
ContextChainElement element = context_chain_[i];
if (!element.original_context.is_null() &&
@@ -224,6 +258,11 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
FrameInspector(frame_, inlined_jsframe_index_, isolate_)
.UpdateStackLocalsFromMaterializedObject(element.materialized_object,
element.scope_info);
+ if (element.scope_info->scope_type() == FUNCTION_SCOPE) {
+ DCHECK_EQ(context_chain_.length() - 1, i);
+ UpdateContextChainFromMaterializedObject(element.materialized_object,
+ element.original_context);
+ }
}
}
}
@@ -272,41 +311,96 @@ void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
}
-Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
- Handle<Context> target, Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- Handle<Object> receiver;
- switch (scope_info->scope_type()) {
- case FUNCTION_SCOPE: {
- VariableMode mode;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
-
- // Don't bother creating a fake context node if "this" is in the context
- // already.
- if (ScopeInfo::ContextSlotIndex(scope_info,
- isolate_->factory()->this_string(), &mode,
- &init_flag, &maybe_assigned_flag) >= 0) {
- return target;
- }
- receiver = handle(frame_->receiver(), isolate_);
- break;
+MaybeHandle<Object> DebugEvaluate::ContextBuilder::LoadFromContext(
+ Handle<Context> context, Handle<String> name, bool* global) {
+ static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
+ int index;
+ PropertyAttributes attributes;
+ BindingFlags binding;
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding);
+ if (holder.is_null()) return MaybeHandle<Object>();
+ Handle<Object> value;
+ if (index != Context::kNotFound) { // Found on context.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ // Do not shadow variables on the script context.
+ *global = context->IsScriptContext();
+ return Handle<Object>(context->get(index), isolate_);
+ } else { // Found on object.
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ // Do not shadow properties on the global object.
+ *global = object->IsJSGlobalObject();
+ return JSReceiver::GetDataProperty(object, name);
+ }
+}
+
+
+void DebugEvaluate::ContextBuilder::MaterializeContextChain(
+ Handle<JSObject> target, Handle<Context> context) {
+ for (const Handle<String>& name : non_locals_) {
+ HandleScope scope(isolate_);
+ Handle<Object> value;
+ bool global;
+ if (!LoadFromContext(context, name, &global).ToHandle(&value) || global) {
+ // If resolving the variable fails, skip it. If it resolves to a global
+ // variable, skip it as well since it's not read-only and can be resolved
+ // within debug-evaluate.
+ continue;
}
- case MODULE_SCOPE:
- receiver = isolate_->factory()->undefined_value();
- break;
- case SCRIPT_SCOPE:
- receiver = handle(function->global_proxy(), isolate_);
- break;
- default:
- // For eval code, arrow functions, and the like, there's no "this" binding
- // to materialize.
- return target;
+ JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
+}
+
+
+void DebugEvaluate::ContextBuilder::StoreToContext(Handle<Context> context,
+ Handle<String> name,
+ Handle<Object> value) {
+ static const ContextLookupFlags flags = FOLLOW_CONTEXT_CHAIN;
+ int index;
+ PropertyAttributes attributes;
+ BindingFlags binding;
+ Handle<Object> holder =
+ context->Lookup(name, flags, &index, &attributes, &binding);
+ if (holder.is_null()) return;
+ if (attributes & READ_ONLY) return;
+ if (index != Context::kNotFound) { // Found on context.
+ Handle<Context> context = Handle<Context>::cast(holder);
+ context->set(index, *value);
+ } else { // Found on object.
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ LookupIterator lookup(object, name);
+ if (lookup.state() != LookupIterator::DATA) return;
+ CHECK(JSReceiver::SetDataProperty(&lookup, value).FromJust());
+ }
+}
+
- return isolate_->factory()->NewCatchContext(
- function, target, isolate_->factory()->this_string(), receiver);
+void DebugEvaluate::ContextBuilder::UpdateContextChainFromMaterializedObject(
+ Handle<JSObject> source, Handle<Context> context) {
+ // TODO(yangguo): check whether overwriting context fields is actually safe
+ // wrt fields we consider constant.
+ for (const Handle<String>& name : non_locals_) {
+ HandleScope scope(isolate_);
+ Handle<Object> value = JSReceiver::GetDataProperty(source, name);
+ StoreToContext(context, name, value);
+ }
+}
+
+
+Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
+ Handle<Context> parent_context, Handle<Context> lookup_context,
+ Handle<JSFunction> local_function, Handle<JSFunction> global_function,
+ bool this_is_non_local) {
+ Handle<Object> receiver = isolate_->factory()->undefined_value();
+ Handle<String> this_string = isolate_->factory()->this_string();
+ if (this_is_non_local) {
+ bool global;
+ LoadFromContext(lookup_context, this_string, &global).ToHandle(&receiver);
+ } else if (local_function->shared()->scope_info()->HasReceiver()) {
+ receiver = handle(frame_->receiver(), isolate_);
+ }
+ return isolate_->factory()->NewCatchContext(global_function, parent_context,
+ this_string, receiver);
}
} // namespace internal
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 49a7fce3ee..c0b1f027d1 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -15,7 +15,7 @@ class DebugEvaluate : public AllStatic {
public:
static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
bool disable_break,
- Handle<Object> context_extension);
+ Handle<HeapObject> context_extension);
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
@@ -25,7 +25,7 @@ class DebugEvaluate : public AllStatic {
static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
int inlined_jsframe_index,
Handle<String> source, bool disable_break,
- Handle<Object> context_extension);
+ Handle<HeapObject> context_extension);
private:
// This class builds a context chain for evaluation of expressions
@@ -54,6 +54,7 @@ class DebugEvaluate : public AllStatic {
void UpdateValues();
Handle<Context> innermost_context() const { return innermost_context_; }
+ Handle<Context> native_context() const { return native_context_; }
Handle<SharedFunctionInfo> outer_info() const { return outer_info_; }
private:
@@ -74,12 +75,29 @@ class DebugEvaluate : public AllStatic {
void MaterializeArgumentsObject(Handle<JSObject> target,
Handle<JSFunction> function);
- Handle<Context> MaterializeReceiver(Handle<Context> target,
- Handle<JSFunction> function);
+ void MaterializeContextChain(Handle<JSObject> target,
+ Handle<Context> context);
+
+ void UpdateContextChainFromMaterializedObject(Handle<JSObject> source,
+ Handle<Context> context);
+
+ Handle<Context> MaterializeReceiver(Handle<Context> parent_context,
+ Handle<Context> lookup_context,
+ Handle<JSFunction> local_function,
+ Handle<JSFunction> global_function,
+ bool this_is_non_local);
+
+ MaybeHandle<Object> LoadFromContext(Handle<Context> context,
+ Handle<String> name, bool* global);
+
+ void StoreToContext(Handle<Context> context, Handle<String> name,
+ Handle<Object> value);
Handle<SharedFunctionInfo> outer_info_;
Handle<Context> innermost_context_;
+ Handle<Context> native_context_;
List<ContextChainElement> context_chain_;
+ List<Handle<String> > non_locals_;
Isolate* isolate_;
JavaScriptFrame* frame_;
int inlined_jsframe_index_;
@@ -88,7 +106,7 @@ class DebugEvaluate : public AllStatic {
static MaybeHandle<Object> Evaluate(Isolate* isolate,
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<Object> context_extension,
+ Handle<HeapObject> context_extension,
Handle<Object> receiver,
Handle<String> source);
};
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index ad54247417..012d291622 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -45,11 +45,6 @@ int FrameInspector::GetParametersCount() {
}
-int FrameInspector::expression_count() {
- return deoptimized_frame_->expression_count();
-}
-
-
Object* FrameInspector::GetFunction() {
return is_optimized_ ? deoptimized_frame_->GetFunction() : frame_->function();
}
@@ -109,6 +104,8 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
// Do not materialize the parameter if it is shadowed by a context local.
+ // TODO(yangguo): check whether this is necessary, now that we materialize
+ // context locals as well.
Handle<String> name(scope_info->ParameterName(i));
if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 86e817d47f..c0d20bbd1d 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -21,7 +21,6 @@ class FrameInspector {
~FrameInspector();
int GetParametersCount();
- int expression_count();
Object* GetFunction();
Object* GetParameter(int index);
Object* GetExpression(int index);
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 99d96404d1..15a0594009 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -4,21 +4,22 @@
#include "src/debug/debug-scopes.h"
+#include "src/ast/scopes.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
- bool ignore_nested_scopes)
+ ScopeIterator::Option option)
: isolate_(isolate),
frame_inspector_(frame_inspector),
nested_scope_chain_(4),
+ non_locals_(nullptr),
seen_script_scope_(false),
failed_(false) {
if (!frame_inspector->GetContext()->IsContext() ||
@@ -46,7 +47,8 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// addEventListener call), even if we drop some nested scopes.
// Later we may optimize getting the nested scopes (cache the result?)
// and include nested scopes into the "fast" iteration case as well.
-
+ bool ignore_nested_scopes = (option == IGNORE_NESTED_SCOPES);
+ bool collect_non_locals = (option == COLLECT_NON_LOCALS);
if (!ignore_nested_scopes && shared_info->HasDebugInfo()) {
// The source position at return is always the end of the function,
// which is not consistent with the current scope chain. Therefore all
@@ -61,8 +63,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
Address call_pc = GetFrame()->pc() - 1;
// Find the break point where execution has stopped.
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
+ BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
ignore_nested_scopes = location.IsReturn();
}
@@ -78,35 +79,37 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
if (scope_info->scope_type() == FUNCTION_SCOPE) {
nested_scope_chain_.Add(scope_info);
}
- } else {
- // Reparse the code and analyze the scopes.
+ if (!collect_non_locals) return;
+ }
+
+ // Reparse the code and analyze the scopes.
+ Scope* scope = NULL;
+ // Check whether we are in global, eval or function code.
+ Zone zone;
+ if (scope_info->scope_type() != FUNCTION_SCOPE) {
+ // Global or eval code.
Handle<Script> script(Script::cast(shared_info->script()));
- Scope* scope = NULL;
-
- // Check whether we are in global, eval or function code.
- Zone zone;
- if (scope_info->scope_type() != FUNCTION_SCOPE) {
- // Global or eval code.
- ParseInfo info(&zone, script);
- if (scope_info->scope_type() == SCRIPT_SCOPE) {
- info.set_global();
- } else {
- DCHECK(scope_info->scope_type() == EVAL_SCOPE);
- info.set_eval();
- info.set_context(Handle<Context>(function->context()));
- }
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.literal()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
+ ParseInfo info(&zone, script);
+ if (scope_info->scope_type() == SCRIPT_SCOPE) {
+ info.set_global();
} else {
- // Function code
- ParseInfo info(&zone, function);
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.literal()->scope();
- }
- RetrieveScopeChain(scope, shared_info);
+ DCHECK(scope_info->scope_type() == EVAL_SCOPE);
+ info.set_eval();
+ info.set_context(Handle<Context>(function->context()));
+ }
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
+ scope = info.literal()->scope();
+ }
+ if (!ignore_nested_scopes) RetrieveScopeChain(scope);
+ if (collect_non_locals) CollectNonLocals(scope);
+ } else {
+ // Function code
+ ParseInfo info(&zone, function);
+ if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
+ scope = info.literal()->scope();
}
+ if (!ignore_nested_scopes) RetrieveScopeChain(scope);
+ if (collect_non_locals) CollectNonLocals(scope);
}
}
@@ -115,6 +118,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate),
frame_inspector_(NULL),
context_(function->context()),
+ non_locals_(nullptr),
seen_script_scope_(false),
failed_(false) {
if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
@@ -320,6 +324,27 @@ Handle<Context> ScopeIterator::CurrentContext() {
}
}
+
+void ScopeIterator::GetNonLocals(List<Handle<String> >* list_out) {
+ Handle<String> this_string = isolate_->factory()->this_string();
+ for (HashMap::Entry* entry = non_locals_->Start(); entry != nullptr;
+ entry = non_locals_->Next(entry)) {
+ Handle<String> name(reinterpret_cast<String**>(entry->key));
+ // We need to treat "this" differently.
+ if (name.is_identical_to(this_string)) continue;
+ list_out->Add(Handle<String>(reinterpret_cast<String**>(entry->key)));
+ }
+}
+
+
+bool ScopeIterator::ThisIsNonLocal() {
+ Handle<String> this_string = isolate_->factory()->this_string();
+ void* key = reinterpret_cast<void*>(this_string.location());
+ HashMap::Entry* entry = non_locals_->Lookup(key, this_string->Hash());
+ return entry != nullptr;
+}
+
+
#ifdef DEBUG
// Debug print of the content of the current scope.
void ScopeIterator::DebugPrint() {
@@ -337,7 +362,7 @@ void ScopeIterator::DebugPrint() {
if (!CurrentContext().is_null()) {
CurrentContext()->Print(os);
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
+ Handle<HeapObject> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print(os);
}
@@ -361,7 +386,7 @@ void ScopeIterator::DebugPrint() {
os << "Closure:\n";
CurrentContext()->Print(os);
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension(), isolate_);
+ Handle<HeapObject> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print(os);
}
@@ -385,8 +410,7 @@ void ScopeIterator::DebugPrint() {
#endif
-void ScopeIterator::RetrieveScopeChain(Scope* scope,
- Handle<SharedFunctionInfo> shared_info) {
+void ScopeIterator::RetrieveScopeChain(Scope* scope) {
if (scope != NULL) {
int source_position = frame_inspector_->GetSourcePosition();
scope->GetNestedScopeChain(isolate_, &nested_scope_chain_, source_position);
@@ -403,6 +427,15 @@ void ScopeIterator::RetrieveScopeChain(Scope* scope,
}
+void ScopeIterator::CollectNonLocals(Scope* scope) {
+ if (scope != NULL) {
+ DCHECK_NULL(non_locals_);
+ non_locals_ = new HashMap(InternalizedStringMatch);
+ scope->CollectNonLocals(non_locals_);
+ }
+}
+
+
MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
Handle<JSGlobalObject> global(CurrentContext()->global_object());
Handle<ScriptContextTable> script_contexts(
@@ -760,7 +793,7 @@ void ScopeIterator::CopyContextLocalsToScopeObject(
// TODO(verwaest): Use AddDataProperty instead.
JSObject::SetOwnPropertyIgnoreAttributes(
scope_object, handle(String::cast(scope_info->get(i + start))), value,
- ::NONE)
+ NONE)
.Check();
}
}
@@ -771,7 +804,8 @@ bool ScopeIterator::CopyContextExtensionToScopeObject(
JSReceiver::KeyCollectionType type) {
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, keys, JSReceiver::GetKeys(extension, type), false);
+ isolate_, keys, JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS),
+ false);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 6e5c459037..d4e335a2a5 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -33,11 +33,15 @@ class ScopeIterator {
static const int kScopeDetailsNameIndex = 2;
static const int kScopeDetailsSize = 3;
+ enum Option { DEFAULT, IGNORE_NESTED_SCOPES, COLLECT_NON_LOCALS };
+
ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
- bool ignore_nested_scopes = false);
+ Option options = DEFAULT);
ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
+ ~ScopeIterator() { delete non_locals_; }
+
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
// More scopes?
@@ -68,6 +72,11 @@ class ScopeIterator {
// be an actual context.
Handle<Context> CurrentContext();
+ // Populate the list with collected non-local variable names.
+ void GetNonLocals(List<Handle<String> >* list_out);
+
+ bool ThisIsNonLocal();
+
#ifdef DEBUG
// Debug print of the content of the current scope.
void DebugPrint();
@@ -78,6 +87,7 @@ class ScopeIterator {
FrameInspector* const frame_inspector_;
Handle<Context> context_;
List<Handle<ScopeInfo> > nested_scope_chain_;
+ HashMap* non_locals_;
bool seen_script_scope_;
bool failed_;
@@ -90,7 +100,17 @@ class ScopeIterator {
JSFunction::cast(frame_inspector_->GetFunction()));
}
- void RetrieveScopeChain(Scope* scope, Handle<SharedFunctionInfo> shared_info);
+ static bool InternalizedStringMatch(void* key1, void* key2) {
+ Handle<String> s1(reinterpret_cast<String**>(key1));
+ Handle<String> s2(reinterpret_cast<String**>(key2));
+ DCHECK(s1->IsInternalizedString());
+ DCHECK(s2->IsInternalizedString());
+ return s1.is_identical_to(s2);
+ }
+
+ void RetrieveScopeChain(Scope* scope);
+
+ void CollectNonLocals(Scope* scope);
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index e41c508f44..bd45b71551 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -86,7 +86,6 @@ int BreakLocation::Iterator::GetModeMask(BreakLocatorType type) {
mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
- mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL);
if (type == ALL_BREAK_LOCATIONS) {
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
@@ -146,9 +145,9 @@ void BreakLocation::Iterator::Next() {
// Find the break point at the supplied address, or the closest one before
// the address.
BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc) {
- Iterator it(debug_info, type);
- it.SkipTo(BreakIndexFromAddress(debug_info, type, pc));
+ Address pc) {
+ Iterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipTo(BreakIndexFromAddress(debug_info, pc));
return it.GetBreakLocation();
}
@@ -156,10 +155,10 @@ BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
// Find the break point at the supplied address, or the closest one before
// the address.
void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc,
+ Address pc,
List<BreakLocation>* result_out) {
- int break_index = BreakIndexFromAddress(debug_info, type, pc);
- Iterator it(debug_info, type);
+ int break_index = BreakIndexFromAddress(debug_info, pc);
+ Iterator it(debug_info, ALL_BREAK_LOCATIONS);
it.SkipTo(break_index);
int statement_position = it.statement_position();
while (!it.Done() && it.statement_position() == statement_position) {
@@ -170,11 +169,11 @@ void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc) {
+ Address pc) {
// Run through all break points to locate the one closest to the address.
int closest_break = 0;
int distance = kMaxInt;
- for (Iterator it(debug_info, type); !it.Done(); it.Next()) {
+ for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
// Check if this break point is closer that what was previously found.
if (it.pc() <= pc && pc - it.pc() < distance) {
closest_break = it.break_index();
@@ -188,14 +187,14 @@ int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
- BreakLocatorType type, int position,
+ int position,
BreakPositionAlignment alignment) {
// Run through all break points to locate the one closest to the source
// position.
int closest_break = 0;
int distance = kMaxInt;
- for (Iterator it(debug_info, type); !it.Done(); it.Next()) {
+ for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
int next_position;
if (alignment == STATEMENT_ALIGNED) {
next_position = it.statement_position();
@@ -211,7 +210,7 @@ BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
}
}
- Iterator it(debug_info, type);
+ Iterator it(debug_info, ALL_BREAK_LOCATIONS);
it.SkipTo(closest_break);
return it.GetBreakLocation();
}
@@ -281,10 +280,11 @@ void BreakLocation::SetDebugBreak() {
if (IsDebugBreak()) return;
DCHECK(IsDebugBreakSlot());
- Builtins* builtins = debug_info_->GetIsolate()->builtins();
+ Isolate* isolate = debug_info_->GetIsolate();
+ Builtins* builtins = isolate->builtins();
Handle<Code> target =
IsReturn() ? builtins->Return_DebugBreak() : builtins->Slot_DebugBreak();
- DebugCodegen::PatchDebugBreakSlot(pc(), target);
+ DebugCodegen::PatchDebugBreakSlot(isolate, pc(), target);
DCHECK(IsDebugBreak());
}
@@ -294,21 +294,15 @@ void BreakLocation::ClearDebugBreak() {
if (IsDebuggerStatement()) return;
DCHECK(IsDebugBreakSlot());
- DebugCodegen::ClearDebugBreakSlot(pc());
+ DebugCodegen::ClearDebugBreakSlot(debug_info_->GetIsolate(), pc());
DCHECK(!IsDebugBreak());
}
-bool BreakLocation::IsStepInLocation() const {
- return IsConstructCall() || IsCall();
-}
-
-
bool BreakLocation::IsDebugBreak() const {
- if (IsDebugBreakSlot()) {
- return rinfo().IsPatchedDebugBreakSlotSequence();
- }
- return false;
+ if (IsDebuggerStatement()) return false;
+ DCHECK(IsDebugBreakSlot());
+ return rinfo().IsPatchedDebugBreakSlotSequence();
}
@@ -333,15 +327,12 @@ void Debug::ThreadInit() {
thread_local_.break_frame_id_ = StackFrame::NO_ID;
thread_local_.last_step_action_ = StepNone;
thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.step_count_ = 0;
thread_local_.last_fp_ = 0;
- thread_local_.queued_step_count_ = 0;
- thread_local_.step_into_fp_ = 0;
- thread_local_.step_out_fp_ = 0;
+ thread_local_.target_fp_ = 0;
+ thread_local_.step_in_enabled_ = false;
// TODO(isolates): frames_are_dropped_?
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
- thread_local_.restarter_frame_function_pointer_ = NULL;
}
@@ -427,7 +418,6 @@ void Debug::Unload() {
void Debug::Break(Arguments args, JavaScriptFrame* frame) {
- Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
DCHECK(args.length() == 0);
@@ -453,91 +443,62 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- // Find the break point where execution has stopped.
+ // Find the break location where execution has stopped.
// PC points to the instruction after the current one, possibly a break
// location as well. So the "- 1" to exclude it from the search.
Address call_pc = frame->pc() - 1;
- BreakLocation break_location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
-
- // Check whether step next reached a new statement.
- if (!StepNextContinue(&break_location, frame)) {
- // Decrease steps left if performing multiple steps.
- if (thread_local_.step_count_ > 0) {
- thread_local_.step_count_--;
- }
- }
-
- // If there is one or more real break points check whether any of these are
- // triggered.
- Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
- if (break_points_active_ && break_location.HasBreakPoint()) {
- Handle<Object> break_point_objects = break_location.BreakPointObjects();
- break_points_hit = CheckBreakPoints(break_point_objects);
- }
-
- // If step out is active skip everything until the frame where we need to step
- // out to is reached, unless real breakpoint is hit.
- if (StepOutActive() &&
- frame->fp() != thread_local_.step_out_fp_ &&
- break_points_hit->IsUndefined() ) {
- // Step count should always be 0 for StepOut.
- DCHECK(thread_local_.step_count_ == 0);
- } else if (!break_points_hit->IsUndefined() ||
- (thread_local_.last_step_action_ != StepNone &&
- thread_local_.step_count_ == 0)) {
- // Notify debugger if a real break point is triggered or if performing
- // single stepping with no more steps to perform. Otherwise do another step.
-
- // Clear all current stepping setup.
- ClearStepping();
-
- if (thread_local_.queued_step_count_ > 0) {
- // Perform queued steps
- int step_count = thread_local_.queued_step_count_;
-
- // Clear queue
- thread_local_.queued_step_count_ = 0;
-
- PrepareStep(StepNext, step_count, StackFrame::NO_ID);
- } else {
+ BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+
+ // Find actual break points, if any, and trigger debug break event.
+ if (break_points_active_ && location.HasBreakPoint()) {
+ Handle<Object> break_point_objects = location.BreakPointObjects();
+ Handle<Object> break_points_hit = CheckBreakPoints(break_point_objects);
+ if (!break_points_hit->IsUndefined()) {
+ // Clear all current stepping setup.
+ ClearStepping();
// Notify the debug event listeners.
OnDebugBreak(break_points_hit, false);
+ return;
}
- } else if (thread_local_.last_step_action_ != StepNone) {
- // Hold on to last step action as it is cleared by the call to
- // ClearStepping.
- StepAction step_action = thread_local_.last_step_action_;
- int step_count = thread_local_.step_count_;
-
- // If StepNext goes deeper in code, StepOut until original frame
- // and keep step count queued up in the meantime.
- if (step_action == StepNext && frame->fp() < thread_local_.last_fp_) {
- // Count frames until target frame
- int count = 0;
- JavaScriptFrameIterator it(isolate_);
- while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
- count++;
- it.Advance();
- }
+ }
- // Check that we indeed found the frame we are looking for.
- CHECK(!it.done() && (it.frame()->fp() == thread_local_.last_fp_));
- if (step_count > 1) {
- // Save old count and action to continue stepping after StepOut.
- thread_local_.queued_step_count_ = step_count - 1;
- }
+ // No break point. Check for stepping.
+ StepAction step_action = last_step_action();
+ Address current_fp = frame->UnpaddedFP();
+ Address target_fp = thread_local_.target_fp_;
+ Address last_fp = thread_local_.last_fp_;
- // Set up for StepOut to reach target frame.
- step_action = StepOut;
- step_count = count;
- }
+ bool step_break = true;
+ switch (step_action) {
+ case StepNone:
+ return;
+ case StepOut:
+ // Step out has not reached the target frame yet.
+ if (current_fp < target_fp) return;
+ break;
+ case StepNext:
+ // Step next should not break in a deeper frame.
+ if (current_fp < target_fp) return;
+ // Fall through.
+ case StepIn:
+ step_break = location.IsReturn() || (current_fp != last_fp) ||
+ (thread_local_.last_statement_position_ !=
+ location.code()->SourceStatementPosition(frame->pc()));
+ break;
+ case StepFrame:
+ step_break = current_fp != last_fp;
+ break;
+ }
- // Clear all current stepping setup.
- ClearStepping();
+ // Clear all current stepping setup.
+ ClearStepping();
- // Set up for the remaining steps.
- PrepareStep(step_action, step_count, StackFrame::NO_ID);
+ if (step_break) {
+ // Notify the debug event listeners.
+ OnDebugBreak(isolate_->factory()->undefined_value(), false);
+ } else {
+ // Re-prepare to continue.
+ PrepareStep(step_action);
}
}
@@ -634,7 +595,7 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Find the break point and change it.
BreakLocation location = BreakLocation::FromPosition(
- debug_info, ALL_BREAK_LOCATIONS, *source_position, STATEMENT_ALIGNED);
+ debug_info, *source_position, STATEMENT_ALIGNED);
*source_position = location.statement_position();
location.SetBreakPoint(break_point_object);
@@ -677,8 +638,8 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
DCHECK(position >= 0);
// Find the break point and change it.
- BreakLocation location = BreakLocation::FromPosition(
- debug_info, ALL_BREAK_LOCATIONS, position, alignment);
+ BreakLocation location =
+ BreakLocation::FromPosition(debug_info, position, alignment);
location.SetBreakPoint(break_point_object);
feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
@@ -711,8 +672,7 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
Address pc =
debug_info->code()->entry() + break_point_info->code_position();
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, pc);
+ BreakLocation location = BreakLocation::FromAddress(debug_info, pc);
location.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@@ -748,6 +708,16 @@ void Debug::ClearAllBreakPoints() {
void Debug::FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type) {
+ // Debug utility functions are not subject to debugging.
+ if (function->native_context() == *debug_context()) return;
+
+ if (!function->shared()->IsSubjectToDebugging()) {
+ // Builtin functions are not subject to stepping, but need to be
+ // deoptimized, because optimized code does not check for debug
+ // step in at call sites.
+ Deoptimizer::DeoptimizeFunction(*function);
+ return;
+ }
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
@@ -763,77 +733,6 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
}
-void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
- Handle<BindingsArray> new_bindings(function->function_bindings());
- Handle<Object> bindee(new_bindings->bound_function(), isolate_);
-
- if (!bindee.is_null() && bindee->IsJSFunction()) {
- Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
- FloodWithOneShotGeneric(bindee_function);
- }
-}
-
-
-void Debug::FloodDefaultConstructorWithOneShot(Handle<JSFunction> function) {
- DCHECK(function->shared()->is_default_constructor());
- // Instead of stepping into the function we directly step into the super class
- // constructor.
- Isolate* isolate = function->GetIsolate();
- PrototypeIterator iter(isolate, function);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (!proto->IsJSFunction()) return; // Object.prototype
- Handle<JSFunction> function_proto = Handle<JSFunction>::cast(proto);
- FloodWithOneShotGeneric(function_proto);
-}
-
-
-void Debug::FloodWithOneShotGeneric(Handle<JSFunction> function,
- Handle<Object> holder) {
- if (function->shared()->bound()) {
- FloodBoundFunctionWithOneShot(function);
- } else if (function->shared()->is_default_constructor()) {
- FloodDefaultConstructorWithOneShot(function);
- } else {
- Isolate* isolate = function->GetIsolate();
- // Don't allow step into functions in the native context.
- if (function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kFunctionApply) ||
- function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kFunctionCall)) {
- // Handle function.apply and function.call separately to flood the
- // function to be called and not the code for Builtins::FunctionApply or
- // Builtins::FunctionCall. The receiver of call/apply is the target
- // function.
- if (!holder.is_null() && holder->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
- FloodWithOneShotGeneric(js_function);
- }
- } else {
- FloodWithOneShot(function);
- }
- }
-}
-
-
-void Debug::FloodHandlerWithOneShot() {
- // Iterate through the JavaScript stack looking for handlers.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- int stack_slots = 0; // The computed stack slot count is not used.
- if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) {
- // Flood the function with the catch/finally block with break points.
- FloodWithOneShot(Handle<JSFunction>(frame->function()));
- return;
- }
- }
-}
-
-
void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
if (type == BreakUncaughtException) {
break_on_uncaught_exception_ = enable;
@@ -859,43 +758,66 @@ FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
}
-void Debug::PrepareStep(StepAction step_action,
- int step_count,
- StackFrame::Id frame_id) {
- HandleScope scope(isolate_);
+void Debug::PrepareStepIn(Handle<JSFunction> function) {
+ if (!is_active()) return;
+ if (last_step_action() < StepIn) return;
+ if (in_debug_scope()) return;
+ if (thread_local_.step_in_enabled_) {
+ FloodWithOneShot(function);
+ }
+}
- DCHECK(in_debug_scope());
- // Remember this step action and count.
- thread_local_.last_step_action_ = step_action;
- if (step_action == StepOut) {
- // For step out target frame will be found on the stack so there is no need
- // to set step counter for it. It's expected to always be 0 for StepOut.
- thread_local_.step_count_ = 0;
- } else {
- thread_local_.step_count_ = step_count;
+void Debug::PrepareStepOnThrow() {
+ if (!is_active()) return;
+ if (last_step_action() == StepNone) return;
+ if (in_debug_scope()) return;
+
+ ClearOneShot();
+
+ // Iterate through the JavaScript stack looking for handlers.
+ JavaScriptFrameIterator it(isolate_);
+ while (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ int stack_slots = 0; // The computed stack slot count is not used.
+ if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) break;
+ it.Advance();
+ }
+
+ // Find the closest Javascript frame we can flood with one-shots.
+ while (!it.done() &&
+ !it.frame()->function()->shared()->IsSubjectToDebugging()) {
+ it.Advance();
}
+ if (it.done()) return; // No suitable Javascript catch handler.
+
+ FloodWithOneShot(Handle<JSFunction>(it.frame()->function()));
+}
+
+
+void Debug::PrepareStep(StepAction step_action) {
+ HandleScope scope(isolate_);
+
+ DCHECK(in_debug_scope());
+
// Get the frame where the execution has stopped and skip the debug frame if
// any. The debug frame will only be present if execution was stopped due to
// hitting a break point. In other situations (e.g. unhandled exception) the
// debug frame is not present.
- StackFrame::Id id = break_frame_id();
- if (id == StackFrame::NO_ID) {
- // If there is no JavaScript stack don't do anything.
- return;
- }
- if (frame_id != StackFrame::NO_ID) {
- id = frame_id;
- }
- JavaScriptFrameIterator frames_it(isolate_, id);
+ StackFrame::Id frame_id = break_frame_id();
+ // If there is no JavaScript stack don't do anything.
+ if (frame_id == StackFrame::NO_ID) return;
+
+ JavaScriptFrameIterator frames_it(isolate_, frame_id);
JavaScriptFrame* frame = frames_it.frame();
feature_tracker()->Track(DebugFeatureTracker::kStepping);
- // First of all ensure there is one-shot break points in the top handler
- // if any.
- FloodHandlerWithOneShot();
+ // Remember this step action and count.
+ thread_local_.last_step_action_ = step_action;
+ STATIC_ASSERT(StepFrame > StepIn);
+ thread_local_.step_in_enabled_ = (step_action >= StepIn);
// If the function on the top frame is unresolved perform step out. This will
// be the case when calling unknown function and having the debugger stopped
@@ -926,142 +848,57 @@ void Debug::PrepareStep(StepAction step_action,
// PC points to the instruction after the current one, possibly a break
// location as well. So the "- 1" to exclude it from the search.
Address call_pc = summary.pc() - 1;
- BreakLocation location =
- BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
+ BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
- // If this is the last break code target step out is the only possibility.
- if (location.IsReturn() || step_action == StepOut) {
- if (step_action == StepOut) {
- // Skip step_count frames starting with the current one.
- while (step_count-- > 0 && !frames_it.done()) {
- frames_it.Advance();
- }
- } else {
- DCHECK(location.IsReturn());
- frames_it.Advance();
- }
- // Skip native and extension functions on the stack.
- while (!frames_it.done() &&
- !frames_it.frame()->function()->shared()->IsSubjectToDebugging()) {
- frames_it.Advance();
- }
- // Step out: If there is a JavaScript caller frame, we need to
- // flood it with breakpoints.
- if (!frames_it.done()) {
- // Fill the function to return to with one-shot break points.
- JSFunction* function = frames_it.frame()->function();
- FloodWithOneShot(Handle<JSFunction>(function));
- // Set target frame pointer.
- ActivateStepOut(frames_it.frame());
- }
- return;
- }
+ // At a return statement we will step out either way.
+ if (location.IsReturn()) step_action = StepOut;
- if (step_action != StepNext && step_action != StepMin) {
- // If there's restarter frame on top of the stack, just get the pointer
- // to function which is going to be restarted.
- if (thread_local_.restarter_frame_function_pointer_ != NULL) {
- Handle<JSFunction> restarted_function(
- JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
- FloodWithOneShot(restarted_function);
- } else if (location.IsCall()) {
- // Find target function on the expression stack.
- // Expression stack looks like this (top to bottom):
- // argN
- // ...
- // arg0
- // Receiver
- // Function to call
- int num_expressions_without_args =
- frame->ComputeExpressionsCount() - location.CallArgumentsCount();
- DCHECK(num_expressions_without_args >= 2);
- Object* fun = frame->GetExpression(num_expressions_without_args - 2);
-
- // Flood the actual target of call/apply.
- if (fun->IsJSFunction()) {
- Isolate* isolate = JSFunction::cast(fun)->GetIsolate();
- Code* apply = isolate->builtins()->builtin(Builtins::kFunctionApply);
- Code* call = isolate->builtins()->builtin(Builtins::kFunctionCall);
- // Find target function on the expression stack for expression like
- // Function.call.call...apply(...)
- int i = 1;
- while (fun->IsJSFunction()) {
- Code* code = JSFunction::cast(fun)->shared()->code();
- if (code != apply && code != call) break;
- DCHECK(num_expressions_without_args >= i);
- fun = frame->GetExpression(num_expressions_without_args - i);
- i--;
- }
- }
-
- if (fun->IsJSFunction()) {
- Handle<JSFunction> js_function(JSFunction::cast(fun));
- FloodWithOneShotGeneric(js_function);
- }
- }
-
- ActivateStepIn(frame);
- }
-
- // Fill the current function with one-shot break points even for step in on
- // a call target as the function called might be a native function for
- // which step in will not stop. It also prepares for stepping in
- // getters/setters.
- // If we are stepping into another frame, only fill calls and returns.
- FloodWithOneShot(function, step_action == StepFrame ? CALLS_AND_RETURNS
- : ALL_BREAK_LOCATIONS);
-
- // Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
debug_info->code()->SourceStatementPosition(summary.pc());
thread_local_.last_fp_ = frame->UnpaddedFP();
-}
-
-// Check whether the current debug break should be reported to the debugger. It
-// is used to have step next and step in only report break back to the debugger
-// if on a different frame or in a different statement. In some situations
-// there will be several break points in the same statement when the code is
-// flooded with one-shot break points. This function helps to perform several
-// steps before reporting break back to the debugger.
-bool Debug::StepNextContinue(BreakLocation* break_location,
- JavaScriptFrame* frame) {
- // StepNext and StepOut shouldn't bring us deeper in code, so last frame
- // shouldn't be a parent of current frame.
- StepAction step_action = thread_local_.last_step_action_;
-
- if (step_action == StepNext || step_action == StepOut) {
- if (frame->fp() < thread_local_.last_fp_) return true;
- }
-
- // We stepped into a new frame if the frame pointer changed.
- if (step_action == StepFrame) {
- return frame->UnpaddedFP() == thread_local_.last_fp_;
- }
-
- // If the step last action was step next or step in make sure that a new
- // statement is hit.
- if (step_action == StepNext || step_action == StepIn) {
- // Never continue if returning from function.
- if (break_location->IsReturn()) return false;
-
- // Continue if we are still on the same frame and in the same statement.
- int current_statement_position =
- break_location->code()->SourceStatementPosition(frame->pc());
- return thread_local_.last_fp_ == frame->UnpaddedFP() &&
- thread_local_.last_statement_position_ == current_statement_position;
+ switch (step_action) {
+ case StepNone:
+ UNREACHABLE();
+ break;
+ case StepOut:
+ // Advance to caller frame.
+ frames_it.Advance();
+ // Skip native and extension functions on the stack.
+ while (!frames_it.done() &&
+ !frames_it.frame()->function()->shared()->IsSubjectToDebugging()) {
+ // Builtin functions are not subject to stepping, but need to be
+ // deoptimized to include checks for step-in at call sites.
+ Deoptimizer::DeoptimizeFunction(frames_it.frame()->function());
+ frames_it.Advance();
+ }
+ if (frames_it.done()) {
+ // Stepping out to the embedder. Disable step-in to avoid stepping into
+ // the next (unrelated) call that the embedder makes.
+ thread_local_.step_in_enabled_ = false;
+ } else {
+ // Fill the caller function to return to with one-shot break points.
+ Handle<JSFunction> caller_function(frames_it.frame()->function());
+ FloodWithOneShot(caller_function);
+ thread_local_.target_fp_ = frames_it.frame()->UnpaddedFP();
+ }
+ // Clear last position info. For stepping out it does not matter.
+ thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_fp_ = 0;
+ break;
+ case StepNext:
+ thread_local_.target_fp_ = frame->UnpaddedFP();
+ FloodWithOneShot(function);
+ break;
+ case StepIn:
+ FloodWithOneShot(function);
+ break;
+ case StepFrame:
+ // No point in setting one-shot breaks at places where we are not about
+ // to leave the current frame.
+ FloodWithOneShot(function, CALLS_AND_RETURNS);
+ break;
}
-
- // No step next action - don't continue.
- return false;
-}
-
-
-// Check whether the code object at the specified address is a debug break code
-// object.
-bool Debug::IsDebugBreak(Address addr) {
- Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->is_debug_stub();
}
@@ -1103,41 +940,15 @@ Handle<Object> Debug::GetSourceBreakLocations(
}
-// Handle stepping into a function.
-void Debug::HandleStepIn(Handle<Object> function_obj, bool is_constructor) {
- // Flood getter/setter if we either step in or step to another frame.
- bool step_frame = thread_local_.last_step_action_ == StepFrame;
- if (!StepInActive() && !step_frame) return;
- if (!function_obj->IsJSFunction()) return;
- Handle<JSFunction> function = Handle<JSFunction>::cast(function_obj);
- Isolate* isolate = function->GetIsolate();
-
- StackFrameIterator it(isolate);
- it.Advance();
- // For constructor functions skip another frame.
- if (is_constructor) {
- DCHECK(it.frame()->is_construct());
- it.Advance();
- }
- Address fp = it.frame()->fp();
-
- // Flood the function with one-shot break points if it is called from where
- // step into was requested, or when stepping into a new frame.
- if (fp == thread_local_.step_into_fp_ || step_frame) {
- FloodWithOneShotGeneric(function, Handle<Object>());
- }
-}
-
-
void Debug::ClearStepping() {
// Clear the various stepping setup.
ClearOneShot();
- ClearStepIn();
- ClearStepOut();
- ClearStepNext();
- // Clear multiple step counter.
- thread_local_.step_count_ = 0;
+ thread_local_.last_step_action_ = StepNone;
+ thread_local_.step_in_enabled_ = false;
+ thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_fp_ = 0;
+ thread_local_.target_fp_ = 0;
}
@@ -1158,32 +969,9 @@ void Debug::ClearOneShot() {
}
-void Debug::ActivateStepIn(StackFrame* frame) {
- DCHECK(!StepOutActive());
- thread_local_.step_into_fp_ = frame->UnpaddedFP();
-}
-
-
-void Debug::ClearStepIn() {
- thread_local_.step_into_fp_ = 0;
-}
-
-
-void Debug::ActivateStepOut(StackFrame* frame) {
- DCHECK(!StepInActive());
- thread_local_.step_out_fp_ = frame->UnpaddedFP();
-}
-
-
-void Debug::ClearStepOut() {
- thread_local_.step_out_fp_ = 0;
-}
-
-
-void Debug::ClearStepNext() {
- thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
- thread_local_.last_fp_ = 0;
+void Debug::EnableStepIn() {
+ STATIC_ASSERT(StepFrame > StepIn);
+ thread_local_.step_in_enabled_ = (last_step_action() >= StepIn);
}
@@ -1330,7 +1118,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
{
SharedFunctionInfo::Iterator iterator(isolate_);
while (SharedFunctionInfo* shared = iterator.Next()) {
- if (!shared->optimized_code_map()->IsSmi()) {
+ if (!shared->OptimizedCodeMapIsCleared()) {
shared->ClearOptimizedCodeMap();
}
}
@@ -1400,6 +1188,7 @@ class SharedFunctionInfoFinder {
target_position_(target_position) {}
void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = NULL) {
+ if (!shared->IsSubjectToDebugging()) return;
int start_position = shared->function_token_position();
if (start_position == RelocInfo::kNoPosition) {
start_position = shared->start_position();
@@ -1449,7 +1238,7 @@ class SharedFunctionInfoFinder {
// cannot be compiled without context (need to find outer compilable SFI etc.)
Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int position) {
- while (true) {
+ for (int iteration = 0;; iteration++) {
// Go through all shared function infos associated with this script to
// find the inner most function containing this position.
// If there is no shared function info for this script at all, there is
@@ -1467,7 +1256,18 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
shared = finder.Result();
if (shared == NULL) break;
// We found it if it's already compiled and has debug code.
- if (shared->HasDebugCode()) return handle(shared);
+ if (shared->HasDebugCode()) {
+ Handle<SharedFunctionInfo> shared_handle(shared);
+ // If the iteration count is larger than 1, we had to compile the outer
+ // function in order to create this shared function info. So there can
+ // be no JSFunction referencing it. We can anticipate creating a debug
+ // info while bypassing PrepareFunctionForBreakpoints.
+ if (iteration > 1) {
+ AllowHeapAllocation allow_before_return;
+ CreateDebugInfo(shared_handle);
+ }
+ return shared_handle;
+ }
}
// If not, compile to reveal inner functions, if possible.
if (shared->allows_lazy_compilation_without_context()) {
@@ -1500,6 +1300,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
closure = finder.ResultClosure();
shared = finder.Result();
}
+ if (shared == NULL) break;
HandleScope scope(isolate_);
if (closure == NULL) {
if (!Compiler::CompileDebugCode(handle(shared))) break;
@@ -1527,11 +1328,13 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
if (!PrepareFunctionForBreakPoints(shared)) return false;
- // Make sure IC state is clean. This is so that we correctly flood
- // accessor pairs when stepping in.
- shared->code()->ClearInlineCaches();
- shared->ClearTypeFeedbackInfo();
+ CreateDebugInfo(shared);
+
+ return true;
+}
+
+void Debug::CreateDebugInfo(Handle<SharedFunctionInfo> shared) {
// Create the debug info object.
DCHECK(shared->HasDebugCode());
Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
@@ -1540,8 +1343,6 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
node->set_next(debug_info_list_);
debug_info_list_ = node;
-
- return true;
}
@@ -1612,14 +1413,11 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEdit::FrameDropMode mode,
- Object** restarter_frame_function_pointer) {
+ LiveEdit::FrameDropMode mode) {
if (mode != LiveEdit::CURRENTLY_SET_MODE) {
thread_local_.frame_drop_mode_ = mode;
}
thread_local_.break_frame_id_ = new_break_frame_id;
- thread_local_.restarter_frame_function_pointer_ =
- restarter_frame_function_pointer;
}
@@ -1674,8 +1472,7 @@ void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
// has stopped.
Address call_pc = summary.pc() - 1;
List<BreakLocation> locations;
- BreakLocation::FromAddressSameStatement(debug_info, ALL_BREAK_LOCATIONS,
- call_pc, &locations);
+ BreakLocation::FromAddressSameStatement(debug_info, call_pc, &locations);
for (BreakLocation location : locations) {
if (location.pc() <= summary.pc()) {
@@ -1690,7 +1487,7 @@ void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
if (frame_it.frame()->id() != frame_id) continue;
}
}
- if (location.IsStepInLocation()) results_out->Add(location.position());
+ if (location.IsCall()) results_out->Add(location.position());
}
}
@@ -1763,6 +1560,7 @@ MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
void Debug::OnThrow(Handle<Object> exception) {
if (in_debug_scope() || ignore_events()) return;
+ PrepareStepOnThrow();
// Temporarily clear any scheduled_exception to allow evaluating
// JavaScript from the debug event handler.
HandleScope scope(isolate_);
@@ -1824,9 +1622,6 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
- // Clear all current stepping setup.
- ClearStepping();
-
// Create the event data object.
Handle<Object> event_data;
// Bail out and don't call debugger if exception.
@@ -2285,6 +2080,9 @@ void Debug::HandleDebugBreak() {
isolate_->stack_guard()->ClearDebugBreak();
+ // Clear stepping to avoid duplicate breaks.
+ ClearStepping();
+
ProcessDebugMessages(debug_command_only);
}
@@ -2335,7 +2133,6 @@ DebugScope::DebugScope(Debug* debug)
}
-
DebugScope::~DebugScope() {
if (!failed_ && prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index c24789d376..4b098db49a 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -31,16 +31,13 @@ class DebugScope;
// Step actions. NOTE: These values are in macros.py as well.
-enum StepAction {
+enum StepAction : int8_t {
StepNone = -1, // Stepping not prepared.
StepOut = 0, // Step out of the current function.
StepNext = 1, // Step to the next statement in the current function.
StepIn = 2, // Step into new functions invoked or the next statement
// in the current function.
- StepMin = 3, // Perform a minimum step in the current function.
- StepInMin = 4, // Step into new functions invoked or perform a minimum step
- // in the current function.
- StepFrame = 5 // Step into a new frame or return to previous frame.
+ StepFrame = 3 // Step into a new frame or return to previous frame.
};
@@ -67,15 +64,12 @@ class BreakLocation {
public:
// Find the break point at the supplied address, or the closest one before
// the address.
- static BreakLocation FromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc);
+ static BreakLocation FromAddress(Handle<DebugInfo> debug_info, Address pc);
- static void FromAddressSameStatement(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc,
+ static void FromAddressSameStatement(Handle<DebugInfo> debug_info, Address pc,
List<BreakLocation>* result_out);
- static BreakLocation FromPosition(Handle<DebugInfo> debug_info,
- BreakLocatorType type, int position,
+ static BreakLocation FromPosition(Handle<DebugInfo> debug_info, int position,
BreakPositionAlignment alignment);
bool IsDebugBreak() const;
@@ -86,15 +80,6 @@ class BreakLocation {
inline bool IsCall() const {
return RelocInfo::IsDebugBreakSlotAtCall(rmode_);
}
- inline bool IsConstructCall() const {
- return RelocInfo::IsDebugBreakSlotAtConstructCall(rmode_);
- }
- inline int CallArgumentsCount() const {
- DCHECK(IsCall());
- return RelocInfo::DebugBreakCallArgumentsCount(data_);
- }
-
- bool IsStepInLocation() const;
inline bool HasBreakPoint() const {
return debug_info_->HasBreakPoint(pc_offset_);
}
@@ -109,7 +94,7 @@ class BreakLocation {
inline RelocInfo rinfo() const {
- return RelocInfo(pc(), rmode(), data_, code());
+ return RelocInfo(debug_info_->GetIsolate(), pc(), rmode(), data_, code());
}
inline int position() const { return position_; }
@@ -164,8 +149,7 @@ class BreakLocation {
friend class Debug;
- static int BreakIndexFromAddress(Handle<DebugInfo> debug_info,
- BreakLocatorType type, Address pc);
+ static int BreakIndexFromAddress(Handle<DebugInfo> debug_info, Address pc);
void SetDebugBreak();
void ClearDebugBreak();
@@ -416,25 +400,16 @@ class Debug {
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type = ALL_BREAK_LOCATIONS);
- void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
- void FloodDefaultConstructorWithOneShot(Handle<JSFunction> function);
- void FloodWithOneShotGeneric(Handle<JSFunction> function,
- Handle<Object> holder = Handle<Object>());
- void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
// Stepping handling.
- void PrepareStep(StepAction step_action,
- int step_count,
- StackFrame::Id frame_id);
+ void PrepareStep(StepAction step_action);
+ void PrepareStepIn(Handle<JSFunction> function);
+ void PrepareStepOnThrow();
void ClearStepping();
void ClearStepOut();
- bool IsStepping() { return thread_local_.step_count_ > 0; }
- bool StepNextContinue(BreakLocation* location, JavaScriptFrame* frame);
- bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
- void HandleStepIn(Handle<Object> function_obj, bool is_constructor);
- bool StepOutActive() { return thread_local_.step_out_fp_ != 0; }
+ void EnableStepIn();
void GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
List<int>* results_out);
@@ -446,6 +421,7 @@ class Debug {
// function needs to be compiled already.
bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function);
+ void CreateDebugInfo(Handle<SharedFunctionInfo> shared);
static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
template <typename C>
@@ -455,9 +431,6 @@ class Debug {
Handle<Object> FindSharedFunctionInfoInScript(Handle<Script> script,
int position);
- // Returns true if the current stub call is patched to call the debugger.
- static bool IsDebugBreak(Address addr);
-
static Handle<Object> GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
BreakPositionAlignment position_aligment);
@@ -470,8 +443,7 @@ class Debug {
// Support for LiveEdit
void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
- LiveEdit::FrameDropMode mode,
- Object** restarter_frame_function_pointer);
+ LiveEdit::FrameDropMode mode);
// Threading support.
char* ArchiveDebug(char* to);
@@ -518,13 +490,8 @@ class Debug {
return reinterpret_cast<Address>(&after_break_target_);
}
- Address restarter_frame_function_pointer_address() {
- Object*** address = &thread_local_.restarter_frame_function_pointer_;
- return reinterpret_cast<Address>(address);
- }
-
- Address step_in_fp_addr() {
- return reinterpret_cast<Address>(&thread_local_.step_into_fp_);
+ Address step_in_enabled_address() {
+ return reinterpret_cast<Address>(&thread_local_.step_in_enabled_);
}
StepAction last_step_action() { return thread_local_.last_step_action_; }
@@ -585,10 +552,7 @@ class Debug {
void InvokeMessageHandler(MessageImpl message);
void ClearOneShot();
- void ActivateStepIn(StackFrame* frame);
- void ClearStepIn();
void ActivateStepOut(StackFrame* frame);
- void ClearStepNext();
void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
Handle<Object> CheckBreakPoints(Handle<Object> break_point);
bool CheckBreakPoint(Handle<Object> break_point_object);
@@ -653,30 +617,20 @@ class Debug {
// Source statement position from last step next action.
int last_statement_position_;
- // Number of steps left to perform before debug event.
- int step_count_;
-
// Frame pointer from last step next or step frame action.
Address last_fp_;
- // Number of queued steps left to perform before debug event.
- int queued_step_count_;
-
- // Frame pointer for frame from which step in was performed.
- Address step_into_fp_;
+ // Frame pointer of the target frame we want to arrive at.
+ Address target_fp_;
- // Frame pointer for the frame where debugger should be called when current
- // step out action is completed.
- Address step_out_fp_;
+ // Whether functions are flooded on entry for step-in and step-frame.
+ // If we stepped out to the embedder, disable flooding to spill stepping
+ // to the next call that the embedder makes.
+ bool step_in_enabled_;
// Stores the way how LiveEdit has patched the stack. It is used when
// debugger returns control back to user script.
LiveEdit::FrameDropMode frame_drop_mode_;
-
- // When restarter frame is on stack, stores the address
- // of the pointer to function being restarted. Otherwise (most of the time)
- // stores NULL. This pointer is used with 'step in' implementation.
- Object** restarter_frame_function_pointer_;
};
// Storage location for registers when handling debug break calls
@@ -773,8 +727,6 @@ class DebugCodegen : public AllStatic {
static void GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode);
- static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
-
// FrameDropper is a code replacement for a JavaScript frame with possibly
// several frames above.
// There is no calling conventions here, because it never actually gets
@@ -782,11 +734,11 @@ class DebugCodegen : public AllStatic {
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
- static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc = -1);
+ static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode);
- static void PatchDebugBreakSlot(Address pc, Handle<Code> code);
- static void ClearDebugBreakSlot(Address pc);
+ static void PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code);
+ static void ClearDebugBreakSlot(Isolate* isolate, Address pc);
};
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 50bd0a9c06..bc2c69602b 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -62,9 +62,7 @@ Debug.ExceptionBreak = { Caught : 0,
Debug.StepAction = { StepOut: 0,
StepNext: 1,
StepIn: 2,
- StepMin: 3,
- StepInMin: 4,
- StepFrame: 5 };
+ StepFrame: 3 };
// The different types of scripts matching enum ScriptType in objects.h.
Debug.ScriptType = { Native: 0,
@@ -945,17 +943,14 @@ function ExecutionState(break_id) {
this.selected_frame = 0;
}
-ExecutionState.prototype.prepareStep = function(opt_action, opt_count,
- opt_callframe) {
- var action = Debug.StepAction.StepIn;
- if (!IS_UNDEFINED(opt_action)) action = TO_NUMBER(opt_action);
- var count = opt_count ? TO_NUMBER(opt_count) : 1;
- var callFrameId = 0;
- if (!IS_UNDEFINED(opt_callframe)) {
- callFrameId = opt_callframe.details_.frameId();
+ExecutionState.prototype.prepareStep = function(action) {
+ if (action === Debug.StepAction.StepIn ||
+ action === Debug.StepAction.StepOut ||
+ action === Debug.StepAction.StepNext ||
+ action === Debug.StepAction.StepFrame) {
+ return %PrepareStep(this.break_id, action);
}
-
- return %PrepareStep(this.break_id, action, count, callFrameId);
+ throw MakeTypeError(kDebuggerType);
};
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
@@ -1459,28 +1454,15 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
// Check for arguments for continue.
if (request.arguments) {
- var count = 1;
var action = Debug.StepAction.StepIn;
// Pull out arguments.
var stepaction = request.arguments.stepaction;
- var stepcount = request.arguments.stepcount;
-
- // Get the stepcount argument if any.
- if (stepcount) {
- count = TO_NUMBER(stepcount);
- if (count < 0) {
- throw MakeError(kDebugger,
- 'Invalid stepcount argument "' + stepcount + '".');
- }
- }
// Get the stepaction argument.
if (stepaction) {
if (stepaction == 'in') {
action = Debug.StepAction.StepIn;
- } else if (stepaction == 'min') {
- action = Debug.StepAction.StepMin;
} else if (stepaction == 'next') {
action = Debug.StepAction.StepNext;
} else if (stepaction == 'out') {
@@ -1492,7 +1474,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
}
// Set up the VM for stepping.
- this.exec_state_.prepareStep(action, count);
+ this.exec_state_.prepareStep(action);
}
// VM should be running after executing this request.
@@ -2605,6 +2587,9 @@ function ValueToProtocolValue_(value, mirror_serializer) {
utils.InstallConstants(global, [
"Debug", Debug,
"DebugCommandProcessor", DebugCommandProcessor,
+ "BreakEvent", BreakEvent,
+ "CompileEvent", CompileEvent,
+ "BreakPoint", BreakPoint,
]);
// Functions needed by the debugger runtime.
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index fb8d495af8..d489a01441 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -23,24 +23,24 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(pc, kSize);
+ CodePatcher patcher(isolate, pc, kSize);
// Add a label for checking the size of the code used for returning.
Label check_codesize;
@@ -105,33 +105,29 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, -1 * kPointerSize));
__ pop(edi); // Function.
__ pop(ebp);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Clear new.target register as a safety measure.
+ __ mov(edx, masm->isolate()->factory()->undefined_value());
+
// Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
+ __ jmp(ebx);
}
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 7e991b62bc..f1f3f2391a 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -4,6 +4,8 @@
#include "src/debug/liveedit.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
@@ -13,9 +15,7 @@
#include "src/global-handles.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/v8.h"
#include "src/v8memory.h"
@@ -811,10 +811,6 @@ bool LiveEdit::SetAfterBreakTarget(Debug* debug) {
switch (debug->thread_local_.frame_drop_mode_) {
case FRAMES_UNTOUCHED:
return false;
- case FRAME_DROPPED_IN_IC_CALL:
- // We must have been calling IC stub. Do not go there anymore.
- code = isolate->builtins()->builtin(Builtins::kPlainReturn_LiveEdit);
- break;
case FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
// Debug break slot stub does not return normally, instead it manually
// cleans the stack and jumps. We should patch the jump address.
@@ -1146,9 +1142,6 @@ void LiveEdit::ReplaceFunctionCode(
LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
- shared_info->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kJSConstructStubGeneric));
-
DeoptimizeDependentFunctions(*shared_info);
isolate->compilation_cache()->Remove(shared_info);
}
@@ -1310,7 +1303,8 @@ static Handle<Code> PatchPositionsInCode(
int new_position = TranslatePosition(position,
position_change_array);
if (position != new_position) {
- RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
+ RelocInfo info_copy(rinfo->isolate(), rinfo->pc(), rinfo->rmode(),
+ new_position, NULL);
buffer_writer.Write(&info_copy);
continue;
}
@@ -1493,17 +1487,13 @@ static bool FixTryCatchHandler(StackFrame* top_frame,
// a. successful work of frame dropper code which eventually gets control,
// b. being compatible with regular stack structure for various stack
// iterators.
-// Returns address of stack allocated pointer to restarted function,
-// the value that is called 'restarter_frame_function_pointer'. The value
-// at this address (possibly updated by GC) may be used later when preparing
-// 'step in' operation.
// Frame structure (conforms InternalFrame structure):
// -- code
// -- SMI maker
// -- function (slot is called "context")
// -- frame base
-static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
- Handle<Code> code) {
+static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
+ Handle<Code> code) {
DCHECK(bottom_js_frame->is_java_script());
Address fp = bottom_js_frame->fp();
@@ -1515,9 +1505,6 @@ static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
Smi::FromInt(StackFrame::INTERNAL);
-
- return reinterpret_cast<Object**>(&Memory::Object_at(
- fp + StandardFrameConstants::kContextOffset));
}
@@ -1525,11 +1512,9 @@ static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
// frames in range. Anyway the bottom frame is restarted rather than dropped,
// and therefore has to be a JavaScript frame.
// Returns error message or NULL.
-static const char* DropFrames(Vector<StackFrame*> frames,
- int top_frame_index,
+static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
int bottom_js_frame_index,
- LiveEdit::FrameDropMode* mode,
- Object*** restarter_frame_function_pointer) {
+ LiveEdit::FrameDropMode* mode) {
if (!LiveEdit::kFrameDropperSupported) {
return "Stack manipulations are not supported in this architecture.";
}
@@ -1544,12 +1529,8 @@ static const char* DropFrames(Vector<StackFrame*> frames,
Isolate* isolate = bottom_js_frame->isolate();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding = true;
- if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->is_debug_stub()) {
- // OK, we can drop inline cache calls.
- *mode = LiveEdit::FRAME_DROPPED_IN_IC_CALL;
- } else if (pre_top_frame_code ==
- isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
+ if (pre_top_frame_code ==
+ isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
// OK, we can drop debug break slot.
*mode = LiveEdit::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
} else if (pre_top_frame_code ==
@@ -1643,10 +1624,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
*top_frame_pc_address = code->entry();
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
- *restarter_frame_function_pointer =
- SetUpFrameDropperFrame(bottom_js_frame, code);
-
- DCHECK((**restarter_frame_function_pointer)->IsJSFunction());
+ SetUpFrameDropperFrame(bottom_js_frame, code);
for (Address a = unused_stack_top;
a < unused_stack_bottom;
@@ -1662,20 +1640,60 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Finding no such frames does not mean error.
class MultipleFunctionTarget {
public:
- MultipleFunctionTarget(Handle<JSArray> shared_info_array,
- Handle<JSArray> result)
- : m_shared_info_array(shared_info_array),
- m_result(result) {}
+ MultipleFunctionTarget(Handle<JSArray> old_shared_array,
+ Handle<JSArray> new_shared_array,
+ Handle<JSArray> result)
+ : old_shared_array_(old_shared_array),
+ new_shared_array_(new_shared_array),
+ result_(result) {}
bool MatchActivation(StackFrame* frame,
LiveEdit::FunctionPatchabilityStatus status) {
- return CheckActivation(m_shared_info_array, m_result, frame, status);
+ return CheckActivation(old_shared_array_, result_, frame, status);
}
const char* GetNotFoundMessage() const {
return NULL;
}
+ bool FrameUsesNewTarget(StackFrame* frame) {
+ if (!frame->is_java_script()) return false;
+ JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
+ Handle<SharedFunctionInfo> old_shared(jsframe->function()->shared());
+ Isolate* isolate = old_shared->GetIsolate();
+ int len = GetArrayLength(old_shared_array_);
+ // Find corresponding new shared function info and return whether it
+ // references new.target.
+ for (int i = 0; i < len; i++) {
+ HandleScope scope(isolate);
+ Handle<Object> old_element =
+ Object::GetElement(isolate, old_shared_array_, i).ToHandleChecked();
+ if (!old_shared.is_identical_to(UnwrapSharedFunctionInfoFromJSValue(
+ Handle<JSValue>::cast(old_element)))) {
+ continue;
+ }
+
+ Handle<Object> new_element =
+ Object::GetElement(isolate, new_shared_array_, i).ToHandleChecked();
+ if (new_element->IsUndefined()) return false;
+ Handle<SharedFunctionInfo> new_shared =
+ UnwrapSharedFunctionInfoFromJSValue(
+ Handle<JSValue>::cast(new_element));
+ if (new_shared->scope_info()->HasNewTarget()) {
+ SetElementSloppy(
+ result_, i,
+ Handle<Smi>(
+ Smi::FromInt(
+ LiveEdit::FUNCTION_BLOCKED_NO_NEW_TARGET_ON_RESTART),
+ isolate));
+ return true;
+ }
+ return false;
+ }
+ return false;
+ }
+
private:
- Handle<JSArray> m_shared_info_array;
- Handle<JSArray> m_result;
+ Handle<JSArray> old_shared_array_;
+ Handle<JSArray> new_shared_array_;
+ Handle<JSArray> result_;
};
@@ -1722,11 +1740,14 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE;
break;
}
- if (frame->is_java_script() &&
- JavaScriptFrame::cast(frame)->function()->shared()->is_generator()) {
- non_droppable_frame_found = true;
- non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
- break;
+ if (frame->is_java_script()) {
+ SharedFunctionInfo* shared =
+ JavaScriptFrame::cast(frame)->function()->shared();
+ if (shared->is_generator()) {
+ non_droppable_frame_found = true;
+ non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
+ break;
+ }
}
if (target.MatchActivation(
frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
@@ -1750,6 +1771,9 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
}
}
+ // We cannot restart a frame that uses new.target.
+ if (target.FrameUsesNewTarget(frames[bottom_js_frame_index])) return NULL;
+
if (!do_drop) {
// We are in check-only mode.
return NULL;
@@ -1761,10 +1785,8 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
}
LiveEdit::FrameDropMode drop_mode = LiveEdit::FRAMES_UNTOUCHED;
- Object** restarter_frame_function_pointer = NULL;
- const char* error_message = DropFrames(frames, top_frame_index,
- bottom_js_frame_index, &drop_mode,
- &restarter_frame_function_pointer);
+ const char* error_message =
+ DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode);
if (error_message != NULL) {
return error_message;
@@ -1778,8 +1800,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
break;
}
}
- debug->FramesHaveBeenDropped(
- new_id, drop_mode, restarter_frame_function_pointer);
+ debug->FramesHaveBeenDropped(new_id, drop_mode);
return NULL;
}
@@ -1787,9 +1808,10 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
// Fills result array with statuses of functions. Modifies the stack
// removing all listed function if possible and if do_drop is true.
static const char* DropActivationsInActiveThread(
- Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
- MultipleFunctionTarget target(shared_info_array, result);
- Isolate* isolate = shared_info_array->GetIsolate();
+ Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
+ Handle<JSArray> result, bool do_drop) {
+ MultipleFunctionTarget target(old_shared_array, new_shared_array, result);
+ Isolate* isolate = old_shared_array->GetIsolate();
const char* message =
DropActivationsInActiveThreadImpl(isolate, target, do_drop);
@@ -1797,7 +1819,7 @@ static const char* DropActivationsInActiveThread(
return message;
}
- int array_len = GetArrayLength(shared_info_array);
+ int array_len = GetArrayLength(old_shared_array);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
@@ -1853,16 +1875,16 @@ bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
class InactiveThreadActivationsChecker : public ThreadVisitor {
public:
- InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
+ InactiveThreadActivationsChecker(Handle<JSArray> old_shared_array,
Handle<JSArray> result)
- : shared_info_array_(shared_info_array), result_(result),
- has_blocked_functions_(false) {
- }
+ : old_shared_array_(old_shared_array),
+ result_(result),
+ has_blocked_functions_(false) {}
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- has_blocked_functions_ |= CheckActivation(
- shared_info_array_, result_, it.frame(),
- LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
+ has_blocked_functions_ |=
+ CheckActivation(old_shared_array_, result_, it.frame(),
+ LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
}
}
bool HasBlockedFunctions() {
@@ -1870,20 +1892,21 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
}
private:
- Handle<JSArray> shared_info_array_;
+ Handle<JSArray> old_shared_array_;
Handle<JSArray> result_;
bool has_blocked_functions_;
};
Handle<JSArray> LiveEdit::CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop) {
- Isolate* isolate = shared_info_array->GetIsolate();
- int len = GetArrayLength(shared_info_array);
+ Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
+ bool do_drop) {
+ Isolate* isolate = old_shared_array->GetIsolate();
+ int len = GetArrayLength(old_shared_array);
- DCHECK(shared_info_array->HasFastElements());
- Handle<FixedArray> shared_info_array_elements(
- FixedArray::cast(shared_info_array->elements()));
+ DCHECK(old_shared_array->HasFastElements());
+ Handle<FixedArray> old_shared_array_elements(
+ FixedArray::cast(old_shared_array->elements()));
Handle<JSArray> result = isolate->factory()->NewJSArray(len);
Handle<FixedArray> result_elements =
@@ -1899,12 +1922,12 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
// running (as we wouldn't want to restart them, because we don't know where
// to restart them from) or suspended. Fail if any one corresponds to the set
// of functions being edited.
- if (FindActiveGenerators(shared_info_array_elements, result_elements, len)) {
+ if (FindActiveGenerators(old_shared_array_elements, result_elements, len)) {
return result;
}
// Check inactive threads. Fail if some functions are blocked there.
- InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
+ InactiveThreadActivationsChecker inactive_threads_checker(old_shared_array,
result);
isolate->thread_manager()->IterateArchivedThreads(
&inactive_threads_checker);
@@ -1913,8 +1936,8 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
}
// Try to drop activations from the current stack.
- const char* error_message =
- DropActivationsInActiveThread(shared_info_array, result, do_drop);
+ const char* error_message = DropActivationsInActiveThread(
+ old_shared_array, new_shared_array, result, do_drop);
if (error_message != NULL) {
// Add error message as an array extra element.
Handle<String> str =
@@ -1947,6 +1970,17 @@ class SingleFrameTarget {
LiveEdit::FunctionPatchabilityStatus saved_status() {
return m_saved_status;
}
+ void set_status(LiveEdit::FunctionPatchabilityStatus status) {
+ m_saved_status = status;
+ }
+
+ bool FrameUsesNewTarget(StackFrame* frame) {
+ if (!frame->is_java_script()) return false;
+ JavaScriptFrame* jsframe = JavaScriptFrame::cast(frame);
+ Handle<SharedFunctionInfo> shared(jsframe->function()->shared());
+ return shared->scope_info()->HasNewTarget();
+ }
+
private:
JavaScriptFrame* m_frame;
LiveEdit::FunctionPatchabilityStatus m_saved_status;
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 29fe60579f..f3d6c54c0e 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -61,8 +61,6 @@ class LiveEdit : AllStatic {
enum FrameDropMode {
// No frame has been dropped.
FRAMES_UNTOUCHED,
- // The top JS frame had been calling IC stub. IC stub mustn't be called now.
- FRAME_DROPPED_IN_IC_CALL,
// The top JS frame had been calling debug break slot stub. Patch the
// address this stub jumps to in the end.
FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
@@ -117,7 +115,8 @@ class LiveEdit : AllStatic {
// has restart the lowest found frames and drops all other frames above
// if possible and if do_drop is true.
static Handle<JSArray> CheckAndDropActivations(
- Handle<JSArray> shared_info_array, bool do_drop);
+ Handle<JSArray> old_shared_array, Handle<JSArray> new_shared_array,
+ bool do_drop);
// Restarts the call frame and completely drops all frames above it.
// Return error message or NULL.
@@ -131,7 +130,8 @@ class LiveEdit : AllStatic {
FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
FUNCTION_REPLACED_ON_ACTIVE_STACK = 5,
FUNCTION_BLOCKED_UNDER_GENERATOR = 6,
- FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7
+ FUNCTION_BLOCKED_ACTIVE_GENERATOR = 7,
+ FUNCTION_BLOCKED_NO_NEW_TARGET_ON_RESTART = 8
};
// Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -172,7 +172,8 @@ class LiveEdit : AllStatic {
*/
// A size of frame base including fp. Padding words starts right above
// the base.
- static const int kFrameDropperFrameSize = 4;
+ static const int kFrameDropperFrameSize =
+ 4 + StandardFrameConstants::kCPSlotCount;
// A number of words that should be reserved on stack for the LiveEdit use.
// Stored on stack in form of Smi.
static const int kFramePaddingInitialSize = 1;
diff --git a/deps/v8/src/debug/liveedit.js b/deps/v8/src/debug/liveedit.js
index 27425c154d..85e55c4c18 100644
--- a/deps/v8/src/debug/liveedit.js
+++ b/deps/v8/src/debug/liveedit.js
@@ -142,14 +142,17 @@
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code need to be patched.
- var replaced_function_infos = new GlobalArray();
+ var replaced_function_old_infos = new GlobalArray();
+ var replaced_function_new_infos = new GlobalArray();
for (var i = 0; i < replace_code_list.length; i++) {
- var live_shared_function_infos =
- replace_code_list[i].live_shared_function_infos;
-
- if (live_shared_function_infos) {
- for (var j = 0; j < live_shared_function_infos.length; j++) {
- replaced_function_infos.push(live_shared_function_infos[j]);
+ var old_infos = replace_code_list[i].live_shared_function_infos;
+ var new_info =
+ replace_code_list[i].corresponding_node.info.shared_function_info;
+
+ if (old_infos) {
+ for (var j = 0; j < old_infos.length; j++) {
+ replaced_function_old_infos.push(old_infos[j]);
+ replaced_function_new_infos.push(new_info);
}
}
}
@@ -159,7 +162,9 @@
// Check that function being patched is not currently on stack or drop them.
var dropped_functions_number =
- CheckStackActivations(replaced_function_infos, change_log);
+ CheckStackActivations(replaced_function_old_infos,
+ replaced_function_new_infos,
+ change_log);
// Our current implementation requires client to manually issue "step in"
// command for correct stack state if the stack was modified.
@@ -910,21 +915,24 @@
// For array of wrapped shared function infos checks that none of them
// have activations on stack (of any thread). Throws a Failure exception
// if this proves to be false.
- function CheckStackActivations(shared_wrapper_list, change_log) {
- var shared_list = new GlobalArray();
- for (var i = 0; i < shared_wrapper_list.length; i++) {
- shared_list[i] = shared_wrapper_list[i].info;
+ function CheckStackActivations(old_shared_wrapper_list,
+ new_shared_list,
+ change_log) {
+ var old_shared_list = new GlobalArray();
+ for (var i = 0; i < old_shared_wrapper_list.length; i++) {
+ old_shared_list[i] = old_shared_wrapper_list[i].info;
}
- var result = %LiveEditCheckAndDropActivations(shared_list, true);
- if (result[shared_list.length]) {
+ var result = %LiveEditCheckAndDropActivations(
+ old_shared_list, new_shared_list, true);
+ if (result[old_shared_wrapper_list.length]) {
// Extra array element may contain error message.
- throw new Failure(result[shared_list.length]);
+ throw new Failure(result[old_shared_wrapper_list.length]);
}
var problems = new GlobalArray();
var dropped = new GlobalArray();
- for (var i = 0; i < shared_list.length; i++) {
- var shared = shared_wrapper_list[i];
+ for (var i = 0; i < old_shared_list.length; i++) {
+ var shared = old_shared_wrapper_list[i];
if (result[i] == FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
dropped.push({ name: shared.function_name } );
} else if (result[i] != FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
@@ -957,7 +965,8 @@
BLOCKED_UNDER_NATIVE_CODE: 4,
REPLACED_ON_ACTIVE_STACK: 5,
BLOCKED_UNDER_GENERATOR: 6,
- BLOCKED_ACTIVE_GENERATOR: 7
+ BLOCKED_ACTIVE_GENERATOR: 7,
+ BLOCKED_NO_NEW_TARGET_ON_RESTART: 8
};
FunctionPatchabilityStatus.SymbolName = function(code) {
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 30bdcac1b6..c5c58d044b 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -24,25 +24,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
// nop(DEBUG_BREAK_NOP)
@@ -108,26 +108,21 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ li(at, Operand(restarter_frame_function_slot));
- __ sw(zero_reg, MemOperand(at, 0));
-
// We do not know our frame height, but set sp based on fp.
__ Subu(sp, fp, Operand(kPointerSize));
__ Pop(ra, fp, a1); // Return address, Frame, Function.
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+
// Load context from the function.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 9b4d355d79..1d65fd9efd 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -23,25 +23,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
// nop(DEBUG_BREAK_NOP)
@@ -110,26 +110,21 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ li(at, Operand(restarter_frame_function_slot));
- __ sw(zero_reg, MemOperand(at, 0));
-
// We do not know our frame height, but set sp based on fp.
__ Dsubu(sp, fp, Operand(kPointerSize));
__ Pop(ra, fp, a1); // Return address, Frame, Function.
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+
// Load context from the function.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 5ff3e34955..1fd5fa9ecd 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -9,7 +9,6 @@
// Imports
var ErrorToString;
-var FunctionSourceString;
var GlobalArray = global.Array;
var IsNaN = global.isNaN;
var JSONStringify = global.JSON.stringify;
@@ -25,7 +24,6 @@ var SymbolToString;
utils.Import(function(from) {
ErrorToString = from.ErrorToString;
- FunctionSourceString = from.FunctionSourceString;
MakeError = from.MakeError;
MapEntries = from.MapEntries;
MapIteratorNext = from.MapIteratorNext;
@@ -116,7 +114,7 @@ function ClearMirrorCache(value) {
function ObjectIsPromise(value) {
- return IS_SPEC_OBJECT(value) &&
+ return IS_RECEIVER(value) &&
!IS_UNDEFINED(%DebugGetProperty(value, promiseStatusSymbol));
}
@@ -239,11 +237,6 @@ function inherits(ctor, superCtor) {
// Maximum length when sending strings through the JSON protocol.
var kMaxProtocolStringLength = 80;
-// Different kind of properties.
-var PropertyKind = {};
-PropertyKind.Named = 1;
-PropertyKind.Indexed = 2;
-
// A copy of the PropertyType enum from property-details.h
var PropertyType = {};
@@ -750,19 +743,6 @@ ObjectMirror.prototype.hasIndexedInterceptor = function() {
};
-// Get all own property names except for private symbols.
-function TryGetPropertyNames(object) {
- try {
- // TODO(yangguo): Should there be a special debugger implementation of
- // %GetOwnPropertyNames that doesn't perform access checks?
- return %GetOwnPropertyNames(object, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
- } catch (e) {
- // Might have hit a failed access check.
- return [];
- }
-}
-
-
/**
* Return the property names for this object.
* @param {number} kind Indicate whether named, indexed or both kinds of
@@ -771,66 +751,8 @@ function TryGetPropertyNames(object) {
value
* @return {Array} Property names for this object
*/
-ObjectMirror.prototype.propertyNames = function(kind, limit) {
- // Find kind and limit and allocate array for the result
- kind = kind || PropertyKind.Named | PropertyKind.Indexed;
-
- var propertyNames;
- var elementNames;
- var total = 0;
-
- // Find all the named properties.
- if (kind & PropertyKind.Named) {
- propertyNames = TryGetPropertyNames(this.value_);
- total += propertyNames.length;
-
- // Get names for named interceptor properties if any.
- if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(this.value_);
- if (namedInterceptorNames) {
- propertyNames = propertyNames.concat(namedInterceptorNames);
- total += namedInterceptorNames.length;
- }
- }
- }
-
- // Find all the indexed properties.
- if (kind & PropertyKind.Indexed) {
- // Get own element names.
- elementNames = %GetOwnElementNames(this.value_);
- total += elementNames.length;
-
- // Get names for indexed interceptor properties.
- if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
- var indexedInterceptorNames =
- %GetIndexedInterceptorElementNames(this.value_);
- if (indexedInterceptorNames) {
- elementNames = elementNames.concat(indexedInterceptorNames);
- total += indexedInterceptorNames.length;
- }
- }
- }
- limit = MathMin(limit || total, total);
-
- var names = new GlobalArray(limit);
- var index = 0;
-
- // Copy names for named properties.
- if (kind & PropertyKind.Named) {
- for (var i = 0; index < limit && i < propertyNames.length; i++) {
- names[index++] = propertyNames[i];
- }
- }
-
- // Copy names for indexed properties.
- if (kind & PropertyKind.Indexed) {
- for (var i = 0; index < limit && i < elementNames.length; i++) {
- names[index++] = elementNames[i];
- }
- }
-
- return names;
+ObjectMirror.prototype.propertyNames = function() {
+ return %GetOwnPropertyKeys(this.value_, PROPERTY_FILTER_NONE);
};
@@ -842,8 +764,8 @@ ObjectMirror.prototype.propertyNames = function(kind, limit) {
specified value
* @return {Array} Property mirrors for this object
*/
-ObjectMirror.prototype.properties = function(kind, limit) {
- var names = this.propertyNames(kind, limit);
+ObjectMirror.prototype.properties = function() {
+ var names = this.propertyNames();
var properties = new GlobalArray(names.length);
for (var i = 0; i < names.length; i++) {
properties[i] = this.property(names[i]);
@@ -887,7 +809,7 @@ ObjectMirror.prototype.lookupProperty = function(value) {
// Look for property value in properties.
for (var i = 0; i < properties.length; i++) {
- // Skip properties which are defined through assessors.
+ // Skip properties which are defined through accessors.
var property = properties[i];
if (property.propertyType() != PropertyType.AccessorConstant) {
if (%_ObjectEquals(property.value_, value.value_)) {
@@ -987,6 +909,16 @@ FunctionMirror.prototype.name = function() {
/**
+ * Returns the displayName if it is set, otherwise name, otherwise inferred
+ * name.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.debugName = function() {
+ return %FunctionGetDebugName(this.value_);
+}
+
+
+/**
* Returns the inferred name of the function.
* @return {string} Name of the function
*/
@@ -1004,7 +936,7 @@ FunctionMirror.prototype.source = function() {
// Return source if function is resolved. Otherwise just fall through to
// return undefined.
if (this.resolved()) {
- return FunctionSourceString(this.value_);
+ return %FunctionToString(this.value_);
}
};
@@ -2873,24 +2805,15 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
// Add actual properties - named properties followed by indexed properties.
- var propertyNames = mirror.propertyNames(PropertyKind.Named);
- var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed);
- var p = new GlobalArray(propertyNames.length + propertyIndexes.length);
- for (var i = 0; i < propertyNames.length; i++) {
- var propertyMirror = mirror.property(propertyNames[i]);
- p[i] = this.serializeProperty_(propertyMirror);
- if (details) {
- this.add_(propertyMirror.value());
- }
- }
- for (var i = 0; i < propertyIndexes.length; i++) {
- var propertyMirror = mirror.property(propertyIndexes[i]);
- p[propertyNames.length + i] = this.serializeProperty_(propertyMirror);
+ var properties = mirror.propertyNames();
+ for (var i = 0; i < properties.length; i++) {
+ var propertyMirror = mirror.property(properties[i]);
+ properties[i] = this.serializeProperty_(propertyMirror);
if (details) {
this.add_(propertyMirror.value());
}
}
- content.properties = p;
+ content.properties = properties;
var internalProperties = mirror.internalProperties();
if (internalProperties.length > 0) {
@@ -3088,7 +3011,6 @@ utils.InstallFunctions(global, DONT_ENUM, [
utils.InstallConstants(global, [
"ScopeType", ScopeType,
- "PropertyKind", PropertyKind,
"PropertyType", PropertyType,
"PropertyAttribute", PropertyAttribute,
"Mirror", Mirror,
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index ed4a632475..c5ddab8bc0 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -24,25 +24,25 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from
//
// ori r3, r3, 0
@@ -115,19 +115,7 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- __ Ret();
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(ip, Operand(restarter_frame_function_slot));
- __ li(r4, Operand::Zero());
- __ StoreP(r4, MemOperand(ip, 0));
-
// Load the function pointer off of our current stack frame.
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
kPointerSize));
@@ -135,9 +123,15 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Pop return address and frame
__ LeaveFrame(StackFrame::INTERNAL);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(r4, no_reg, dummy, dummy);
+
// Load context from the function.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+
// Get function code.
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 3b65678709..0d56ea7521 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -24,24 +24,24 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(pc, kSize);
+ CodePatcher patcher(isolate, pc, kSize);
Label check_codesize;
patcher.masm()->bind(&check_codesize);
patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(code->entry()),
@@ -106,34 +106,29 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ Move(rax, restarter_frame_function_slot);
- __ movp(Operand(rax, 0), Immediate(0));
-
// We do not know our frame height, but set rsp based on rbp.
__ leap(rsp, Operand(rbp, -1 * kPointerSize));
__ Pop(rdi); // Function.
__ popq(rbp);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(rdi, no_reg, dummy, dummy);
+
// Load context from the function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Clear new.target as a safety measure.
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+
// Get function code.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
- __ jmp(rdx);
+ __ jmp(rbx);
}
const bool LiveEdit::kFrameDropperSupported = true;
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
index 5ec608a99a..8c04e02b89 100644
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -23,24 +23,24 @@ void EmitDebugBreakSlot(MacroAssembler* masm) {
}
-void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode,
- int call_argc) {
+void DebugCodegen::GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode) {
// Generate enough nop's to make space for a call instruction.
- masm->RecordDebugBreakSlot(mode, call_argc);
+ masm->RecordDebugBreakSlot(mode);
EmitDebugBreakSlot(masm);
}
-void DebugCodegen::ClearDebugBreakSlot(Address pc) {
- CodePatcher patcher(pc, Assembler::kDebugBreakSlotLength);
+void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
+ CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
EmitDebugBreakSlot(patcher.masm());
}
-void DebugCodegen::PatchDebugBreakSlot(Address pc, Handle<Code> code) {
+void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
+ Handle<Code> code) {
DCHECK_EQ(Code::BUILTIN, code->kind());
static const int kSize = Assembler::kDebugBreakSlotLength;
- CodePatcher patcher(pc, kSize);
+ CodePatcher patcher(isolate, pc, kSize);
// Add a label for checking the size of the code used for returning.
Label check_codesize;
@@ -105,33 +105,29 @@ void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
}
-void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->ret(0);
-}
-
-
void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- ExternalReference restarter_frame_function_slot =
- ExternalReference::debug_restarter_frame_function_pointer_address(
- masm->isolate());
- __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
-
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, -1 * kPointerSize));
__ pop(edi); // Function.
__ pop(ebp);
+ ParameterCount dummy(0);
+ __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Clear new.target register as a safety measure.
+ __ mov(edx, masm->isolate()->factory()->undefined_value());
+
// Get function code.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(edx);
+ __ jmp(ebx);
}
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index dd012db6ab..4bdafbf1b4 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -5,13 +5,13 @@
#include "src/deoptimizer.h"
#include "src/accessors.h"
+#include "src/ast/prettyprinter.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/macro-assembler.h"
-#include "src/prettyprinter.h"
#include "src/profiler/cpu-profiler.h"
#include "src/v8.h"
@@ -763,6 +763,10 @@ void Deoptimizer::DoComputeOutputFrames() {
DoComputeJSFrame(frame_index);
jsframe_count_++;
break;
+ case TranslatedFrame::kInterpretedFunction:
+ DoComputeInterpretedFrame(frame_index);
+ jsframe_count_++;
+ break;
case TranslatedFrame::kArgumentsAdaptor:
DoComputeArgumentsAdaptorFrame(frame_index);
break;
@@ -828,7 +832,7 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned fixed_frame_size = ComputeJavascriptFixedSize(function);
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
@@ -943,8 +947,6 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
input_offset -= kPointerSize;
// Read the context from the translations.
Object* context = value_iterator->GetRawValue();
- // The context should not be a placeholder for a materialized object.
- CHECK(context != isolate_->heap()->arguments_marker());
if (context == isolate_->heap()->undefined_value()) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
@@ -959,6 +961,12 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
WriteValueToOutput(context, input_index, frame_index, output_offset,
"context ");
+ if (context == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_offset;
+ values_to_materialize_.push_back({output_address, value_iterator});
+ }
value_iterator++;
input_index++;
@@ -1022,6 +1030,222 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
}
+void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
+ TranslatedFrame* translated_frame =
+ &(translated_state_.frames()[frame_index]);
+ TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ int input_index = 0;
+
+ BailoutId bytecode_offset = translated_frame->node_id();
+ unsigned height = translated_frame->height();
+ unsigned height_in_bytes = height * kPointerSize;
+ JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ value_iterator++;
+ input_index++;
+ if (trace_scope_ != NULL) {
+ PrintF(trace_scope_->file(), " translating interpreted frame ");
+ function->PrintName(trace_scope_->file());
+ PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d\n",
+ bytecode_offset.ToInt(), height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by InterpreterFrameConstants.
+ unsigned fixed_frame_size = ComputeInterpretedFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new (output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::INTERPRETED);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ CHECK(frame_index >= 0 && frame_index < output_count_);
+ CHECK_NULL(output_[frame_index]);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ Register fp_reg = InterpretedFrame::fp_register();
+ intptr_t top_address;
+ if (is_bottommost) {
+ // Subtract interpreter fixed frame size for the context function slots,
+    // new.target and bytecode offset.
+ top_address = input_->GetRegister(fp_reg.code()) -
+ InterpreterFrameConstants::kFixedFrameSizeFromFp -
+ height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count =
+ function->shared()->internal_formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, the function, new.target and the bytecode offset. Synthesize
+ // their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPCOnStackSize;
+ input_offset -= kPCOnStackSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetCallerPc(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kFPOnStackSize;
+ input_offset -= kFPOnStackSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetCallerFp(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ DCHECK(!is_bottommost ||
+ (input_->GetRegister(fp_reg.code()) +
+ has_alignment_padding_ * kPointerSize) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
+ DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
+ DCHECK(!is_bottommost || !has_alignment_padding_ ||
+ (fp_value & kPointerSize) != 0);
+
+ if (FLAG_enable_embedded_constant_pool) {
+ // For the bottommost output frame the constant pool pointer can be gotten
+ // from the input frame. For subsequent output frames, it can be read from
+ // the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetConstantPool();
+ }
+ output_frame->SetCallerConstantPool(output_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_offset,
+ "caller's constant_pool\n");
+ }
+
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
+ Register context_reg = InterpretedFrame::context_register();
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ // Read the context from the translations.
+ Object* context = value_iterator->GetRawValue();
+ // The context should not be a placeholder for a materialized object.
+ CHECK(context != isolate_->heap()->arguments_marker());
+ value = reinterpret_cast<intptr_t>(context);
+ output_frame->SetContext(value);
+ if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
+ WriteValueToOutput(context, input_index, frame_index, output_offset,
+ "context ");
+ value_iterator++;
+ input_index++;
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+
+ // TODO(rmcilroy): Deal with new.target correctly - currently just set it to
+ // undefined.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ Object* new_target = isolate_->heap()->undefined_value();
+ WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target ");
+
+ // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ int raw_bytecode_offset =
+ BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset.ToInt();
+ Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
+ WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
+ "bytecode offset ");
+
+ // Translate the rest of the interpreter registers in the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
+ }
+ CHECK_EQ(0u, output_offset);
+
+ // Set the accumulator register.
+ output_frame->SetRegister(
+ kInterpreterAccumulatorRegister.code(),
+ reinterpret_cast<intptr_t>(value_iterator->GetRawValue()));
+ value_iterator++;
+
+ Builtins* builtins = isolate_->builtins();
+ Code* trampoline = builtins->builtin(Builtins::kInterpreterEntryTrampoline);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(trampoline->entry()));
+ output_frame->SetState(0);
+
+ // Update constant pool.
+ if (FLAG_enable_embedded_constant_pool) {
+ intptr_t constant_pool_value =
+ reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ output_frame->SetConstantPool(constant_pool_value);
+ if (is_topmost) {
+ Register constant_pool_reg =
+ InterpretedFrame::constant_pool_pointer_register();
+ output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
+ }
+ }
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost && bailout_type_ != DEBUGGER) {
+ Code* continuation =
+ builtins->builtin(Builtins::kInterpreterNotifyDeoptimized);
+ if (bailout_type_ == LAZY) {
+ continuation =
+ builtins->builtin(Builtins::kInterpreterNotifyLazyDeoptimized);
+ } else if (bailout_type_ == SOFT) {
+ continuation =
+ builtins->builtin(Builtins::kInterpreterNotifySoftDeoptimized);
+ } else {
+ CHECK_EQ(bailout_type_, EAGER);
+ }
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+}
+
+
void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
@@ -1235,12 +1459,6 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
- // The original constructor.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value());
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "new.target\n");
-
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
@@ -1762,7 +1980,7 @@ void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
unsigned Deoptimizer::ComputeInputFrameSize() const {
- unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned fixed_size = ComputeJavascriptFixedSize(function_);
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
unsigned result = fixed_size + fp_to_sp_delta_ -
@@ -1777,7 +1995,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
}
-unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
+unsigned Deoptimizer::ComputeJavascriptFixedSize(JSFunction* function) const {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
return ComputeIncomingArgumentSize(function) +
@@ -1785,6 +2003,15 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
}
+unsigned Deoptimizer::ComputeInterpretedFixedSize(JSFunction* function) const {
+ // The fixed part of the frame consists of the return address, frame
+ // pointer, function, context, new.target, bytecode offset and all the
+ // incoming arguments.
+ return ComputeIncomingArgumentSize(function) +
+ InterpreterFrameConstants::kFixedFrameSize;
+}
+
+
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments is the values for formal parameters and
// the receiver. Every slot contains a pointer.
@@ -1831,7 +2058,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
while (max_entry_id >= entry_count) entry_count *= 2;
CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);
- MacroAssembler masm(isolate, NULL, 16 * KB);
+ MacroAssembler masm(isolate, NULL, 16 * KB, CodeObjectRequired::kYes);
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
@@ -1878,8 +2105,13 @@ FrameDescription::FrameDescription(uint32_t frame_size,
int FrameDescription::ComputeFixedSize() {
- return StandardFrameConstants::kFixedFrameSize +
- (ComputeParametersCount() + 1) * kPointerSize;
+ if (type_ == StackFrame::INTERPRETED) {
+ return InterpreterFrameConstants::kFixedFrameSize +
+ (ComputeParametersCount() + 1) * kPointerSize;
+ } else {
+ return StandardFrameConstants::kFixedFrameSize +
+ (ComputeParametersCount() + 1) * kPointerSize;
+ }
}
@@ -2017,6 +2249,15 @@ void Translation::BeginJSFrame(BailoutId node_id,
}
+void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
+ int literal_id, unsigned height) {
+ buffer_->Add(INTERPRETED_FRAME, zone());
+ buffer_->Add(bytecode_offset.ToInt(), zone());
+ buffer_->Add(literal_id, zone());
+ buffer_->Add(height, zone());
+}
+
+
void Translation::BeginCompiledStubFrame(int height) {
buffer_->Add(COMPILED_STUB_FRAME, zone());
buffer_->Add(height, zone());
@@ -2149,6 +2390,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case CONSTRUCT_STUB_FRAME:
return 2;
case JS_FRAME:
+ case INTERPRETED_FRAME:
return 3;
}
FATAL("Unexpected translation type");
@@ -2622,6 +2864,15 @@ TranslatedFrame TranslatedFrame::JSFrame(BailoutId node_id,
}
+TranslatedFrame TranslatedFrame::InterpretedFrame(
+ BailoutId bytecode_offset, SharedFunctionInfo* shared_info, int height) {
+ TranslatedFrame frame(kInterpretedFunction, shared_info->GetIsolate(),
+ shared_info, height);
+ frame.node_id_ = bytecode_offset;
+ return frame;
+}
+
+
TranslatedFrame TranslatedFrame::AccessorFrame(
Kind kind, SharedFunctionInfo* shared_info) {
DCHECK(kind == kSetter || kind == kGetter);
@@ -2648,9 +2899,17 @@ int TranslatedFrame::GetValueCount() {
case kFunction: {
int parameter_count =
raw_shared_info_->internal_formal_parameter_count() + 1;
+ // + 1 for function.
return height_ + parameter_count + 1;
}
+ case kInterpretedFunction: {
+ int parameter_count =
+ raw_shared_info_->internal_formal_parameter_count() + 1;
+ // + 3 for function, context and accumulator.
+ return height_ + parameter_count + 3;
+ }
+
case kGetter:
return 2; // Function and receiver.
@@ -2706,6 +2965,24 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::JSFrame(node_id, shared_info, height);
}
+ case Translation::INTERPRETED_FRAME: {
+ BailoutId bytecode_offset = BailoutId(iterator->Next());
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ base::SmartArrayPointer<char> name =
+ shared_info->DebugName()->ToCString();
+ PrintF(trace_file, " reading input frame %s", name.get());
+ int arg_count = shared_info->internal_formal_parameter_count() + 1;
+ PrintF(trace_file,
+ " => bytecode_offset=%d, args=%d, height=%d; inputs:\n",
+ bytecode_offset.ToInt(), arg_count, height);
+ }
+ return TranslatedFrame::InterpretedFrame(bytecode_offset, shared_info,
+ height);
+ }
+
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
@@ -2818,6 +3095,7 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
switch (opcode) {
case Translation::BEGIN:
case Translation::JS_FRAME:
+ case Translation::INTERPRETED_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
@@ -3212,6 +3490,42 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
object->set_length(*length);
return object;
}
+ case FIXED_ARRAY_TYPE: {
+ Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
+ int32_t length = 0;
+ CHECK(lengthObject->ToInt32(&length));
+ Handle<FixedArray> object =
+ isolate_->factory()->NewFixedArray(length);
+ // We need to set the map, because the fixed array we are
+ // materializing could be a context or an arguments object,
+ // in which case we must retain that information.
+ object->set_map(*map);
+ slot->value_ = object;
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeAt(frame_index, value_index);
+ object->set(i, *value);
+ }
+ return object;
+ }
+ case FIXED_DOUBLE_ARRAY_TYPE: {
+ DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
+ Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
+ int32_t length = 0;
+ CHECK(lengthObject->ToInt32(&length));
+ Handle<FixedArrayBase> object =
+ isolate_->factory()->NewFixedDoubleArray(length);
+ slot->value_ = object;
+ if (length > 0) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(object);
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = MaterializeAt(frame_index, value_index);
+ CHECK(value->IsNumber());
+ double_array->set(i, value->Number());
+ }
+ }
+ return object;
+ }
default:
PrintF(stderr, "[couldn't handle instance type %d]\n",
map->instance_type());
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 8d06956818..10685b6193 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -112,6 +112,7 @@ class TranslatedFrame {
public:
enum Kind {
kFunction,
+ kInterpretedFunction,
kGetter,
kSetter,
kArgumentsAdaptor,
@@ -172,6 +173,9 @@ class TranslatedFrame {
// Constructor static methods.
static TranslatedFrame JSFrame(BailoutId node_id,
SharedFunctionInfo* shared_info, int height);
+ static TranslatedFrame InterpretedFrame(BailoutId bytecode_offset,
+ SharedFunctionInfo* shared_info,
+ int height);
static TranslatedFrame AccessorFrame(Kind kind,
SharedFunctionInfo* shared_info);
static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
@@ -307,6 +311,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
#define DEOPT_MESSAGES_LIST(V) \
+ V(kAccessCheck, "Access check needed") \
V(kNoReason, "no reason") \
V(kConstantGlobalVariableAssignment, "Constant global variable assignment") \
V(kConversionOverflow, "conversion overflow") \
@@ -335,6 +340,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
"Insufficient type feedback for RHS of binary operation") \
V(kKeyIsNegative, "key is negative") \
+ V(kLiteralsWereDisposed, "literals have been disposed") \
V(kLostPrecision, "lost precision") \
V(kLostPrecisionOrNaN, "lost precision or NaN") \
V(kMementoFound, "memento found") \
@@ -355,6 +361,7 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kOutOfBounds, "out of bounds") \
V(kOutsideOfRange, "Outside of range") \
V(kOverflow, "overflow") \
+ V(kProxy, "proxy") \
V(kReceiverWasAGlobalObject, "receiver was a global object") \
V(kSmi, "Smi") \
V(kTooManyArguments, "too many arguments") \
@@ -586,6 +593,7 @@ class Deoptimizer : public Malloced {
void DoComputeOutputFrames();
void DoComputeJSFrame(int frame_index);
+ void DoComputeInterpretedFrame(int frame_index);
void DoComputeArgumentsAdaptorFrame(int frame_index);
void DoComputeConstructStubFrame(int frame_index);
void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
@@ -603,7 +611,8 @@ class Deoptimizer : public Malloced {
const char* debug_hint_string);
unsigned ComputeInputFrameSize() const;
- unsigned ComputeFixedSize(JSFunction* function) const;
+ unsigned ComputeJavascriptFixedSize(JSFunction* function) const;
+ unsigned ComputeInterpretedFixedSize(JSFunction* function) const;
unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
@@ -736,12 +745,9 @@ class FrameDescription {
return malloc(size + frame_size - kPointerSize);
}
-// Bug in VS2015 RC, reported fixed in RTM. Microsoft bug: 1153909.
-#if !defined(_MSC_FULL_VER) || _MSC_FULL_VER != 190022816
void operator delete(void* pointer, uint32_t frame_size) {
free(pointer);
}
-#endif // _MSC_FULL_VER
void operator delete(void* description) {
free(description);
@@ -951,6 +957,7 @@ class TranslationIterator BASE_EMBEDDED {
#define TRANSLATION_OPCODE_LIST(V) \
V(BEGIN) \
V(JS_FRAME) \
+ V(INTERPRETED_FRAME) \
V(CONSTRUCT_STUB_FRAME) \
V(GETTER_STUB_FRAME) \
V(SETTER_STUB_FRAME) \
@@ -996,6 +1003,8 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginInterpretedFrame(BailoutId bytecode_offset, int literal_id,
+ unsigned height);
void BeginCompiledStubFrame(int height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 1158e01495..59a57e552e 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -155,7 +155,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], converter.code());
+ RelocInfo relocinfo(isolate, pcs[i], rmodes[i], datas[i],
+ converter.code());
// Indent the printing of the reloc info.
if (i == 0) {
@@ -191,9 +192,6 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
out.AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTarget(rmode)) {
out.AddFormatted(" ;; code:");
- if (rmode == RelocInfo::CONSTRUCT_CALL) {
- out.AddFormatted(" constructor,");
- }
Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 7eafe9bfaf..d4d80dbdec 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -136,20 +136,14 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
FixedArray* to = FixedArray::cast(to_base);
DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
- Address to_address = to->address() + FixedArray::kHeaderSize;
- Address from_address = from->address() + FixedArray::kHeaderSize;
- CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
- reinterpret_cast<Object**>(from_address) + from_start,
- static_cast<size_t>(copy_size));
- if (IsFastObjectElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Heap* heap = from->GetHeap();
- if (!heap->InNewSpace(to)) {
- heap->RecordWrites(to->address(),
- to->OffsetOfElementAt(to_start),
- copy_size);
- }
- heap->incremental_marking()->RecordWrites(to);
+
+ WriteBarrierMode write_barrier_mode =
+ (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
+ ? UPDATE_WRITE_BARRIER
+ : SKIP_WRITE_BARRIER;
+ for (int i = 0; i < copy_size; i++) {
+ Object* value = from->get(from_start + i);
+ to->set(to_start + i, value, write_barrier_mode);
}
}
@@ -160,7 +154,6 @@ static void CopyDictionaryToObjectElements(
DisallowHeapAllocation no_allocation;
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
- Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
@@ -183,24 +176,19 @@ static void CopyDictionaryToObjectElements(
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
+ WriteBarrierMode write_barrier_mode = IsFastObjectElementsKind(to_kind)
+ ? UPDATE_WRITE_BARRIER
+ : SKIP_WRITE_BARRIER;
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
DCHECK(!value->IsTheHole());
- to->set(i + to_start, value, SKIP_WRITE_BARRIER);
+ to->set(i + to_start, value, write_barrier_mode);
} else {
to->set_the_hole(i + to_start);
}
}
- if (IsFastObjectElementsKind(to_kind)) {
- if (!heap->InNewSpace(to)) {
- heap->RecordWrites(to->address(),
- to->OffsetOfElementAt(to_start),
- copy_size);
- }
- heap->incremental_marking()->RecordWrites(to);
- }
}
@@ -440,8 +428,8 @@ static void TraceTopFrame(Isolate* isolate) {
}
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Code* apply_builtin = isolate->builtins()->builtin(
- Builtins::kFunctionApply);
+ Code* apply_builtin =
+ isolate->builtins()->builtin(Builtins::kFunctionPrototypeApply);
if (raw_frame->unchecked_code() == apply_builtin) {
PrintF("apply from ");
it.Advance();
@@ -518,7 +506,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (IsFastPackedElementsKind(kind())) return true;
for (uint32_t i = start; i < end; i++) {
if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store,
- NONE)) {
+ ALL_PROPERTIES)) {
return false;
}
}
@@ -545,14 +533,14 @@ class ElementsAccessorBase : public ElementsAccessor {
bool HasElement(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store,
- PropertyAttributes filter) final {
+ PropertyFilter filter) final {
return ElementsAccessorSubclass::HasElementImpl(holder, index,
backing_store, filter);
}
static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store,
- PropertyAttributes filter) {
+ PropertyFilter filter) {
return ElementsAccessorSubclass::GetEntryForIndexImpl(
*holder, *backing_store, index, filter) != kMaxUInt32;
}
@@ -871,8 +859,12 @@ class ElementsAccessorBase : public ElementsAccessor {
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys, uint32_t range,
- PropertyAttributes filter,
+ PropertyFilter filter,
uint32_t offset) {
+ if (filter & ONLY_ALL_CAN_READ) {
+ // Non-dictionary elements can't have all-can-read accessors.
+ return;
+ }
uint32_t length = 0;
if (object->IsJSArray()) {
length = Smi::cast(JSArray::cast(*object)->length())->value();
@@ -892,7 +884,7 @@ class ElementsAccessorBase : public ElementsAccessor {
void CollectElementIndices(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys, uint32_t range,
- PropertyAttributes filter, uint32_t offset) final {
+ PropertyFilter filter, uint32_t offset) final {
ElementsAccessorSubclass::CollectElementIndicesImpl(
object, backing_store, keys, range, filter, offset);
};
@@ -935,8 +927,7 @@ class ElementsAccessorBase : public ElementsAccessor {
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
- uint32_t index,
- PropertyAttributes filter) {
+ uint32_t index, PropertyFilter filter) {
if (IsHoleyElementsKind(kind())) {
return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
backing_store) &&
@@ -944,16 +935,20 @@ class ElementsAccessorBase : public ElementsAccessor {
? index
: kMaxUInt32;
} else {
- Smi* smi_length = Smi::cast(JSArray::cast(holder)->length());
- uint32_t length = static_cast<uint32_t>(smi_length->value());
+ uint32_t length =
+ holder->IsJSArray()
+ ? static_cast<uint32_t>(
+ Smi::cast(JSArray::cast(holder)->length())->value())
+ : ElementsAccessorSubclass::GetCapacityImpl(holder,
+ backing_store);
return index < length ? index : kMaxUInt32;
}
}
uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
uint32_t index) final {
- return ElementsAccessorSubclass::GetEntryForIndexImpl(holder, backing_store,
- index, NONE);
+ return ElementsAccessorSubclass::GetEntryForIndexImpl(
+ holder, backing_store, index, ALL_PROPERTIES);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -1115,13 +1110,12 @@ class DictionaryElementsAccessor
}
static uint32_t GetEntryForIndexImpl(JSObject* holder, FixedArrayBase* store,
- uint32_t index,
- PropertyAttributes filter) {
+ uint32_t index, PropertyFilter filter) {
DisallowHeapAllocation no_gc;
SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
int entry = dictionary->FindEntry(index);
if (entry == SeededNumberDictionary::kNotFound) return kMaxUInt32;
- if (filter != NONE) {
+ if (filter != ALL_PROPERTIES) {
PropertyDetails details = dictionary->DetailsAt(entry);
PropertyAttributes attr = details.attributes();
if ((attr & filter) != 0) return kMaxUInt32;
@@ -1137,7 +1131,7 @@ class DictionaryElementsAccessor
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys, uint32_t range,
- PropertyAttributes filter,
+ PropertyFilter filter,
uint32_t offset) {
Handle<SeededNumberDictionary> dictionary =
Handle<SeededNumberDictionary>::cast(backing_store);
@@ -1152,6 +1146,12 @@ class DictionaryElementsAccessor
uint32_t index = static_cast<uint32_t>(k->Number());
if (index < offset) continue;
PropertyDetails details = dictionary->DetailsAt(i);
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = dictionary->ValueAt(i);
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ }
PropertyAttributes attr = details.attributes();
if ((attr & filter) != 0) continue;
keys->AddKey(index);
@@ -1847,8 +1847,7 @@ class TypedElementsAccessor
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* backing_store,
- uint32_t index,
- PropertyAttributes filter) {
+ uint32_t index, PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store)
? index
: kMaxUInt32;
@@ -1983,8 +1982,7 @@ class SloppyArgumentsElementsAccessor
static uint32_t GetEntryForIndexImpl(JSObject* holder,
FixedArrayBase* parameters,
- uint32_t index,
- PropertyAttributes filter) {
+ uint32_t index, PropertyFilter filter) {
FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(parameter_map, index);
if (!probe->IsTheHole()) return index;
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 01fc18448f..71e70a1c00 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -47,10 +47,10 @@ class ElementsAccessor {
// all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store,
- PropertyAttributes filter = NONE) = 0;
+ PropertyFilter filter = ALL_PROPERTIES) = 0;
inline bool HasElement(Handle<JSObject> holder, uint32_t index,
- PropertyAttributes filter = NONE) {
+ PropertyFilter filter = ALL_PROPERTIES) {
return HasElement(holder, index, handle(holder->elements()), filter);
}
@@ -120,13 +120,13 @@ class ElementsAccessor {
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys,
uint32_t range = kMaxUInt32,
- PropertyAttributes filter = NONE,
+ PropertyFilter filter = ALL_PROPERTIES,
uint32_t offset = 0) = 0;
inline void CollectElementIndices(Handle<JSObject> object,
KeyAccumulator* keys,
uint32_t range = kMaxUInt32,
- PropertyAttributes filter = NONE,
+ PropertyFilter filter = ALL_PROPERTIES,
uint32_t offset = 0) {
CollectElementIndices(object, handle(object->elements()), keys, range,
filter, offset);
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index ecf2d22f69..d4efb7653d 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -6,7 +6,6 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/deoptimizer.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/vm-state-inl.h"
@@ -95,7 +94,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
if (FLAG_profile_deserialization && target->IsJSFunction()) {
PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
}
- value = CALL_GENERATED_CODE(stub_entry, orig_func, func, recv, argc, argv);
+ value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
+ argc, argv);
}
#ifdef VERIFY_HEAP
@@ -109,10 +109,6 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
isolate->ReportPendingMessages();
- // Reset stepping state when script exits with uncaught exception.
- if (isolate->debug()->is_active()) {
- isolate->debug()->ClearStepping();
- }
return MaybeHandle<Object>();
} else {
isolate->clear_pending_message();
@@ -424,50 +420,16 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-#define RETURN_NATIVE_CALL(name, args) \
- do { \
- Handle<Object> argv[] = args; \
- return Call(isolate, isolate->name##_fun(), \
- isolate->factory()->undefined_value(), arraysize(argv), argv); \
- } while (false)
-
-
-MaybeHandle<Object> Execution::ToDetailString(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_detail_string, { obj });
-}
-
-
-MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
- Handle<Object> time_obj = isolate->factory()->NewNumber(time);
- RETURN_NATIVE_CALL(create_date, { time_obj });
-}
-
-#undef RETURN_NATIVE_CALL
-
-
-MaybeHandle<Object> Execution::ToObject(Isolate* isolate, Handle<Object> obj) {
+MaybeHandle<JSReceiver> Execution::ToObject(Isolate* isolate,
+ Handle<Object> obj) {
Handle<JSReceiver> receiver;
if (JSReceiver::ToObject(isolate, obj).ToHandle(&receiver)) {
return receiver;
}
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject), Object);
-}
-
-
-MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
- Handle<String> flags) {
- Isolate* isolate = pattern->GetIsolate();
- Handle<JSFunction> function = Handle<JSFunction>(
- isolate->native_context()->regexp_function());
- Handle<Object> re_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, re_obj,
- RegExpImpl::CreateRegExpLiteral(function, pattern, flags),
- JSRegExp);
- return Handle<JSRegExp>::cast(re_obj);
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+ JSReceiver);
}
@@ -497,6 +459,11 @@ void StackGuard::HandleGCInterrupt() {
Object* StackGuard::HandleInterrupts() {
+ if (FLAG_verify_predictable) {
+ // Advance synthetic time by making a time request.
+ isolate_->heap()->MonotonicallyIncreasingTimeInMs();
+ }
+
if (CheckAndClearInterrupt(GC_REQUEST)) {
isolate_->heap()->HandleGCRequest();
}
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index f1b1057a61..81b71b631e 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -13,9 +13,6 @@
namespace v8 {
namespace internal {
-// Forward declarations.
-class JSRegExp;
-
class Execution final : public AllStatic {
public:
// Call a function, the caller supplies a receiver and an array
@@ -52,21 +49,9 @@ class Execution final : public AllStatic {
Handle<Object> argv[],
MaybeHandle<Object>* exception_out = NULL);
- // ECMA-262 9.8
- MUST_USE_RESULT static MaybeHandle<Object> ToDetailString(
- Isolate* isolate, Handle<Object> obj);
-
// ECMA-262 9.9
- MUST_USE_RESULT static MaybeHandle<Object> ToObject(
- Isolate* isolate, Handle<Object> obj);
-
- // Create a new date object from 'time'.
- MUST_USE_RESULT static MaybeHandle<Object> NewDate(
- Isolate* isolate, double time);
-
- // Create a new regular expression object from 'pattern' and 'flags'.
- MUST_USE_RESULT static MaybeHandle<JSRegExp> NewJSRegExp(
- Handle<String> pattern, Handle<String> flags);
+ MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
+ Handle<Object> obj);
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 595259b3aa..f03e6b2e61 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -96,7 +96,6 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
result->set_prototype_users(WeakFixedArray::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_validity_cell(Smi::FromInt(0));
- result->set_constructor_name(Smi::FromInt(0));
return result;
}
@@ -671,7 +670,13 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
}
- Handle<Map> map = external_one_byte_string_map();
+ Handle<Map> map;
+ if (resource->IsCompressible()) {
+ // TODO(hajimehoshi): Rename this to 'uncached_external_one_byte_string_map'
+ map = short_external_one_byte_string_map();
+ } else {
+ map = external_one_byte_string_map();
+ }
Handle<ExternalOneByteString> external_string =
New<ExternalOneByteString>(map, NEW_SPACE);
external_string->set_length(static_cast<int>(length));
@@ -694,8 +699,15 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
static const size_t kOneByteCheckLengthLimit = 32;
bool is_one_byte = length <= kOneByteCheckLengthLimit &&
String::IsOneByte(resource->data(), static_cast<int>(length));
- Handle<Map> map = is_one_byte ?
- external_string_with_one_byte_data_map() : external_string_map();
+ Handle<Map> map;
+ if (resource->IsCompressible()) {
+ // TODO(hajimehoshi): Rename these to 'uncached_external_string_...'.
+ map = is_one_byte ? short_external_string_with_one_byte_data_map()
+ : short_external_string_map();
+ } else {
+ map = is_one_byte ? external_string_with_one_byte_data_map()
+ : external_string_map();
+ }
Handle<ExternalTwoByteString> external_string =
New<ExternalTwoByteString>(map, NEW_SPACE);
external_string->set_length(static_cast<int>(length));
@@ -726,8 +738,10 @@ Handle<Context> Factory::NewNativeContext() {
NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
array->set_map_no_write_barrier(*native_context_map());
Handle<Context> context = Handle<Context>::cast(array);
- context->set_js_array_maps(*undefined_value());
+ context->set_native_context(*context);
context->set_errors_thrown(Smi::FromInt(0));
+ Handle<WeakCell> weak_cell = NewWeakCell(context);
+ context->set_self_weak_cell(*weak_cell);
DCHECK(context->IsNativeContext());
return context;
}
@@ -742,7 +756,7 @@ Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(function->context());
context->set_extension(*scope_info);
- context->set_global_object(function->context()->global_object());
+ context->set_native_context(function->native_context());
DCHECK(context->IsScriptContext());
return context;
}
@@ -764,7 +778,7 @@ Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
array->set_map_no_write_barrier(*module_context_map());
// Instance link will be set later.
Handle<Context> context = Handle<Context>::cast(array);
- context->set_extension(Smi::FromInt(0));
+ context->set_extension(*the_hole_value());
return context;
}
@@ -777,8 +791,8 @@ Handle<Context> Factory::NewFunctionContext(int length,
Handle<Context> context = Handle<Context>::cast(array);
context->set_closure(*function);
context->set_previous(function->context());
- context->set_extension(Smi::FromInt(0));
- context->set_global_object(function->context()->global_object());
+ context->set_extension(*the_hole_value());
+ context->set_native_context(function->native_context());
return context;
}
@@ -794,7 +808,7 @@ Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*name);
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
context->set(Context::THROWN_OBJECT_INDEX, *thrown_object);
return context;
}
@@ -809,7 +823,7 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*extension);
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
return context;
}
@@ -817,14 +831,13 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
- Handle<FixedArray> array =
- NewFixedArrayWithHoles(scope_info->ContextLength());
+ Handle<FixedArray> array = NewFixedArray(scope_info->ContextLength());
array->set_map_no_write_barrier(*block_context_map());
Handle<Context> context = Handle<Context>::cast(array);
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*scope_info);
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
return context;
}
@@ -966,6 +979,13 @@ Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value) {
}
+Handle<TransitionArray> Factory::NewTransitionArray(int capacity) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateTransitionArray(capacity),
+ TransitionArray);
+}
+
+
Handle<AllocationSite> Factory::NewAllocationSite() {
Handle<Map> map = allocation_site_map();
Handle<AllocationSite> site = New<AllocationSite>(map, OLD_SPACE);
@@ -1173,28 +1193,23 @@ DEFINE_ERROR(TypeError, type_error)
#undef DEFINE_ERROR
-void Factory::InitializeFunction(Handle<JSFunction> function,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context) {
+Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Context> context,
+ PretenureFlag pretenure) {
+ AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
+ Handle<JSFunction> function = New<JSFunction>(map, space);
+
function->initialize_properties();
function->initialize_elements();
function->set_shared(*info);
function->set_code(info->code());
function->set_context(*context);
function->set_prototype_or_initial_map(*the_hole_value());
- function->set_literals_or_bindings(*empty_fixed_array());
+ function->set_literals(LiteralsArray::cast(*empty_fixed_array()));
function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
-}
-
-
-Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
- PretenureFlag pretenure) {
- AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
- Handle<JSFunction> result = New<JSFunction>(map, space);
- InitializeFunction(result, info, context);
- return result;
+ isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
+ return function;
}
@@ -1202,14 +1217,19 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<String> name,
MaybeHandle<Code> code) {
Handle<Context> context(isolate()->native_context());
- Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(name, code);
- DCHECK(is_sloppy(info->language_mode()) &&
- (map.is_identical_to(isolate()->sloppy_function_map()) ||
- map.is_identical_to(
- isolate()->sloppy_function_without_prototype_map()) ||
- map.is_identical_to(
- isolate()->sloppy_function_with_readonly_prototype_map()) ||
- map.is_identical_to(isolate()->strict_function_map())));
+ Handle<SharedFunctionInfo> info =
+ NewSharedFunctionInfo(name, code, map->is_constructor());
+ DCHECK(is_sloppy(info->language_mode()));
+ DCHECK(!map->IsUndefined());
+ DCHECK(
+ map.is_identical_to(isolate()->sloppy_function_map()) ||
+ map.is_identical_to(isolate()->sloppy_function_without_prototype_map()) ||
+ map.is_identical_to(
+ isolate()->sloppy_function_with_readonly_prototype_map()) ||
+ map.is_identical_to(isolate()->strict_function_map()) ||
+ // TODO(titzer): wasm_function_map() could be undefined here. ugly.
+ (*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
+ map.is_identical_to(isolate()->proxy_function_map()));
return NewFunction(map, info, context);
}
@@ -1352,8 +1372,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
if (cached.literals != nullptr) {
result->set_literals(cached.literals);
-
- } else if (!info->bound()) {
+ } else {
int number_of_literals = info->num_literals();
Handle<LiteralsArray> literals =
LiteralsArray::New(isolate(), handle(info->feedback_vector()),
@@ -1362,8 +1381,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
// Cache context-specific literals.
Handle<Context> native_context(context->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(
- info, native_context, undefined_value(), literals, BailoutId::None());
+ SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(info, native_context,
+ literals);
}
return result;
@@ -1427,9 +1446,8 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_next_code_link(*undefined_value());
code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_prologue_offset(prologue_offset);
- if (FLAG_enable_embedded_constant_pool) {
- code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
- }
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
@@ -1658,7 +1676,7 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
DCHECK(function->shared()->is_generator());
JSFunction::EnsureHasInitialMap(function);
Handle<Map> map(function->initial_map());
- DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+ DCHECK_EQ(JS_GENERATOR_OBJECT_TYPE, map->instance_type());
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSObjectFromMap(*map),
@@ -1936,103 +1954,81 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
}
-Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
- Handle<Object> prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Handle<Map> map = NewMap(JS_PROXY_TYPE, JSProxy::kSize);
- Map::SetPrototype(map, prototype);
-
- // Allocate the proxy object.
- Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(*handler);
- result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
- Handle<JSReceiver> call_trap,
- Handle<Object> construct_trap,
- Handle<Object> prototype) {
- // Allocate map.
- // TODO(rossberg): Once we optimize proxies, think about a scheme to share
- // maps. Will probably depend on the identity of the handler object, too.
- Handle<Map> map = NewMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
- Map::SetPrototype(map, prototype);
- map->set_is_callable();
- map->set_is_constructor(construct_trap->IsCallable());
-
- // Allocate the proxy object.
- Handle<JSFunctionProxy> result = New<JSFunctionProxy>(map, NEW_SPACE);
- result->InitializeBody(map->instance_size(), Smi::FromInt(0));
- result->set_handler(*handler);
- result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
- result->set_call_trap(*call_trap);
- result->set_construct_trap(*construct_trap);
- return result;
-}
-
-
-void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
- int size) {
- DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
-
- Handle<Map> proxy_map(proxy->map());
- Handle<Map> map = Map::FixProxy(proxy_map, type, size);
-
- // Check that the receiver has at least the size of the fresh object.
- int size_difference = proxy_map->instance_size() - map->instance_size();
- DCHECK(size_difference >= 0);
-
- // Allocate the backing storage for the properties.
- Handle<FixedArray> properties = empty_fixed_array();
-
- Heap* heap = isolate()->heap();
- MaybeHandle<SharedFunctionInfo> shared;
- if (type == JS_FUNCTION_TYPE) {
- OneByteStringKey key(STATIC_CHAR_VECTOR("<freezing call trap>"),
- heap->HashSeed());
- Handle<String> name = InternalizeStringWithKey(&key);
- shared = NewSharedFunctionInfo(name, MaybeHandle<Code>());
+MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
+ Handle<JSReceiver> target_function, Handle<Object> bound_this,
+ Vector<Handle<Object>> bound_args) {
+ DCHECK(target_function->IsCallable());
+ STATIC_ASSERT(Code::kMaxArguments <= FixedArray::kMaxLength);
+ if (bound_args.length() >= Code::kMaxArguments) {
+ THROW_NEW_ERROR(isolate(),
+ NewRangeError(MessageTemplate::kTooManyArguments),
+ JSBoundFunction);
}
- // In order to keep heap in consistent state there must be no allocations
- // before object re-initialization is finished and filler object is installed.
- DisallowHeapAllocation no_allocation;
+ // Determine the prototype of the {target_function}.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), prototype,
+ Object::GetPrototype(isolate(), target_function),
+ JSBoundFunction);
- // Put in filler if the new object is smaller than the old.
- if (size_difference > 0) {
- Address address = proxy->address();
- heap->CreateFillerObjectAt(address + map->instance_size(), size_difference);
- heap->AdjustLiveBytes(*proxy, -size_difference,
- Heap::CONCURRENT_TO_SWEEPER);
+ // Create the [[BoundArguments]] for the result.
+ Handle<FixedArray> bound_arguments;
+ if (bound_args.length() == 0) {
+ bound_arguments = empty_fixed_array();
+ } else {
+ bound_arguments = NewFixedArray(bound_args.length());
+ for (int i = 0; i < bound_args.length(); ++i) {
+ bound_arguments->set(i, *bound_args[i]);
+ }
}
- // Reset the map for the object.
- proxy->synchronized_set_map(*map);
- Handle<JSObject> jsobj = Handle<JSObject>::cast(proxy);
-
- // Reinitialize the object from the constructor map.
- heap->InitializeJSObjectFromMap(*jsobj, *properties, *map);
+ // Setup the map for the JSBoundFunction instance.
+ Handle<Map> map = handle(
+ target_function->IsConstructor()
+ ? isolate()->native_context()->bound_function_with_constructor_map()
+ : isolate()
+ ->native_context()
+ ->bound_function_without_constructor_map(),
+ isolate());
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+ }
+ DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
+
+ // Setup the JSBoundFunction instance.
+ Handle<JSBoundFunction> result =
+ Handle<JSBoundFunction>::cast(NewJSObjectFromMap(map));
+ result->set_bound_target_function(*target_function);
+ result->set_bound_this(*bound_this);
+ result->set_bound_arguments(*bound_arguments);
+ result->set_creation_context(*isolate()->native_context());
+ result->set_length(Smi::FromInt(0));
+ result->set_name(*undefined_value(), SKIP_WRITE_BARRIER);
+ return result;
+}
- // The current native context is used to set up certain bits.
- // TODO(adamk): Using the current context seems wrong, it should be whatever
- // context the JSProxy originated in. But that context isn't stored anywhere.
- Handle<Context> context(isolate()->native_context());
- // Functions require some minimal initialization.
- if (type == JS_FUNCTION_TYPE) {
- map->set_is_constructor(true);
- map->set_is_callable();
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(proxy);
- InitializeFunction(js_function, shared.ToHandleChecked(), context);
+// ES6 section 9.5.15 ProxyCreate (target, handler)
+Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
+ Handle<JSReceiver> handler) {
+ // Allocate the proxy object.
+ Handle<Map> map;
+ if (target->IsCallable()) {
+ if (target->IsConstructor()) {
+ map = Handle<Map>(isolate()->proxy_constructor_map());
+ } else {
+ map = Handle<Map>(isolate()->proxy_callable_map());
+ }
} else {
- // Provide JSObjects with a constructor.
- map->SetConstructor(context->object_function());
+ map = Handle<Map>(isolate()->proxy_map());
}
+ DCHECK(map->prototype()->IsNull());
+ Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
+ result->initialize_properties();
+ result->set_target(*target);
+ result->set_handler(*handler);
+ result->set_hash(*undefined_value(), SKIP_WRITE_BARRIER);
+ return result;
}
@@ -2052,14 +2048,22 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
Handle<Map> map(constructor->initial_map(), isolate());
+ Handle<Map> old_map(object->map(), isolate());
// The proxy's hash should be retained across reinitialization.
Handle<Object> hash(object->hash(), isolate());
+ JSObject::InvalidatePrototypeChains(*old_map);
+ if (old_map->is_prototype_map()) {
+ map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
+ map->set_is_prototype_map(true);
+ }
+ JSObject::UpdatePrototypeUserRegistration(old_map, map, isolate());
+
// Check that the already allocated object has the same size and type as
// objects allocated using the constructor.
- DCHECK(map->instance_size() == object->map()->instance_size());
- DCHECK(map->instance_type() == object->map()->instance_type());
+ DCHECK(map->instance_size() == old_map->instance_size());
+ DCHECK(map->instance_type() == old_map->instance_type());
// Allocate the backing storage for the properties.
Handle<FixedArray> properties = empty_fixed_array();
@@ -2080,22 +2084,13 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
}
-void Factory::BecomeJSObject(Handle<JSProxy> proxy) {
- ReinitializeJSProxy(proxy, JS_OBJECT_TYPE, JSObject::kHeaderSize);
-}
-
-
-void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
- ReinitializeJSProxy(proxy, JS_FUNCTION_TYPE, JSFunction::kSize);
-}
-
-
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name, int number_of_literals, FunctionKind kind,
Handle<Code> code, Handle<ScopeInfo> scope_info,
Handle<TypeFeedbackVector> feedback_vector) {
DCHECK(IsValidFunctionKind(kind));
- Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name, code);
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ name, code, IsConstructable(kind, scope_info->language_mode()));
shared->set_scope_info(*scope_info);
shared->set_feedback_vector(*feedback_vector);
shared->set_kind(kind);
@@ -2128,8 +2123,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- Handle<String> name,
- MaybeHandle<Code> maybe_code) {
+ Handle<String> name, MaybeHandle<Code> maybe_code, bool is_constructor) {
Handle<Map> map = shared_function_info_map();
Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, OLD_SPACE);
@@ -2137,14 +2131,15 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_name(*name);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
- code = handle(isolate()->builtins()->builtin(Builtins::kIllegal));
+ code = isolate()->builtins()->Illegal();
}
share->set_code(*code);
- share->set_optimized_code_map(Smi::FromInt(0));
+ share->set_optimized_code_map(*cleared_optimized_code_map());
share->set_scope_info(ScopeInfo::Empty(isolate()));
- Code* construct_stub =
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
- share->set_construct_stub(construct_stub);
+ Handle<Code> construct_stub =
+ is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
+ : isolate()->builtins()->ConstructedNonConstructable();
+ share->set_construct_stub(*construct_stub);
share->set_instance_class_name(*Object_string());
share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
@@ -2368,7 +2363,7 @@ void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+ store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
store->set(JSRegExp::kAtomPatternIndex, *data);
regexp->set_data(*store);
}
@@ -2383,7 +2378,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
- store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
+ store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 80916b8a20..01a2f7eecf 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -295,6 +295,8 @@ class Factory final {
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
+ Handle<TransitionArray> NewTransitionArray(int capacity);
+
// Allocate a tenured AllocationSite. It's payload is null.
Handle<AllocationSite> NewAllocationSite();
@@ -341,7 +343,9 @@ class Factory final {
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromSize(size_t value,
PretenureFlag pretenure = NOT_TENURED) {
- if (Smi::IsValid(static_cast<intptr_t>(value))) {
+ // We can't use Smi::IsValid() here because that operates on a signed
+ // intptr_t, and casting from size_t could create a bogus sign bit.
+ if (value <= static_cast<size_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
isolate());
}
@@ -475,14 +479,14 @@ class Factory final {
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value,
Handle<Object> done);
- // Allocates a Harmony proxy.
- Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
+ // Allocates a bound function.
+ MaybeHandle<JSBoundFunction> NewJSBoundFunction(
+ Handle<JSReceiver> target_function, Handle<Object> bound_this,
+ Vector<Handle<Object>> bound_args);
- // Allocates a Harmony function proxy.
- Handle<JSProxy> NewJSFunctionProxy(Handle<Object> handler,
- Handle<JSReceiver> call_trap,
- Handle<Object> construct_trap,
- Handle<Object> prototype);
+ // Allocates a Harmony proxy.
+ Handle<JSProxy> NewJSProxy(Handle<JSReceiver> target,
+ Handle<JSReceiver> handler);
// Reinitialize an JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the
@@ -493,10 +497,6 @@ class Factory final {
Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy();
- // Change the type of the argument into a JS object/function and reinitialize.
- void BecomeJSObject(Handle<JSProxy> object);
- void BecomeJSFunction(Handle<JSProxy> object);
-
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
bool read_only_prototype = false,
@@ -524,6 +524,8 @@ class Factory final {
Handle<Code> code,
InstanceType type,
int instance_size);
+ Handle<JSFunction> NewFunction(Handle<Map> map, Handle<String> name,
+ MaybeHandle<Code> maybe_code);
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
@@ -629,7 +631,8 @@ class Factory final {
Handle<Code> code, Handle<ScopeInfo> scope_info,
Handle<TypeFeedbackVector> feedback_vector);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
- MaybeHandle<Code> code);
+ MaybeHandle<Code> code,
+ bool is_constructor);
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
@@ -696,29 +699,11 @@ class Factory final {
// Update the cache with a new number-string pair.
void SetNumberStringCache(Handle<Object> number, Handle<String> string);
- // Initializes a function with a shared part and prototype.
- // Note: this code was factored out of NewFunction such that other parts of
- // the VM could use it. Specifically, a function that creates instances of
- // type JS_FUNCTION_TYPE benefit from the use of this function.
- inline void InitializeFunction(Handle<JSFunction> function,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context);
-
// Creates a function initialized with a shared part.
Handle<JSFunction> NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
-
- Handle<JSFunction> NewFunction(Handle<Map> map,
- Handle<String> name,
- MaybeHandle<Code> maybe_code);
-
- // Reinitialize a JSProxy into an (empty) JS object of respective type and
- // size, but keeping the original prototype. The receiver must have at least
- // the size of the new object. The object is reinitialized and behaves as an
- // object that has been freshly allocated.
- void ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type, int size);
};
} // namespace internal
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index f65fe08ee5..9966a70382 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -185,43 +185,50 @@ DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-DEFINE_BOOL(legacy_const, true, "legacy semantics for const in sloppy mode")
-// ES2015 const semantics are staged
-DEFINE_NEG_IMPLICATION(harmony, legacy_const)
+DEFINE_BOOL(legacy_const, false, "legacy semantics for const in sloppy mode")
+// ES2015 const semantics are shipped
+DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, legacy_const, true)
+
+DEFINE_BOOL(promise_extra, true, "additional V8 Promise functions")
+// Removing extra Promise functions is staged
+DEFINE_NEG_IMPLICATION(harmony, promise_extra)
// Activate on ClusterFuzz.
-DEFINE_IMPLICATION(es_staging, harmony_destructuring)
+DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
DEFINE_IMPLICATION(es_staging, move_object_start)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_modules, "harmony modules") \
- V(harmony_proxies, "harmony proxies") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_reflect, "harmony Reflect API") \
- V(harmony_sloppy_function, "harmony sloppy function block scoping") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_simd, "harmony simd") \
- V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_regexp_subclass, "harmony regexp subclassing")
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_modules, "harmony modules") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_function_name, "harmony Function name inference") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_simd, "harmony simd") \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_regexp_subclass, "harmony regexp subclassing") \
+ V(harmony_species, "harmony Symbol.species")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_default_parameters, "harmony default parameters") \
- V(harmony_destructuring, "harmony destructuring") \
- V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_tostring, "harmony toString") \
- V(harmony_sloppy_let, "harmony let in sloppy mode")
+#define HARMONY_STAGED(V) \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_object_observe, "harmony Object.observe") \
- V(harmony_rest_parameters, "harmony rest parameters") \
- V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
- V(harmony_tolength, "harmony ToLength") \
- V(harmony_completion, "harmony completion value semantics")
+#define HARMONY_SHIPPING(V) \
+ V(harmony_default_parameters, "harmony default parameters") \
+ V(harmony_destructuring_assignment, "harmony destructuring assignment") \
+ V(harmony_destructuring_bind, "harmony destructuring bind") \
+ V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
+ V(harmony_object_observe, "harmony Object.observe") \
+ V(harmony_tolength, "harmony ToLength") \
+ V(harmony_tostring, "harmony toString") \
+ V(harmony_completion, "harmony completion value semantics") \
+ V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_sloppy_let, "harmony let in sloppy mode") \
+ V(harmony_sloppy_function, "harmony sloppy function block scoping") \
+ V(harmony_proxies, "harmony proxies") \
+ V(harmony_reflect, "harmony Reflect API")
+
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -253,7 +260,7 @@ DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
// Destructuring shares too much parsing architecture with default parameters
// to be enabled on its own.
-DEFINE_IMPLICATION(harmony_destructuring, harmony_default_parameters)
+DEFINE_IMPLICATION(harmony_destructuring_bind, harmony_default_parameters)
// Flags for experimental implementation features.
DEFINE_BOOL(compiled_keyed_generic_loads, false,
@@ -289,8 +296,7 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
-DEFINE_IMPLICATION(ignition, vector_stores)
-DEFINE_STRING(ignition_filter, "~~", "filter for ignition interpreter")
+DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
DEFINE_BOOL(ignition_fake_try_catch, false,
"enable fake try-catch-finally blocks in ignition for testing")
DEFINE_BOOL(ignition_fallback_on_eval_and_catch, false,
@@ -381,7 +387,7 @@ DEFINE_INT(stress_runs, 0, "number of stress runs")
DEFINE_BOOL(lookup_sample_by_shared, true,
"when picking a function to optimize, watch for shared function "
"info, not JSFunction itself")
-DEFINE_BOOL(flush_optimized_code_cache, true,
+DEFINE_BOOL(flush_optimized_code_cache, false,
"flushes the cache of optimized code for closures on every GC")
DEFINE_BOOL(inline_construct, true, "inline constructor calls")
DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
@@ -401,7 +407,7 @@ DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_BOOL(block_concurrent_recompilation, false,
"block queued jobs until released")
-DEFINE_BOOL(concurrent_osr, true, "concurrent on-stack replacement")
+DEFINE_BOOL(concurrent_osr, false, "concurrent on-stack replacement")
DEFINE_IMPLICATION(concurrent_osr, concurrent_recompilation)
DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
@@ -414,6 +420,8 @@ DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_IMPLICATION(turbo, turbo_inlining)
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
+DEFINE_BOOL(turbo_sp_frame_access, false,
+ "use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
@@ -458,15 +466,18 @@ DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
+DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
+DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
+DEFINE_BOOL(turbo_instruction_scheduling, false,
+ "enable instruction scheduling in TurboFan")
-#if defined(V8_WASM)
// Flags for native WebAssembly.
+DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
-#endif
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -587,8 +598,7 @@ DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
"CPU profiler sampling interval in microseconds")
-// debug.cc
-DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
+// Array abuse tracing
DEFINE_BOOL(trace_js_array_abuse, false,
"trace out-of-bounds accesses to JS arrays")
DEFINE_BOOL(trace_external_array_abuse, false,
@@ -597,6 +607,11 @@ DEFINE_BOOL(trace_array_abuse, false,
"trace out-of-bounds accesses to all arrays")
DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse)
DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
+
+// debugger
+DEFINE_BOOL(debug_eval_readonly_locals, true,
+ "do not update locals after debug-evaluate")
+DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_BOOL(hard_abort, true, "abort by crashing")
@@ -667,15 +682,13 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(finalize_marking_incrementally, true,
- "finalize marking in incremental steps")
DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
-DEFINE_BOOL(parallel_compaction, false, "use parallel compaction")
+DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
@@ -695,6 +708,8 @@ DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
DEFINE_BOOL(memory_reducer, true, "use memory reducer")
DEFINE_BOOL(scavenge_reclaim_unmodified_objects, false,
"remove unmodified and unreferenced objects")
+DEFINE_INT(heap_growing_percent, 0,
+ "specifies heap growing factor as (1 + heap_growing_percent/100)")
// counters.cc
DEFINE_INT(histogram_interval, 600000,
@@ -712,7 +727,6 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL(vector_stores, true, "use vectors for store ics")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -830,6 +844,7 @@ DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
+DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
// mark-compact.cc
DEFINE_BOOL(force_marking_deque_overflows, false,
@@ -947,6 +962,7 @@ DEFINE_BOOL(regexp_possessive_quantifier, false,
DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
DEFINE_BOOL(trace_regexp_assembler, false,
"trace regexp macro assembler calls.")
+DEFINE_BOOL(trace_regexp_parser, false, "trace regexp parsing")
//
// Logging and profiling flags
@@ -1077,7 +1093,7 @@ DEFINE_IMPLICATION(print_all_code, trace_codegen)
DEFINE_BOOL(verify_predictable, false,
"this mode is used for checking that V8 behaves predictably")
-DEFINE_INT(dump_allocations_digest_at_alloc, 0,
+DEFINE_INT(dump_allocations_digest_at_alloc, -1,
"dump allocations digest each n-th allocation")
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 0aeda5a5b3..d60ab29c4e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -6,14 +6,14 @@
#include <sstream>
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
-#include "src/scopeinfo.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
@@ -436,6 +436,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return JAVA_SCRIPT;
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
+ case Code::WASM_FUNCTION:
+ return STUB;
case Code::BUILTIN:
if (!marker->IsSmi()) {
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
@@ -756,20 +758,6 @@ bool JavaScriptFrame::HasInlinedFrames() const {
}
-Object* JavaScriptFrame::GetOriginalConstructor() const {
- DCHECK(!HasInlinedFrames());
- Address fp = caller_fp();
- if (has_adapted_arguments()) {
- // Skip the arguments adaptor frame and look at the real caller.
- fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
- }
- DCHECK(IsConstructFrame(fp));
- STATIC_ASSERT(ConstructFrameConstants::kOriginalConstructorOffset ==
- StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize);
- return GetExpression(fp, 3);
-}
-
-
int JavaScriptFrame::GetArgumentsLength() const {
// If there is an arguments adaptor frame get the arguments length from it.
if (has_adapted_arguments()) {
@@ -949,8 +937,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
+ Translation::Opcode frame_opcode =
+ static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, frame_opcode);
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
@@ -958,8 +947,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// in the deoptimization translation are ordered bottom-to-top.
bool is_constructor = IsConstructor();
while (jsframe_count != 0) {
- opcode = static_cast<Translation::Opcode>(it.Next());
- if (opcode == Translation::JS_FRAME) {
+ frame_opcode = static_cast<Translation::Opcode>(it.Next());
+ if (frame_opcode == Translation::JS_FRAME ||
+ frame_opcode == Translation::INTERPRETED_FRAME) {
jsframe_count--;
BailoutId const ast_id = BailoutId(it.Next());
SharedFunctionInfo* const shared_info =
@@ -968,7 +958,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// The translation commands are ordered and the function is always
// at the first position, and the receiver is next.
- opcode = static_cast<Translation::Opcode>(it.Next());
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
// Get the correct function in the optimized frame.
JSFunction* function;
@@ -1005,25 +995,33 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
}
Code* const code = shared_info->code();
- DeoptimizationOutputData* const output_data =
- DeoptimizationOutputData::cast(code->deoptimization_data());
- unsigned const entry =
- Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
- unsigned const pc_offset =
- FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
- DCHECK_NE(0U, pc_offset);
+ unsigned pc_offset;
+ if (frame_opcode == Translation::JS_FRAME) {
+ DeoptimizationOutputData* const output_data =
+ DeoptimizationOutputData::cast(code->deoptimization_data());
+ unsigned const entry =
+ Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
+ pc_offset =
+ FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
+ DCHECK_NE(0U, pc_offset);
+ } else {
+ // TODO(rmcilroy): Modify FrameSummary to enable us to summarize
+ // based on the BytecodeArray and bytecode offset.
+ DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
+ pc_offset = 0;
+ }
FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
frames->Add(summary);
is_constructor = false;
- } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
+ } else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
// The next encountered JS_FRAME will be marked as a constructor call.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ it.Skip(Translation::NumberOfOperandsFor(frame_opcode));
DCHECK(!is_constructor);
is_constructor = true;
} else {
// Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
+ it.Skip(Translation::NumberOfOperandsFor(frame_opcode));
}
}
DCHECK(!is_constructor);
@@ -1095,7 +1093,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
opcode = static_cast<Translation::Opcode>(it.Next());
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
- if (opcode == Translation::JS_FRAME) {
+ if (opcode == Translation::JS_FRAME ||
+ opcode == Translation::INTERPRETED_FRAME) {
jsframe_count--;
// The translation commands are ordered and the function is always at the
@@ -1510,9 +1509,8 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
DCHECK(base::bits::IsPowerOfTwo32(kInnerPointerToCodeCacheSize));
- uint32_t hash = ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
- v8::internal::kZeroHashSeed);
+ uint32_t hash = ComputeIntegerHash(ObjectAddressForHashing(inner_pointer),
+ v8::internal::kZeroHashSeed);
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index f00cbdb887..674d7daeca 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -161,8 +161,6 @@ class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset =
- StandardFrameConstants::kExpressionsOffset - 4 * kPointerSize;
- static const int kOriginalConstructorOffset =
StandardFrameConstants::kExpressionsOffset - 3 * kPointerSize;
static const int kLengthOffset =
StandardFrameConstants::kExpressionsOffset - 2 * kPointerSize;
@@ -172,17 +170,30 @@ class ConstructFrameConstants : public AllStatic {
StandardFrameConstants::kExpressionsOffset - 0 * kPointerSize;
static const int kFrameSize =
- StandardFrameConstants::kFixedFrameSize + 5 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InterpreterFrameConstants : public AllStatic {
public:
+ // Fixed frame includes new.target and bytecode offset.
+ static const int kFixedFrameSize =
+ StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
+ static const int kFixedFrameSizeFromFp =
+ StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
+
+ // FP-relative.
+ static const int kRegisterFilePointerFromFp =
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
+
// Register file pointer relative.
static const int kLastParamFromRegisterPointer =
- StandardFrameConstants::kFixedFrameSize + kPointerSize;
- static const int kFunctionFromRegisterPointer = kPointerSize;
- static const int kContextFromRegisterPointer = 2 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+
+ static const int kBytecodeOffsetFromRegisterPointer = 1 * kPointerSize;
+ static const int kNewTargetFromRegisterPointer = 2 * kPointerSize;
+ static const int kFunctionFromRegisterPointer = 3 * kPointerSize;
+ static const int kContextFromRegisterPointer = 4 * kPointerSize;
};
@@ -583,10 +594,6 @@ class JavaScriptFrame: public StandardFrame {
// about the inlined frames use {GetFunctions} and {Summarize}.
bool HasInlinedFrames() const;
- // Returns the original constructor function that was used in the constructor
- // call to this frame. Note that this is only valid on constructor frames.
- Object* GetOriginalConstructor() const;
-
// Check if this frame has "adapted" arguments in the sense that the
// actual passed arguments are available in an arguments adaptor
// frame below it on the stack.
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 5ca7a52af4..25be8a6636 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -4,14 +4,14 @@
#if V8_TARGET_ARCH_ARM
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/arm/code-stubs-arm.h"
#include "src/arm/macro-assembler-arm.h"
@@ -92,6 +92,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o r1: the JS function object being called (i.e., ourselves)
+// o r3: the new target value
// o cp: our context
// o pp: our caller's constant pool pointer (if enabled)
// o fp: our caller's frame pointer
@@ -120,7 +121,7 @@ void FullCodeGenerator::Generate() {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
__ AssertNotSmi(r2);
- __ CompareObjectType(r2, r2, no_reg, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r2, r2, no_reg, FIRST_JS_RECEIVER_TYPE);
__ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -130,7 +131,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -143,7 +144,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
@@ -180,16 +181,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(r3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(r3); // Preserve new target.
+ }
}
function_in_register_r1 = false;
// Context is returned in r0. It replaces the context passed to us.
@@ -207,13 +218,13 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), r0, r2,
+ kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, r0, &done);
@@ -223,11 +234,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_r1| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -241,29 +252,34 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, r1, r0, r2);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, r3, r0, r2);
+ }
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
- Label non_construct_frame, done;
- function_in_register_r1 = false;
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- __ b(ne, &non_construct_frame);
- __ ldr(r0,
- MemOperand(r2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ b(&done);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- __ bind(&non_construct_frame);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ __ mov(RestParamAccessDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ add(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Operand(Smi::FromInt(rest_index)));
+ function_in_register_r1 = false;
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, r0, r2, r3);
+ SetVar(rest_param, r0, r1, r2);
}
Variable* arguments = scope()->arguments();
@@ -297,7 +313,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -434,7 +450,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
__ push(r0);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -672,8 +688,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ tst(result_register(), result_register());
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
@@ -711,7 +727,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -817,7 +833,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ str(r0, ContextOperand(cp, variable->index()));
+ __ str(r0, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -839,7 +855,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
__ Push(r2, r0);
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -874,7 +890,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), ContextOperand(cp, variable->index()));
+ __ str(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -896,7 +912,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -908,7 +924,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(r1, r0);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -916,7 +932,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1042,7 +1058,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &done_convert);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1053,9 +1069,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- __ b(le, &call_runtime);
+ __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
+ __ b(eq, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1072,7 +1087,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1108,21 +1123,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register r0. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(r1);
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
int vector_index = SmiFromSlot(slot)->value();
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
-
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
- __ b(gt, &non_proxy);
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ mov(r1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));
@@ -1154,17 +1161,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ cmp(r2, Operand(Smi::FromInt(0)));
- __ b(eq, &update_each);
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ push(r1); // Enumerable.
__ push(r3); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(r3, Operand(r0));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -1225,8 +1227,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(r0);
}
@@ -1240,7 +1242,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1254,7 +1256,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
Operand(isolate()->factory()->home_object_symbol()));
__ ldr(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1270,13 +1272,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1297,12 +1298,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ b(eq, &fast);
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
@@ -1323,25 +1323,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ // Check that last extension is "the hole".
+ __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
@@ -1369,7 +1367,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1382,7 +1380,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
__ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -1425,7 +1423,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
__ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1452,7 +1450,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r0);
}
@@ -1462,49 +1460,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r5, r0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, d0, size / kPointerSize);
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r1, Operand(expr->pattern()));
+ __ mov(r0, Operand(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(r0);
}
@@ -1531,14 +1492,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ mov(r0, Operand(Smi::FromInt(flags)));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1577,12 +1537,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r0));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1604,7 +1560,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
__ push(r0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1615,7 +1571,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
@@ -1643,7 +1601,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1671,9 +1629,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1686,7 +1646,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1699,13 +1659,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1715,7 +1675,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ ldr(r0, MemOperand(sp));
__ push(r0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1743,13 +1703,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ mov(r0, Operand(Smi::FromInt(expr->ComputeFlags())));
__ Push(r3, r2, r1, r0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1773,33 +1732,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(r0);
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ mov(StoreDescriptor::NameRegister(),
- Operand(Smi::FromInt(array_index)));
- __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, kPointerSize));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
- __ str(result_register(), FieldMemOperand(r1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(r1, offset, result_register(), r2,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ mov(r3, Operand(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ mov(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
+ __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1810,7 +1752,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ pop(); // literal index
__ Pop(r0);
result_saved = false;
}
@@ -1824,14 +1765,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(r0);
@@ -2113,7 +2053,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ mov(r0, Operand(1));
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2133,8 +2073,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ cmp(r0, Operand(0));
- __ b(eq, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ b(ne, &l_try);
// result.value
__ pop(load_receiver); // result
@@ -2237,7 +2177,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
DCHECK(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2254,12 +2194,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, JSGlobalObject::kNativeContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
__ pop(r2);
__ LoadRoot(r3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -2293,7 +2231,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2310,7 +2248,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2437,7 +2375,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(r0);
}
@@ -2452,19 +2390,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2474,7 +2412,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2510,7 +2448,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2558,7 +2496,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r0);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2587,11 +2525,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2602,12 +2540,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ b(ne, &assign);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2618,11 +2556,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ b(ne, &const_error);
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2632,24 +2570,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ b(eq, &uninitialized_this);
__ mov(r0, Operand(var->name()));
__ Push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -2658,15 +2597,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r0);
__ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2679,9 +2617,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2697,12 +2635,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2720,8 +2654,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(r0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2732,10 +2665,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(r0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2746,12 +2678,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
@@ -2867,7 +2795,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2929,7 +2857,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2950,7 +2878,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2984,7 +2912,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(r4, r3, r2, r1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -3004,7 +2932,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(r2));
__ mov(r2, Operand(callee->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(r0, r1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3058,7 +2986,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -3100,8 +3028,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(r2);
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3114,8 +3042,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ ldr(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ ldr(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3128,20 +3063,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into r4.
+ // Load new target into r3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(r4, result_register());
+ __ mov(r3, result_register());
// Load function and argument count into r1 and r0.
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(r2);
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3172,7 +3102,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3186,7 +3116,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -3230,9 +3160,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r0, r1, r2, FIRST_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(hs, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3344,40 +3274,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- Register map = r1;
- Register type_reg = r2;
- __ ldr(map, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
-
- // Check the marker in the calling frame.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3450,27 +3347,16 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(r0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r0, r1, FIRST_JS_RECEIVER_TYPE);
// Map is now in r0.
__ b(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ cmp(r1, Operand(JS_FUNCTION_TYPE));
__ b(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
Register instance_type = r2;
@@ -3544,45 +3430,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = r0;
- Register result = r0;
- Register scratch0 = r9;
- Register scratch1 = r1;
-
- if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ ldr(scratch1, MemOperand(scratch1));
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ mov(r1, Operand(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3689,7 +3536,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(r0, &done_convert);
__ Push(r0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(r0);
}
@@ -3709,7 +3556,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ b(ls, &done_convert);
__ bind(&convert);
__ Push(r0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(r0);
}
@@ -3846,59 +3693,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into r3.
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r4, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(r0, Operand::Zero());
- __ b(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r1, r1);
- __ mov(r0, r1);
-
- // Get arguments pointer in r2.
- __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
- __ add(r2, r2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r4, MemOperand(r2, kPointerSize, NegPreIndex));
- __ Push(r4);
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand::Zero());
- __ b(ne, &loop);
- }
-
- __ bind(&args_set_up);
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3933,6 +3727,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(r0);
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r0, FieldMemOperand(r0, Map::kPrototypeOffset));
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
@@ -4190,9 +3995,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime, TAG_OBJECT);
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, JSGlobalObject::kNativeContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
__ pop(r3);
__ pop(r2);
__ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
@@ -4205,7 +4008,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ b(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(r0);
@@ -4217,9 +4020,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ push(r0);
- __ ldr(r0, GlobalObjectOperand());
- __ ldr(r0, FieldMemOperand(r0, JSGlobalObject::kNativeContextOffset));
- __ ldr(r0, ContextOperand(r0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), r0);
}
@@ -4227,7 +4028,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4300,8 +4101,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(r0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4310,10 +4110,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ ldr(r2, GlobalObjectOperand());
+ __ LoadGlobalObject(r2);
__ mov(r1, Operand(var->name()));
__ Push(r2, r1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4325,7 +4125,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(r2));
__ mov(r2, Operand(var->name()));
__ Push(context_register(), r2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(r0);
}
} else {
@@ -4598,12 +4398,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4641,12 +4437,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4718,8 +4510,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(r0, if_false);
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r0, r1, FIRST_JS_RECEIVER_TYPE);
__ b(lt, if_false);
// Check for callable or undetectable objects => false.
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
@@ -4764,7 +4556,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4865,7 +4657,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
+ __ ldr(dst, ContextMemOperand(cp, context_index));
}
@@ -4875,14 +4667,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ mov(ip, Operand(Smi::FromInt(0)));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ ldr(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -4946,7 +4737,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
@@ -5004,7 +4795,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 1);
switch (target_state) {
case INTERRUPT:
{
@@ -5056,8 +4848,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
// Replace the call address.
- Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
- replacement_code->entry());
+ Assembler::set_target_address_at(isolate, pc_immediate_load_address,
+ unoptimized_code, replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_immediate_load_address, replacement_code);
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index bb6a4309e4..e4141bb65f 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -4,14 +4,14 @@
#if V8_TARGET_ARCH_ARM64
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/frames-arm64.h"
@@ -92,6 +92,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// - x1: the JS function object being called (i.e. ourselves).
+// - x3: the new target value
// - cp: our context.
// - fp: our caller's frame pointer.
// - jssp: stack pointer.
@@ -119,7 +120,7 @@ void FullCodeGenerator::Generate() {
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
__ AssertNotSmi(x10);
- __ CompareObjectType(x10, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(x10, x10, x11, FIRST_JS_RECEIVER_TYPE);
__ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -133,7 +134,7 @@ void FullCodeGenerator::Generate() {
// Push(lr, fp, cp, x1);
// Add(fp, jssp, 2 * kPointerSize);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
@@ -148,7 +149,7 @@ void FullCodeGenerator::Generate() {
__ Sub(x10, jssp, locals_count * kPointerSize);
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&ok);
}
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
@@ -158,12 +159,12 @@ void FullCodeGenerator::Generate() {
const int kMaxPushes = 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
- __ Mov(x3, loop_iterations);
+ __ Mov(x2, loop_iterations);
Label loop_header;
__ Bind(&loop_header);
// Do pushes.
__ PushMultipleTimes(x10 , kMaxPushes);
- __ Subs(x3, x3, 1);
+ __ Subs(x2, x2, 1);
__ B(ne, &loop_header);
}
int remaining = locals_count % kMaxPushes;
@@ -183,16 +184,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Push(x3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Pop(x3); // Restore new target.
+ }
}
function_in_register_x1 = false;
// Context is returned in x0. It replaces the context passed to us.
@@ -226,11 +237,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_x1| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -244,35 +255,34 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, x1, x0, x2);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- Label check_frame_marker;
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
- function_in_register_x1 = false;
+ SetVar(new_target_var, x3, x0, x2);
+ }
- Label non_construct_frame, done;
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- __ B(ne, &non_construct_frame);
- __ Ldr(x0,
- MemOperand(x2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ B(&done);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ __ Mov(RestParamAccessDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ Add(RestParamAccessDescriptor::parameter_pointer(), fp,
+ StandardFrameConstants::kCallerSPOffset + offset);
+ __ Mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Smi::FromInt(rest_index));
- __ Bind(&non_construct_frame);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ function_in_register_x1 = false;
- __ Bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, x0, x2, x3);
+ SetVar(rest_param, x0, x1, x2);
}
Variable* arguments = scope()->arguments();
@@ -306,7 +316,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -432,7 +442,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in x0.
__ Push(result_register());
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
DCHECK(x0.Is(result_register()));
}
// Pretend that the exit is a backwards jump to the entry.
@@ -675,7 +685,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
@@ -844,7 +855,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ Push(x2, xzr);
}
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -901,7 +912,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -917,7 +928,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
}
__ Push(x11, flags);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -925,7 +936,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1048,7 +1059,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(x0, &convert);
- __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE, &done_convert, ge);
__ Bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1058,8 +1069,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
+ __ JumpIfObjectType(x0, x10, x11, JS_PROXY_TYPE, &call_runtime, eq);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1076,7 +1086,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1114,14 +1124,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
int vector_index = SmiFromSlot(slot)->value();
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
-
- __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
- __ Peek(x10, 0); // Get enumerated object.
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- // TODO(all): similar check was done already. Can we avoid it here?
- __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
- DCHECK(Smi::FromInt(0) == 0);
- __ CzeroX(x1, le); // Zero indicates proxy.
+ __ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
// Smi and array, fixed array length (as smi) and initial index.
__ Push(x1, x0, x2, xzr);
@@ -1152,16 +1155,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Cmp(x11, x2);
__ B(eq, &update_each);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- STATIC_ASSERT(kSmiTag == 0);
- __ Cbz(x2, &update_each);
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(x1, x3);
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ Mov(x3, x0);
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex,
@@ -1222,8 +1220,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(x0);
}
@@ -1236,7 +1234,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1249,7 +1247,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1265,9 +1263,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
+ // Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
@@ -1288,9 +1286,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// Terminate at native context.
__ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
- // Check that extension is NULL.
+ // Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ B(&loop);
@@ -1313,18 +1311,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
+ // Check that extension is "the hole".
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
__ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
+ // Check that last extension is "the hole".
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ Cbnz(temp, slow);
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an cp-based operand (the write barrier cannot be allowed to
@@ -1356,7 +1354,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else { // LET || CONST
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ B(done);
@@ -1369,7 +1367,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
__ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
__ Mov(LoadDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
@@ -1412,7 +1410,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// binding in harmony mode.
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ Bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1440,7 +1438,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ Bind(&done);
context()->Plug(x0);
break;
@@ -1451,47 +1449,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // x5 = materialized value (RegExp literal)
- // x4 = JS function, literals array
- // x3 = literal index
- // x2 = RegExp pattern
- // x1 = RegExp flags
- // x0 = RegExp literal clone
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ Ldr(x5, FieldMemOperand(x4, literal_offset));
- __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in x0.
- __ Mov(x3, Smi::FromInt(expr->literal_index()));
- __ Mov(x2, Operand(expr->pattern()));
- __ Mov(x1, Operand(expr->flags()));
- __ Push(x4, x3, x2, x1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ Mov(x5, x0);
-
- __ Bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
- __ B(&allocated);
-
- __ Bind(&runtime_allocate);
- __ Mov(x10, Smi::FromInt(size));
- __ Push(x5, x10);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(x5);
-
- __ Bind(&allocated);
- // After this, registers are used as follows:
- // x0: Newly allocated regexp.
- // x5: Materialized regexp.
- // x10, x11, x12: temps.
- __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Mov(x2, Smi::FromInt(expr->literal_index()));
+ __ Mov(x1, Operand(expr->pattern()));
+ __ Mov(x0, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(x0);
}
@@ -1518,14 +1481,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ Mov(x0, Smi::FromInt(flags));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1564,12 +1526,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1590,7 +1548,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1601,7 +1559,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(value);
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1628,7 +1588,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1656,9 +1616,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1671,7 +1633,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ Mov(x0, Smi::FromInt(NONE));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1684,13 +1646,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ Mov(x0, Smi::FromInt(NONE));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Mov(x0, Smi::FromInt(NONE));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1700,7 +1662,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ Peek(x0, 0);
__ Push(x0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1726,13 +1688,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ Mov(x0, Smi::FromInt(expr->ComputeFlags()));
__ Push(x3, x2, x1, x0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1755,33 +1716,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ Mov(x1, Smi::FromInt(expr->literal_index()));
- __ Push(x0, x1);
+ __ Push(x0);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
- __ Peek(StoreDescriptor::ReceiverRegister(), kPointerSize);
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ Peek(x6, kPointerSize); // Copy of array literal.
- __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
- __ Str(result_register(), FieldMemOperand(x1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(x1, offset, result_register(), x10,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ Mov(x3, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ Mov(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
+ __ Peek(StoreDescriptor::ReceiverRegister(), 0);
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1792,7 +1737,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(x0);
result_saved = false;
}
@@ -1806,14 +1750,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(x0);
@@ -1981,7 +1924,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -1999,7 +1942,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2146,7 +2089,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ Push(x0);
}
@@ -2161,19 +2104,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ Mov(x0, Smi::FromInt(DONT_ENUM));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Mov(x0, Smi::FromInt(DONT_ENUM));
__ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2183,7 +2126,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2210,7 +2153,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2258,7 +2201,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Mov(StoreDescriptor::NameRegister(), x0);
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::ValueRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2288,11 +2231,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2302,12 +2245,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
__ Mov(x10, Operand(var->name()));
__ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ Bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2317,11 +2260,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &const_error);
__ Mov(x10, Operand(var->name()));
__ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ Bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2330,11 +2273,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ JumpIfRoot(x10, Heap::kTheHoleValueRootIndex, &uninitialized_this);
__ Mov(x0, Operand(var->name()));
__ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Mov(x11, Operand(var->name()));
@@ -2344,13 +2288,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// jssp[16] : context.
// jssp[24] : value.
__ Push(x0, cp, x11, x10);
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, x1);
- if (FLAG_debug_code && op == Token::INIT_LET) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
__ Ldr(x10, location);
__ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
@@ -2358,14 +2302,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Mov(x1, Operand(var->name()));
__ Push(x0, cp, x1);
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2377,9 +2320,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2396,12 +2339,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
@@ -2419,8 +2358,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(x0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2431,10 +2369,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(x0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2448,12 +2385,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
@@ -2507,6 +2440,7 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitCallWithLoadIC");
Expression* callee = expr->expression();
// Get the target function.
@@ -2543,6 +2477,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitSuperCallWithLoadIC");
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
@@ -2570,7 +2505,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2585,6 +2520,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedCallWithLoadIC");
// Load the key.
VisitForAccumulatorValue(key);
@@ -2606,6 +2542,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedSuperCallWithLoadIC");
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
@@ -2631,7 +2568,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2644,6 +2581,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
+ ASM_LOCATION("FullCodeGenerator::EmitCall");
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2652,7 +2590,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
@@ -2689,7 +2627,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(x9, x10, x11, x12);
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2708,7 +2646,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// and the object holding it (returned in x1).
__ Mov(x10, Operand(callee->name()));
__ Push(context_register(), x10);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(x0, x1); // Receiver, function.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2736,6 +2674,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitPossiblyEvalCall");
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
@@ -2761,7 +2700,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
// Call the evaluated function.
__ Peek(x1, (arg_count + 1) * kXRegSize);
@@ -2805,8 +2744,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(x2);
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2815,12 +2754,20 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ ASM_LOCATION("FullCodeGenerator::EmitSuperConstructorCall");
SuperCallReference* super_call_ref =
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ Ldr(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ Ldr(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2833,20 +2780,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into x4.
+ // Load new target into x3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ Mov(x4, result_register());
+ __ Mov(x3, result_register());
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
__ Peek(x1, arg_count * kXRegSize);
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(x2);
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -2876,7 +2818,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2890,7 +2832,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -2934,9 +2876,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE);
+ __ CompareObjectType(x0, x10, x11, FIRST_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(hs, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3049,43 +2991,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(x0, if_false);
- Register map = x10;
- Register type_reg = x11;
- __ Ldr(map, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ Cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &check_frame_marker);
- __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ Bind(&check_frame_marker);
- __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
- __ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+ __ CompareObjectType(x0, x10, x11, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3160,28 +3066,17 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(x0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE);
// x10: object's map.
// x11: object's type.
__ B(lt, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ B(eq, &function);
- __ Cmp(x11, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ Cmp(x11, JS_FUNCTION_TYPE);
__ B(eq, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
Register instance_type = x14;
@@ -3257,45 +3152,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = x0;
- Register result = x0;
- Register stamp_addr = x10;
- Register stamp_cache = x11;
-
- if (index->value() == 0) {
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ Mov(stamp_addr, stamp);
- __ Ldr(stamp_addr, MemOperand(stamp_addr));
- __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Cmp(stamp_addr, stamp_cache);
- __ B(ne, &runtime);
- __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ B(&done);
- }
-
- __ Bind(&runtime);
- __ Mov(x1, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ Bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3396,7 +3252,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(x0, &done_convert);
__ Push(x0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(x0);
}
@@ -3415,7 +3271,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &done_convert, ls);
__ Bind(&convert);
__ Push(x0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ Bind(&done_convert);
context()->Plug(x0);
}
@@ -3556,59 +3412,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into x3.
- __ Peek(x3, 1 * kPointerSize);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ Ldr(x11, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x12, MemOperand(x11, StandardFrameConstants::kContextOffset));
- __ Cmp(x12, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ Mov(x0, Operand(0));
- __ B(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ Ldr(x1, MemOperand(x11, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(x1, x1);
-
- __ Mov(x0, x1);
-
- // Get arguments pointer in x11.
- __ Add(x11, x11, Operand(x1, LSL, kPointerSizeLog2));
- __ Add(x11, x11, StandardFrameConstants::kCallerSPOffset);
- Label loop;
- __ bind(&loop);
- // Pre-decrement x11 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Ldr(x10, MemOperand(x11, -kPointerSize, PreIndex));
- __ Push(x10);
- __ Sub(x1, x1, Operand(1));
- __ Cbnz(x1, &loop);
- }
-
- __ bind(&args_set_up);
- __ Peek(x1, Operand(x0, LSL, kPointerSizeLog2));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, x0);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3643,6 +3446,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(x0);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldr(x0, FieldMemOperand(x0, Map::kPrototypeOffset));
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
@@ -3889,11 +3703,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
- __ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg,
- FieldMemOperand(map_reg, JSGlobalObject::kNativeContextOffset));
- __ Ldr(map_reg,
- ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
__ Pop(boolean_done);
__ Pop(result_value);
__ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
@@ -3911,7 +3721,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ B(&done);
__ Bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ Bind(&done);
context()->Plug(x0);
@@ -3923,9 +3733,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
__ Push(x0);
- __ Ldr(x0, GlobalObjectMemOperand());
- __ Ldr(x0, FieldMemOperand(x0, JSGlobalObject::kNativeContextOffset));
- __ Ldr(x0, ContextMemOperand(x0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), x0);
}
@@ -3933,7 +3741,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ Mov(x0, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4004,8 +3812,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(x0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4014,10 +3821,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ Ldr(x12, GlobalObjectMemOperand());
+ __ LoadGlobalObject(x12);
__ Mov(x11, Operand(var->name()));
__ Push(x12, x11);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(x0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4028,7 +3835,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ Mov(x2, Operand(var->name()));
__ Push(context_register(), x2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(x0);
}
} else {
@@ -4300,12 +4107,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4343,12 +4146,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4424,8 +4223,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
__ JumpIfSmi(x0, if_false);
__ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, if_false, lt);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE, if_false, lt);
// Check for callable or undetectable objects => false.
__ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
__ TestAndSplit(x10, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable),
@@ -4475,7 +4274,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4702,7 +4501,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ Mov(x0, 1);
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -4723,7 +4522,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// The ToBooleanStub argument (result.done) is in x0.
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ Cbz(x0, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ B(ne, &l_try);
// result.value
__ Pop(load_receiver); // result
@@ -4821,7 +4621,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Mov(x10, Smi::FromInt(resume_mode));
__ Push(generator_object, result_register(), x10);
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Unreachable();
@@ -4841,7 +4641,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ Bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ Bind(&done_allocate);
Register map_reg = x1;
@@ -4849,11 +4649,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
- __ Ldr(map_reg, GlobalObjectMemOperand());
- __ Ldr(map_reg,
- FieldMemOperand(map_reg, JSGlobalObject::kNativeContextOffset));
- __ Ldr(map_reg,
- ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, map_reg);
__ Pop(result_value);
__ LoadRoot(boolean_done,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -4908,21 +4704,19 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
+ // code.
DCHECK(kSmiTag == 0);
- __ Push(xzr);
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, x10);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
__ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
- __ Push(x10);
} else {
DCHECK(closure_scope->is_function_scope());
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(x10);
}
+ __ Push(x10);
}
@@ -4978,7 +4772,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
@@ -4992,7 +4786,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
// Turn the jump into a nop.
Address branch_address = pc - 3 * kInstructionSize;
- PatchingAssembler patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ PatchingAssembler patcher(isolate, branch_address, 1);
DCHECK(Instruction::Cast(branch_address)
->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
@@ -5026,25 +4821,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address interrupt_address_pointer =
reinterpret_cast<Address>(load) + load->ImmPCOffset();
DCHECK((Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OnStackReplacement()
- ->entry())) ||
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->InterruptCheck()
- ->entry())) ||
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->InterruptCheck()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OsrAfterStackCheck()
- ->entry())) ||
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OsrAfterStackCheck()->entry())) ||
(Memory::uint64_at(interrupt_address_pointer) ==
- reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
- ->builtins()
- ->OnStackReplacement()
- ->entry())));
+ reinterpret_cast<uint64_t>(
+ isolate->builtins()->OnStackReplacement()->entry())));
Memory::uint64_at(interrupt_address_pointer) =
reinterpret_cast<uint64_t>(replacement_code->entry());
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 89675b6c9a..416a69c708 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -4,8 +4,11 @@
#include "src/full-codegen/full-codegen.h"
-#include "src/ast.h"
-#include "src/ast-numbering.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/prettyprinter.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/compiler.h"
@@ -13,9 +16,6 @@
#include "src/debug/liveedit.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
-#include "src/prettyprinter.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -38,7 +38,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
}
CodeGenerator::MakeCodePrologue(info, "full");
const int kInitialBufferSize = 4 * KB;
- MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
+ MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize,
+ CodeObjectRequired::kYes);
if (info->will_serialize()) masm.enable_serializer();
LOG_CODE_EVENT(isolate,
@@ -601,36 +602,24 @@ void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
}
-void FullCodeGenerator::SetCallPosition(Expression* expr, int argc) {
+void FullCodeGenerator::SetCallPosition(Expression* expr) {
if (expr->position() == RelocInfo::kNoPosition) return;
RecordPosition(masm_, expr->position());
if (info_->is_debug()) {
// Always emit a debug break slot before a call.
- DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL,
- argc);
- }
-}
-
-
-void FullCodeGenerator::SetConstructCallPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- RecordPosition(masm_, expr->position());
- if (info_->is_debug()) {
- // Always emit a debug break slot before a construct call.
- DebugCodegen::GenerateSlot(masm_,
- RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL);
+ DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
}
}
void FullCodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* super) {
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
}
void FullCodeGenerator::VisitSuperCallReference(SuperCallReference* super) {
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
}
@@ -925,18 +914,12 @@ void FullCodeGenerator::EmitUnwindBeforeReturn() {
void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
BailoutId bailout_id) {
VisitForStackValue(property->key());
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
PrepareForBailoutForId(bailout_id, NO_REGISTERS);
__ Push(result_register());
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperCallReference* ref) {
- VisitForStackValue(ref->this_function_var());
- __ CallRuntime(Runtime::kGetPrototype, 1);
-}
-
-
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
@@ -951,9 +934,14 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
Comment cmnt(masm_, "[ WithStatement");
SetStatementPosition(stmt);
- VisitForStackValue(stmt->expression());
+ VisitForAccumulatorValue(stmt->expression());
+ Callable callable = CodeFactory::ToObject(isolate());
+ __ Move(callable.descriptor().GetRegisterParameter(0), result_register());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ PrepareForBailoutForId(stmt->ToObjectId(), NO_REGISTERS);
+ __ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext, 2);
+ __ CallRuntime(Runtime::kPushWithContext);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1150,7 +1138,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
__ Push(stmt->variable()->name());
__ Push(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext, 3);
+ __ CallRuntime(Runtime::kPushCatchContext);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1220,7 +1208,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// rethrow the exception if it returns.
__ Call(&finally_entry);
__ Push(result_register());
- __ CallRuntime(Runtime::kReThrow, 1);
+ __ CallRuntime(Runtime::kReThrow);
// Finally block implementation.
__ bind(&finally_entry);
@@ -1337,14 +1325,14 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
__ Push(Smi::FromInt(lit->start_position()));
__ Push(Smi::FromInt(lit->end_position()));
- __ CallRuntime(Runtime::kDefineClass, 5);
+ __ CallRuntime(Runtime::kDefineClass);
PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
EmitClassDefineProperties(lit);
if (lit->class_variable_proxy() != nullptr) {
- EmitVariableAssignment(lit->class_variable_proxy()->var(),
- Token::INIT_CONST, lit->ProxySlot());
+ EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
+ lit->ProxySlot());
}
}
@@ -1392,7 +1380,7 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
SetExpressionPosition(expr);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ CallRuntime(Runtime::kThrow);
// Never returns here.
}
@@ -1493,6 +1481,12 @@ void FullCodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
}
+void FullCodeGenerator::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
int* stack_depth, int* context_length) {
// The macros used here must preserve the result register.
@@ -1648,7 +1642,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
Comment cmnt(masm(), "[ Extend block context");
__ Push(scope->GetScopeInfo(codegen->isolate()));
codegen_->PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ __ CallRuntime(Runtime::kPushBlockContext);
// Replace the context stored in the frame.
codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 80266cd70a..52eddafa1a 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -7,7 +7,8 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/bit-vector.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -15,7 +16,6 @@
#include "src/compiler.h"
#include "src/globals.h"
#include "src/objects.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -83,8 +83,7 @@ class FullCodeGenerator: public AstVisitor {
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 149;
#elif V8_TARGET_ARCH_ARM64
-// TODO(all): Copied ARM value. Check this is sensible for ARM64.
- static const int kCodeSizeMultiplier = 149;
+ static const int kCodeSizeMultiplier = 220;
#elif V8_TARGET_ARCH_PPC64
static const int kCodeSizeMultiplier = 200;
#elif V8_TARGET_ARCH_PPC
@@ -477,27 +476,25 @@ class FullCodeGenerator: public AstVisitor {
F(IsTypedArray) \
F(IsRegExp) \
F(IsJSProxy) \
- F(IsConstructCall) \
F(Call) \
- F(DefaultConstructorCallSuper) \
F(ArgumentsLength) \
F(Arguments) \
F(ValueOf) \
F(SetValueOf) \
F(IsDate) \
- F(DateField) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
F(TwoByteSeqStringSetChar) \
F(ObjectEquals) \
F(IsFunction) \
- F(IsSpecObject) \
+ F(IsJSReceiver) \
F(IsSimdValue) \
F(MathPow) \
F(IsMinusZero) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
+ F(GetSuperConstructor) \
F(FastOneByteArrayJoin) \
F(GeneratorNext) \
F(GeneratorThrow) \
@@ -632,8 +629,6 @@ class FullCodeGenerator: public AstVisitor {
void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
FeedbackVectorSlot slot);
- void EmitLoadSuperConstructor(SuperCallReference* super_call_ref);
-
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
@@ -660,9 +655,12 @@ class FullCodeGenerator: public AstVisitor {
// This is used in loop headers where we want to break for each iteration.
void SetExpressionAsStatementPosition(Expression* expr);
- void SetCallPosition(Expression* expr, int argc);
+ void SetCallPosition(Expression* expr);
- void SetConstructCallPosition(Expression* expr);
+ void SetConstructCallPosition(Expression* expr) {
+ // Currently call and construct calls are treated the same wrt debugging.
+ SetCallPosition(expr);
+ }
// Non-local control flow support.
void EnterTryBlock(int handler_index, Label* handler);
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 089915a587..4ef3a0984f 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -4,6 +4,7 @@
#if V8_TARGET_ARCH_IA32
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -11,8 +12,7 @@
#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -83,6 +83,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o edi: the JS function object being called (i.e. ourselves)
+// o edx: the new target value
// o esi: our context
// o ebp: our caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -109,7 +110,7 @@ void FullCodeGenerator::Generate() {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
__ AssertNotSmi(ecx);
- __ CmpObjectType(ecx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx);
__ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -119,7 +120,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -136,7 +137,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_real_stack_limit(isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
@@ -172,16 +173,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(edx); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(edx); // Restore new target.
+ }
}
function_in_register = false;
// Context is returned in eax. It replaces the context passed to us.
@@ -218,11 +229,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -233,37 +244,36 @@ void FullCodeGenerator::Generate() {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, edi, ebx, edx);
+ SetVar(this_function_var, edi, ebx, ecx);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- Label non_adaptor_frame;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &non_adaptor_frame);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- __ bind(&non_adaptor_frame);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-
- Label non_construct_frame, done;
- __ j(not_equal, &non_construct_frame);
-
- // Construct frame
- __ mov(eax,
- Operand(eax, ConstructFrameConstants::kOriginalConstructorOffset));
- __ jmp(&done);
-
- // Non-construct frame
- __ bind(&non_construct_frame);
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
-
- __ bind(&done);
- SetVar(new_target_var, eax, ebx, edx);
+ SetVar(new_target_var, edx, ebx, ecx);
+ }
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+
+ __ mov(RestParamAccessDescriptor::parameter_count(),
+ Immediate(Smi::FromInt(num_parameters)));
+ __ lea(RestParamAccessDescriptor::parameter_pointer(),
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Immediate(Smi::FromInt(rest_index)));
+ function_in_register = false;
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+ SetVar(rest_param, eax, ebx, edx);
}
Variable* arguments = scope()->arguments();
@@ -296,7 +306,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -401,7 +411,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
__ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -625,9 +635,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ test(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -787,7 +796,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
__ push(
Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -841,7 +850,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VisitForStackValue(declaration->fun());
__ push(
Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -852,7 +861,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -860,7 +869,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -982,7 +991,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -993,9 +1002,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime, use_cache, fixed_array;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- __ j(below_equal, &call_runtime);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
+ __ j(equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1009,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -1040,7 +1048,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register eax. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
@@ -1048,15 +1055,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-
- __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
- __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
- __ j(above, &non_proxy);
- __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(ebx); // Smi
+ __ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
@@ -1085,18 +1084,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK(Smi::FromInt(0) == 0);
- __ test(edx, edx);
- __ j(zero, &update_each);
-
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, loop_statement.continue_label());
@@ -1154,8 +1147,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ push(Immediate(info));
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(eax);
}
@@ -1168,7 +1161,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1181,7 +1174,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1196,10 +1189,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
@@ -1225,9 +1217,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
Immediate(isolate()->factory()->native_context_map()));
__ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
@@ -1249,19 +1241,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
}
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that last extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an esi-based operand (the write barrier cannot be allowed to
@@ -1293,7 +1284,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ mov(eax, isolate()->factory()->undefined_value());
} else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1306,7 +1297,10 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ ContextOperand(LoadDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
__ mov(LoadDescriptor::NameRegister(), var->name());
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -1348,7 +1342,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1375,7 +1369,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1386,53 +1380,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
+ __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
+ __ Move(ecx, Immediate(expr->pattern()));
+ __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(eax);
}
@@ -1461,15 +1414,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
@@ -1510,12 +1461,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1533,7 +1480,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1542,7 +1489,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1569,7 +1518,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1596,9 +1545,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1610,7 +1561,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1622,12 +1573,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1636,7 +1587,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
__ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1662,15 +1613,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1695,36 +1644,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax); // array literal.
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(Smi::FromInt(array_index)));
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_constant_fast_elements) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ mov(ecx, Immediate(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(Smi::FromInt(array_index)));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1734,7 +1664,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(eax);
result_saved = false;
}
@@ -1748,14 +1677,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -2035,7 +1963,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ Set(eax, 1);
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2055,8 +1983,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ test(eax, eax);
- __ j(zero, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(not_equal, &l_try);
// result.value
__ pop(load_receiver); // result
@@ -2147,7 +2075,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2164,11 +2092,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -2204,7 +2131,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ push(Immediate(key->value()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2221,7 +2148,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2347,7 +2274,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(eax);
}
@@ -2362,24 +2289,24 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2415,7 +2342,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2463,7 +2390,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), eax);
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2490,11 +2417,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
- __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(StoreDescriptor::ReceiverRegister(),
+ ContextOperand(StoreDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2504,11 +2434,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2518,11 +2448,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &const_error, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2531,24 +2461,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(equal, &uninitialized_this);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2557,15 +2488,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2578,9 +2508,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2597,12 +2527,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2619,8 +2545,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ push(Immediate(key->value()));
__ push(eax);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2630,10 +2555,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
__ push(eax);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2648,13 +2572,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
-
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2761,7 +2680,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2819,7 +2738,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2840,7 +2759,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2875,7 +2794,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2894,7 +2813,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(callee->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ push(eax); // Function.
__ push(edx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2944,7 +2863,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Set(eax, arg_count);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -2986,8 +2905,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3000,8 +2919,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ mov(result_register(),
+ FieldOperand(result_register(), HeapObject::kMapOffset));
+ __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3014,20 +2938,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into ecx.
+ // Load new target into edx.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(ecx, result_register());
+ __ mov(edx, result_register());
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(ebx);
- __ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3058,7 +2977,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3072,7 +2991,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -3116,9 +3035,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
+ Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3231,42 +3150,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- Register map = ebx;
- __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
- __ j(less, if_false);
- __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(less_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3343,52 +3227,40 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
- __ JumpIfSmi(eax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
- // Map is now in eax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(eax, &null, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
+ __ j(below, &null, Label::kNear);
+
+ // Return 'Function' for JSFunction objects.
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &function, Label::kNear);
// Check if the constructor in the map is a JS function.
__ GetMapConstructor(eax, eax, ebx);
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor);
+ __ j(not_equal, &non_function_constructor, Label::kNear);
// eax now contains the constructor function. Grab the
// instance class name from there.
__ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, isolate()->factory()->null_value());
+ __ jmp(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
__ mov(eax, isolate()->factory()->Function_string());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
__ mov(eax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
// All done.
__ bind(&done);
@@ -3438,43 +3310,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = eax;
- Register result = eax;
- Register scratch = ecx;
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3584,7 +3419,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(eax, &done_convert, Label::kNear);
__ Push(eax);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(eax);
}
@@ -3605,7 +3440,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ j(below_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ Push(eax);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(eax);
}
@@ -3745,54 +3580,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ebx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(eax, Immediate(0));
- __ jmp(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ mov(ebx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(ebx);
-
- __ mov(eax, ebx);
- __ lea(edx, Operand(edx, ebx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- __ push(Operand(edx, -1 * kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ebx);
- __ j(not_zero, &loop);
- }
-
- __ bind(&args_set_up);
-
- __ mov(edx, Operand(esp, eax, times_pointer_size, 1 * kPointerSize));
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0 * kPointerSize));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3831,6 +3618,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(eax);
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4119,8 +3917,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -4133,7 +3930,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(eax);
@@ -4144,9 +3941,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as receiver.
__ push(Immediate(isolate()->factory()->undefined_value()));
- __ mov(eax, GlobalObjectOperand());
- __ mov(eax, FieldOperand(eax, JSGlobalObject::kNativeContextOffset));
- __ mov(eax, ContextOperand(eax, expr->context_index()));
+ __ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -4154,7 +3949,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Set(eax, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4225,8 +4020,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4235,9 +4029,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ push(GlobalObjectOperand());
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4249,7 +4044,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(eax);
}
} else {
@@ -4529,12 +4324,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4572,12 +4363,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4648,8 +4435,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, edx);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, edx);
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
@@ -4694,7 +4481,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4807,9 +4594,9 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ push(Immediate(Smi::FromInt(0)));
+ // code.
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
@@ -4877,7 +4664,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Immediate(SmiFromSlot(slot)));
}
@@ -4924,8 +4711,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
- Assembler::set_target_address_at(call_target_address,
- unoptimized_code,
+ Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
+ call_target_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
diff --git a/deps/v8/src/full-codegen/mips/OWNERS b/deps/v8/src/full-codegen/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/full-codegen/mips/OWNERS
+++ b/deps/v8/src/full-codegen/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 3a322ea315..07e9fdfc94 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -12,14 +12,14 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/mips/code-stubs-mips.h"
#include "src/mips/macro-assembler-mips.h"
@@ -101,6 +101,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o a1: the JS function object being called (i.e. ourselves)
+// o a3: the new target value
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -130,7 +131,7 @@ void FullCodeGenerator::Generate() {
__ AssertNotSmi(a2);
__ GetObjectType(a2, a2, a2);
__ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -139,7 +140,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -151,7 +152,7 @@ void FullCodeGenerator::Generate() {
__ Subu(t5, sp, Operand(locals_count * kPointerSize));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t5, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
@@ -190,16 +191,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(a3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(a1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(a3); // Restore new target.
+ }
}
function_in_register_a1 = false;
// Context is returned in v0. It replaces the context passed to us.
@@ -217,13 +228,13 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ lw(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sw(a0, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), a0, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, a0, &done);
@@ -233,11 +244,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_a1| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -248,41 +259,38 @@ void FullCodeGenerator::Generate() {
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, a1, a2, a3);
+ SetVar(this_function_var, a1, a0, a2);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, a3, a0, a2);
+ }
- // Get the frame pointer for the calling frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, a1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ li(RestParamAccessDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ Addu(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(RestParamAccessDescriptor::rest_parameter_index(),
+ Operand(Smi::FromInt(rest_index)));
+ DCHECK(a1.is(RestParamAccessDescriptor::rest_parameter_index()));
function_in_register_a1 = false;
- Label non_construct_frame, done;
- __ Branch(&non_construct_frame, ne, a1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-
- __ lw(v0,
- MemOperand(a2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ Branch(&done);
-
- __ bind(&non_construct_frame);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, v0, a2, a3);
+ SetVar(rest_param, v0, a1, a2);
}
Variable* arguments = scope()->arguments();
@@ -316,7 +324,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -434,7 +442,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in v0.
__ push(v0);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -674,8 +682,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ mov(at, zero_reg);
- Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
}
@@ -715,7 +723,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -821,7 +829,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ sw(at, ContextOperand(cp, variable->index()));
+ __ sw(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -844,7 +852,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
__ Push(a2, a0);
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -879,7 +887,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ sw(result_register(), ContextOperand(cp, variable->index()));
+ __ sw(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -901,7 +909,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -913,7 +921,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -921,7 +929,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1045,7 +1053,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1056,9 +1064,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(a0, a1, a1);
- __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+ __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1075,7 +1082,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1108,7 +1115,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register v0. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(a1);
@@ -1116,13 +1122,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
- __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
- __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
@@ -1155,16 +1155,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, t0, Operand(a2));
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(&update_each, eq, a2, Operand(zero_reg));
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(a1, a3); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(a3, result_register());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1224,8 +1219,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(v0);
}
@@ -1239,7 +1234,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1253,7 +1248,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
Operand(isolate()->factory()->home_object_symbol()));
__ lw(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1269,12 +1264,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ lw(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1294,11 +1289,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kNativeContextMapRootIndex);
__ Branch(&fast, eq, temp, Operand(t0));
- // Check that extension is NULL.
- __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ lw(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ Branch(&loop);
__ bind(&fast);
}
@@ -1319,23 +1314,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ lw(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that last extension is "the hole".
+ __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
@@ -1365,7 +1360,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ Branch(done);
@@ -1378,7 +1373,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
__ li(LoadDescriptor::NameRegister(), Operand(var->name()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -1422,7 +1417,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1450,7 +1445,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
}
@@ -1460,49 +1455,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // t1 = materialized value (RegExp literal)
- // t0 = JS function, literals array
- // a3 = literal index
- // a2 = RegExp pattern
- // a1 = RegExp flags
- // a0 = RegExp literal clone
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ lw(t1, FieldMemOperand(t0, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, t1, Operand(at));
-
- // Create regexp literal using runtime function.
- // Result will be in v0.
- __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a2, Operand(expr->pattern()));
- __ li(a1, Operand(expr->flags()));
- __ Push(t0, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(t1, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(t1, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(t1);
-
- __ bind(&allocated);
-
- // After this, registers are used as follows:
- // v0: Newly allocated regexp.
- // t1: Materialized regexp.
- // a2: temp.
- __ CopyFields(v0, t1, a2.bit(), size / kPointerSize);
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->pattern()));
+ __ li(a0, Operand(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(v0);
}
@@ -1529,13 +1487,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1575,12 +1532,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1602,7 +1555,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1613,7 +1566,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1640,7 +1595,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1668,9 +1623,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1683,7 +1640,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1696,13 +1653,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1712,7 +1669,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ lw(a0, MemOperand(sp));
__ push(a0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1739,13 +1696,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1769,36 +1725,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(v0); // array literal
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ li(StoreDescriptor::NameRegister(),
- Operand(Smi::FromInt(array_index)));
- __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, kPointerSize));
- __ mov(StoreDescriptor::ValueRegister(), result_register());
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(a1, offset, result_register(), a2,
- kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ li(a3, Operand(Smi::FromInt(array_index)));
- __ mov(a0, result_register());
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
+ __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1809,7 +1747,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(); // literal index
__ Pop(v0);
result_saved = false;
}
@@ -1823,14 +1760,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(v0);
@@ -2115,7 +2051,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
__ mov(a1, a0);
__ sw(a1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ li(a0, Operand(1));
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2135,7 +2071,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ Branch(&l_try, eq, v0, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&l_try, ne, result_register(), Operand(at));
// result.value
__ pop(load_receiver); // result
@@ -2228,7 +2165,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
DCHECK(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2245,12 +2182,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
- __ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ pop(a2);
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -2286,7 +2221,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2303,7 +2238,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2363,12 +2298,10 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
+ __ AddBranchOvf(v0, left, Operand(right), &stub_call);
break;
case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ BranchOnOverflow(&stub_call, scratch1);
+ __ SubBranchOvf(v0, left, Operand(right), &stub_call);
break;
case Token::MUL: {
__ SmiUntag(scratch1, right);
@@ -2429,7 +2362,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(v0);
}
@@ -2444,19 +2377,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2466,7 +2399,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2503,7 +2436,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2551,7 +2484,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ mov(StoreDescriptor::NameRegister(), result_register());
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2581,11 +2514,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ lw(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2596,12 +2529,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&assign, ne, a3, Operand(t0));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2612,11 +2545,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&const_error, ne, a3, Operand(at));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2626,23 +2559,24 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&uninitialized_this, eq, a3, Operand(at));
__ li(a0, Operand(var->name()));
__ Push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ li(a1, Operand(var->name()));
__ li(a0, Operand(Smi::FromInt(language_mode())));
__ Push(v0, cp, a1, a0); // Value, context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2651,13 +2585,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2670,9 +2604,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2689,12 +2623,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2712,8 +2642,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(v0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2724,10 +2653,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(v0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2744,12 +2672,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2860,7 +2784,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2920,7 +2844,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2942,7 +2866,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2976,7 +2900,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(t3, t2, t1, t0);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2997,7 +2921,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(callee->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(v0, v1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3049,7 +2973,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -3091,8 +3015,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3105,8 +3029,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ lw(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ lw(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3119,20 +3050,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into t0.
+ // Load new target into a3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(t0, result_register());
+ __ mov(a3, result_register());
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(a2);
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3163,7 +3089,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3179,7 +3105,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ Split(ge, a1, Operand(FIRST_JS_RECEIVER_TYPE),
if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3224,7 +3150,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a2);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
__ Branch(if_false);
context()->Plug(if_true, if_false);
@@ -3342,44 +3268,9 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(v0, if_false);
- Register map = a1;
- Register type_reg = a2;
- __ GetObjectType(v0, map, type_reg);
- __ Subu(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne,
- a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
- if_true, if_false, fall_through);
+ Split(eq, a1, Operand(JS_PROXY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3451,26 +3342,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(v0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
- __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&null, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
// Check if the constructor in the map is a JS function.
Register instance_type = a2;
@@ -3547,45 +3426,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = v0;
- Register result = v0;
- Register scratch0 = t5;
- Register scratch1 = a1;
-
- if (index->value() == 0) {
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch1, Operand(stamp));
- __ lw(scratch1, MemOperand(scratch1));
- __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch1, Operand(scratch0));
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ li(a1, Operand(index));
- __ Move(a0, object);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3700,7 +3540,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(v0, &done_convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(v0);
}
@@ -3720,7 +3560,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
__ bind(&convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(v0);
}
@@ -3862,63 +3702,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into a3.
- __ lw(a3, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(t0, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, t0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(a0, zero_reg);
- __ Branch(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(a1, a1);
-
- __ mov(a0, a1);
-
- // Get arguments pointer in a2.
- __ sll(at, a1, kPointerSizeLog2);
- __ addu(a2, a2, at);
- __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ lw(t0, MemOperand(a2));
- __ Push(t0);
- __ Addu(a1, a1, Operand(-1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
- }
-
- __ bind(&args_set_up);
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(at, at, Operand(sp));
- __ lw(a1, MemOperand(at, 0));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, result_register());
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3954,6 +3737,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(v0);
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lw(v0, FieldMemOperand(v0, Map::kPrototypeOffset));
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4031,8 +3825,7 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
+ __ AddBranchOvf(string_length, string_length, Operand(scratch1), &bailout);
__ Branch(&loop, lt, element, Operand(elements_end));
// If array_length is 1, return elements[0], a string.
@@ -4065,8 +3858,7 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
__ And(scratch3, scratch2, Operand(0x80000000));
__ Branch(&bailout, ne, scratch3, Operand(zero_reg));
- __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
+ __ AddBranchOvf(string_length, string_length, Operand(scratch2), &bailout);
__ SmiUntag(string_length);
// Bailout for large object allocations.
@@ -4210,9 +4002,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
- __ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -4224,7 +4014,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(v0);
@@ -4236,9 +4026,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ push(v0);
- __ lw(v0, GlobalObjectOperand());
- __ lw(v0, FieldMemOperand(v0, JSGlobalObject::kNativeContextOffset));
- __ lw(v0, ContextOperand(v0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -4246,7 +4034,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4319,8 +4107,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4329,10 +4116,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ lw(a2, GlobalObjectOperand());
+ __ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4344,7 +4131,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(v0);
}
} else {
@@ -4536,10 +4323,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
Register scratch1 = a1;
- Register scratch2 = t0;
__ li(scratch1, Operand(Smi::FromInt(count_value)));
- __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
- __ BranchOnNoOverflow(&done, scratch2);
+ __ AddBranchNoOvf(v0, v0, Operand(scratch1), &done);
// Call stub. Undo operation first.
__ Move(v0, a0);
__ jmp(&stub_call);
@@ -4620,12 +4405,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4664,12 +4445,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4740,9 +4517,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(v0, if_false);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(if_false, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1,
@@ -4787,7 +4564,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
@@ -4888,7 +4665,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ lw(dst, ContextOperand(cp, context_index));
+ __ lw(dst, ContextMemOperand(cp, context_index));
}
@@ -4898,14 +4675,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ li(at, Operand(Smi::FromInt(0)));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, at);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ lw(at, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -4973,7 +4749,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
@@ -4988,7 +4764,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
Address branch_address = pc - 6 * kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 1);
switch (target_state) {
case INTERRUPT:
@@ -5016,7 +4793,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_immediate_load_address = pc - 4 * kInstrSize;
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_immediate_load_address,
+ Assembler::set_target_address_at(isolate, pc_immediate_load_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
diff --git a/deps/v8/src/full-codegen/mips64/OWNERS b/deps/v8/src/full-codegen/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/full-codegen/mips64/OWNERS
+++ b/deps/v8/src/full-codegen/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index a51e873709..44dd791a59 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -12,14 +12,14 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
@@ -101,6 +101,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o a1: the JS function object being called (i.e. ourselves)
+// o a3: the new target value
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -130,7 +131,7 @@ void FullCodeGenerator::Generate() {
__ AssertNotSmi(a2);
__ GetObjectType(a2, a2, a2);
__ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
- Operand(FIRST_SPEC_OBJECT_TYPE));
+ Operand(FIRST_JS_RECEIVER_TYPE));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -138,7 +139,7 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -150,7 +151,7 @@ void FullCodeGenerator::Generate() {
__ Dsubu(t1, sp, Operand(locals_count * kPointerSize));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t1, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
@@ -189,16 +190,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(a3); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(a1);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(a3); // Restore new target.
+ }
}
function_in_register_a1 = false;
// Context is returned in v0. It replaces the context passed to us.
@@ -216,13 +227,13 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ sd(a0, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), a0, a2,
+ kRAHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, a0, &done);
@@ -232,11 +243,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_a1| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -247,40 +258,36 @@ void FullCodeGenerator::Generate() {
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, a1, a2, a3);
+ SetVar(this_function_var, a1, a0, a2);
}
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- // Get the frame pointer for the calling frame.
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne, a1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
- function_in_register_a1 = false;
+ SetVar(new_target_var, a3, a0, a2);
+ }
- Label non_construct_frame, done;
- __ Branch(&non_construct_frame, ne, a1,
- Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- __ ld(v0,
- MemOperand(a2, ConstructFrameConstants::kOriginalConstructorOffset));
- __ Branch(&done);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- __ bind(&non_construct_frame);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ __ li(RestParamAccessDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ Daddu(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ li(RestParamAccessDescriptor::rest_parameter_index(),
+ Operand(Smi::FromInt(rest_index)));
+ function_in_register_a1 = false;
- SetVar(new_target_var, v0, a2, a3);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, v0, a1, a2);
}
Variable* arguments = scope()->arguments();
@@ -314,7 +321,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -434,7 +441,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in v0.
__ push(v0);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -674,8 +681,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
__ mov(a0, result_register());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ mov(at, zero_reg);
- Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ Split(eq, result_register(), Operand(at), if_true, if_false, fall_through);
}
@@ -715,7 +722,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -821,7 +828,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ sd(at, ContextOperand(cp, variable->index()));
+ __ sd(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -844,7 +851,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
__ Push(a2, a0);
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -879,7 +886,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ sd(result_register(), ContextOperand(cp, variable->index()));
+ __ sd(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -901,7 +908,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -913,7 +920,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
__ Push(a1, a0);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -921,7 +928,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1045,7 +1052,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1056,9 +1063,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(a0, a1, a1);
- __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+ __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1075,7 +1081,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1108,7 +1114,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register v0. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(a1);
@@ -1116,13 +1121,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
- __ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ld(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
- __ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));
@@ -1155,16 +1154,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, a4, Operand(a2));
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(&update_each, eq, a2, Operand(zero_reg));
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(a1, a3); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mov(a3, result_register());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1224,8 +1218,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(v0);
}
@@ -1239,7 +1233,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1253,7 +1247,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
Operand(isolate()->factory()->home_object_symbol()));
__ ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1269,12 +1263,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ld(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ ld(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ ld(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1294,11 +1288,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ LoadRoot(a4, Heap::kNativeContextMapRootIndex);
__ Branch(&fast, eq, temp, Operand(a4));
- // Check that extension is NULL.
- __ ld(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ ld(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ ld(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ Branch(&loop);
__ bind(&fast);
}
@@ -1319,23 +1313,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ ld(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ ld(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ Branch(slow, ne, temp, Operand(zero_reg));
+ // Check that last extension is "the hole".
+ __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
@@ -1365,7 +1359,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ Branch(done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ Branch(done);
@@ -1378,7 +1372,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
__ li(LoadDescriptor::NameRegister(), Operand(var->name()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -1422,7 +1416,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ Branch(&done, ne, at, Operand(zero_reg));
__ li(a0, Operand(var->name()));
__ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
} else {
// Uninitialized legacy const bindings are unholed.
@@ -1450,7 +1444,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
}
@@ -1460,49 +1454,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // a5 = materialized value (RegExp literal)
- // a4 = JS function, literals array
- // a3 = literal index
- // a2 = RegExp pattern
- // a1 = RegExp flags
- // a0 = RegExp literal clone
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ ld(a5, FieldMemOperand(a4, literal_offset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&materialized, ne, a5, Operand(at));
-
- // Create regexp literal using runtime function.
- // Result will be in v0.
- __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a2, Operand(expr->pattern()));
- __ li(a1, Operand(expr->flags()));
- __ Push(a4, a3, a2, a1);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(a5, v0);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ Push(a5, a0);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(a5);
-
- __ bind(&allocated);
-
- // After this, registers are used as follows:
- // v0: Newly allocated regexp.
- // a5: Materialized regexp.
- // a2: temp.
- __ CopyFields(v0, a5, a2.bit(), size / kPointerSize);
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+ __ li(a1, Operand(expr->pattern()));
+ __ li(a0, Operand(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(v0);
}
@@ -1529,13 +1486,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1575,12 +1531,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(a0));
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1602,7 +1554,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1613,7 +1565,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1640,7 +1594,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1668,9 +1622,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1683,7 +1639,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1696,13 +1652,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1712,7 +1668,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ ld(a0, MemOperand(sp));
__ push(a0);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1739,13 +1695,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(a0, result_register());
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
__ Push(a3, a2, a1, a0);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1769,36 +1724,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(v0); // array literal
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ li(StoreDescriptor::NameRegister(),
- Operand(Smi::FromInt(array_index)));
- __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, kPointerSize));
- __ mov(StoreDescriptor::ValueRegister(), result_register());
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset));
- __ sd(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
- __ RecordWriteField(a1, offset, result_register(), a2,
- kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- __ li(a3, Operand(Smi::FromInt(array_index)));
- __ mov(a0, result_register());
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
+ __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ mov(StoreDescriptor::ValueRegister(), result_register());
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1809,7 +1746,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(); // literal index
__ Pop(v0);
result_saved = false;
}
@@ -1823,14 +1759,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Pop(); // literal index
context()->PlugTOS();
} else {
context()->Plug(v0);
@@ -2113,7 +2048,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
__ mov(a1, a0);
__ sd(a1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ li(a0, Operand(1));
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2133,7 +2068,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a0, v0);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ Branch(&l_try, eq, v0, Operand(zero_reg));
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ __ Branch(&l_try, ne, result_register(), Operand(at));
// result.value
__ pop(load_receiver); // result
@@ -2228,7 +2164,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
DCHECK(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2245,12 +2181,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
- __ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ pop(a2);
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -2286,7 +2220,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2305,7 +2239,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2430,7 +2364,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(v0);
}
@@ -2445,19 +2379,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
__ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2467,7 +2401,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2504,7 +2438,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2552,7 +2486,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), result_register());
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2582,11 +2516,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ ld(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2597,12 +2531,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&assign, ne, a3, Operand(a4));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2613,11 +2547,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&const_error, ne, a3, Operand(at));
__ li(a3, Operand(var->name()));
__ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2627,11 +2561,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ Branch(&uninitialized_this, eq, a3, Operand(at));
__ li(a0, Operand(var->name()));
__ Push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ li(a4, Operand(var->name()));
@@ -2641,13 +2576,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// jssp[16] : context.
// jssp[24] : value.
__ Push(v0, cp, a4, a3);
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ ld(a2, location);
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -2656,13 +2591,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ li(a0, Operand(var->name()));
__ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2675,9 +2610,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2694,12 +2629,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2717,8 +2648,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(v0);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2729,10 +2659,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(v0);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2749,12 +2678,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(v0);
@@ -2865,7 +2790,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2925,7 +2850,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2947,7 +2872,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2980,7 +2905,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(a6, a5, a4, a1);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -3001,7 +2926,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(callee->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(v0, v1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3053,7 +2978,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -3095,8 +3020,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3109,8 +3034,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ ld(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ ld(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3123,20 +3055,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into a4.
+ // Load new target into a3.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(a4, result_register());
+ __ mov(a3, result_register());
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
__ ld(a1, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(a2);
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3167,7 +3094,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3183,7 +3110,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+ Split(ge, a1, Operand(FIRST_JS_RECEIVER_TYPE),
if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
@@ -3228,7 +3155,7 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
__ JumpIfSmi(v0, if_false);
__ GetObjectType(v0, a1, a2);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
__ Branch(if_false);
context()->Plug(if_true, if_false);
@@ -3346,44 +3273,9 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(v0, if_false);
- Register map = a1;
- Register type_reg = a2;
- __ GetObjectType(v0, map, type_reg);
- __ Subu(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&check_frame_marker, ne,
- a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ GetObjectType(v0, a1, a1);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
- if_true, if_false, fall_through);
+ Split(eq, a1, Operand(JS_PROXY_TYPE), if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3455,26 +3347,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(v0, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1); // Map is now in v0.
- __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&null, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ Branch(&function, eq, a1, Operand(JS_FUNCTION_TYPE));
// Check if the constructor in the map is a JS function.
Register instance_type = a2;
@@ -3551,45 +3431,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = v0;
- Register result = v0;
- Register scratch0 = t1;
- Register scratch1 = a1;
-
- if (index->value() == 0) {
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ li(scratch1, Operand(stamp));
- __ ld(scratch1, MemOperand(scratch1));
- __ ld(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ Branch(&runtime, ne, scratch1, Operand(scratch0));
- __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ li(a1, Operand(index));
- __ Move(a0, object);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3705,7 +3546,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(v0, &done_convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(v0);
}
@@ -3725,7 +3566,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
__ bind(&convert);
__ Push(v0);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(v0);
}
@@ -3867,63 +3708,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Load original constructor into a3.
- __ ld(a3, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a4, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a4,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(a0, zero_reg);
- __ Branch(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(a1, a1);
-
- __ mov(a0, a1);
-
- // Get arguments pointer in a2.
- __ dsll(at, a1, kPointerSizeLog2);
- __ Daddu(a2, a2, Operand(at));
- __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ ld(a4, MemOperand(a2));
- __ Push(a4);
- __ Daddu(a1, a1, Operand(-1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
- }
-
- __ bind(&args_set_up);
- __ dsll(at, a0, kPointerSizeLog2);
- __ Daddu(at, at, Operand(sp));
- __ ld(a1, MemOperand(at, 0));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, result_register());
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3959,6 +3743,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(v0);
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ ld(v0, FieldMemOperand(v0, Map::kPrototypeOffset));
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4215,9 +4010,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
- __ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a1, FieldMemOperand(a1, JSGlobalObject::kNativeContextOffset));
- __ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
__ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -4229,7 +4022,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(v0);
@@ -4241,9 +4034,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ push(v0);
- __ ld(v0, GlobalObjectOperand());
- __ ld(v0, FieldMemOperand(v0, JSGlobalObject::kNativeContextOffset));
- __ ld(v0, ContextOperand(v0, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -4251,7 +4042,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4323,8 +4114,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4333,10 +4123,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ ld(a2, GlobalObjectOperand());
+ __ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4348,7 +4138,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(a2));
__ li(a2, Operand(var->name()));
__ Push(context_register(), a2);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(v0);
}
} else {
@@ -4624,12 +4414,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4668,12 +4454,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4744,9 +4526,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(v0, if_false);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(if_false, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1,
@@ -4791,7 +4573,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -4894,7 +4676,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ld(dst, ContextOperand(cp, context_index));
+ __ ld(dst, ContextMemOperand(cp, context_index));
}
@@ -4904,14 +4686,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ li(at, Operand(Smi::FromInt(0)));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, at);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ ld(at, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -4977,7 +4758,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ li(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
@@ -4992,7 +4773,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
Address branch_address = pc - 8 * kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 1);
switch (target_state) {
case INTERRUPT:
@@ -5024,7 +4806,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_immediate_load_address = pc - 6 * kInstrSize;
// Replace the stack check address in the load-immediate (6-instr sequence)
// with the entry address of the replacement code.
- Assembler::set_target_address_at(pc_immediate_load_address,
+ Assembler::set_target_address_at(isolate, pc_immediate_load_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index d6ce1c02cd..d9c324c424 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -4,14 +4,14 @@
#if V8_TARGET_ARCH_PPC
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/ppc/code-stubs-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
@@ -89,6 +89,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o r4: the JS function object being called (i.e., ourselves)
+// o r6: the new target value
// o cp: our context
// o fp: our caller's frame pointer (aka r31)
// o sp: stack pointer
@@ -117,7 +118,7 @@ void FullCodeGenerator::Generate() {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
__ AssertNotSmi(r5);
- __ CompareObjectType(r5, r5, no_reg, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r5, r5, no_reg, FIRST_JS_RECEIVER_TYPE);
__ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -134,7 +135,7 @@ void FullCodeGenerator::Generate() {
__ addi(ip, ip, Operand(prologue_offset));
}
info->set_prologue_offset(prologue_offset);
- __ Prologue(info->IsCodePreAgingActive(), prologue_offset);
+ __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
{
Comment cmnt(masm_, "[ Allocate locals");
@@ -148,7 +149,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
__ cmpl(ip, r5);
__ bc_short(ge, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -185,16 +186,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(r4);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(r4);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(r6); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r4);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(r6); // Preserve new target.
+ }
}
function_in_register_r4 = false;
// Context is returned in r3. It replaces the context passed to us.
@@ -212,12 +223,12 @@ void FullCodeGenerator::Generate() {
// Load parameter from stack.
__ LoadP(r3, MemOperand(fp, parameter_offset), r0);
// Store it in the context.
- MemOperand target = ContextOperand(cp, var->index());
+ MemOperand target = ContextMemOperand(cp, var->index());
__ StoreP(r3, target, r0);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+ __ RecordWriteContextSlot(cp, target.offset(), r3, r5,
kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
@@ -228,11 +239,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register_r4| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -246,37 +257,34 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, r4, r3, r5);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, r6, r3, r5);
+ }
- // Get the frame pointer for the calling frame.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- // Skip the arguments adaptor frame if it exists.
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- Label skip;
- __ bne(&skip);
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ bind(&skip);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- // Check the marker in the calling frame.
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
- Label non_construct_frame, done;
+ __ LoadSmiLiteral(RestParamAccessDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ addi(RestParamAccessDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ LoadSmiLiteral(RestParamAccessDescriptor::rest_parameter_index(),
+ Smi::FromInt(rest_index));
function_in_register_r4 = false;
- __ bne(&non_construct_frame);
- __ LoadP(r3, MemOperand(
- r5, ConstructFrameConstants::kOriginalConstructorOffset));
- __ b(&done);
-
- __ bind(&non_construct_frame);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- SetVar(new_target_var, r3, r5, r6);
+ SetVar(rest_param, r3, r4, r5);
}
Variable* arguments = scope()->arguments();
@@ -310,7 +318,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -429,7 +437,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r3
__ push(r3);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -649,8 +657,8 @@ void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ cmpi(result_register(), Operand::Zero());
- Split(ne, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
@@ -686,7 +694,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
- return ContextOperand(scratch, var->index());
+ return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
@@ -786,7 +794,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ StoreP(ip, ContextOperand(cp, variable->index()), r0);
+ __ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -808,7 +816,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
__ Push(r5, r3);
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -843,7 +851,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ StoreP(result_register(), ContextOperand(cp, variable->index()), r0);
+ __ StoreP(result_register(), ContextMemOperand(cp, variable->index()),
+ r0);
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp, offset, result_register(), r5,
@@ -860,7 +869,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -872,7 +881,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r4, Operand(pairs));
__ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
__ Push(r4, r3);
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -880,7 +889,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -1006,7 +1015,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r3, &convert);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
__ bge(&done_convert);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1017,9 +1026,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
- __ ble(&call_runtime);
+ __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
+ __ beq(&call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1036,7 +1044,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r3); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1073,7 +1081,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ b(&exit);
// We got a fixed array in register r3. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
__ EmitLoadTypeFeedbackVector(r4);
@@ -1081,14 +1088,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ StoreP(
r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
-
- __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi indicates slow check
- __ LoadP(r5, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r5, r6, r6, LAST_JS_PROXY_TYPE);
- __ bgt(&non_proxy);
- __ LoadSmiLiteral(r4, Smi::FromInt(0)); // Zero indicates proxy
- __ bind(&non_proxy);
+ __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ LoadSmiLiteral(r3, Smi::FromInt(0));
@@ -1122,16 +1122,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r7, r5);
__ beq(&update_each);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ CmpSmiLiteral(r5, Smi::FromInt(0), r0);
- __ beq(&update_each);
-
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(r4, r6); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ mr(r6, r3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -1190,8 +1185,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(r3);
}
@@ -1205,7 +1200,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1219,7 +1214,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1235,13 +1230,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ LoadP(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
- __ LoadP(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1262,12 +1256,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
__ beq(&fast);
- // Check that extension is NULL.
- __ LoadP(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ LoadP(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
@@ -1288,25 +1281,23 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ LoadP(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
- // Check that last extension is NULL.
- __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ cmpi(temp, Operand::Zero());
- __ bne(slow);
+ // Check that last extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
- return ContextOperand(context, var->index());
+ return ContextMemOperand(context, var->index());
}
@@ -1334,7 +1325,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else { // LET || CONST
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ b(done);
@@ -1347,7 +1338,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
__ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -1390,7 +1381,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// binding in harmony mode.
__ mov(r3, Operand(var->name()));
__ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1417,7 +1408,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r3);
}
@@ -1427,49 +1418,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // r8 = materialized value (RegExp literal)
- // r7 = JS function, literals array
- // r6 = literal index
- // r5 = RegExp pattern
- // r4 = RegExp flags
- // r3 = RegExp literal clone
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ LoadP(r8, FieldMemOperand(r7, literal_offset), r0);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r8, ip);
- __ bne(&materialized);
-
- // Create regexp literal using runtime function.
- // Result will be in r3.
- __ LoadSmiLiteral(r6, Smi::FromInt(expr->literal_index()));
- __ mov(r5, Operand(expr->pattern()));
- __ mov(r4, Operand(expr->flags()));
- __ Push(r7, r6, r5, r4);
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mr(r8, r3);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
- __ b(&allocated);
-
- __ bind(&runtime_allocate);
- __ LoadSmiLiteral(r3, Smi::FromInt(size));
- __ Push(r8, r3);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r8);
-
- __ bind(&allocated);
- // After this, registers are used as follows:
- // r3: Newly allocated regexp.
- // r8: Materialized regexp.
- // r5: temp.
- __ CopyFields(r3, r8, r5.bit(), size / kPointerSize);
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ mov(r4, Operand(expr->pattern()));
+ __ LoadSmiLiteral(r3, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(r3);
}
@@ -1496,14 +1450,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
__ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
__ mov(r4, Operand(constant_properties));
int flags = expr->ComputeFlags();
__ LoadSmiLiteral(r3, Smi::FromInt(flags));
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(r6, r5, r4, r3);
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
@@ -1542,12 +1495,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(r3));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1569,7 +1518,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
__ push(r3);
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1580,7 +1529,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r3);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1606,7 +1557,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1634,9 +1585,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1649,7 +1602,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1662,13 +1615,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ mov(r3, Operand(Smi::FromInt(NONE)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r3, Operand(Smi::FromInt(NONE)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1678,7 +1631,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(result_saved);
__ LoadP(r3, MemOperand(sp));
__ push(r3);
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1706,13 +1659,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
__ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
__ mov(r4, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
__ LoadSmiLiteral(r3, Smi::FromInt(expr->ComputeFlags()));
__ Push(r6, r5, r4, r3);
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
@@ -1735,34 +1687,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(r3);
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
- Smi::FromInt(array_index));
- __ LoadP(StoreDescriptor::ReceiverRegister(),
- MemOperand(sp, kPointerSize));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_fast_elements) {
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ LoadP(r8, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
- __ StoreP(result_register(), FieldMemOperand(r4, offset), r0);
- // Update the write barrier for the array store.
- __ RecordWriteField(r4, offset, result_register(), r5, kLRHasBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- __ LoadSmiLiteral(r6, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
+ Smi::FromInt(array_index));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1773,7 +1708,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(r3);
result_saved = false;
}
@@ -1787,14 +1721,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(r3);
@@ -2077,7 +2010,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mr(r4, r3);
__ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ li(r3, Operand(1));
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2096,8 +2029,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ cmpi(r3, Operand::Zero());
- __ beq(&l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ bne(&l_try);
// result.value
__ pop(load_receiver); // result
@@ -2207,7 +2140,7 @@ void FullCodeGenerator::EmitGeneratorResume(
DCHECK(!result_register().is(r4));
__ Push(r4, result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ stop("not-reached");
@@ -2224,12 +2157,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
- __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
__ pop(r5);
__ LoadRoot(r6,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
@@ -2263,7 +2194,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2280,7 +2211,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2439,7 +2370,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(r3);
}
@@ -2454,19 +2385,19 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
__ push(r3);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2476,7 +2407,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2512,7 +2443,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2560,7 +2491,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), r3);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2589,11 +2520,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2604,12 +2535,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bne(&assign);
__ mov(r6, Operand(var->name()));
__ push(r6);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2620,11 +2551,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bne(&const_error);
__ mov(r6, Operand(var->name()));
__ push(r6);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2634,24 +2565,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ beq(&uninitialized_this);
__ mov(r4, Operand(var->name()));
__ push(r4);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(r3); // Value.
__ mov(r4, Operand(var->name()));
__ mov(r3, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r4, r3); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r4);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
@@ -2659,15 +2591,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(r3);
__ mov(r3, Operand(var->name()));
__ Push(cp, r3); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
@@ -2680,9 +2611,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2698,12 +2629,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
@@ -2721,8 +2648,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(r3);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2733,10 +2659,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(r3);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2747,12 +2672,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r3);
@@ -2863,7 +2784,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2922,7 +2843,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2943,7 +2864,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
@@ -2977,7 +2898,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Do the runtime call.
__ Push(r7, r6, r5, r4);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2997,7 +2918,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
DCHECK(!context_register().is(r5));
__ mov(r5, Operand(callee->name()));
__ Push(context_register(), r5);
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(r3, r4); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -3051,7 +2972,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -3093,8 +3014,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(r5);
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3107,8 +3028,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ __ Push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3121,20 +3049,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into r7.
+ // Load new target into r6.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mr(r7, result_register());
+ __ mr(r6, result_register());
// Load function and argument count into r1 and r0.
__ mov(r3, Operand(arg_count));
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(r5);
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3165,7 +3088,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3179,7 +3102,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -3223,9 +3146,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);
+ __ CompareObjectType(r3, r4, r5, FIRST_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
+ Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3348,44 +3271,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(r3, if_false);
- Register map = r4;
- Register type_reg = r5;
- __ LoadP(map, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ subi(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ cmpli(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&check_frame_marker);
- __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
- STATIC_ASSERT(StackFrame::CONSTRUCT < 0x4000);
- __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+ __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3460,26 +3346,16 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
+ // If the object is not a JSReceiver, we return null.
__ JumpIfSmi(r3, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r3, r4, FIRST_JS_RECEIVER_TYPE);
// Map is now in r3.
__ blt(&null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ beq(&function);
- __ cmpi(r4, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_SPEC_OBJECT_TYPE - 1);
+ // Return 'Function' for JSFunction objects.
+ __ cmpi(r4, Operand(JS_FUNCTION_TYPE));
__ beq(&function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
Register instance_type = r5;
@@ -3555,47 +3431,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = r3;
- Register result = r3;
- Register scratch0 = r11;
- Register scratch1 = r4;
-
- if (index->value() == 0) {
- __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch1, Operand(stamp));
- __ LoadP(scratch1, MemOperand(scratch1));
- __ LoadP(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
- __ cmp(scratch1, scratch0);
- __ bne(&runtime);
- __ LoadP(result,
- FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()),
- scratch0);
- __ b(&done);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch1);
- __ LoadSmiLiteral(r4, index);
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3699,7 +3534,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(r3, &done_convert);
__ Push(r3);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(r3);
}
@@ -3719,7 +3554,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ ble(&done_convert);
__ bind(&convert);
__ Push(r3);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(r3);
}
@@ -3847,62 +3682,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target.
- VisitForStackValue(args->at(0));
-
- // Evaluate super constructor (to stack and r4).
- VisitForAccumulatorValue(args->at(1));
- __ push(result_register());
- __ mr(r4, result_register());
-
- // Load original constructor into r6.
- __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r7, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
-
- // default constructor has no arguments, so no adaptor frame means no args.
- __ li(r3, Operand::Zero());
- __ b(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(r3);
-
- // Get arguments pointer in r5.
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ add(r5, r5, r0);
- __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
- Label loop;
- __ mtctr(r3);
- __ bind(&loop);
- // Pre-decrement in order to skip receiver.
- __ LoadPU(r7, MemOperand(r5, -kPointerSize));
- __ Push(r7);
- __ bdnz(&loop);
- }
-
- __ bind(&args_set_up);
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, r3);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3939,6 +3718,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(r3);
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadP(r3, FieldMemOperand(r3, Map::kPrototypeOffset));
+ context()->Plug(r3);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
@@ -4214,9 +4004,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
- __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r4, FieldMemOperand(r4, JSGlobalObject::kNativeContextOffset));
- __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
__ Pop(r5, r6);
__ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
__ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
@@ -4228,7 +4016,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ b(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(r3);
@@ -4240,9 +4028,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ push(r3);
- __ LoadP(r3, GlobalObjectOperand());
- __ LoadP(r3, FieldMemOperand(r3, JSGlobalObject::kNativeContextOffset));
- __ LoadP(r3, ContextOperand(r3, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), r3);
}
@@ -4250,7 +4036,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4323,8 +4109,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4333,10 +4118,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ LoadP(r5, GlobalObjectOperand());
+ __ LoadGlobalObject(r5);
__ mov(r4, Operand(var->name()));
__ Push(r5, r4);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r3);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
@@ -4348,7 +4133,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
DCHECK(!context_register().is(r5));
__ mov(r5, Operand(var->name()));
__ Push(context_register(), r5);
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(r3);
}
} else {
@@ -4618,12 +4403,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4661,12 +4442,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4739,8 +4516,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(r3, if_false);
__ CompareRoot(r3, Heap::kNullValueRootIndex);
__ beq(if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r3, r4, FIRST_JS_RECEIVER_TYPE);
__ blt(if_false);
// Check for callable or undetectable objects => false.
__ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
@@ -4786,7 +4563,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4883,7 +4660,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ LoadP(dst, ContextOperand(cp, context_index), r0);
+ __ LoadP(dst, ContextMemOperand(cp, context_index), r0);
}
@@ -4893,14 +4670,13 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ LoadP(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
__ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -4969,7 +4745,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Operand(SmiFromSlot(slot)));
}
@@ -4983,7 +4759,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
Code* replacement_code) {
Address mov_address = Assembler::target_address_from_return_address(pc);
Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
- CodePatcher patcher(cmp_address, 1);
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, cmp_address, 1);
switch (target_state) {
case INTERRUPT: {
@@ -5016,7 +4793,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
// Replace the stack check address in the mov sequence with the
// entry address of the replacement code.
- Assembler::set_target_address_at(mov_address, unoptimized_code,
+ Assembler::set_target_address_at(isolate, mov_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 4aa86d6bb2..615eb67ba6 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,14 +4,14 @@
#if V8_TARGET_ARCH_X64
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -82,6 +82,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o rdi: the JS function object being called (i.e. ourselves)
+// o rdx: the new target value
// o rsi: our context
// o rbp: our caller's frame pointer
// o rsp: stack pointer (pointing to return address)
@@ -108,7 +109,7 @@ void FullCodeGenerator::Generate() {
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
__ AssertNotSmi(rcx);
- __ CmpObjectType(rcx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rcx);
__ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -118,7 +119,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -133,10 +134,10 @@ void FullCodeGenerator::Generate() {
__ subp(rcx, Immediate(locals_count * kPointerSize));
__ CompareRoot(rcx, Heap::kRealStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
const int kMaxPushes = 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
@@ -145,7 +146,7 @@ void FullCodeGenerator::Generate() {
__ bind(&loop_header);
// Do pushes.
for (int i = 0; i < kMaxPushes; i++) {
- __ Push(rdx);
+ __ Push(rax);
}
// Continue loop if not done.
__ decp(rcx);
@@ -154,7 +155,7 @@ void FullCodeGenerator::Generate() {
int remaining = locals_count % kMaxPushes;
// Emit the remaining pushes.
for (int i = 0; i < remaining; i++) {
- __ Push(rdx);
+ __ Push(rax);
}
}
}
@@ -170,16 +171,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ Push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Push(rdx); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ Pop(rdx); // Restore new target.
+ }
}
function_in_register = false;
// Context is returned in rax. It replaces the context passed to us.
@@ -213,11 +224,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -228,38 +239,37 @@ void FullCodeGenerator::Generate() {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, rdi, rbx, rdx);
+ SetVar(this_function_var, rdi, rbx, rcx);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, rdx, rbx, rcx);
+ }
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- Label non_adaptor_frame;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &non_adaptor_frame);
- __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- __ bind(&non_adaptor_frame);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
- Label non_construct_frame, done;
- __ j(not_equal, &non_construct_frame);
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
- // Construct frame
- __ movp(rax,
- Operand(rax, ConstructFrameConstants::kOriginalConstructorOffset));
- __ jmp(&done);
+ __ Move(RestParamAccessDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ leap(RestParamAccessDescriptor::parameter_pointer(),
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ Move(RestParamAccessDescriptor::rest_parameter_index(),
+ Smi::FromInt(rest_index));
+ function_in_register = false;
- // Non-construct frame
- __ bind(&non_construct_frame);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
- __ bind(&done);
- SetVar(new_target_var, rax, rbx, rdx);
+ SetVar(rest_param, rax, rbx, rdx);
}
// Possibly allocate an arguments object.
@@ -294,7 +304,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -404,7 +414,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
__ Push(rax);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -637,9 +647,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ testp(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -799,7 +808,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -853,7 +862,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ Push(variable->name());
VisitForStackValue(declaration->fun());
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -864,7 +873,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -872,7 +881,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -997,7 +1006,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert, Label::kNear);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -1008,9 +1017,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
- __ j(below_equal, &call_runtime);
+ __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
+ __ j(equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1027,7 +1035,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1063,7 +1071,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
@@ -1071,14 +1078,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
- __ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
- __ j(above, &non_proxy);
- __ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
- __ bind(&non_proxy);
- __ Push(rbx); // Smi
+ __ Push(Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi).
@@ -1111,17 +1112,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ Cmp(rdx, Smi::FromInt(0));
- __ j(equal, &update_each, Label::kNear);
-
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ Push(rcx); // Enumerable.
__ Push(rbx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, loop_statement.continue_label());
@@ -1179,8 +1175,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ Push(info);
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(rax);
}
@@ -1194,7 +1190,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1208,7 +1204,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
isolate()->factory()->home_object_symbol());
__ movp(StoreDescriptor::ValueRegister(),
Operand(rsp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1223,10 +1219,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
@@ -1253,9 +1248,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
// Terminate at native context.
__ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
@@ -1277,19 +1272,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
__ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering rsi.
context = temp;
}
}
- // Check that last extension is NULL.
- __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that last extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an rsi-based operand (the write barrier cannot be allowed to
@@ -1321,7 +1315,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
} else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1335,7 +1329,7 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
__ Move(LoadDescriptor::NameRegister(), var->name());
- __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
CallLoadIC(typeof_mode);
@@ -1376,7 +1370,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1403,7 +1397,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1414,53 +1408,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // rdi = JS function.
- // rcx = literals array.
- // rbx = regexp literal.
- // rax = regexp literal clone.
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ movp(rbx, FieldOperand(rcx, literal_offset));
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in rax.
- __ Push(rcx);
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->pattern());
- __ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ movp(rbx, rax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ Push(rbx);
- __ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(rbx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ movp(rdx, FieldOperand(rbx, i));
- __ movp(rcx, FieldOperand(rbx, i + kPointerSize));
- __ movp(FieldOperand(rax, i), rdx);
- __ movp(FieldOperand(rax, i + kPointerSize), rcx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ movp(rdx, FieldOperand(rbx, size - kPointerSize));
- __ movp(FieldOperand(rax, size - kPointerSize), rdx);
- }
+ __ Move(rax, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, expr->pattern());
+ __ Move(rdx, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(rax);
}
@@ -1487,15 +1440,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<FixedArray> constant_properties = expr->constant_properties();
int flags = expr->ComputeFlags();
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
@@ -1536,12 +1487,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
__ Move(StoreDescriptor::NameRegister(), key->value());
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
@@ -1560,7 +1507,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ Push(Smi::FromInt(SLOPPY)); // Language mode
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1569,7 +1516,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1594,7 +1543,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1621,9 +1570,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1635,7 +1586,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1647,12 +1598,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1661,7 +1612,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
__ Push(Operand(rsp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1687,15 +1638,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(expr->ComputeFlags()));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1720,37 +1669,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ Push(rax); // array literal
- __ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
- __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_constant_fast_elements) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ movp(rbx, Operand(rsp, kPointerSize)); // Copy of array literal.
- __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ movp(FieldOperand(rbx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(rbx, offset, result_register(), rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ Move(rcx, Smi::FromInt(array_index));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ Move(StoreDescriptor::NameRegister(), Smi::FromInt(array_index));
+ __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1761,7 +1689,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(rax);
result_saved = false;
}
@@ -1775,14 +1702,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -2063,7 +1989,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ Set(rax, 1);
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2082,8 +2008,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // rax=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testp(result_register(), result_register());
- __ j(zero, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(not_equal, &l_try);
// result.value
__ Pop(load_receiver); // result
@@ -2173,7 +2099,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Push(rbx);
__ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2190,12 +2116,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ movp(rbx, GlobalObjectOperand());
- __ movp(rbx, FieldOperand(rbx, JSGlobalObject::kNativeContextOffset));
- __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
__ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
@@ -2228,7 +2152,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ Push(key->value());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2245,7 +2169,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2337,7 +2261,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ Push(rax);
}
@@ -2352,17 +2276,17 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ Push(Smi::FromInt(DONT_ENUM));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ Push(Smi::FromInt(DONT_ENUM));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
@@ -2372,7 +2296,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2408,7 +2332,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2456,7 +2380,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), rax);
__ Pop(StoreDescriptor::ReceiverRegister());
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2483,11 +2407,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(StoreDescriptor::NameRegister(), var->name());
- __ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2497,11 +2421,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2511,11 +2435,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &const_error, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2524,24 +2448,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(equal, &uninitialized_this);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(rax); // Value.
__ Push(rsi); // Context.
__ Push(var->name());
__ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2550,15 +2475,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Push(rax);
__ Push(rsi);
__ Push(var->name());
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2571,9 +2495,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2588,12 +2512,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2611,8 +2531,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ Push(key->value());
__ Push(rax);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2623,10 +2542,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
DCHECK(prop != NULL);
__ Push(rax);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2637,12 +2555,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(rax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2752,7 +2666,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2811,7 +2725,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2832,7 +2746,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
@@ -2867,7 +2781,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2885,7 +2799,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// the object holding it (returned in rdx).
__ Push(context_register());
__ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ Push(rax); // Function.
__ Push(rdx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2935,7 +2849,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -2977,8 +2891,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(rbx);
__ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2991,8 +2905,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ Push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ movp(result_register(),
+ FieldOperand(result_register(), HeapObject::kMapOffset));
+ __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3005,20 +2924,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into rcx.
+ // Load new target into rdx.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ movp(rcx, result_register());
+ __ movp(rdx, result_register());
// Load function and argument count into rdi and rax.
__ Set(rax, arg_count);
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(rbx);
- __ Move(rdx, SmiFromSlot(expr->CallFeedbackSlot()));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3050,7 +2964,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3064,7 +2978,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -3108,9 +3022,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_FUNCTION_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
+ Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3222,43 +3136,9 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(rax, if_false);
- Register map = rbx;
- __ movp(map, FieldOperand(rax, HeapObject::kMapOffset));
- __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
- __ j(less, if_false);
- __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(less_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
- // Get the frame pointer for the calling frame.
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &check_frame_marker);
- __ movp(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
- Smi::FromInt(StackFrame::CONSTRUCT));
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, JS_PROXY_TYPE, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3335,52 +3215,40 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
- __ JumpIfSmi(rax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
- // Map is now in rax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(rax, &null, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rax);
+ __ j(below, &null, Label::kNear);
+
+ // Return 'Function' for JSFunction objects.
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &function, Label::kNear);
// Check if the constructor in the map is a JS function.
__ GetMapConstructor(rax, rax, rbx);
__ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor);
+ __ j(not_equal, &non_function_constructor, Label::kNear);
// rax now contains the constructor function. Grab the
// instance class name from there.
__ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
__ movp(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(rax, Heap::kNullValueRootIndex);
+ __ jmp(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, isolate()->factory()->Function_string());
- __ jmp(&done);
+ __ LoadRoot(rax, Heap::kFunction_stringRootIndex);
+ __ jmp(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ LoadRoot(rax, Heap::kNullValueRootIndex);
+ __ LoadRoot(rax, Heap::kObject_stringRootIndex);
// All done.
__ bind(&done);
@@ -3430,51 +3298,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = rax;
- Register result = rax;
- Register scratch = rcx;
-
- if (FLAG_debug_code) {
- __ AssertNotSmi(object);
- __ CmpObjectType(object, JS_DATE_TYPE, scratch);
- __ Check(equal, kOperandIsNotADate);
- }
-
- if (index->value() == 0) {
- __ movp(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- Operand stamp_operand = __ ExternalOperand(stamp);
- __ movp(scratch, stamp_operand);
- __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ movp(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2);
- __ movp(arg_reg_1, object);
- __ Move(arg_reg_2, index, Assembler::RelocInfoNone());
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&done);
- }
-
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3580,7 +3403,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(rax, &done_convert, Label::kNear);
__ Push(rax);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(rax);
}
@@ -3601,7 +3424,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ j(below_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ Push(rax);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(rax);
}
@@ -3741,53 +3564,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rbx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rbx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ movp(rax, Immediate(0));
- __ jmp(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ movp(rbx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger64(rbx, rbx);
-
- __ movp(rax, rbx);
- __ leap(rdx, Operand(rdx, rbx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- __ Push(Operand(rdx, -1 * kPointerSize));
- __ subp(rdx, Immediate(kPointerSize));
- __ decp(rbx);
- __ j(not_zero, &loop);
- }
-
- __ bind(&args_set_up);
- __ movp(rdx, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
- __ movp(rdi, Operand(rsp, rax, times_pointer_size, 0 * kPointerSize));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, rax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3826,6 +3602,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(rax);
+ __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rax, FieldOperand(rax, Map::kPrototypeOffset));
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, return_result, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4136,9 +3923,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime, TAG_OBJECT);
- __ movp(rbx, GlobalObjectOperand());
- __ movp(rbx, FieldOperand(rbx, JSGlobalObject::kNativeContextOffset));
- __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
__ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
@@ -4149,7 +3934,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(rax);
@@ -4160,9 +3945,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
__ PushRoot(Heap::kUndefinedValueRootIndex);
- __ movp(rax, GlobalObjectOperand());
- __ movp(rax, FieldOperand(rax, JSGlobalObject::kNativeContextOffset));
- __ movp(rax, ContextOperand(rax, expr->context_index()));
+ __ LoadNativeContextSlot(expr->context_index(), rax);
}
@@ -4170,7 +3953,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4242,8 +4025,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4252,9 +4034,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ Push(GlobalObjectOperand());
+ __ movp(rax, NativeContextOperand());
+ __ Push(ContextOperand(rax, Context::EXTENSION_INDEX));
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4266,7 +4049,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(rax);
}
} else {
@@ -4544,12 +4327,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ Pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4587,12 +4366,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4663,8 +4438,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rdx);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rdx);
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
@@ -4709,7 +4484,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -4821,9 +4596,9 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ Push(Smi::FromInt(0));
+ // code.
+ __ movp(rax, NativeContextOperand());
+ __ Push(ContextOperand(rax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
@@ -4894,7 +4669,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
@@ -4939,8 +4714,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
- Assembler::set_target_address_at(call_target_address,
- unoptimized_code,
+ Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
+ call_target_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index 8b1e5e98d4..c38230ad1e 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -4,14 +4,14 @@
#if V8_TARGET_ARCH_X87
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
#include "src/x87/frames-x87.h"
namespace v8 {
@@ -83,6 +83,7 @@ class JumpPatchSite BASE_EMBEDDED {
//
// The live registers are:
// o edi: the JS function object being called (i.e. ourselves)
+// o edx: the new target value
// o esi: our context
// o ebp: our caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -109,7 +110,7 @@ void FullCodeGenerator::Generate() {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
__ AssertNotSmi(ecx);
- __ CmpObjectType(ecx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx);
__ Assert(above_equal, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -119,7 +120,7 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ __ Prologue(info->GeneratePreagedPrologue());
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
@@ -136,7 +137,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_real_stack_limit(isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
@@ -172,16 +173,26 @@ void FullCodeGenerator::Generate() {
if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
- __ CallRuntime(Runtime::kNewScriptContext, 2);
+ __ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
} else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(edx); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(edi);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+ __ pop(edx); // Restore new target.
+ }
}
function_in_register = false;
// Context is returned in eax. It replaces the context passed to us.
@@ -215,11 +226,11 @@ void FullCodeGenerator::Generate() {
}
}
}
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
- // Function register is trashed in case we bailout here. But since that
- // could happen only when we allocate a context the value of
- // |function_in_register| is correct.
+ // Register holding this function and new target are both trashed in case we
+ // bailout here. But since that can happen only when new target is not used
+ // and we allocate a context, the value of |function_in_register| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
@@ -228,39 +239,38 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, edi, ebx, edx);
+ SetVar(this_function_var, edi, ebx, ecx);
}
+ // Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- Label non_adaptor_frame;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &non_adaptor_frame);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- __ bind(&non_adaptor_frame);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-
- Label non_construct_frame, done;
- __ j(not_equal, &non_construct_frame);
-
- // Construct frame
- __ mov(eax,
- Operand(eax, ConstructFrameConstants::kOriginalConstructorOffset));
- __ jmp(&done);
-
- // Non-construct frame
- __ bind(&non_construct_frame);
- __ mov(eax, Immediate(isolate()->factory()->undefined_value()));
-
- __ bind(&done);
- SetVar(new_target_var, eax, ebx, edx);
+ SetVar(new_target_var, edx, ebx, ecx);
+ }
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+
+ __ mov(RestParamAccessDescriptor::parameter_count(),
+ Immediate(Smi::FromInt(num_parameters)));
+ __ lea(RestParamAccessDescriptor::parameter_pointer(),
+ Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(RestParamAccessDescriptor::rest_parameter_index(),
+ Immediate(Smi::FromInt(rest_index)));
+ function_in_register = false;
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+ SetVar(rest_param, eax, ebx, edx);
}
Variable* arguments = scope()->arguments();
@@ -293,7 +303,7 @@ void FullCodeGenerator::Generate() {
}
if (FLAG_trace) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
+ __ CallRuntime(Runtime::kTraceEnter);
}
// Visit the declarations and body unless there is an illegal
@@ -398,7 +408,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
__ push(eax);
- __ CallRuntime(Runtime::kTraceExit, 1);
+ __ CallRuntime(Runtime::kTraceExit);
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
@@ -622,9 +632,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ test(result_register(), result_register());
- // The stub returns nonzero for true.
- Split(not_zero, if_true, if_false, fall_through);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
}
@@ -784,7 +793,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
__ push(
Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -833,7 +842,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
VisitForStackValue(declaration->fun());
__ push(
Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot, 3);
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -844,7 +853,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
@@ -852,7 +861,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kDeclareModules);
// Return value is ignored.
}
@@ -974,7 +983,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
ToObjectStub stub(isolate());
@@ -985,9 +994,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check for proxies.
Label call_runtime, use_cache, fixed_array;
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
- __ j(below_equal, &call_runtime);
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
+ __ j(equal, &call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1001,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ __ CallRuntime(Runtime::kGetPropertyNamesFast);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -1032,7 +1040,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ jmp(&exit);
// We got a fixed array in register eax. Iterate through that.
- Label non_proxy;
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
@@ -1040,15 +1047,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
-
- __ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
- __ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
- __ j(above, &non_proxy);
- __ Move(ebx, Immediate(Smi::FromInt(0))); // Zero indicates proxy
- __ bind(&non_proxy);
- __ push(ebx); // Smi
+ __ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
@@ -1077,18 +1076,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
- // For proxies, no filtering is done.
- // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- DCHECK(Smi::FromInt(0) == 0);
- __ test(edx, edx);
- __ j(zero, &update_each);
-
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter, 2);
+ __ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, loop_statement.continue_label());
@@ -1146,8 +1139,8 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ CallStub(&stub);
} else {
__ push(Immediate(info));
- __ CallRuntime(
- pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
+ __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
context()->Plug(eax);
}
@@ -1160,7 +1153,7 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1173,7 +1166,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
__ mov(StoreDescriptor::NameRegister(),
Immediate(isolate()->factory()->home_object_symbol()));
__ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
}
@@ -1188,10 +1181,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
// Load next context in chain.
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
@@ -1217,9 +1209,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
Immediate(isolate()->factory()->native_context_map()));
__ j(equal, &fast, Label::kNear);
- // Check that extension is NULL.
- __ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
__ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
@@ -1241,19 +1233,18 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
- // Check that extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
- Immediate(0));
- __ j(not_equal, slow);
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
__ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
}
- // Check that last extension is NULL.
- __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
- __ j(not_equal, slow);
+ // Check that last extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
// This function is used only for loads, not stores, so it's safe to
// return an esi-based operand (the write barrier cannot be allowed to
@@ -1285,7 +1276,7 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ mov(eax, isolate()->factory()->undefined_value());
} else { // LET || CONST
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
}
}
__ jmp(done);
@@ -1298,7 +1289,10 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(LoadDescriptor::ReceiverRegister(),
+ ContextOperand(LoadDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
__ mov(LoadDescriptor::NameRegister(), var->name());
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
@@ -1340,7 +1334,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
@@ -1367,7 +1361,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotNoReferenceError;
- __ CallRuntime(function_id, 2);
+ __ CallRuntime(function_id);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1378,53 +1372,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- Label materialized;
- // Registers will be used as follows:
- // edi = JS function.
- // ecx = literals array.
- // ebx = regexp literal.
- // eax = regexp literal clone.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
- __ mov(ebx, FieldOperand(ecx, literal_offset));
- __ cmp(ebx, isolate()->factory()->undefined_value());
- __ j(not_equal, &materialized, Label::kNear);
-
- // Create regexp literal using runtime function
- // Result will be in eax.
- __ push(ecx);
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->pattern()));
- __ push(Immediate(expr->flags()));
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(ebx, eax);
-
- __ bind(&materialized);
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(ebx);
- __ push(Immediate(Smi::FromInt(size)));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(ebx);
-
- __ bind(&allocated);
- // Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ mov(edx, FieldOperand(ebx, i));
- __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
- __ mov(FieldOperand(eax, i), edx);
- __ mov(FieldOperand(eax, i + kPointerSize), ecx);
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ mov(edx, FieldOperand(ebx, size - kPointerSize));
- __ mov(FieldOperand(eax, size - kPointerSize), edx);
- }
+ __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
+ __ Move(ecx, Immediate(expr->pattern()));
+ __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
context()->Plug(eax);
}
@@ -1453,15 +1406,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_properties));
__ push(Immediate(Smi::FromInt(flags)));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
@@ -1502,12 +1453,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(property->GetSlot(0));
- CallStoreIC();
- } else {
- CallStoreIC(key->LiteralFeedbackId());
- }
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1525,7 +1472,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
- __ CallRuntime(Runtime::kSetProperty, 4);
+ __ CallRuntime(Runtime::kSetProperty);
} else {
__ Drop(3);
}
@@ -1534,7 +1481,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
@@ -1561,7 +1510,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1588,9 +1537,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+ __ CallRuntime(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
} else {
- EmitPropertyKey(property, expr->GetIdForProperty(property_index));
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
@@ -1602,7 +1553,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
} else {
__ Drop(3);
}
@@ -1614,12 +1565,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1628,7 +1579,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
__ push(Operand(esp, 0));
- __ CallRuntime(Runtime::kToFastProperties, 1);
+ __ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
@@ -1654,15 +1605,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1687,36 +1636,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) {
__ push(eax); // array literal.
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
- if (FLAG_vector_stores) {
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(Smi::FromInt(array_index)));
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
- EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
- } else if (has_constant_fast_elements) {
- // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
- // cannot transition and don't need to call the runtime stub.
- int offset = FixedArray::kHeaderSize + (array_index * kPointerSize);
- __ mov(ebx, Operand(esp, kPointerSize)); // Copy of array literal.
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- // Store the subexpression value in the array's elements.
- __ mov(FieldOperand(ebx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
- } else {
- // Store the subexpression value in the array's elements.
- __ mov(ecx, Immediate(Smi::FromInt(array_index)));
- StoreArrayLiteralElementStub stub(isolate());
- __ CallStub(&stub);
- }
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(Smi::FromInt(array_index)));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1726,7 +1656,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Drop(1); // literal index
__ Pop(eax);
result_saved = false;
}
@@ -1740,14 +1669,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement, 2);
+ __ CallRuntime(Runtime::kAppendElement);
}
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
if (result_saved) {
- __ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(eax);
@@ -2027,7 +1955,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ mov(edi, eax);
__ mov(Operand(esp, 2 * kPointerSize), edi);
- SetCallPosition(expr, 1);
+ SetCallPosition(expr);
__ Set(eax, 1);
__ Call(
isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
@@ -2047,8 +1975,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ test(eax, eax);
- __ j(zero, &l_try);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(not_equal, &l_try);
// result.value
__ pop(load_receiver); // result
@@ -2139,7 +2067,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2156,11 +2084,10 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&allocate);
__ Push(Smi::FromInt(JSIteratorResult::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -2196,7 +2123,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
__ push(Immediate(key->value()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
}
@@ -2213,7 +2140,7 @@ void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetExpressionPosition(prop);
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
}
@@ -2339,7 +2266,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// need to check for an own read only property we special case this so we do
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
__ push(eax);
}
@@ -2354,24 +2281,24 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod, 3);
+ __ CallRuntime(Runtime::kDefineClassMethod);
break;
case ObjectLiteral::Property::GETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
__ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked, 4);
+ __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
// Set both the prototype and constructor to have fast properties, and also
// freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2407,7 +2334,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
break;
}
@@ -2455,7 +2382,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
__ Move(StoreDescriptor::NameRegister(), eax);
__ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
@@ -2482,11 +2409,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), var->name());
- __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ __ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
+ __ mov(StoreDescriptor::ReceiverRegister(),
+ ContextOperand(StoreDescriptor::ReceiverRegister(),
+ Context::EXTENSION_INDEX));
+ EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT_LET) {
+ } else if (var->mode() == LET && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2496,11 +2426,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &assign, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ } else if (var->mode() == CONST && op != Token::INIT) {
// Assignment to const variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2510,11 +2440,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(not_equal, &const_error, Label::kNear);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
- } else if (var->is_this() && op == Token::INIT_CONST) {
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
@@ -2523,24 +2453,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ cmp(edx, isolate()->factory()->the_hole_value());
__ j(equal, &uninitialized_this);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+ __ CallRuntime(Runtime::kStoreLookupSlot);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && op == Token::INIT_LET) {
+ if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2549,15 +2480,14 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (op == Token::INIT_CONST_LEGACY) {
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
// Const initializers need a write barrier.
- DCHECK(var->mode() == CONST_LEGACY);
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ push(eax);
__ push(esi);
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
@@ -2570,9 +2500,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
} else {
- DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
}
// Silently ignore store in sloppy mode.
}
@@ -2589,12 +2519,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->AssignmentFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2611,8 +2537,7 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
__ push(Immediate(key->value()));
__ push(eax);
__ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy),
- 4);
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2622,10 +2547,9 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
__ push(eax);
- __ CallRuntime(
- (is_strict(language_mode()) ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy),
- 4);
+ __ CallRuntime((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
@@ -2640,13 +2564,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
DCHECK(StoreDescriptor::ValueRegister().is(eax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->AssignmentSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->AssignmentFeedbackId());
- }
-
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@@ -2753,7 +2672,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2811,7 +2730,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
// - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
+ __ CallRuntime(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2832,7 +2751,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2867,7 +2786,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ push(Immediate(Smi::FromInt(scope()->start_position())));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2886,7 +2805,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
// the object holding it (returned in edx).
__ push(context_register());
__ push(Immediate(callee->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+ __ CallRuntime(Runtime::kLoadLookupSlot);
__ push(eax); // Function.
__ push(edx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
@@ -2936,7 +2855,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Set(eax, arg_count);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -2978,8 +2897,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ CallConstructStub stub(isolate());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2992,8 +2911,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
expr->expression()->AsSuperCallReference();
DCHECK_NOT_NULL(super_call_ref);
- EmitLoadSuperConstructor(super_call_ref);
- __ push(result_register());
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ mov(result_register(),
+ FieldOperand(result_register(), HeapObject::kMapOffset));
+ __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3006,20 +2930,15 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load original constructor into ecx.
+ // Load new target into edx.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(ecx, result_register());
+ __ mov(edx, result_register());
// Load function and argument count into edi and eax.
__ Move(eax, Immediate(arg_count));
__ mov(edi, Operand(esp, arg_count * kPointerSize));
- // Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(ebx);
- __ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
-
- CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
RecordJSReturnSite(expr);
@@ -3050,7 +2969,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3064,7 +2983,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -3108,9 +3027,9 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
&if_true, &if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
+ Split(above_equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3223,42 +3142,7 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
&if_false, &fall_through);
__ JumpIfSmi(eax, if_false);
- Register map = ebx;
- __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- __ CmpInstanceType(map, FIRST_JS_PROXY_TYPE);
- __ j(less, if_false);
- __ CmpInstanceType(map, LAST_JS_PROXY_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(less_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Get the frame pointer for the calling frame.
- __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
- // Skip the arguments adaptor frame if it exists.
- Label check_frame_marker;
- __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &check_frame_marker);
- __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
-
- // Check the marker in the calling frame.
- __ bind(&check_frame_marker);
- __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
- Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ CmpObjectType(eax, JS_PROXY_TYPE, ebx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3335,52 +3219,40 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
- // If the object is a smi, we return null.
- __ JumpIfSmi(eax, &null);
-
- // Check that the object is a JS object but take special care of JS
- // functions to make sure they have 'Function' as their class.
- // Assume that there are only two callable types, and one of them is at
- // either end of the type range for JS object types. Saves extra comparisons.
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
- // Map is now in eax.
- __ j(below, &null);
- STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- FIRST_SPEC_OBJECT_TYPE + 1);
- __ j(equal, &function);
-
- __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ j(equal, &function);
- // Assume that there is no larger type.
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(eax, &null, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, eax);
+ __ j(below, &null, Label::kNear);
+
+ // Return 'Function' for JSFunction objects.
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &function, Label::kNear);
// Check if the constructor in the map is a JS function.
__ GetMapConstructor(eax, eax, ebx);
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ j(not_equal, &non_function_constructor);
+ __ j(not_equal, &non_function_constructor, Label::kNear);
// eax now contains the constructor function. Grab the
// instance class name from there.
__ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ mov(eax, isolate()->factory()->null_value());
+ __ jmp(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
__ mov(eax, isolate()->factory()->Function_string());
- __ jmp(&done);
+ __ jmp(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
__ mov(eax, isolate()->factory()->Object_string());
- __ jmp(&done);
-
- // Non-JS objects have class null.
- __ bind(&null);
- __ mov(eax, isolate()->factory()->null_value());
// All done.
__ bind(&done);
@@ -3430,43 +3302,6 @@ void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- DCHECK_NOT_NULL(args->at(1)->AsLiteral());
- Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Register object = eax;
- Register result = eax;
- Register scratch = ecx;
-
- if (index->value() == 0) {
- __ mov(result, FieldOperand(object, JSDate::kValueOffset));
- } else {
- Label runtime, done;
- if (index->value() < JSDate::kFirstUncachedField) {
- ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
- __ mov(scratch, Operand::StaticVariable(stamp));
- __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
- __ j(not_equal, &runtime, Label::kNear);
- __ mov(result, FieldOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done, Label::kNear);
- }
- __ bind(&runtime);
- __ PrepareCallCFunction(2, scratch);
- __ mov(Operand(esp, 0), object);
- __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
- __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ bind(&done);
- }
-
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3576,7 +3411,7 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
Label done_convert;
__ JumpIfSmi(eax, &done_convert, Label::kNear);
__ Push(eax);
- __ CallRuntime(Runtime::kToInteger, 1);
+ __ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
context()->Plug(eax);
}
@@ -3597,7 +3432,7 @@ void FullCodeGenerator::EmitToName(CallRuntime* expr) {
__ j(below_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ Push(eax);
- __ CallRuntime(Runtime::kToName, 1);
+ __ CallRuntime(Runtime::kToName);
__ bind(&done_convert);
context()->Plug(eax);
}
@@ -3737,54 +3572,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Evaluate new.target and super constructor.
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, args_set_up, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ebx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame);
- // default constructor has no arguments, so no adaptor frame means no args.
- __ mov(eax, Immediate(0));
- __ jmp(&args_set_up);
-
- // Copy arguments from adaptor frame.
- {
- __ bind(&adaptor_frame);
- __ mov(ebx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(ebx);
-
- __ mov(eax, ebx);
- __ lea(edx, Operand(edx, ebx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- Label loop;
- __ bind(&loop);
- __ push(Operand(edx, -1 * kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ebx);
- __ j(not_zero, &loop);
- }
-
- __ bind(&args_set_up);
-
- __ mov(edx, Operand(esp, eax, times_pointer_size, 1 * kPointerSize));
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0 * kPointerSize));
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
- context()->DropAndPlug(1, eax);
-}
-
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3823,6 +3610,17 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(eax);
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop,
@@ -4111,8 +3909,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
__ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
- __ mov(ebx, GlobalObjectOperand());
- __ mov(ebx, FieldOperand(ebx, JSGlobalObject::kNativeContextOffset));
+ __ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -4125,7 +3922,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+ __ CallRuntime(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(eax);
@@ -4136,9 +3933,7 @@ void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as receiver.
__ push(Immediate(isolate()->factory()->undefined_value()));
- __ mov(eax, GlobalObjectOperand());
- __ mov(eax, FieldOperand(eax, JSGlobalObject::kNativeContextOffset));
- __ mov(eax, ContextOperand(eax, expr->context_index()));
+ __ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -4146,7 +3941,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- SetCallPosition(expr, arg_count);
+ SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Set(eax, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
@@ -4217,8 +4012,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForStackValue(property->key());
__ CallRuntime(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy,
- 2);
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4227,9 +4021,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ push(GlobalObjectOperand());
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy, 2);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global variables is false. 'this' is
@@ -4241,7 +4036,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(eax);
}
} else {
@@ -4521,12 +4316,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
__ pop(StoreDescriptor::ReceiverRegister());
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallStoreIC();
- } else {
- CallStoreIC(expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4564,12 +4355,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->CountSlot());
- CallIC(ic);
- } else {
- CallIC(ic, expr->CountStoreFeedbackId());
- }
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -4640,8 +4427,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, edx);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, edx);
__ j(below, if_false);
// Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
@@ -4686,7 +4473,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty, 2);
+ __ CallRuntime(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4799,9 +4586,9 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
- // code. Pass a smi sentinel and let the runtime look up the empty
- // function.
- __ push(Immediate(Smi::FromInt(0)));
+ // code.
+ __ mov(eax, NativeContextOperand());
+ __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
@@ -4869,7 +4656,7 @@ void FullCodeGenerator::ClearPendingMessage() {
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
- DCHECK(FLAG_vector_stores && !slot.IsInvalid());
+ DCHECK(!slot.IsInvalid());
__ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
Immediate(SmiFromSlot(slot)));
}
@@ -4916,8 +4703,8 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
break;
}
- Assembler::set_target_address_at(call_target_address,
- unoptimized_code,
+ Assembler::set_target_address_at(unoptimized_code->GetIsolate(),
+ call_target_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index b0e514e8af..991e4c3711 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -262,7 +262,8 @@ Object* FutexEmulation::NumWaitersForTesting(Isolate* isolate,
int waiters = 0;
FutexWaitListNode* node = wait_list_.Pointer()->head_;
while (node) {
- if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
+ if (backing_store == node->backing_store_ && addr == node->wait_addr_ &&
+ node->waiting_) {
waiters++;
}
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index d0fd8223e1..819bd69e07 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1187,7 +1187,7 @@ class DebugInfoSection : public DebugSection {
DCHECK(Context::CLOSURE_INDEX == 0);
DCHECK(Context::PREVIOUS_INDEX == 1);
DCHECK(Context::EXTENSION_INDEX == 2);
- DCHECK(Context::GLOBAL_OBJECT_INDEX == 3);
+ DCHECK(Context::NATIVE_CONTEXT_INDEX == 3);
w->WriteULEB128(current_abbreviation++);
w->WriteString(".closure");
w->WriteULEB128(current_abbreviation++);
@@ -1195,7 +1195,7 @@ class DebugInfoSection : public DebugSection {
w->WriteULEB128(current_abbreviation++);
w->WriteString(".extension");
w->WriteULEB128(current_abbreviation++);
- w->WriteString(".global");
+ w->WriteString(".native_context");
for (int context_slot = 0;
context_slot < context_slots;
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 3608fe81b6..4a7292547f 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -78,6 +78,7 @@ class GlobalHandles::Node {
#endif
void Initialize(int index, Node** first_free) {
+ object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
index_ = static_cast<uint8_t>(index);
DCHECK(static_cast<int>(index_) == index);
set_state(FREE);
@@ -250,9 +251,9 @@ class GlobalHandles::Node {
}
void MakeWeak(void* parameter, WeakCallback weak_callback) {
- DCHECK(weak_callback != NULL);
+ DCHECK(weak_callback != nullptr);
DCHECK(IsInUse());
- CHECK(object_ != NULL);
+ CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
set_state(WEAK);
set_weakness_type(NORMAL_WEAK);
set_parameter(parameter);
@@ -264,7 +265,7 @@ class GlobalHandles::Node {
v8::WeakCallbackType type) {
DCHECK(phantom_callback != nullptr);
DCHECK(IsInUse());
- CHECK(object_ != nullptr);
+ CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
set_state(WEAK);
switch (type) {
case v8::WeakCallbackType::kParameter:
@@ -533,10 +534,10 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
}
void RunInternal() override {
- isolate_->heap()->CallGCPrologueCallbacks(
+ isolate()->heap()->CallGCPrologueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
- InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate_);
- isolate_->heap()->CallGCEpilogueCallbacks(
+ InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
+ isolate()->heap()->CallGCEpilogueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
}
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index c3358870e5..67bdb63b86 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -599,19 +599,6 @@ enum InlineCacheState {
};
-enum CallConstructorFlags {
- NO_CALL_CONSTRUCTOR_FLAGS = 0,
- // The call target is cached in the instruction stream.
- RECORD_CONSTRUCTOR_TARGET = 1,
- // TODO(bmeurer): Kill these SUPER_* modes and use the Construct builtin
- // directly instead; also there's no point in collecting any "targets" for
- // super constructor calls, since these are known when we optimize the
- // constructor that contains the super call.
- SUPER_CONSTRUCTOR_CALL = 1 << 1,
- SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
-};
-
-
enum CacheHolderFlag {
kCacheOnPrototype,
kCacheOnPrototypeReceiverIsDictionary,
@@ -1035,6 +1022,15 @@ inline bool IsClassConstructor(FunctionKind kind) {
}
+inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
+ if (IsAccessorFunction(kind)) return false;
+ if (IsConciseMethod(kind) && !IsGeneratorFunction(kind)) return false;
+ if (IsArrowFunction(kind)) return false;
+ if (is_strong(mode)) return IsClassConstructor(kind);
+ return true;
+}
+
+
inline bool IsInObjectLiteral(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind & FunctionKind::kInObjectLiteral;
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 4c10f20738..1f97d6ff7e 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -171,8 +171,6 @@ V8_INLINE Handle<T> handle(T* object) {
// into a Handle requires checking that it does not point to NULL. This
// ensures NULL checks before use.
//
-// Do not use MaybeHandle as argument type.
-//
// Also note that Handles do not provide default equality comparison or hashing
// operators on purpose. Such operators would be misleading, because intended
// semantics is ambiguous between Handle location and object identity.
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index f7bd8d07d2..ec1ad65391 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -21,6 +21,19 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
}
+GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
+ : tracer_(tracer), scope_(scope) {
+ start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
+}
+
+
+GCTracer::Scope::~Scope() {
+ DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned.
+ tracer_->current_.scopes[scope_] +=
+ tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+}
+
+
GCTracer::AllocationEvent::AllocationEvent(double duration,
size_t allocation_in_bytes) {
duration_ = duration;
@@ -100,6 +113,9 @@ GCTracer::GCTracer(Heap* heap)
cumulative_incremental_marking_duration_(0.0),
cumulative_pure_incremental_marking_duration_(0.0),
longest_incremental_marking_step_(0.0),
+ cumulative_incremental_marking_finalization_steps_(0),
+ cumulative_incremental_marking_finalization_duration_(0.0),
+ longest_incremental_marking_finalization_step_(0.0),
cumulative_marking_duration_(0.0),
cumulative_sweeping_duration_(0.0),
allocation_time_ms_(0.0),
@@ -111,7 +127,7 @@ GCTracer::GCTracer(Heap* heap)
combined_mark_compact_speed_cache_(0.0),
start_counter_(0) {
current_ = Event(Event::START, NULL, NULL);
- current_.end_time = base::OS::TimeCurrentMillis();
+ current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
previous_ = previous_incremental_mark_compactor_event_ = current_;
}
@@ -261,6 +277,10 @@ void GCTracer::Stop(GarbageCollector collector) {
if (FLAG_trace_gc) {
heap_->PrintShortHeapStatistics();
}
+
+ longest_incremental_marking_finalization_step_ = 0.0;
+ cumulative_incremental_marking_finalization_steps_ = 0;
+ cumulative_incremental_marking_finalization_duration_ = 0.0;
}
@@ -334,6 +354,14 @@ void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
}
+void GCTracer::AddIncrementalMarkingFinalizationStep(double duration) {
+ cumulative_incremental_marking_finalization_steps_++;
+ cumulative_incremental_marking_finalization_duration_ += duration;
+ longest_incremental_marking_finalization_step_ =
+ Max(longest_incremental_marking_finalization_step_, duration);
+}
+
+
void GCTracer::Output(const char* format, ...) const {
if (FLAG_trace_gc) {
va_list arguments;
@@ -474,129 +502,128 @@ void GCTracer::PrintNVP() const {
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
- PrintIsolate(heap_->isolate(),
- "%8.0f ms: "
- "pause=%.1f "
- "mutator=%.1f "
- "gc=%s "
- "reduce_memory=%d "
- "external=%.1f "
- "mark=%.1f "
- "mark_inc=%.1f "
- "mark_prepcodeflush=%.1f "
- "mark_root=%.1f "
- "mark_topopt=%.1f "
- "mark_retainmaps=%.1f "
- "mark_weakclosure=%.1f "
- "mark_stringtable=%.1f "
- "mark_weakrefs=%.1f "
- "mark_globalhandles=%.1f "
- "mark_codeflush=%.1f "
- "mark_optimizedcodemaps=%.1f "
- "store_buffer_clear=%.1f "
- "slots_buffer_clear=%.1f "
- "sweep=%.2f "
- "sweepns=%.2f "
- "sweepos=%.2f "
- "sweepcode=%.2f "
- "sweepcell=%.2f "
- "sweepmap=%.2f "
- "sweepaborted=%.2f "
- "evacuate=%.1f "
- "new_new=%.1f "
- "root_new=%.1f "
- "old_new=%.1f "
- "compaction_ptrs=%.1f "
- "intracompaction_ptrs=%.1f "
- "misc_compaction=%.1f "
- "inc_weak_closure=%.1f "
- "weakcollection_process=%.1f "
- "weakcollection_clear=%.1f "
- "weakcollection_abort=%.1f "
- "weakcells=%.1f "
- "nonlive_refs=%.1f "
- "steps_count=%d "
- "steps_took=%.1f "
- "longest_step=%.1f "
- "incremental_marking_throughput=%" V8_PTR_PREFIX
- "d "
- "total_size_before=%" V8_PTR_PREFIX
- "d "
- "total_size_after=%" V8_PTR_PREFIX
- "d "
- "holes_size_before=%" V8_PTR_PREFIX
- "d "
- "holes_size_after=%" V8_PTR_PREFIX
- "d "
- "allocated=%" V8_PTR_PREFIX
- "d "
- "promoted=%" V8_PTR_PREFIX
- "d "
- "semi_space_copied=%" V8_PTR_PREFIX
- "d "
- "nodes_died_in_new=%d "
- "nodes_copied_in_new=%d "
- "nodes_promoted=%d "
- "promotion_ratio=%.1f%% "
- "average_survival_ratio=%.1f%% "
- "promotion_rate=%.1f%% "
- "semi_space_copy_rate=%.1f%% "
- "new_space_allocation_throughput=%" V8_PTR_PREFIX
- "d "
- "context_disposal_rate=%.1f "
- "compaction_speed=%" V8_PTR_PREFIX "d\n",
- heap_->isolate()->time_millis_since_init(), duration,
- spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory, current_.scopes[Scope::EXTERNAL],
- current_.scopes[Scope::MC_MARK],
- current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
- current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
- current_.scopes[Scope::MC_MARK_ROOT],
- current_.scopes[Scope::MC_MARK_TOPOPT],
- current_.scopes[Scope::MC_MARK_RETAIN_MAPS],
- current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
- current_.scopes[Scope::MC_MARK_STRING_TABLE],
- current_.scopes[Scope::MC_MARK_WEAK_REFERENCES],
- current_.scopes[Scope::MC_MARK_GLOBAL_HANDLES],
- current_.scopes[Scope::MC_MARK_CODE_FLUSH],
- current_.scopes[Scope::MC_MARK_OPTIMIZED_CODE_MAPS],
- current_.scopes[Scope::MC_STORE_BUFFER_CLEAR],
- current_.scopes[Scope::MC_SLOTS_BUFFER_CLEAR],
- current_.scopes[Scope::MC_SWEEP],
- current_.scopes[Scope::MC_SWEEP_NEWSPACE],
- current_.scopes[Scope::MC_SWEEP_OLDSPACE],
- current_.scopes[Scope::MC_SWEEP_CODE],
- current_.scopes[Scope::MC_SWEEP_CELL],
- current_.scopes[Scope::MC_SWEEP_MAP],
- current_.scopes[Scope::MC_SWEEP_ABORTED],
- current_.scopes[Scope::MC_EVACUATE_PAGES],
- current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS],
- current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS],
- current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS],
- current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED],
- current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED],
- current_.scopes[Scope::MC_UPDATE_MISC_POINTERS],
- current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
- current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS],
- current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR],
- current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT],
- current_.scopes[Scope::MC_WEAKCELL],
- current_.scopes[Scope::MC_NONLIVEREFERENCES],
- current_.incremental_marking_steps,
- current_.incremental_marking_duration,
- current_.longest_incremental_marking_step,
- IncrementalMarkingSpeedInBytesPerMillisecond(),
- current_.start_object_size, current_.end_object_size,
- current_.start_holes_size, current_.end_holes_size,
- allocated_since_last_gc, heap_->promoted_objects_size(),
- heap_->semi_space_copied_object_size(),
- heap_->nodes_died_in_new_space_,
- heap_->nodes_copied_in_new_space_, heap_->nodes_promoted_,
- heap_->promotion_ratio_, AverageSurvivalRatio(),
- heap_->promotion_rate_, heap_->semi_space_copied_rate_,
- NewSpaceAllocationThroughputInBytesPerMillisecond(),
- ContextDisposalRateInMilliseconds(),
- CompactionSpeedInBytesPerMillisecond());
+ PrintIsolate(
+ heap_->isolate(),
+ "%8.0f ms: "
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d "
+ "external=%.1f "
+ "clear=%1.f "
+ "clear.code_flush=%.1f "
+ "clear.dependent_code=%.1f "
+ "clear.global_handles=%.1f "
+ "clear.maps=%.1f "
+ "clear.slots_buffer=%.1f "
+ "clear.store_buffer=%.1f "
+ "clear.string_table=%.1f "
+ "clear.weak_cells=%.1f "
+ "clear.weak_collections=%.1f "
+ "clear.weak_lists=%.1f "
+ "evacuate=%.1f "
+ "evacuate.candidates=%.1f "
+ "evacuate.clean_up=%.1f "
+ "evacuate.new_space=%.1f "
+ "evacuate.update_pointers=%.1f "
+ "evacuate.update_pointers.between_evacuated=%.1f "
+ "evacuate.update_pointers.to_evacuated=%.1f "
+ "evacuate.update_pointers.to_new=%.1f "
+ "evacuate.update_pointers.weak=%.1f "
+ "finish=%.1f "
+ "mark=%.1f "
+ "mark.finish_incremental=%.1f "
+ "mark.prepare_code_flush=%.1f "
+ "mark.roots=%.1f "
+ "mark.weak_closure=%.1f "
+ "sweep=%.1f "
+ "sweep.code=%.1f "
+ "sweep.map=%.1f "
+ "sweep.old=%.1f "
+ "incremental_finalize=%.1f "
+ "steps_count=%d "
+ "steps_took=%.1f "
+ "longest_step=%.1f "
+ "finalization_steps_count=%d "
+ "finalization_steps_took=%.1f "
+ "finalization_longest_step=%.1f "
+ "incremental_marking_throughput=%" V8_PTR_PREFIX
+ "d "
+ "total_size_before=%" V8_PTR_PREFIX
+ "d "
+ "total_size_after=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_before=%" V8_PTR_PREFIX
+ "d "
+ "holes_size_after=%" V8_PTR_PREFIX
+ "d "
+ "allocated=%" V8_PTR_PREFIX
+ "d "
+ "promoted=%" V8_PTR_PREFIX
+ "d "
+ "semi_space_copied=%" V8_PTR_PREFIX
+ "d "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "semi_space_copy_rate=%.1f%% "
+ "new_space_allocation_throughput=%" V8_PTR_PREFIX
+ "d "
+ "context_disposal_rate=%.1f "
+ "compaction_speed=%" V8_PTR_PREFIX "d\n",
+ heap_->isolate()->time_millis_since_init(), duration,
+ spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
+ current_.scopes[Scope::EXTERNAL], current_.scopes[Scope::MC_CLEAR],
+ current_.scopes[Scope::MC_CLEAR_CODE_FLUSH],
+ current_.scopes[Scope::MC_CLEAR_DEPENDENT_CODE],
+ current_.scopes[Scope::MC_CLEAR_GLOBAL_HANDLES],
+ current_.scopes[Scope::MC_CLEAR_MAPS],
+ current_.scopes[Scope::MC_CLEAR_SLOTS_BUFFER],
+ current_.scopes[Scope::MC_CLEAR_STORE_BUFFER],
+ current_.scopes[Scope::MC_CLEAR_STRING_TABLE],
+ current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
+ current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
+ current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MC_EVACUATE],
+ current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
+ current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
+ current_.scopes[Scope::MC_EVACUATE_NEW_SPACE],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
+ current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
+ current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
+ current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
+ current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
+ current_.scopes[Scope::MC_MARK_ROOTS],
+ current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
+ current_.scopes[Scope::MC_SWEEP],
+ current_.scopes[Scope::MC_SWEEP_CODE],
+ current_.scopes[Scope::MC_SWEEP_MAP],
+ current_.scopes[Scope::MC_SWEEP_OLD],
+ current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
+ current_.incremental_marking_steps,
+ current_.incremental_marking_duration,
+ current_.longest_incremental_marking_step,
+ cumulative_incremental_marking_finalization_steps_,
+ cumulative_incremental_marking_finalization_duration_,
+ longest_incremental_marking_finalization_step_,
+ IncrementalMarkingSpeedInBytesPerMillisecond(),
+ current_.start_object_size, current_.end_object_size,
+ current_.start_holes_size, current_.end_holes_size,
+ allocated_since_last_gc, heap_->promoted_objects_size(),
+ heap_->semi_space_copied_object_size(),
+ heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
+ heap_->nodes_promoted_, heap_->promotion_ratio_,
+ AverageSurvivalRatio(), heap_->promotion_rate_,
+ heap_->semi_space_copied_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ ContextDisposalRateInMilliseconds(),
+ CompactionSpeedInBytesPerMillisecond());
break;
case Event::START:
break;
@@ -718,7 +745,7 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
- if (compaction_events_.size() < kRingBufferMaxSize) return 0.0;
+ if (compaction_events_.size() == 0) return 0;
intptr_t bytes = 0;
double durations = 0.0;
CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
@@ -851,7 +878,7 @@ size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
double GCTracer::ContextDisposalRateInMilliseconds() const {
if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
- double begin = base::OS::TimeCurrentMillis();
+ double begin = heap_->MonotonicallyIncreasingTimeInMs();
double end = 0.0;
ContextDisposalEventBuffer::const_iterator iter =
context_disposal_events_.begin();
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index c60317f941..e8ec168187 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -98,41 +98,37 @@ class GCTracer {
public:
enum ScopeId {
EXTERNAL,
+ MC_CLEAR,
+ MC_CLEAR_CODE_FLUSH,
+ MC_CLEAR_DEPENDENT_CODE,
+ MC_CLEAR_GLOBAL_HANDLES,
+ MC_CLEAR_MAPS,
+ MC_CLEAR_SLOTS_BUFFER,
+ MC_CLEAR_STORE_BUFFER,
+ MC_CLEAR_STRING_TABLE,
+ MC_CLEAR_WEAK_CELLS,
+ MC_CLEAR_WEAK_COLLECTIONS,
+ MC_CLEAR_WEAK_LISTS,
+ MC_EVACUATE,
+ MC_EVACUATE_CANDIDATES,
+ MC_EVACUATE_CLEAN_UP,
+ MC_EVACUATE_NEW_SPACE,
+ MC_EVACUATE_UPDATE_POINTERS,
+ MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED,
+ MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED,
+ MC_EVACUATE_UPDATE_POINTERS_TO_NEW,
+ MC_EVACUATE_UPDATE_POINTERS_WEAK,
+ MC_FINISH,
+ MC_INCREMENTAL_FINALIZE,
MC_MARK,
MC_MARK_FINISH_INCREMENTAL,
MC_MARK_PREPARE_CODE_FLUSH,
- MC_MARK_ROOT,
- MC_MARK_TOPOPT,
- MC_MARK_RETAIN_MAPS,
+ MC_MARK_ROOTS,
MC_MARK_WEAK_CLOSURE,
- MC_MARK_STRING_TABLE,
- MC_MARK_WEAK_REFERENCES,
- MC_MARK_GLOBAL_HANDLES,
- MC_MARK_CODE_FLUSH,
- MC_MARK_OPTIMIZED_CODE_MAPS,
- MC_STORE_BUFFER_CLEAR,
- MC_SLOTS_BUFFER_CLEAR,
MC_SWEEP,
- MC_SWEEP_NEWSPACE,
- MC_SWEEP_OLDSPACE,
MC_SWEEP_CODE,
- MC_SWEEP_CELL,
MC_SWEEP_MAP,
- MC_SWEEP_ABORTED,
- MC_EVACUATE_PAGES,
- MC_UPDATE_NEW_TO_NEW_POINTERS,
- MC_UPDATE_ROOT_TO_NEW_POINTERS,
- MC_UPDATE_OLD_TO_NEW_POINTERS,
- MC_UPDATE_POINTERS_TO_EVACUATED,
- MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
- MC_UPDATE_MISC_POINTERS,
- MC_INCREMENTAL_FINALIZE,
- MC_WEAKCOLLECTION_PROCESS,
- MC_WEAKCOLLECTION_CLEAR,
- MC_WEAKCOLLECTION_ABORT,
- MC_WEAKCELL,
- MC_NONLIVEREFERENCES,
- MC_FLUSH_CODE,
+ MC_SWEEP_OLD,
SCAVENGER_CODE_FLUSH_CANDIDATES,
SCAVENGER_OBJECT_GROUPS,
SCAVENGER_OLD_TO_NEW_POINTERS,
@@ -143,15 +139,8 @@ class GCTracer {
NUMBER_OF_SCOPES
};
- Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) {
- start_time_ = base::OS::TimeCurrentMillis();
- }
-
- ~Scope() {
- DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned.
- tracer_->current_.scopes[scope_] +=
- base::OS::TimeCurrentMillis() - start_time_;
- }
+ Scope(GCTracer* tracer, ScopeId scope);
+ ~Scope();
private:
GCTracer* tracer_;
@@ -358,6 +347,8 @@ class GCTracer {
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+ void AddIncrementalMarkingFinalizationStep(double duration);
+
// Log time spent in marking.
void AddMarkingTime(double duration) {
cumulative_marking_duration_ += duration;
@@ -508,6 +499,9 @@ class GCTracer {
cumulative_incremental_marking_duration_ = 0;
cumulative_pure_incremental_marking_duration_ = 0;
longest_incremental_marking_step_ = 0;
+ cumulative_incremental_marking_finalization_steps_ = 0;
+ cumulative_incremental_marking_finalization_duration_ = 0;
+ longest_incremental_marking_finalization_step_ = 0;
cumulative_marking_duration_ = 0;
cumulative_sweeping_duration_ = 0;
}
@@ -564,6 +558,17 @@ class GCTracer {
// Longest incremental marking step since start of marking.
double longest_incremental_marking_step_;
+ // Cumulative number of incremental marking finalization steps since creation
+ // of tracer.
+ int cumulative_incremental_marking_finalization_steps_;
+
+ // Cumulative duration of incremental marking finalization steps since
+ // creation of tracer.
+ double cumulative_incremental_marking_finalization_duration_;
+
+ // Longest incremental marking finalization step since start of marking.
+ double longest_incremental_marking_finalization_step_;
+
// Total marking time.
// This timer is precise when run with --print-cumulative-gc-stat
double cumulative_marking_duration_;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index c6185c6e30..a723b3bdae 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -257,20 +257,21 @@ void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
profiler->AllocationEvent(object->address(), size_in_bytes);
}
- ++allocations_count_;
-
if (FLAG_verify_predictable) {
+ ++allocations_count_;
+ // Advance synthetic time by making a time request.
+ MonotonicallyIncreasingTimeInMs();
+
UpdateAllocationsHash(object);
UpdateAllocationsHash(size_in_bytes);
- if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
- (--dump_allocations_hash_countdown_ == 0)) {
- dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAlloctionsHash();
}
}
if (FLAG_trace_allocation_stack_interval > 0) {
+ if (!FLAG_verify_predictable) ++allocations_count_;
if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
}
@@ -292,14 +293,14 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
if (FLAG_verify_predictable) {
++allocations_count_;
+ // Advance synthetic time by making a time request.
+ MonotonicallyIncreasingTimeInMs();
UpdateAllocationsHash(source);
UpdateAllocationsHash(target);
UpdateAllocationsHash(size_in_bytes);
- if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
- (--dump_allocations_hash_countdown_ == 0)) {
- dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+ if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
PrintAlloctionsHash();
}
}
@@ -506,21 +507,39 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
- ScratchpadSlotMode mode) {
- Heap* heap = object->GetHeap();
- DCHECK(heap->InFromSpace(object));
-
+void Heap::UpdateAllocationSite(HeapObject* object,
+ HashMap* pretenuring_feedback) {
+ DCHECK(InFromSpace(object));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
return;
+ AllocationMemento* memento = FindAllocationMemento(object);
+ if (memento == nullptr) return;
+
+ AllocationSite* key = memento->GetAllocationSite();
+ DCHECK(!key->IsZombie());
+
+ if (pretenuring_feedback == global_pretenuring_feedback_) {
+ // For inserting in the global pretenuring storage we need to first
+ // increment the memento found count on the allocation site.
+ if (key->IncrementMementoFoundCount()) {
+ global_pretenuring_feedback_->LookupOrInsert(
+ key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+ }
+ } else {
+ // Any other pretenuring storage than the global one is used as a cache,
+ // where the count is later on merge in the allocation site.
+ HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
+ key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+ DCHECK(e != nullptr);
+ (*bit_cast<intptr_t*>(&e->value))++;
+ }
+}
- AllocationMemento* memento = heap->FindAllocationMemento(object);
- if (memento == NULL) return;
- if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
- heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
- }
+void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
+ global_pretenuring_feedback_->Remove(
+ site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 5a135f0b7b..84b3c79b3e 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
@@ -31,8 +32,8 @@
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
-#include "src/scopeinfo.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
@@ -57,7 +58,7 @@ class IdleScavengeObserver : public InlineAllocationObserver {
IdleScavengeObserver(Heap& heap, intptr_t step_size)
: InlineAllocationObserver(step_size), heap_(heap) {}
- virtual void Step(int bytes_allocated) {
+ void Step(int bytes_allocated, Address, size_t) override {
heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
}
@@ -91,6 +92,7 @@ Heap::Heap()
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
contexts_disposed_(0),
+ number_of_disposed_maps_(0),
global_ic_age_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
@@ -102,7 +104,6 @@ Heap::Heap()
gc_post_processing_depth_(0),
allocations_count_(0),
raw_allocations_hash_(0),
- dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
@@ -149,7 +150,7 @@ Heap::Heap()
old_generation_allocation_counter_(0),
old_generation_size_at_last_gc_(0),
gcs_since_last_deopt_(0),
- allocation_sites_scratchpad_length_(0),
+ global_pretenuring_feedback_(nullptr),
ring_buffer_full_(false),
ring_buffer_end_(0),
promotion_queue_(this),
@@ -162,9 +163,10 @@ Heap::Heap()
pending_unmapping_tasks_semaphore_(0),
gc_callbacks_depth_(0),
deserialization_complete_(false),
- concurrent_sweeping_enabled_(false),
strong_roots_list_(NULL),
- array_buffer_tracker_(NULL) {
+ array_buffer_tracker_(NULL),
+ heap_iterator_depth_(0),
+ force_oom_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -180,6 +182,7 @@ Heap::Heap()
set_allocation_sites_list(Smi::FromInt(0));
set_encountered_weak_collections(Smi::FromInt(0));
set_encountered_weak_cells(Smi::FromInt(0));
+ set_encountered_transition_arrays(Smi::FromInt(0));
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
@@ -426,10 +429,6 @@ void Heap::GarbageCollectionPrologue() {
AllowHeapAllocation for_the_first_part_of_prologue;
gc_count_++;
- if (FLAG_flush_code) {
- mark_compact_collector()->EnableCodeFlushing(true);
- }
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -500,37 +499,60 @@ const char* Heap::GetSpaceName(int idx) {
}
-void Heap::ClearAllKeyedStoreICs() {
- if (FLAG_vector_stores) {
- TypeFeedbackVector::ClearAllKeyedStoreICs(isolate_);
- return;
+void Heap::RepairFreeListsAfterDeserialization() {
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next(); space != NULL;
+ space = spaces.next()) {
+ space->RepairFreeListsAfterDeserialization();
}
+}
- // TODO(mvstanton): Remove this function when FLAG_vector_stores is turned on
- // permanently, and divert all callers to KeyedStoreIC::ClearAllKeyedStoreICs.
- HeapObjectIterator it(code_space());
- for (Object* object = it.Next(); object != NULL; object = it.Next()) {
- Code* code = Code::cast(object);
- Code::Kind current_kind = code->kind();
- if (current_kind == Code::FUNCTION ||
- current_kind == Code::OPTIMIZED_FUNCTION) {
- code->ClearInlineCaches(Code::KEYED_STORE_IC);
+void Heap::MergeAllocationSitePretenuringFeedback(
+ const HashMap& local_pretenuring_feedback) {
+ AllocationSite* site = nullptr;
+ for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
+ local_entry != nullptr;
+ local_entry = local_pretenuring_feedback.Next(local_entry)) {
+ site = reinterpret_cast<AllocationSite*>(local_entry->key);
+ MapWord map_word = site->map_word();
+ if (map_word.IsForwardingAddress()) {
+ site = AllocationSite::cast(map_word.ToForwardingAddress());
+ }
+ DCHECK(site->IsAllocationSite());
+ int value =
+ static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
+ DCHECK_GT(value, 0);
+
+ {
+ // TODO(mlippautz): For parallel processing we need synchronization here.
+ if (site->IncrementMementoFoundCount(value)) {
+ global_pretenuring_feedback_->LookupOrInsert(
+ site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
+ }
}
}
}
-void Heap::RepairFreeListsAfterDeserialization() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != NULL;
- space = spaces.next()) {
- space->RepairFreeListsAfterDeserialization();
+class Heap::PretenuringScope {
+ public:
+ explicit PretenuringScope(Heap* heap) : heap_(heap) {
+ heap_->global_pretenuring_feedback_ =
+ new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
}
-}
+
+ ~PretenuringScope() {
+ delete heap_->global_pretenuring_feedback_;
+ heap_->global_pretenuring_feedback_ = nullptr;
+ }
+
+ private:
+ Heap* heap_;
+};
-bool Heap::ProcessPretenuringFeedback() {
+void Heap::ProcessPretenuringFeedback() {
bool trigger_deoptimization = false;
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
@@ -539,48 +561,43 @@ bool Heap::ProcessPretenuringFeedback() {
int allocation_sites = 0;
int active_allocation_sites = 0;
- // If the scratchpad overflowed, we have to iterate over the allocation
- // sites list.
- // TODO(hpayer): We iterate over the whole list of allocation sites when
- // we grew to the maximum semi-space size to deopt maybe tenured
- // allocation sites. We could hold the maybe tenured allocation sites
- // in a seperate data structure if this is a performance problem.
- bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
- bool use_scratchpad =
- allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
- !deopt_maybe_tenured;
+ AllocationSite* site = nullptr;
- int i = 0;
- Object* list_element = allocation_sites_list();
+ // Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
- while (use_scratchpad ? i < allocation_sites_scratchpad_length_
- : list_element->IsAllocationSite()) {
- AllocationSite* site =
- use_scratchpad
- ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
- : AllocationSite::cast(list_element);
- allocation_mementos_found += site->memento_found_count();
- if (site->memento_found_count() > 0) {
- active_allocation_sites++;
- if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
- trigger_deoptimization = true;
- }
- if (site->GetPretenureMode() == TENURED) {
- tenure_decisions++;
- } else {
- dont_tenure_decisions++;
- }
- allocation_sites++;
- }
-
- if (deopt_maybe_tenured && site->IsMaybeTenure()) {
- site->set_deopt_dependent_code(true);
+ for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
+ e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
+ site = reinterpret_cast<AllocationSite*>(e->key);
+ int found_count = site->memento_found_count();
+ // The fact that we have an entry in the storage means that we've found
+ // the site at least once.
+ DCHECK_GT(found_count, 0);
+ DCHECK(site->IsAllocationSite());
+ allocation_sites++;
+ active_allocation_sites++;
+ allocation_mementos_found += found_count;
+ if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
trigger_deoptimization = true;
}
-
- if (use_scratchpad) {
- i++;
+ if (site->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
} else {
+ dont_tenure_decisions++;
+ }
+ }
+
+ // Step 2: Deopt maybe tenured allocation sites if necessary.
+ bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+ if (deopt_maybe_tenured) {
+ Object* list_element = allocation_sites_list();
+ while (list_element->IsAllocationSite()) {
+ site = AllocationSite::cast(list_element);
+ DCHECK(site->IsAllocationSite());
+ allocation_sites++;
+ if (site->IsMaybeTenure()) {
+ site->set_deopt_dependent_code(true);
+ trigger_deoptimization = true;
+ }
list_element = site->weak_next();
}
}
@@ -589,28 +606,24 @@ bool Heap::ProcessPretenuringFeedback() {
isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
- FlushAllocationSitesScratchpad();
-
if (FLAG_trace_pretenuring_statistics &&
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
- PrintF(
- "GC: (mode, #visited allocation sites, #active allocation sites, "
- "#mementos, #tenure decisions, #donttenure decisions) "
- "(%s, %d, %d, %d, %d, %d)\n",
- use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
- active_allocation_sites, allocation_mementos_found, tenure_decisions,
- dont_tenure_decisions);
+ PrintIsolate(isolate(),
+ "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
+ "active_sites=%d "
+ "mementos=%d tenured=%d not_tenured=%d\n",
+ deopt_maybe_tenured ? 1 : 0, allocation_sites,
+ active_allocation_sites, allocation_mementos_found,
+ tenure_decisions, dont_tenure_decisions);
}
}
- return trigger_deoptimization;
}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
- // performance issue, use a cache heap data structure instead (similar to the
- // allocation sites scratchpad).
+ // performance issue, use a cache data structure in heap instead.
Object* list_element = allocation_sites_list();
while (list_element->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(list_element);
@@ -786,10 +799,8 @@ void Heap::HandleGCRequest() {
IncrementalMarking::COMPLETE_MARKING) {
CollectAllGarbage(current_gc_flags_, "GC interrupt",
current_gc_callback_flags_);
- return;
- }
- DCHECK(FLAG_finalize_marking_incrementally);
- if (!incremental_marking()->finalize_marking_completed()) {
+ } else if (incremental_marking()->IsMarking() &&
+ !incremental_marking()->finalize_marking_completed()) {
FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
}
}
@@ -966,7 +977,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
!ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() && FLAG_incremental_marking) {
+ !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
+ OldGenerationAllocationLimitReached()) {
// Make progress in incremental marking.
const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
@@ -1049,18 +1061,18 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kContextDisposed;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_->NotifyContextDisposed(event);
}
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compile_dispatcher()->Flush();
}
AgeInlineCaches();
- set_retained_maps(ArrayList::cast(empty_fixed_array()));
- tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
- MemoryReducer::Event event;
- event.type = MemoryReducer::kContextDisposed;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyContextDisposed(event);
+ number_of_disposed_maps_ = retained_maps()->Length();
+ tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
@@ -1285,22 +1297,27 @@ bool Heap::PerformGarbageCollection(
incremental_marking()->NotifyOfHighPromotionRate();
}
- if (collector == MARK_COMPACTOR) {
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
- MarkCompact();
- old_gen_exhausted_ = false;
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which can
- // cause another GC. Take into account the objects promoted during GC.
- old_generation_allocation_counter_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
- } else {
- Scavenge();
+ {
+ Heap::PretenuringScope pretenuring_scope(this);
+
+ if (collector == MARK_COMPACTOR) {
+ UpdateOldGenerationAllocationCounter();
+ // Perform mark-sweep with optional compaction.
+ MarkCompact();
+ old_gen_exhausted_ = false;
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during GC.
+ old_generation_allocation_counter_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
+ } else {
+ Scavenge();
+ }
+
+ ProcessPretenuringFeedback();
}
- ProcessPretenuringFeedback();
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
@@ -1403,6 +1420,8 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
void Heap::MarkCompact() {
+ PauseInlineAllocationObserversScope pause_observers(new_space());
+
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1518,12 +1537,13 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
static bool IsUnmodifiedHeapObject(Object** p) {
Object* object = *p;
- DCHECK(object->IsHeapObject());
+ if (object->IsSmi()) return false;
HeapObject* heap_object = HeapObject::cast(object);
if (!object->IsJSObject()) return false;
Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
if (!obj_constructor->IsJSFunction()) return false;
JSFunction* constructor = JSFunction::cast(obj_constructor);
+ if (!constructor->shared()->IsApiFunction()) return false;
if (constructor != nullptr &&
constructor->initial_map() == heap_object->map()) {
return true;
@@ -1605,6 +1625,10 @@ void Heap::Scavenge() {
// trigger one during scavenge: scavenges allocation should always succeed.
AlwaysAllocateScope scope(isolate());
+ // Bump-pointer allocations done during scavenge are not real allocations.
+ // Pause the inline allocation steps.
+ PauseInlineAllocationObserversScope pause_observers(new_space());
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1734,10 +1758,6 @@ void Heap::Scavenge() {
// Set age mark.
new_space_.set_age_mark(new_space_.top());
- // We start a new step without accounting the objects copied into to space
- // as those are not allocations.
- new_space_.UpdateInlineAllocationLimitStep();
-
array_buffer_tracker()->FreeDead(true);
// Update how much has survived scavenge.
@@ -1850,6 +1870,7 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
casted->ResetPretenureDecision();
casted->set_deopt_dependent_code(true);
marked = true;
+ RemoveAllocationSitePretenuringFeedback(casted);
}
cur = casted->weak_next();
}
@@ -2064,7 +2085,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
reinterpret_cast<Map*>(result)->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
- Map::Counter::encode(Map::kRetainingCounterStart);
+ Map::ConstructionCounter::encode(Map::kNoSlackTracking);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
return result;
@@ -2103,9 +2124,10 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true) |
- Map::Counter::encode(Map::kRetainingCounterStart);
+ Map::ConstructionCounter::encode(Map::kNoSlackTracking);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
+ map->set_new_target_is_base(true);
return map;
}
@@ -2352,6 +2374,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+ ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
for (unsigned i = 0; i < arraysize(struct_table); i++) {
const StructTable& entry = struct_table[i];
@@ -2508,11 +2531,26 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
}
result->set_map_no_write_barrier(weak_cell_map());
WeakCell::cast(result)->initialize(value);
- WeakCell::cast(result)->clear_next(this);
+ WeakCell::cast(result)->clear_next(the_hole_value());
return result;
}
+AllocationResult Heap::AllocateTransitionArray(int capacity) {
+ DCHECK(capacity > 0);
+ HeapObject* raw_array = nullptr;
+ {
+ AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
+ if (!allocation.To(&raw_array)) return allocation;
+ }
+ raw_array->set_map_no_write_barrier(transition_array_map());
+ TransitionArray* array = TransitionArray::cast(raw_array);
+ array->set_length(capacity);
+ MemsetPointer(array->data_start(), undefined_value(), capacity);
+ return array;
+}
+
+
void Heap::CreateApiObjects() {
HandleScope scope(isolate());
Factory* factory = isolate()->factory();
@@ -2714,6 +2752,11 @@ void Heap::CreateInitialObjects() {
Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
set_intrinsic_function_names(*intrinsic_names);
+ Handle<NameDictionary> empty_properties_dictionary =
+ NameDictionary::New(isolate(), 0, TENURED);
+ empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
+ set_empty_properties_dictionary(*empty_properties_dictionary);
+
set_number_string_cache(
*factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
@@ -2740,9 +2783,6 @@ void Heap::CreateInitialObjects() {
set_experimental_extra_natives_source_cache(
*factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
- set_code_stub_natives_source_cache(
- *factory->NewFixedArray(CodeStubNatives::GetBuiltinsCount()));
-
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// The symbol registry is initialized lazily.
@@ -2786,6 +2826,20 @@ void Heap::CreateInitialObjects() {
set_dummy_vector(*dummy_vector);
}
+ {
+ Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
+ set_empty_weak_cell(*cell);
+ cell->clear();
+
+ Handle<FixedArray> cleared_optimized_code_map =
+ factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
+ cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
+ *cell);
+ STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
+ SharedFunctionInfo::kSharedCodeIndex == 0);
+ set_cleared_optimized_code_map(*cleared_optimized_code_map);
+ }
+
set_detached_contexts(empty_fixed_array());
set_retained_maps(ArrayList::cast(empty_fixed_array()));
@@ -2827,10 +2881,6 @@ void Heap::CreateInitialObjects() {
*interpreter::Interpreter::CreateUninitializedInterpreterTable(
isolate()));
- set_allocation_sites_scratchpad(
- *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
- InitializeAllocationSitesScratchpad();
-
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -2859,7 +2909,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kSymbolRegistryRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
- case kAllocationSitesScratchpadRootIndex:
case kMicrotaskQueueRootIndex:
case kDetachedContextsRootIndex:
case kWeakObjectToCodeTableRootIndex:
@@ -2908,48 +2957,6 @@ void Heap::FlushNumberStringCache() {
}
-void Heap::FlushAllocationSitesScratchpad() {
- for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
- allocation_sites_scratchpad()->set_undefined(i);
- }
- allocation_sites_scratchpad_length_ = 0;
-}
-
-
-void Heap::InitializeAllocationSitesScratchpad() {
- DCHECK(allocation_sites_scratchpad()->length() ==
- kAllocationSiteScratchpadSize);
- for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
- allocation_sites_scratchpad()->set_undefined(i);
- }
-}
-
-
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
- ScratchpadSlotMode mode) {
- if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
- // We cannot use the normal write-barrier because slots need to be
- // recorded with non-incremental marking as well. We have to explicitly
- // record the slot to take evacuation candidates into account.
- allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
- site, SKIP_WRITE_BARRIER);
- Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
- allocation_sites_scratchpad_length_);
-
- if (mode == RECORD_SCRATCHPAD_SLOT) {
- // We need to allow slots buffer overflow here since the evacuation
- // candidates are not part of the global list of old space pages and
- // releasing an evacuation candidate due to a slots buffer overflow
- // results in lost pages.
- mark_compact_collector()->ForceRecordSlot(allocation_sites_scratchpad(),
- slot, *slot);
- }
- allocation_sites_scratchpad_length_++;
- }
-}
-
-
-
Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}
@@ -3096,7 +3103,12 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
- if (incremental_marking()->IsMarking() &&
+ // As long as the inspected object is black and we are currently not iterating
+ // the heap using HeapIterator, we can update the live byte count. We cannot
+ // update while using HeapIterator because the iterator is temporarily
+ // marking the whole object graph, without updating live bytes.
+ if (!in_heap_iterator() &&
+ !mark_compact_collector()->sweeping_in_progress() &&
Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
if (mode == SEQUENTIAL_TO_SWEEPER) {
MemoryChunk::IncrementLiveBytesFromGC(object, by);
@@ -3110,6 +3122,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
DCHECK(!object->IsFixedTypedArrayBase());
+ DCHECK(!object->IsByteArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
Map* map = object->map();
@@ -3166,7 +3179,8 @@ template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
template<Heap::InvocationMode mode>
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
const int len = object->length();
- DCHECK(elements_to_trim < len);
+ DCHECK_LE(elements_to_trim, len);
+ DCHECK_GE(elements_to_trim, 0);
int bytes_to_trim;
if (object->IsFixedTypedArrayBase()) {
@@ -3174,12 +3188,17 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
bytes_to_trim =
FixedTypedArrayBase::TypedArraySize(type, len) -
FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
+ } else if (object->IsByteArray()) {
+ int new_size = ByteArray::SizeFor(len - elements_to_trim);
+ bytes_to_trim = ByteArray::SizeFor(len) - new_size;
+ DCHECK_GE(bytes_to_trim, 0);
} else {
const int element_size =
object->IsFixedArray() ? kPointerSize : kDoubleSize;
bytes_to_trim = elements_to_trim * element_size;
}
+
// For now this trick is only applied to objects in new and paged space.
DCHECK(object->map() != fixed_cow_array_map());
@@ -3443,6 +3462,14 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
// verification code has to cope with (temporarily) invalid objects. See
// for example, JSArray::JSArrayVerify).
+ InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
+}
+
+
+void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
+ if (start_offset == map->instance_size()) return;
+ DCHECK_LT(start_offset, map->instance_size());
+
Object* filler;
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their internal fields to be initialized
@@ -3450,16 +3477,18 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// Pre-allocated fields need to be initialized with undefined_value as well
// so that object accesses before the constructor completes (e.g. in the
// debugger) will not cause a crash.
- Object* constructor = map->GetConstructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->IsInobjectSlackTrackingInProgress()) {
+
+ // In case of Array subclassing the |map| could already be transitioned
+ // to different elements kind from the initial map on which we track slack.
+ Map* initial_map = map->FindRootMap();
+ if (initial_map->IsInobjectSlackTrackingInProgress()) {
// We might want to shrink the object later.
- DCHECK_EQ(0, obj->GetInternalFieldCount());
filler = Heap::one_pointer_filler_map();
} else {
filler = Heap::undefined_value();
}
- obj->InitializeBody(map, Heap::undefined_value(), filler);
+ obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
+ initial_map->InobjectSlackTrackingStep();
}
@@ -3510,9 +3539,10 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Make the clone.
Map* map = source->map();
- // We can only clone normal objects or arrays. Copying anything else
+ // We can only clone regexps, normal objects or arrays. Copying anything else
// will break invariants.
- CHECK(map->instance_type() == JS_OBJECT_TYPE ||
+ CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+ map->instance_type() == JS_OBJECT_TYPE ||
map->instance_type() == JS_ARRAY_TYPE);
int object_size = map->instance_size();
@@ -4087,8 +4117,7 @@ void Heap::ReduceNewSpaceSize() {
void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
- if (FLAG_finalize_marking_incrementally &&
- incremental_marking()->IsMarking() &&
+ if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
mark_compact_collector()->marking_deque()->IsEmpty()))) {
@@ -4105,12 +4134,11 @@ bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
static_cast<size_t>(
tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
- if (FLAG_finalize_marking_incrementally &&
- (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
- (!incremental_marking()->finalize_marking_completed() &&
- mark_compact_collector()->marking_deque()->IsEmpty() &&
- gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
- static_cast<size_t>(idle_time_in_ms))))) {
+ if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+ (!incremental_marking()->finalize_marking_completed() &&
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
+ static_cast<size_t>(idle_time_in_ms)))) {
FinalizeIncrementalMarking(
"Idle notification: finalize incremental marking");
return true;
@@ -4480,10 +4508,34 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
}
+class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
+ public:
+ IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
+ bool record_slots,
+ ObjectSlotCallback callback)
+ : heap_(heap),
+ target_(target),
+ record_slots_(record_slots),
+ callback_(callback) {}
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ heap_->IterateAndMarkPointersToFromSpace(
+ target_, reinterpret_cast<Address>(start),
+ reinterpret_cast<Address>(end), record_slots_, callback_);
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+
+ private:
+ Heap* heap_;
+ HeapObject* target_;
+ bool record_slots_;
+ ObjectSlotCallback callback_;
+};
+
+
void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
ObjectSlotCallback callback) {
- Address obj_address = target->address();
-
// We are not collecting slots on new space objects during mutation
// thus we have to scan for pointers to evacuation candidates when we
// promote objects. But we should not record any slots in non-black
@@ -4496,53 +4548,9 @@ void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
record_slots = Marking::IsBlack(mark_bit);
}
- // Do not scavenge JSArrayBuffer's contents
- switch (target->ContentType()) {
- case HeapObjectContents::kTaggedValues: {
- IterateAndMarkPointersToFromSpace(target, obj_address, obj_address + size,
- record_slots, callback);
- break;
- }
- case HeapObjectContents::kMixedValues: {
- if (target->IsFixedTypedArrayBase()) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + FixedTypedArrayBase::kBasePointerOffset,
- obj_address + FixedTypedArrayBase::kHeaderSize, record_slots,
- callback);
- } else if (target->IsBytecodeArray()) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + BytecodeArray::kConstantPoolOffset,
- obj_address + BytecodeArray::kHeaderSize, record_slots, callback);
- } else if (target->IsJSArrayBuffer()) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address,
- obj_address + JSArrayBuffer::kByteLengthOffset + kPointerSize,
- record_slots, callback);
- IterateAndMarkPointersToFromSpace(
- target, obj_address + JSArrayBuffer::kSize, obj_address + size,
- record_slots, callback);
-#if V8_DOUBLE_FIELDS_UNBOXING
- } else if (FLAG_unbox_double_fields) {
- LayoutDescriptorHelper helper(target->map());
- DCHECK(!helper.all_fields_tagged());
-
- for (int offset = 0; offset < size;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, size, &end_of_region_offset)) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + offset,
- obj_address + end_of_region_offset, record_slots, callback);
- }
- offset = end_of_region_offset;
- }
-#endif
- }
- break;
- }
- case HeapObjectContents::kRawValues: {
- break;
- }
- }
+ IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
+ callback);
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
}
@@ -4991,6 +4999,10 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
factor = kMinHeapGrowingFactor;
}
+ if (FLAG_heap_growing_percent > 0) {
+ factor = 1.0 + FLAG_heap_growing_percent / 100.0;
+ }
+
old_generation_allocation_limit_ =
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
@@ -5073,8 +5085,6 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
- concurrent_sweeping_enabled_ = FLAG_concurrent_sweeping;
-
base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
// Set up memory allocator.
@@ -5174,6 +5184,7 @@ bool Heap::CreateHeapObjects() {
set_native_contexts_list(undefined_value());
set_allocation_sites_list(undefined_value());
+
return true;
}
@@ -5398,9 +5409,11 @@ DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
void Heap::AddRetainedMap(Handle<Map> map) {
- if (FLAG_retain_maps_for_n_gc == 0) return;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
Handle<ArrayList> array(retained_maps(), isolate());
+ if (array->IsFull()) {
+ CompactRetainedMaps(*array);
+ }
array = ArrayList::Add(
array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
ArrayList::kReloadLengthAfterAllocation);
@@ -5410,6 +5423,35 @@ void Heap::AddRetainedMap(Handle<Map> map) {
}
+void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
+ DCHECK_EQ(retained_maps, this->retained_maps());
+ int length = retained_maps->Length();
+ int new_length = 0;
+ int new_number_of_disposed_maps = 0;
+ // This loop compacts the array by removing cleared weak cells.
+ for (int i = 0; i < length; i += 2) {
+ DCHECK(retained_maps->Get(i)->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+ Object* age = retained_maps->Get(i + 1);
+ if (cell->cleared()) continue;
+ if (i != new_length) {
+ retained_maps->Set(new_length, cell);
+ retained_maps->Set(new_length + 1, age);
+ }
+ if (i < number_of_disposed_maps_) {
+ new_number_of_disposed_maps += 2;
+ }
+ new_length += 2;
+ }
+ number_of_disposed_maps_ = new_number_of_disposed_maps;
+ Object* undefined = undefined_value();
+ for (int i = new_length; i < length; i++) {
+ retained_maps->Clear(i, undefined);
+ }
+ if (new_length != length) retained_maps->SetLength(new_length);
+}
+
+
void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}
@@ -5630,6 +5672,7 @@ HeapIterator::HeapIterator(Heap* heap,
filter_(nullptr),
space_iterator_(nullptr),
object_iterator_(nullptr) {
+ heap_->heap_iterator_start();
// Start the iteration.
space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
@@ -5644,6 +5687,7 @@ HeapIterator::HeapIterator(Heap* heap,
HeapIterator::~HeapIterator() {
+ heap_->heap_iterator_end();
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 92f0ded036..af9d0a6235 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -61,6 +61,7 @@ namespace internal {
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, weak_cell_map, WeakCellMap) \
+ V(Map, transition_array_map, TransitionArrayMap) \
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, function_context_map, FunctionContextMap) \
@@ -166,9 +167,9 @@ namespace internal {
V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \
V(FixedArray, experimental_extra_natives_source_cache, \
ExperimentalExtraNativesSourceCache) \
- V(FixedArray, code_stub_natives_source_cache, CodeStubNativesSourceCache) \
V(Script, empty_script, EmptyScript) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
+ V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
V(Cell, undefined_cell, UndefinedCell) \
V(JSObject, observation_state, ObservationState) \
V(Object, symbol_registry, SymbolRegistry) \
@@ -176,20 +177,19 @@ namespace internal {
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(FixedArray, materialized_objects, MaterializedObjects) \
- V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(TypeFeedbackVector, dummy_vector, DummyVector) \
+ V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(FixedArray, detached_contexts, DetachedContexts) \
V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
V(PropertyCell, array_protector, ArrayProtector) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
- V(Object, code_stub_context, CodeStubContext) \
- V(JSObject, code_stub_exports_object, CodeStubExportsObject) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, interpreter_table, InterpreterTable) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(WeakCell, empty_weak_cell, EmptyWeakCell) \
V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
@@ -209,105 +209,128 @@ namespace internal {
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
-#define INTERNALIZED_STRING_LIST(V) \
- V(anonymous_string, "anonymous") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(Array_string, "Array") \
- V(bool16x8_string, "bool16x8") \
- V(Bool16x8_string, "Bool16x8") \
- V(bool32x4_string, "bool32x4") \
- V(Bool32x4_string, "Bool32x4") \
- V(bool8x16_string, "bool8x16") \
- V(Bool8x16_string, "Bool8x16") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(callee_string, "callee") \
- V(caller_string, "caller") \
- V(cell_value_string, "%cell_value") \
- V(char_at_string, "CharAt") \
- V(closure_string, "(closure)") \
- V(compare_ic_string, "==") \
- V(configurable_string, "configurable") \
- V(constructor_string, "constructor") \
- V(Date_string, "Date") \
- V(default_string, "default") \
- V(done_string, "done") \
- V(dot_result_string, ".result") \
- V(dot_string, ".") \
- V(enumerable_string, "enumerable") \
- V(Error_string, "Error") \
- V(eval_string, "eval") \
- V(false_string, "false") \
- V(float32x4_string, "float32x4") \
- V(Float32x4_string, "Float32x4") \
- V(for_api_string, "for_api") \
- V(for_string, "for") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(Generator_string, "Generator") \
- V(get_string, "get") \
- V(global_string, "global") \
- V(illegal_access_string, "illegal access") \
- V(illegal_argument_string, "illegal argument") \
- V(index_string, "index") \
- V(infinity_string, "Infinity") \
- V(input_string, "input") \
- V(int16x8_string, "int16x8") \
- V(Int16x8_string, "Int16x8") \
- V(int32x4_string, "int32x4") \
- V(Int32x4_string, "Int32x4") \
- V(int8x16_string, "int8x16") \
- V(Int8x16_string, "Int8x16") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(last_index_string, "lastIndex") \
- V(length_string, "length") \
- V(Map_string, "Map") \
- V(minus_infinity_string, "-Infinity") \
- V(minus_zero_string, "-0") \
- V(name_string, "name") \
- V(nan_string, "NaN") \
- V(next_string, "next") \
- V(null_string, "null") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(object_string, "object") \
- V(Object_string, "Object") \
- V(private_api_string, "private_api") \
- V(proto_string, "__proto__") \
- V(prototype_string, "prototype") \
- V(query_colon_string, "(?:)") \
- V(RegExp_string, "RegExp") \
- V(set_string, "set") \
- V(Set_string, "Set") \
- V(source_mapping_url_string, "source_mapping_url") \
- V(source_string, "source") \
- V(source_url_string, "source_url") \
- V(stack_string, "stack") \
- V(strict_compare_ic_string, "===") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(this_string, "this") \
- V(throw_string, "throw") \
- V(toJSON_string, "toJSON") \
- V(toString_string, "toString") \
- V(true_string, "true") \
- V(uint16x8_string, "uint16x8") \
- V(Uint16x8_string, "Uint16x8") \
- V(uint32x4_string, "uint32x4") \
- V(Uint32x4_string, "Uint32x4") \
- V(uint8x16_string, "uint8x16") \
- V(Uint8x16_string, "Uint8x16") \
- V(undefined_string, "undefined") \
- V(valueOf_string, "valueOf") \
- V(value_string, "value") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
+#define INTERNALIZED_STRING_LIST(V) \
+ V(anonymous_string, "anonymous") \
+ V(apply_string, "apply") \
+ V(assign_string, "assign") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(Array_string, "Array") \
+ V(bind_string, "bind") \
+ V(bool16x8_string, "bool16x8") \
+ V(Bool16x8_string, "Bool16x8") \
+ V(bool32x4_string, "bool32x4") \
+ V(Bool32x4_string, "Bool32x4") \
+ V(bool8x16_string, "bool8x16") \
+ V(Bool8x16_string, "Bool8x16") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(bound__string, "bound ") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(call_string, "call") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(compare_ic_string, "==") \
+ V(configurable_string, "configurable") \
+ V(constructor_string, "constructor") \
+ V(construct_string, "construct") \
+ V(create_string, "create") \
+ V(Date_string, "Date") \
+ V(default_string, "default") \
+ V(defineProperty_string, "defineProperty") \
+ V(deleteProperty_string, "deleteProperty") \
+ V(display_name_string, "displayName") \
+ V(done_string, "done") \
+ V(dot_result_string, ".result") \
+ V(dot_string, ".") \
+ V(enumerable_string, "enumerable") \
+ V(enumerate_string, "enumerate") \
+ V(Error_string, "Error") \
+ V(eval_string, "eval") \
+ V(false_string, "false") \
+ V(float32x4_string, "float32x4") \
+ V(Float32x4_string, "Float32x4") \
+ V(for_api_string, "for_api") \
+ V(for_string, "for") \
+ V(function_string, "function") \
+ V(Function_string, "Function") \
+ V(Generator_string, "Generator") \
+ V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(getPrototypeOf_string, "getPrototypeOf") \
+ V(get_string, "get") \
+ V(global_string, "global") \
+ V(has_string, "has") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(infinity_string, "Infinity") \
+ V(input_string, "input") \
+ V(int16x8_string, "int16x8") \
+ V(Int16x8_string, "Int16x8") \
+ V(int32x4_string, "int32x4") \
+ V(Int32x4_string, "Int32x4") \
+ V(int8x16_string, "int8x16") \
+ V(Int8x16_string, "Int8x16") \
+ V(isExtensible_string, "isExtensible") \
+ V(isView_string, "isView") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(last_index_string, "lastIndex") \
+ V(length_string, "length") \
+ V(Map_string, "Map") \
+ V(minus_infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(name_string, "name") \
+ V(nan_string, "NaN") \
+ V(next_string, "next") \
+ V(null_string, "null") \
+ V(null_to_string, "[object Null]") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(object_string, "object") \
+ V(Object_string, "Object") \
+ V(ownKeys_string, "ownKeys") \
+ V(preventExtensions_string, "preventExtensions") \
+ V(private_api_string, "private_api") \
+ V(Promise_string, "Promise") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(Proxy_string, "Proxy") \
+ V(query_colon_string, "(?:)") \
+ V(RegExp_string, "RegExp") \
+ V(setPrototypeOf_string, "setPrototypeOf") \
+ V(set_string, "set") \
+ V(Set_string, "Set") \
+ V(source_mapping_url_string, "source_mapping_url") \
+ V(source_string, "source") \
+ V(source_url_string, "source_url") \
+ V(stack_string, "stack") \
+ V(strict_compare_ic_string, "===") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(uint16x8_string, "uint16x8") \
+ V(Uint16x8_string, "Uint16x8") \
+ V(uint32x4_string, "uint32x4") \
+ V(Uint32x4_string, "Uint32x4") \
+ V(uint8x16_string, "uint8x16") \
+ V(Uint8x16_string, "Uint8x16") \
+ V(undefined_string, "undefined") \
+ V(undefined_to_string, "[object Undefined]") \
+ V(valueOf_string, "valueOf") \
+ V(value_string, "value") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
V(writable_string, "writable")
#define PRIVATE_SYMBOL_LIST(V) \
@@ -332,7 +355,10 @@ namespace internal {
V(internal_error_symbol) \
V(intl_impl_object_symbol) \
V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
V(nonexistent_symbol) \
V(nonextensible_symbol) \
V(normal_ic_symbol) \
@@ -349,18 +375,21 @@ namespace internal {
V(promise_value_symbol) \
V(sealed_symbol) \
V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
V(string_iterator_iterated_string_symbol) \
V(string_iterator_next_index_symbol) \
+ V(strong_function_transition_symbol) \
V(uninitialized_symbol)
-#define PUBLIC_SYMBOL_LIST(V) \
- V(has_instance_symbol, Symbol.hasInstance) \
- V(iterator_symbol, Symbol.iterator) \
- V(match_symbol, Symbol.match) \
- V(replace_symbol, Symbol.replace) \
- V(search_symbol, Symbol.search) \
- V(split_symbol, Symbol.split) \
- V(to_primitive_symbol, Symbol.toPrimitive) \
+#define PUBLIC_SYMBOL_LIST(V) \
+ V(has_instance_symbol, Symbol.hasInstance) \
+ V(iterator_symbol, Symbol.iterator) \
+ V(match_symbol, Symbol.match) \
+ V(replace_symbol, Symbol.replace) \
+ V(search_symbol, Symbol.search) \
+ V(species_symbol, Symbol.species) \
+ V(split_symbol, Symbol.split) \
+ V(to_primitive_symbol, Symbol.toPrimitive) \
V(unscopables_symbol, Symbol.unscopables)
// Well-Known Symbols are "Public" symbols, which have a bit set which causes
@@ -408,6 +437,7 @@ namespace internal {
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
V(WeakCellMap) \
+ V(TransitionArrayMap) \
V(NoInterceptorResultSentinel) \
V(HashTableMap) \
V(OrderedHashTableMap) \
@@ -433,6 +463,7 @@ namespace internal {
V(JSMessageObjectMap) \
V(ForeignMap) \
V(NeanderMap) \
+ V(EmptyWeakCell) \
V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
@@ -605,7 +636,7 @@ class Heap {
// - or mutator code (CONCURRENT_TO_SWEEPER).
enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
- enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
+ enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
@@ -731,12 +762,6 @@ class Heap {
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
- // An object may have an AllocationSite associated with it through a trailing
- // AllocationMemento. Its feedback should be updated when objects are found
- // in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object,
- ScratchpadSlotMode mode);
-
// Generated code can embed direct references to non-writable roots if
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
@@ -794,16 +819,14 @@ class Heap {
// TODO(hpayer): There is still a missmatch between capacity and actual
// committed memory size.
- bool CanExpandOldGeneration(int size) {
+ bool CanExpandOldGeneration(int size = 0) {
+ if (force_oom_) return false;
return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
}
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
- // Iterates the whole code space to clear all keyed store ICs.
- void ClearAllKeyedStoreICs();
-
// FreeSpace objects have a null map after deserialization. Update the map.
void RepairFreeListsAfterDeserialization();
@@ -876,6 +899,13 @@ class Heap {
}
Object* encountered_weak_cells() const { return encountered_weak_cells_; }
+ void set_encountered_transition_arrays(Object* transition_array) {
+ encountered_transition_arrays_ = transition_array;
+ }
+ Object* encountered_transition_arrays() const {
+ return encountered_transition_arrays_;
+ }
+
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
@@ -888,10 +918,6 @@ class Heap {
// Number of "runtime allocations" done so far.
uint32_t allocations_count() { return allocations_count_; }
- // Returns deterministic "time" value in ms. Works only with
- // FLAG_verify_predictable.
- double synthetic_time() { return allocations_count() / 2.0; }
-
// Print short heap statistics.
void PrintShortHeapStatistics();
@@ -943,8 +969,6 @@ class Heap {
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
-
inline bool OldGenerationAllocationLimitReached();
void QueueMemoryChunkForFree(MemoryChunk* chunk);
@@ -1151,14 +1175,6 @@ class Heap {
roots_[kMaterializedObjectsRootIndex] = objects;
}
- void SetRootCodeStubContext(Object* value) {
- roots_[kCodeStubContextRootIndex] = value;
- }
-
- void SetRootCodeStubExportsObject(JSObject* value) {
- roots_[kCodeStubExportsObjectRootIndex] = value;
- }
-
void SetRootScriptList(Object* value) {
roots_[kScriptListRootIndex] = value;
}
@@ -1393,13 +1409,13 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
inline void IncrementPromotedObjectsSize(int object_size) {
- DCHECK(object_size > 0);
+ DCHECK_GE(object_size, 0);
promoted_objects_size_ += object_size;
}
inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
- DCHECK(object_size > 0);
+ DCHECK_GE(object_size, 0);
semi_space_copied_object_size_ += object_size;
}
inline intptr_t semi_space_copied_object_size() {
@@ -1522,6 +1538,27 @@ class Heap {
return array_buffer_tracker_;
}
+ // ===========================================================================
+ // Allocation site tracking. =================================================
+ // ===========================================================================
+
+ // Updates the AllocationSite of a given {object}. If the global prenuring
+ // storage is passed as {pretenuring_feedback} the memento found count on
+ // the corresponding allocation site is immediately updated and an entry
+ // in the hash map is created. Otherwise the entry (including a the count
+ // value) is cached on the local pretenuring feedback.
+ inline void UpdateAllocationSite(HeapObject* object,
+ HashMap* pretenuring_feedback);
+
+ // Removes an entry from the global pretenuring storage.
+ inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
+
+ // Merges local pretenuring feedback into the global one. Note that this
+ // method needs to be called after evacuation, as allocation sites may be
+ // evacuated and this method resolves forward pointers accordingly.
+ void MergeAllocationSitePretenuringFeedback(
+ const HashMap& local_pretenuring_feedback);
+
// =============================================================================
#ifdef VERIFY_HEAP
@@ -1545,6 +1582,7 @@ class Heap {
#endif
private:
+ class PretenuringScope;
class UnmapFreeMemoryTask;
// External strings table is a place where all external strings are
@@ -1639,7 +1677,7 @@ class Heap {
static const int kMaxMarkCompactsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
- static const int kAllocationSiteScratchpadSize = 256;
+ static const int kInitialFeedbackCapacity = 256;
Heap();
@@ -1681,12 +1719,6 @@ class Heap {
void PreprocessStackTraces();
- // Pretenuring decisions are made based on feedback collected during new
- // space evacuation. Note that between feedback collection and calling this
- // method object in old space must not move.
- // Right now we only process pretenuring feedback in high promotion mode.
- bool ProcessPretenuringFeedback();
-
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
@@ -1720,6 +1752,10 @@ class Heap {
// Initializes a JSObject based on its map.
void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
Map* map);
+
+ // Initializes JSObject body starting at given offset.
+ void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
+
void InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site);
@@ -1762,16 +1798,6 @@ class Heap {
// Flush the number to string cache.
void FlushNumberStringCache();
- // Sets used allocation sites entries to undefined.
- void FlushAllocationSitesScratchpad();
-
- // Initializes the allocation sites scratchpad with undefined values.
- void InitializeAllocationSitesScratchpad();
-
- // Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site,
- ScratchpadSlotMode mode);
-
// TODO(hpayer): Allocation site pretenuring may make this method obsolete.
// Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
@@ -1806,6 +1832,8 @@ class Heap {
void AddToRingBuffer(const char* string);
void GetFromRingBuffer(char* buffer);
+ void CompactRetainedMaps(ArrayList* retained_maps);
+
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
@@ -1821,6 +1849,15 @@ class Heap {
HistogramTimer* GCTypeTimer(GarbageCollector collector);
// ===========================================================================
+ // Pretenuring. ==============================================================
+ // ===========================================================================
+
+ // Pretenuring decisions are made based on feedback collected during new space
+ // evacuation. Note that between feedback collection and calling this method
+ // object in old space must not move.
+ void ProcessPretenuringFeedback();
+
+ // ===========================================================================
// Actual GC. ================================================================
// ===========================================================================
@@ -1903,6 +1940,16 @@ class Heap {
void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
// ===========================================================================
+ // HeapIterator helpers. =====================================================
+ // ===========================================================================
+
+ void heap_iterator_start() { heap_iterator_depth_++; }
+
+ void heap_iterator_end() { heap_iterator_depth_--; }
+
+ bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
+
+ // ===========================================================================
// Allocation methods. =======================================================
// ===========================================================================
@@ -2089,6 +2136,8 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
+ MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
+
// Allocates a new utility object in the old generation.
MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
@@ -2103,6 +2152,10 @@ class Heap {
MUST_USE_RESULT AllocationResult InternalizeString(String* str);
+ // ===========================================================================
+
+ void set_force_oom(bool value) { force_oom_ = value; }
+
// The amount of external memory registered through the API kept alive
// by global handles
int64_t amount_of_external_allocated_memory_;
@@ -2141,6 +2194,11 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
+ // The length of the retained_maps array at the time of context disposal.
+ // This separates maps in the retained_maps array that were created before
+ // and after context disposal.
+ int number_of_disposed_maps_;
+
int global_ic_age_;
int scan_on_scavenge_pages_;
@@ -2163,9 +2221,6 @@ class Heap {
// Running hash over allocations performed.
uint32_t raw_allocations_hash_;
- // Countdown counter, dumps allocation hash when 0.
- uint32_t dump_allocations_hash_countdown_;
-
// How many mark-sweep collections happened.
unsigned int ms_count_;
@@ -2213,6 +2268,8 @@ class Heap {
Object* encountered_weak_cells_;
+ Object* encountered_transition_arrays_;
+
StoreBufferRebuilder store_buffer_rebuilder_;
List<GCCallbackPair> gc_epilogue_callbacks_;
@@ -2306,7 +2363,12 @@ class Heap {
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
- int allocation_sites_scratchpad_length_;
+ // The feedback storage is used to store allocation sites (keys) and how often
+ // they have been visited (values) by finding a memento behind an object. The
+ // storage is only alive temporary during a GC. The invariant is that all
+ // pointers in this map are already fixed, i.e., they do not point to
+ // forwarding pointers.
+ HashMap* global_pretenuring_feedback_;
char trace_ring_buffer_[kTraceRingBufferSize];
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
@@ -2343,12 +2405,16 @@ class Heap {
bool deserialization_complete_;
- bool concurrent_sweeping_enabled_;
-
StrongRootsList* strong_roots_list_;
ArrayBufferTracker* array_buffer_tracker_;
+ // The depth of HeapIterator nestings.
+ int heap_iterator_depth_;
+
+ // Used for testing purposes.
+ bool force_oom_;
+
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class GCCallbacksScope;
@@ -2356,6 +2422,7 @@ class Heap {
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
+ friend class IteratePointersToFromSpaceVisitor;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 7e8e5f251f..a69dfac2fa 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -91,7 +91,7 @@ void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
- Heap* heap = isolate_->heap();
+ Heap* heap = isolate()->heap();
double start_ms = heap->MonotonicallyIncreasingTimeInMs();
job_->NotifyIdleTask();
job_->NotifyIdleTaskProgress();
@@ -102,7 +102,7 @@ void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
double current_time_ms = heap->MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
double deadline_difference = deadline_in_ms - current_time_ms;
- PrintIsolate(isolate_, "%8.0f ms: ", isolate_->time_millis_since_init());
+ PrintIsolate(isolate(), "%8.0f ms: ", isolate()->time_millis_since_init());
PrintF(
"Idle task: requested idle time %.2f ms, used idle time %.2f "
"ms, deadline usage %.2f ms\n",
@@ -127,7 +127,7 @@ void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
void IncrementalMarkingJob::DelayedTask::RunInternal() {
- Heap* heap = isolate_->heap();
+ Heap* heap = isolate()->heap();
job_->NotifyDelayedTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
if (!incremental_marking->IsStopped()) {
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 94d8d946f1..52d0ca4e51 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -47,39 +47,26 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
request_type_(COMPLETE_MARKING) {}
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
- Object* value) {
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- if (chunk->IsLeftOfProgressBar(slot)) {
- WhiteToGreyAndPush(value_heap_obj, value_bit);
- RestartIfNotMarking();
- } else {
- return false;
- }
- } else {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- return false;
- }
- } else {
- return false;
- }
- }
- if (!is_compacting_) return false;
+ DCHECK(!Marking::IsImpossible(value_bit));
+
MarkBit obj_bit = Marking::MarkBitFrom(obj);
- return Marking::IsBlack(obj_bit);
+ DCHECK(!Marking::IsImpossible(obj_bit));
+ bool is_black = Marking::IsBlack(obj_bit);
+
+ if (is_black && Marking::IsWhite(value_bit)) {
+ WhiteToGreyAndPush(value_heap_obj, value_bit);
+ RestartIfNotMarking();
+ }
+ return is_compacting_ && is_black;
}
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
Object* value) {
- if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
+ if (BaseRecordWrite(obj, value) && slot != NULL) {
// Object is not going to be rescanned we need to record the slot.
heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
}
@@ -108,7 +95,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
HeapObject* value) {
if (IsMarking()) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordWriteIntoCode(host, &rinfo, value);
}
}
@@ -119,7 +106,7 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
Code* host = heap_->isolate()
->inner_pointer_to_code_cache()
->GcSafeFindCodeForInnerPointer(pc);
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordWriteIntoCode(host, &rinfo, value);
}
}
@@ -128,7 +115,7 @@ void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
Object** slot,
Code* value) {
- if (BaseRecordWrite(host, slot, value)) {
+ if (BaseRecordWrite(host, value)) {
DCHECK(slot != NULL);
heap_->mark_compact_collector()->RecordCodeEntrySlot(
host, reinterpret_cast<Address>(slot), value);
@@ -139,24 +126,10 @@ void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
RelocInfo* rinfo,
Object* value) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
- if (Marking::IsWhite(value_bit)) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- }
- // Object is either grey or white. It will be scanned if survives.
- return;
- }
-
- if (is_compacting_) {
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
- if (Marking::IsBlack(obj_bit)) {
+ if (BaseRecordWrite(obj, value)) {
// Object is not going to be rescanned. We need to record the slot.
heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
Code::cast(value));
- }
}
}
@@ -624,7 +597,6 @@ void IncrementalMarking::StartMarking() {
void IncrementalMarking::MarkRoots() {
- DCHECK(FLAG_finalize_marking_incrementally);
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -634,7 +606,6 @@ void IncrementalMarking::MarkRoots() {
void IncrementalMarking::MarkObjectGroups() {
- DCHECK(FLAG_finalize_marking_incrementally);
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -647,11 +618,113 @@ void IncrementalMarking::MarkObjectGroups() {
}
+void IncrementalMarking::ProcessWeakCells() {
+ DCHECK(!finalize_marking_completed_);
+ DCHECK(IsMarking());
+
+ Object* the_hole_value = heap()->the_hole_value();
+ Object* weak_cell_obj = heap()->encountered_weak_cells();
+ Object* weak_cell_head = Smi::FromInt(0);
+ WeakCell* prev_weak_cell_obj = NULL;
+ while (weak_cell_obj != Smi::FromInt(0)) {
+ WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+ // We do not insert cleared weak cells into the list, so the value
+ // cannot be a Smi here.
+ HeapObject* value = HeapObject::cast(weak_cell->value());
+ // Remove weak cells with live objects from the list, they do not need
+ // clearing.
+ if (MarkCompactCollector::IsMarked(value)) {
+ // Record slot, if value is pointing to an evacuation candidate.
+ Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+ heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
+ // Remove entry somewhere after top.
+ if (prev_weak_cell_obj != NULL) {
+ prev_weak_cell_obj->set_next(weak_cell->next());
+ }
+ weak_cell_obj = weak_cell->next();
+ weak_cell->clear_next(the_hole_value);
+ } else {
+ if (weak_cell_head == Smi::FromInt(0)) {
+ weak_cell_head = weak_cell;
+ }
+ prev_weak_cell_obj = weak_cell;
+ weak_cell_obj = weak_cell->next();
+ }
+ }
+ // Top may have changed.
+ heap()->set_encountered_weak_cells(weak_cell_head);
+}
+
+
+bool ShouldRetainMap(Map* map, int age) {
+ if (age == 0) {
+ // The map has aged. Do not retain this map.
+ return false;
+ }
+ Object* constructor = map->GetConstructor();
+ if (!constructor->IsHeapObject() ||
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
+ // The constructor is dead, no new objects with this map can
+ // be created. Do not retain this map.
+ return false;
+ }
+ return true;
+}
+
+
+void IncrementalMarking::RetainMaps() {
+ // Do not retain dead maps if flag disables it or there is
+ // - memory pressure (reduce_memory_footprint_),
+ // - GC is requested by tests or dev-tools (abort_incremental_marking_).
+ bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
+ heap()->ShouldAbortIncrementalMarking() ||
+ FLAG_retain_maps_for_n_gc == 0;
+ ArrayList* retained_maps = heap()->retained_maps();
+ int length = retained_maps->Length();
+ // The number_of_disposed_maps separates maps in the retained_maps
+ // array that were created before and after context disposal.
+ // We do not age and retain disposed maps to avoid memory leaks.
+ int number_of_disposed_maps = heap()->number_of_disposed_maps_;
+ for (int i = 0; i < length; i += 2) {
+ DCHECK(retained_maps->Get(i)->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+ if (cell->cleared()) continue;
+ int age = Smi::cast(retained_maps->Get(i + 1))->value();
+ int new_age;
+ Map* map = Map::cast(cell->value());
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
+ Marking::IsWhite(map_mark)) {
+ if (ShouldRetainMap(map, age)) {
+ MarkObject(heap(), map);
+ }
+ Object* prototype = map->prototype();
+ if (age > 0 && prototype->IsHeapObject() &&
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
+ // The prototype is not marked, age the map.
+ new_age = age - 1;
+ } else {
+ // The prototype and the constructor are marked, this map keeps only
+ // transition tree alive, not JSObjects. Do not age the map.
+ new_age = age;
+ }
+ } else {
+ new_age = FLAG_retain_maps_for_n_gc;
+ }
+ // Compact the array and update the age.
+ if (new_age != age) {
+ retained_maps->Set(i + 1, Smi::FromInt(new_age));
+ }
+ }
+}
+
+
void IncrementalMarking::FinalizeIncrementally() {
- DCHECK(FLAG_finalize_marking_incrementally);
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
+
int old_marking_deque_top =
heap_->mark_compact_collector()->marking_deque()->top();
@@ -659,13 +732,34 @@ void IncrementalMarking::FinalizeIncrementally() {
// objects to reduce the marking load in the final pause.
// 1) We scan and mark the roots again to find all changes to the root set.
// 2) We mark the object groups.
+ // 3) Age and retain maps embedded in optimized code.
+ // 4) Remove weak cell with live values from the list of weak cells, they
+ // do not need processing during GC.
MarkRoots();
MarkObjectGroups();
+ if (incremental_marking_finalization_rounds_ == 0) {
+ // Map retaining is needed for perfromance, not correctness,
+ // so we can do it only once at the beginning of the finalization.
+ RetainMaps();
+ }
+ ProcessWeakCells();
int marking_progress =
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
+ double delta = end - start;
+ heap_->tracer()->AddMarkingTime(delta);
+ heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
+ if (FLAG_trace_incremental_marking) {
+ PrintF(
+ "[IncrementalMarking] Finalize incrementally round %d, "
+ "spent %d ms, marking progress %d.\n",
+ static_cast<int>(delta), incremental_marking_finalization_rounds_,
+ marking_progress);
+ }
+
++incremental_marking_finalization_rounds_;
if ((incremental_marking_finalization_rounds_ >=
FLAG_max_incremental_marking_finalization_rounds) ||
@@ -794,7 +888,7 @@ void IncrementalMarking::Hurry() {
if (state() == MARKING) {
double start = 0.0;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- start = base::OS::TimeCurrentMillis();
+ start = heap_->MonotonicallyIncreasingTimeInMs();
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Hurry\n");
}
@@ -804,7 +898,7 @@ void IncrementalMarking::Hurry() {
ProcessMarkingDeque();
state_ = COMPLETE;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
- double end = base::OS::TimeCurrentMillis();
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
heap_->tracer()->AddMarkingTime(delta);
if (FLAG_trace_incremental_marking) {
@@ -886,7 +980,6 @@ void IncrementalMarking::Finalize() {
void IncrementalMarking::FinalizeMarking(CompletionAction action) {
- DCHECK(FLAG_finalize_marking_incrementally);
DCHECK(!finalize_marking_completed_);
if (FLAG_trace_incremental_marking) {
PrintF(
@@ -1066,7 +1159,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
{
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
- double start = base::OS::TimeCurrentMillis();
+ double start = heap_->MonotonicallyIncreasingTimeInMs();
// The marking speed is driven either by the allocation rate or by the rate
// at which we are having to check the color of objects in the write
@@ -1087,7 +1180,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (state_ == SWEEPING) {
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(heap_->mark_compact_collector()->IsSweepingCompleted() ||
- !heap_->concurrent_sweeping_enabled())) {
+ !FLAG_concurrent_sweeping)) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -1099,8 +1192,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
- if (FLAG_finalize_marking_incrementally &&
- !finalize_marking_completed_) {
+ if (!finalize_marking_completed_) {
FinalizeMarking(action);
} else {
MarkingComplete(action);
@@ -1117,7 +1209,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
// with marking.
SpeedUp();
- double end = base::OS::TimeCurrentMillis();
+ double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
// Note that we report zero bytes here when sweeping was in progress or
// when we just started incremental marking. In these cases we did not
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 3ab0f8d6c4..be630213ac 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -159,7 +159,7 @@ class IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
- INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+ INLINE(bool BaseRecordWrite(HeapObject* obj, Object* value));
INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
Object* value));
@@ -221,7 +221,7 @@ class IncrementalMarking {
: InlineAllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
- virtual void Step(int bytes_allocated) {
+ void Step(int bytes_allocated, Address, size_t) override {
incremental_marking_.Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
}
@@ -240,6 +240,10 @@ class IncrementalMarking {
void MarkRoots();
void MarkObjectGroups();
+ void ProcessWeakCells();
+ // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
+ // increase chances of reusing of map transition tree in future.
+ void RetainMaps();
void ActivateIncrementalWriteBarrier(PagedSpace* space);
static void ActivateIncrementalWriteBarrier(NewSpace* space);
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index a539c64b14..a59d36bfa1 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -140,6 +140,55 @@ void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
+
+template <LiveObjectIterationMode T>
+HeapObject* LiveObjectIterator<T>::Next() {
+ while (!it_.Done()) {
+ HeapObject* object = nullptr;
+ while (current_cell_ != 0) {
+ uint32_t trailing_zeros = base::bits::CountTrailingZeros32(current_cell_);
+ Address addr = cell_base_ + trailing_zeros * kPointerSize;
+
+ // Clear the first bit of the found object..
+ current_cell_ &= ~(1u << trailing_zeros);
+
+ uint32_t second_bit_index = 0;
+ if (trailing_zeros < Bitmap::kBitIndexMask) {
+ second_bit_index = 1u << (trailing_zeros + 1);
+ } else {
+ second_bit_index = 0x1;
+ // The overlapping case; there has to exist a cell after the current
+ // cell.
+ DCHECK(!it_.Done());
+ it_.Advance();
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ }
+ if (T == kBlackObjects && (current_cell_ & second_bit_index)) {
+ object = HeapObject::FromAddress(addr);
+ } else if (T == kGreyObjects && !(current_cell_ & second_bit_index)) {
+ object = HeapObject::FromAddress(addr);
+ } else if (T == kAllLiveObjects) {
+ object = HeapObject::FromAddress(addr);
+ }
+ // Clear the second bit of the found object.
+ current_cell_ &= ~second_bit_index;
+
+ // We found a live object.
+ if (object != nullptr) break;
+ }
+ if (current_cell_ == 0) {
+ if (!it_.Done()) {
+ it_.Advance();
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ }
+ }
+ if (object != nullptr) return object;
+ }
+ return nullptr;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index ffda9f159d..65bfdd92d8 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -33,8 +33,8 @@ namespace internal {
const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kBlackBitPattern = "11";
+const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";
@@ -59,14 +59,13 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
heap_(heap),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
- code_flusher_(NULL),
+ code_flusher_(nullptr),
have_code_to_deoptimize_(false),
compacting_(false),
sweeping_in_progress_(false),
compaction_in_progress_(false),
pending_sweeper_tasks_semaphore_(0),
- pending_compaction_tasks_semaphore_(0),
- concurrent_compaction_tasks_active_(0) {
+ pending_compaction_tasks_semaphore_(0) {
}
#ifdef VERIFY_HEAP
@@ -116,6 +115,8 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
+ // The next word for sure belongs to the current object, jump over it.
+ current += kPointerSize;
}
}
}
@@ -237,12 +238,24 @@ static void VerifyEvacuation(Heap* heap) {
void MarkCompactCollector::SetUp() {
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
free_list_old_space_.Reset(new FreeList(heap_->old_space()));
free_list_code_space_.Reset(new FreeList(heap_->code_space()));
free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
slots_buffer_allocator_ = new SlotsBufferAllocator();
+
+ if (FLAG_flush_code) {
+ code_flusher_ = new CodeFlusher(isolate());
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing is now on]\n");
+ }
+ }
}
@@ -250,6 +263,7 @@ void MarkCompactCollector::TearDown() {
AbortCompaction();
delete marking_deque_memory_;
delete slots_buffer_allocator_;
+ delete code_flusher_;
}
@@ -299,19 +313,24 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_STORE_BUFFER_CLEAR);
+ GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
heap_->store_buffer()->ClearInvalidStoreBufferEntries();
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_SLOTS_BUFFER_CLEAR);
+ GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
int number_of_pages = evacuation_candidates_.length();
for (int i = 0; i < number_of_pages; i++) {
Page* p = evacuation_candidates_[i];
SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
}
}
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ VerifyValidStoreAndSlotsBufferEntries();
+ }
+#endif
}
@@ -350,41 +369,19 @@ void MarkCompactCollector::CollectGarbage() {
DCHECK(heap_->incremental_marking()->IsStopped());
- // ClearNonLiveReferences can deoptimize code in dependent code arrays.
- // Process weak cells before so that weak cells in dependent code
- // arrays are cleared or contain only live code objects.
- ProcessAndClearWeakCells();
-
ClearNonLiveReferences();
- ClearWeakCollections();
-
- heap_->set_encountered_weak_cells(Smi::FromInt(0));
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarking(heap_);
}
#endif
- ClearInvalidStoreAndSlotsBufferEntries();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyValidStoreAndSlotsBufferEntries();
- }
-#endif
-
SweepSpaces();
- Finish();
+ EvacuateNewSpaceAndCandidates();
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- DCHECK(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
+ Finish();
}
@@ -481,24 +478,24 @@ void MarkCompactCollector::ClearMarkbits() {
}
-class MarkCompactCollector::CompactionTask : public v8::Task {
+class MarkCompactCollector::CompactionTask : public CancelableTask {
public:
explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
- : heap_(heap), spaces_(spaces) {}
+ : CancelableTask(heap->isolate()), spaces_(spaces) {}
virtual ~CompactionTask() {}
private:
- // v8::Task overrides.
- void Run() override {
- MarkCompactCollector* mark_compact = heap_->mark_compact_collector();
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override {
+ MarkCompactCollector* mark_compact =
+ isolate()->heap()->mark_compact_collector();
SlotsBuffer* evacuation_slots_buffer = nullptr;
mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
mark_compact->pending_compaction_tasks_semaphore_.Signal();
}
- Heap* heap_;
CompactionSpaceCollection* spaces_;
DISALLOW_COPY_AND_ASSIGN(CompactionTask);
@@ -556,7 +553,7 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
- if (heap()->concurrent_sweeping_enabled() && !IsSweepingCompleted()) {
+ if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(space->identity()), 0);
space->RefillFreeList();
}
@@ -568,13 +565,13 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
- if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
+ if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
- if (heap()->concurrent_sweeping_enabled()) {
+ if (FLAG_concurrent_sweeping) {
pending_sweeper_tasks_semaphore_.Wait();
pending_sweeper_tasks_semaphore_.Wait();
pending_sweeper_tasks_semaphore_.Wait();
@@ -661,6 +658,48 @@ const char* AllocationSpaceName(AllocationSpace space) {
}
+void MarkCompactCollector::ComputeEvacuationHeuristics(
+ int area_size, int* target_fragmentation_percent,
+ int* max_evacuated_bytes) {
+ // For memory reducing mode we directly define both constants.
+ const int kTargetFragmentationPercentForReduceMemory = 20;
+ const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+
+ // For regular mode (which is latency critical) we define less aggressive
+ // defaults to start and switch to a trace-based (using compaction speed)
+ // approach as soon as we have enough samples.
+ const int kTargetFragmentationPercent = 70;
+ const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+ // Time to take for a single area (=payload of page). Used as soon as there
+ // exist enough compaction speed samples.
+ const int kTargetMsPerArea = 1;
+
+ if (heap()->ShouldReduceMemory()) {
+ *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+ *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+ } else {
+ const intptr_t estimated_compaction_speed =
+ heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ if (estimated_compaction_speed != 0) {
+ // Estimate the target fragmentation based on traced compaction speed
+ // and a goal for a single page.
+ const intptr_t estimated_ms_per_area =
+ 1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
+ *target_fragmentation_percent =
+ 100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+ if (*target_fragmentation_percent <
+ kTargetFragmentationPercentForReduceMemory) {
+ *target_fragmentation_percent =
+ kTargetFragmentationPercentForReduceMemory;
+ }
+ } else {
+ *target_fragmentation_percent = kTargetFragmentationPercent;
+ }
+ *max_evacuated_bytes = kMaxEvacuatedBytes;
+ }
+}
+
+
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
@@ -668,7 +707,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int area_size = space->AreaSize();
// Pairs of (live_bytes_in_page, page).
- std::vector<std::pair<int, Page*> > pages;
+ typedef std::pair<int, Page*> LiveBytesPagePair;
+ std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
PageIterator it(space);
@@ -694,7 +734,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int candidate_count = 0;
int total_live_bytes = 0;
- bool reduce_memory = heap()->ShouldReduceMemory();
+ const bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
for (size_t i = 0; i < pages.size(); i++) {
Page* p = pages[i].second;
@@ -715,23 +755,25 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
}
} else {
- const int kTargetFragmentationPercent = 50;
- const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
-
- const int kTargetFragmentationPercentForReduceMemory = 20;
- const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
-
+ // The following approach determines the pages that should be evacuated.
+ //
+ // We use two conditions to decide whether a page qualifies as an evacuation
+ // candidate, or not:
+ // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
+ // between live bytes and capacity of this page (= area).
+ // * Evacuation quota: A global quota determining how much bytes should be
+ // compacted.
+ //
+ // The algorithm sorts all pages by live bytes and then iterates through
+ // them starting with the page with the most free memory, adding them to the
+ // set of evacuation candidates as long as both conditions (fragmentation
+ // and quota) hold.
int max_evacuated_bytes;
int target_fragmentation_percent;
+ ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+ &max_evacuated_bytes);
- if (reduce_memory) {
- target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
- max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
- } else {
- target_fragmentation_percent = kTargetFragmentationPercent;
- max_evacuated_bytes = kMaxEvacuatedBytes;
- }
- intptr_t free_bytes_threshold =
+ const intptr_t free_bytes_threshold =
target_fragmentation_percent * (area_size / 100);
// Sort pages from the most free to the least free, then select
@@ -739,25 +781,28 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
// - the total size of evacuated objects does not exceed the specified
// limit.
// - fragmentation of (n+1)-th page does not exceed the specified limit.
- std::sort(pages.begin(), pages.end());
+ std::sort(pages.begin(), pages.end(),
+ [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+ return a.first < b.first;
+ });
for (size_t i = 0; i < pages.size(); i++) {
int live_bytes = pages[i].first;
int free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
- (free_bytes >= free_bytes_threshold &&
- total_live_bytes + live_bytes <= max_evacuated_bytes)) {
+ ((free_bytes >= free_bytes_threshold) &&
+ ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
candidate_count++;
total_live_bytes += live_bytes;
}
if (FLAG_trace_fragmentation_verbose) {
- PrintF(
- "Page in %s: %d KB free [fragmented if this >= %d KB], "
- "sum of live bytes in fragmented pages %d KB [max is %d KB]\n",
- AllocationSpaceName(space->identity()),
- static_cast<int>(free_bytes / KB),
- static_cast<int>(free_bytes_threshold / KB),
- static_cast<int>(total_live_bytes / KB),
- static_cast<int>(max_evacuated_bytes / KB));
+ PrintIsolate(isolate(),
+ "compaction-selection-page: space=%s free_bytes_page=%d "
+ "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
+ "sum_compaction_kb=%d "
+ "compaction_limit_kb=%d\n",
+ AllocationSpaceName(space->identity()), free_bytes / KB,
+ free_bytes_threshold / KB, target_fragmentation_percent,
+ total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocated for the evacuated objects
@@ -766,20 +811,20 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
- if (estimated_released_pages == 0 && !FLAG_always_compact)
+ if ((estimated_released_pages == 0) && !FLAG_always_compact) {
candidate_count = 0;
+ }
for (int i = 0; i < candidate_count; i++) {
AddEvacuationCandidate(pages[i].second);
}
}
if (FLAG_trace_fragmentation) {
- PrintF(
- "Collected %d evacuation candidates [%d KB live] for space %s "
- "[mode %s]\n",
- candidate_count, static_cast<int>(total_live_bytes / KB),
- AllocationSpaceName(space->identity()),
- (reduce_memory ? "reduce memory footprint" : "normal"));
+ PrintIsolate(isolate(),
+ "compaction-selection: space=%s reduce_memory=%d pages=%d "
+ "total_live_bytes=%d\n",
+ AllocationSpaceName(space->identity()), reduce_memory,
+ candidate_count, total_live_bytes / KB);
}
}
@@ -825,6 +870,7 @@ void MarkCompactCollector::Prepare() {
ClearMarkbits();
AbortWeakCollections();
AbortWeakCells();
+ AbortTransitionArrays();
AbortCompaction();
was_marked_incrementally_ = false;
}
@@ -850,10 +896,21 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+
+ // The hashing of weak_object_to_code_table is no longer valid.
+ heap()->weak_object_to_code_table()->Rehash(
+ heap()->isolate()->factory()->undefined_value());
+
+ // Clear the marking state of live large objects.
+ heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
+ heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
// The stub cache is not traversed during GC; clear the cache to
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
@@ -867,6 +924,13 @@ void MarkCompactCollector::Finish() {
}
heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
+
+ if (marking_parity_ == EVEN_MARKING_PARITY) {
+ marking_parity_ = ODD_MARKING_PARITY;
+ } else {
+ DCHECK(marking_parity_ == ODD_MARKING_PARITY);
+ marking_parity_ = EVEN_MARKING_PARITY;
+ }
}
@@ -919,7 +983,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
PrintF(" - age: %d]\n", code->GetAge());
}
// Always flush the optimized code map if there is one.
- if (!shared->optimized_code_map()->IsSmi()) {
+ if (!shared->OptimizedCodeMapIsCleared()) {
shared->ClearOptimizedCodeMap();
}
shared->set_code(lazy_compile);
@@ -966,7 +1030,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
PrintF(" - age: %d]\n", code->GetAge());
}
// Always flush the optimized code map if there is one.
- if (!candidate->optimized_code_map()->IsSmi()) {
+ if (!candidate->OptimizedCodeMapIsCleared()) {
candidate->ClearOptimizedCodeMap();
}
candidate->set_code(lazy_compile);
@@ -1054,30 +1118,6 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
}
-void CodeFlusher::EvictJSFunctionCandidates() {
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- DCHECK(jsfunction_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- DCHECK(shared_function_info_candidates_head_ == NULL);
-}
-
-
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
Heap* heap = isolate_->heap();
@@ -1093,14 +1133,6 @@ void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
}
-MarkCompactCollector::~MarkCompactCollector() {
- if (code_flusher_ != NULL) {
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
@@ -1455,8 +1487,9 @@ typedef StringTableCleaner<true> ExternalStringTableCleaner;
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- if (Marking::IsBlackOrGrey(
- Marking::MarkBitFrom(HeapObject::cast(object)))) {
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
+ DCHECK(!Marking::IsGrey(mark_bit));
+ if (Marking::IsBlack(mark_bit)) {
return object;
} else if (object->IsAllocationSite() &&
!(AllocationSite::cast(object)->IsZombie())) {
@@ -1494,117 +1527,224 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
}
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
DCHECK(!marking_deque()->IsFull());
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ LiveObjectIterator<kGreyObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ MarkBit markbit = Marking::MarkBitFrom(object);
+ DCHECK(Marking::IsGrey(markbit));
+ Marking::GreyToBlack(markbit);
+ PushBlack(object);
+ if (marking_deque()->IsFull()) return;
+ }
+}
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
- const MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+ virtual ~HeapObjectVisitor() {}
+ virtual bool Visit(HeapObject* object) = 0;
+};
- MarkBit::CellType grey_objects;
- if (it.HasNext()) {
- const MarkBit::CellType next_cell = *(cell + 1);
- grey_objects = current_cell & ((current_cell >> 1) |
- (next_cell << (Bitmap::kBitsPerCell - 1)));
- } else {
- grey_objects = current_cell & (current_cell >> 1);
- }
- int offset = 0;
- while (grey_objects != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
- grey_objects >>= trailing_zeros;
- offset += trailing_zeros;
- MarkBit markbit(cell, 1 << offset);
- DCHECK(Marking::IsGrey(markbit));
- Marking::GreyToBlack(markbit);
- Address addr = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(addr);
- PushBlack(object);
- if (marking_deque()->IsFull()) return;
- offset += 2;
- grey_objects >>= 2;
+class MarkCompactCollector::EvacuateVisitorBase
+ : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+ EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
+ : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+ bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+ HeapObject** target_object) {
+ int size = object->Size();
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+ if (allocation.To(target_object)) {
+ heap_->mark_compact_collector()->MigrateObject(
+ *target_object, object, size, target_space->identity(),
+ evacuation_slots_buffer_);
+ return true;
}
-
- grey_objects >>= (Bitmap::kBitsPerCell - 1);
+ return false;
}
-}
+ protected:
+ Heap* heap_;
+ SlotsBuffer** evacuation_slots_buffer_;
+};
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
- NewSpace* new_space, NewSpacePage* p) {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- MarkBit::CellType* cells = p->markbits()->cells();
- int survivors_size = 0;
-
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
+ : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+ static const intptr_t kLabSize = 4 * KB;
+ static const intptr_t kMaxLabObjectSize = 256;
+
+ explicit EvacuateNewSpaceVisitor(Heap* heap,
+ SlotsBuffer** evacuation_slots_buffer,
+ HashMap* local_pretenuring_feedback)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ buffer_(LocalAllocationBuffer::InvalidBuffer()),
+ space_to_allocate_(NEW_SPACE),
+ promoted_size_(0),
+ semispace_copied_size_(0),
+ local_pretenuring_feedback_(local_pretenuring_feedback) {}
+
+ bool Visit(HeapObject* object) override {
+ heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
+ int size = object->Size();
+ HeapObject* target_object = nullptr;
+ if (heap_->ShouldBePromoted(object->address(), size) &&
+ TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+ // If we end up needing more special cases, we should factor this out.
+ if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
+ heap_->array_buffer_tracker()->Promote(
+ JSArrayBuffer::cast(target_object));
+ }
+ promoted_size_ += size;
+ return true;
+ }
+ HeapObject* target = nullptr;
+ AllocationSpace space = AllocateTargetObject(object, &target);
+ heap_->mark_compact_collector()->MigrateObject(
+ HeapObject::cast(target), object, size, space,
+ (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+ if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+ heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+ }
+ semispace_copied_size_ += size;
+ return true;
+ }
- MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
+ intptr_t promoted_size() { return promoted_size_; }
+ intptr_t semispace_copied_size() { return semispace_copied_size_; }
- int offset = 0;
- while (current_cell != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
- current_cell >>= trailing_zeros;
- offset += trailing_zeros;
- Address address = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(address);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ private:
+ enum NewSpaceAllocationMode {
+ kNonstickyBailoutOldSpace,
+ kStickyBailoutOldSpace,
+ };
+
+ inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+ HeapObject** target_object) {
+ const int size = old_object->Size();
+ AllocationAlignment alignment = old_object->RequiredAlignment();
+ AllocationResult allocation;
+ if (space_to_allocate_ == NEW_SPACE) {
+ if (size > kMaxLabObjectSize) {
+ allocation =
+ AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
+ } else {
+ allocation = AllocateInLab(size, alignment);
+ }
+ }
+ if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
+ allocation = AllocateInOldSpace(size, alignment);
+ }
+ bool ok = allocation.To(target_object);
+ DCHECK(ok);
+ USE(ok);
+ return space_to_allocate_;
+ }
- int size = object->Size();
- survivors_size += size;
+ inline bool NewLocalAllocationBuffer() {
+ AllocationResult result =
+ AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
+ LocalAllocationBuffer saved_old_buffer = buffer_;
+ buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (buffer_.IsValid()) {
+ buffer_.TryMerge(&saved_old_buffer);
+ return true;
+ }
+ return false;
+ }
- Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+ inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+ AllocationAlignment alignment,
+ NewSpaceAllocationMode mode) {
+ AllocationResult allocation =
+ heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ if (!heap_->new_space()->AddFreshPageSynchronized()) {
+ if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+ } else {
+ allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+ alignment);
+ if (allocation.IsRetry()) {
+ if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+ }
+ }
+ }
+ return allocation;
+ }
- offset += 2;
- current_cell >>= 2;
+ inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+ AllocationAlignment alignment) {
+ AllocationResult allocation =
+ heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ FatalProcessOutOfMemory(
+ "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+ }
+ return allocation;
+ }
- // TODO(hpayer): Refactor EvacuateObject and call this function instead.
- if (heap()->ShouldBePromoted(object->address(), size) &&
- TryPromoteObject(object, size)) {
- continue;
+ inline AllocationResult AllocateInLab(int size_in_bytes,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!buffer_.IsValid()) {
+ if (!NewLocalAllocationBuffer()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
}
-
- AllocationAlignment alignment = object->RequiredAlignment();
- AllocationResult allocation = new_space->AllocateRaw(size, alignment);
- if (allocation.IsRetry()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room unless we are in an OOM situation.
- FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
+ }
+ allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
}
- allocation = new_space->AllocateRaw(size, alignment);
- DCHECK(!allocation.IsRetry());
}
- Object* target = allocation.ToObjectChecked();
+ }
+ return allocation;
+ }
- MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
- }
- heap()->IncrementSemiSpaceCopiedObjectSize(size);
+ LocalAllocationBuffer buffer_;
+ AllocationSpace space_to_allocate_;
+ intptr_t promoted_size_;
+ intptr_t semispace_copied_size_;
+ HashMap* local_pretenuring_feedback_;
+};
+
+
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
+ : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+ EvacuateOldSpaceVisitor(Heap* heap,
+ CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ compaction_spaces_(compaction_spaces) {}
+
+ bool Visit(HeapObject* object) override {
+ CompactionSpace* target_space = compaction_spaces_->Get(
+ Page::FromAddress(object->address())->owner()->identity());
+ HeapObject* target_object = nullptr;
+ if (TryEvacuateObject(target_space, object, &target_object)) {
+ DCHECK(object->map_word().IsForwardingAddress());
+ return true;
}
- *cells = 0;
+ return false;
}
- return survivors_size;
-}
+
+ private:
+ CompactionSpaceCollection* compaction_spaces_;
+};
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
@@ -1809,7 +1949,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
- code->CodeIterateBody(visitor);
+ Code::BodyDescriptor::IterateBody(code, visitor);
}
ProcessMarkingDeque();
return;
@@ -1818,71 +1958,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
-void MarkCompactCollector::RetainMaps() {
- if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() ||
- FLAG_retain_maps_for_n_gc == 0) {
- // Do not retain dead maps if flag disables it or there is
- // - memory pressure (reduce_memory_footprint_),
- // - GC is requested by tests or dev-tools (abort_incremental_marking_).
- return;
- }
-
- ArrayList* retained_maps = heap()->retained_maps();
- int length = retained_maps->Length();
- int new_length = 0;
- for (int i = 0; i < length; i += 2) {
- DCHECK(retained_maps->Get(i)->IsWeakCell());
- WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
- if (cell->cleared()) continue;
- int age = Smi::cast(retained_maps->Get(i + 1))->value();
- int new_age;
- Map* map = Map::cast(cell->value());
- MarkBit map_mark = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark)) {
- if (age == 0) {
- // The map has aged. Do not retain this map.
- continue;
- }
- Object* constructor = map->GetConstructor();
- if (!constructor->IsHeapObject() || Marking::IsWhite(Marking::MarkBitFrom(
- HeapObject::cast(constructor)))) {
- // The constructor is dead, no new objects with this map can
- // be created. Do not retain this map.
- continue;
- }
- Object* prototype = map->prototype();
- if (prototype->IsHeapObject() &&
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
- // The prototype is not marked, age the map.
- new_age = age - 1;
- } else {
- // The prototype and the constructor are marked, this map keeps only
- // transition tree alive, not JSObjects. Do not age the map.
- new_age = age;
- }
- MarkObject(map, map_mark);
- } else {
- new_age = FLAG_retain_maps_for_n_gc;
- }
- if (i != new_length) {
- retained_maps->Set(new_length, cell);
- Object** slot = retained_maps->Slot(new_length);
- RecordSlot(retained_maps, slot, cell);
- retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
- } else if (new_age != age) {
- retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
- }
- new_length += 2;
- }
- Object* undefined = heap()->undefined_value();
- for (int i = new_length; i < length; i++) {
- retained_maps->Clear(i, undefined);
- }
- if (new_length != length) retained_maps->SetLength(new_length);
- ProcessMarkingDeque();
-}
-
-
void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
DCHECK(!marking_deque_.in_use());
if (marking_deque_memory_ == NULL) {
@@ -1968,7 +2043,7 @@ void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
+ start_time = heap_->MonotonicallyIncreasingTimeInMs();
}
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
@@ -2007,24 +2082,11 @@ void MarkCompactCollector::MarkLiveObjects() {
RootMarkingVisitor root_visitor(heap());
{
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOT);
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
MarkRoots(&root_visitor);
- }
-
- {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_TOPOPT);
ProcessTopOptimizedFrame(&root_visitor);
}
- // Retaining dying maps should happen before or during ephemeral marking
- // because a map could keep the key of an ephemeron alive. Note that map
- // aging is imprecise: maps that are kept alive only by ephemerons will age.
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_RETAIN_MAPS);
- RetainMaps();
- }
-
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
@@ -2055,18 +2117,25 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessEphemeralMarking(&root_visitor, true);
}
- AfterMarking();
-
if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+ heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
+ start_time);
+ }
+ if (FLAG_track_gc_object_stats) {
+ if (FLAG_trace_gc_object_stats) {
+ heap()->object_stats_->TraceObjectStats();
+ }
+ heap()->object_stats_->CheckpointObjectStats();
}
}
-void MarkCompactCollector::AfterMarking() {
+void MarkCompactCollector::ClearNonLiveReferences() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
+
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_STRING_TABLE);
+ GCTracer::Scope::MC_CLEAR_STRING_TABLE);
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
@@ -2083,8 +2152,7 @@ void MarkCompactCollector::AfterMarking() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_WEAK_REFERENCES);
-
+ GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
@@ -2092,7 +2160,7 @@ void MarkCompactCollector::AfterMarking() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_GLOBAL_HANDLES);
+ GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
// Remove object groups after marking phase.
heap()->isolate()->global_handles()->RemoveObjectGroups();
@@ -2102,115 +2170,39 @@ void MarkCompactCollector::AfterMarking() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_CODE_FLUSH);
+ GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
code_flusher_->ProcessCandidates();
}
- // Process and clear all optimized code maps.
- if (!FLAG_flush_optimized_code_cache) {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_MARK_OPTIMIZED_CODE_MAPS);
- ProcessAndClearOptimizedCodeMaps();
- }
- if (FLAG_track_gc_object_stats) {
- if (FLAG_trace_gc_object_stats) {
- heap()->object_stats_->TraceObjectStats();
- }
- heap()->object_stats_->CheckpointObjectStats();
- }
-}
+ DependentCode* dependent_code_list;
+ Object* non_live_map_list;
+ ClearWeakCells(&non_live_map_list, &dependent_code_list);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
+ ClearSimpleMapTransitions(non_live_map_list);
+ ClearFullMapTransitions();
+ }
-void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() {
- SharedFunctionInfo::Iterator iterator(isolate());
- while (SharedFunctionInfo* shared = iterator.Next()) {
- if (shared->optimized_code_map()->IsSmi()) continue;
-
- // Process context-dependent entries in the optimized code map.
- FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
- int new_length = SharedFunctionInfo::kEntriesStart;
- int old_length = code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
- i += SharedFunctionInfo::kEntryLength) {
- // Each entry contains [ context, code, literals, ast-id ] as fields.
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
- Context* context =
- Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
- HeapObject* code = HeapObject::cast(
- code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
- FixedArray* literals = FixedArray::cast(
- code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
- Smi* ast_id =
- Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
- if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
- if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
- if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
- // Move every slot in the entry and record slots when needed.
- code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
- code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
- code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
- code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
- Object** code_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kCachedCodeOffset);
- RecordSlot(code_map, code_slot, *code_slot);
- Object** context_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kContextOffset);
- RecordSlot(code_map, context_slot, *context_slot);
- Object** literals_slot = code_map->RawFieldOfElementAt(
- new_length + SharedFunctionInfo::kLiteralsOffset);
- RecordSlot(code_map, literals_slot, *literals_slot);
- new_length += SharedFunctionInfo::kEntryLength;
- }
+ MarkDependentCodeForDeoptimization(dependent_code_list);
- // Process context-independent entry in the optimized code map.
- Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
- if (shared_object->IsCode()) {
- Code* shared_code = Code::cast(shared_object);
- if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
- code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
- } else {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
- Object** slot =
- code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
- RecordSlot(code_map, slot, *slot);
- }
- }
+ ClearWeakCollections();
- // Trim the optimized code map if entries have been removed.
- if (new_length < old_length) {
- shared->TrimOptimizedCodeMap(old_length - new_length);
- }
- }
+ ClearInvalidStoreAndSlotsBufferEntries();
}
-void MarkCompactCollector::ClearNonLiveReferences() {
+void MarkCompactCollector::MarkDependentCodeForDeoptimization(
+ DependentCode* list_head) {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_NONLIVEREFERENCES);
- // Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. This action
- // is carried out only on maps of JSObjects and related subtypes.
- HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next(); obj != NULL;
- obj = map_iterator.Next()) {
- Map* map = Map::cast(obj);
-
- if (!map->CanTransition()) continue;
-
- MarkBit map_mark = Marking::MarkBitFrom(map);
- ClearNonLivePrototypeTransitions(map);
- ClearNonLiveMapTransitions(map, map_mark);
-
- if (Marking::IsWhite(map_mark)) {
- have_code_to_deoptimize_ |=
- map->dependent_code()->MarkCodeForDeoptimization(
- isolate(), DependentCode::kWeakCodeGroup);
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
- }
+ GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
+ Isolate* isolate = this->isolate();
+ DependentCode* current = list_head;
+ while (current->length() > 0) {
+ have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
+ isolate, DependentCode::kWeakCodeGroup);
+ current = current->next_link();
}
WeakHashTable* table = heap_->weak_object_to_code_table();
@@ -2225,7 +2217,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (WeakCell::cast(key)->cleared()) {
have_code_to_deoptimize_ |=
DependentCode::cast(value)->MarkCodeForDeoptimization(
- isolate(), DependentCode::kWeakCodeGroup);
+ isolate, DependentCode::kWeakCodeGroup);
table->set(key_index, heap_->the_hole_value());
table->set(value_index, heap_->the_hole_value());
table->ElementRemoved();
@@ -2234,165 +2226,145 @@ void MarkCompactCollector::ClearNonLiveReferences() {
}
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
- FixedArray* prototype_transitions =
- TransitionArray::GetPrototypeTransitions(map);
- int number_of_transitions =
- TransitionArray::NumberOfPrototypeTransitions(prototype_transitions);
-
- const int header = TransitionArray::kProtoTransitionHeaderSize;
- int new_number_of_transitions = 0;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* cell = prototype_transitions->get(header + i);
- if (!WeakCell::cast(cell)->cleared()) {
- if (new_number_of_transitions != i) {
- prototype_transitions->set(header + new_number_of_transitions, cell);
- Object** slot = prototype_transitions->RawFieldOfElementAt(
- header + new_number_of_transitions);
- RecordSlot(prototype_transitions, slot, cell);
+void MarkCompactCollector::ClearSimpleMapTransitions(
+ Object* non_live_map_list) {
+ Object* the_hole_value = heap()->the_hole_value();
+ Object* weak_cell_obj = non_live_map_list;
+ while (weak_cell_obj != Smi::FromInt(0)) {
+ WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
+ Map* map = Map::cast(weak_cell->value());
+ DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
+ Object* potential_parent = map->constructor_or_backpointer();
+ if (potential_parent->IsMap()) {
+ Map* parent = Map::cast(potential_parent);
+ if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
+ parent->raw_transitions() == weak_cell) {
+ ClearSimpleMapTransition(parent, map);
}
- new_number_of_transitions++;
}
+ weak_cell->clear();
+ weak_cell_obj = weak_cell->next();
+ weak_cell->clear_next(the_hole_value);
}
-
- if (new_number_of_transitions != number_of_transitions) {
- TransitionArray::SetNumberOfPrototypeTransitions(prototype_transitions,
- new_number_of_transitions);
- }
-
- // Fill slots that became free with undefined value.
- for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
- prototype_transitions->set_undefined(header + i);
- }
-}
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
- MarkBit map_mark) {
- Object* potential_parent = map->GetBackPointer();
- if (!potential_parent->IsMap()) return;
- Map* parent = Map::cast(potential_parent);
-
- // Follow back pointer, check whether we are dealing with a map transition
- // from a live map to a dead path and in case clear transitions of parent.
- bool current_is_alive = Marking::IsBlackOrGrey(map_mark);
- bool parent_is_alive = Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
- if (!current_is_alive && parent_is_alive) {
- ClearMapTransitions(parent, map);
- }
-}
-
-
-// Clear a possible back pointer in case the transition leads to a dead map.
-// Return true in case a back pointer has been cleared and false otherwise.
-bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
- if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(target))) return false;
- target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
- return true;
}
-void MarkCompactCollector::ClearMapTransitions(Map* map, Map* dead_transition) {
- Object* transitions = map->raw_transitions();
- int num_transitions = TransitionArray::NumberOfTransitions(transitions);
-
+void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
+ Map* dead_transition) {
+ // A previously existing simple transition (stored in a WeakCell) is going
+ // to be cleared. Clear the useless cell pointer, and take ownership
+ // of the descriptor array.
+ map->set_raw_transitions(Smi::FromInt(0));
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
-
- // A previously existing simple transition (stored in a WeakCell) may have
- // been cleared. Clear the useless cell pointer, and take ownership
- // of the descriptor array.
- if (transitions->IsWeakCell() && WeakCell::cast(transitions)->cleared()) {
- map->set_raw_transitions(Smi::FromInt(0));
- }
- if (num_transitions == 0 &&
- descriptors == dead_transition->instance_descriptors() &&
+ if (descriptors == dead_transition->instance_descriptors() &&
number_of_own_descriptors > 0) {
- TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
+ TrimDescriptorArray(map, descriptors);
DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
map->set_owns_descriptors(true);
- return;
}
+}
- int transition_index = 0;
- bool descriptors_owner_died = false;
+void MarkCompactCollector::ClearFullMapTransitions() {
+ HeapObject* undefined = heap()->undefined_value();
+ Object* obj = heap()->encountered_transition_arrays();
+ while (obj != Smi::FromInt(0)) {
+ TransitionArray* array = TransitionArray::cast(obj);
+ int num_transitions = array->number_of_entries();
+ DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
+ if (num_transitions > 0) {
+ Map* map = array->GetTarget(0);
+ Map* parent = Map::cast(map->constructor_or_backpointer());
+ bool parent_is_alive =
+ Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
+ DescriptorArray* descriptors =
+ parent_is_alive ? parent->instance_descriptors() : nullptr;
+ bool descriptors_owner_died =
+ CompactTransitionArray(parent, array, descriptors);
+ if (descriptors_owner_died) {
+ TrimDescriptorArray(parent, descriptors);
+ }
+ }
+ obj = array->next_link();
+ array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+ }
+ heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
- // Compact all live descriptors to the left.
+bool MarkCompactCollector::CompactTransitionArray(
+ Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+ int num_transitions = transitions->number_of_entries();
+ bool descriptors_owner_died = false;
+ int transition_index = 0;
+ // Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
- Map* target = TransitionArray::GetTarget(transitions, i);
- if (ClearMapBackPointer(target)) {
- if (target->instance_descriptors() == descriptors) {
+ Map* target = transitions->GetTarget(i);
+ DCHECK_EQ(target->constructor_or_backpointer(), map);
+ if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
+ if (descriptors != nullptr &&
+ target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- DCHECK(TransitionArray::IsFullTransitionArray(transitions));
- TransitionArray* t = TransitionArray::cast(transitions);
- Name* key = t->GetKey(i);
- t->SetKey(transition_index, key);
- Object** key_slot = t->GetKeySlot(transition_index);
- RecordSlot(t, key_slot, key);
+ Name* key = transitions->GetKey(i);
+ transitions->SetKey(transition_index, key);
+ Object** key_slot = transitions->GetKeySlot(transition_index);
+ RecordSlot(transitions, key_slot, key);
// Target slots do not need to be recorded since maps are not compacted.
- t->SetTarget(transition_index, t->GetTarget(i));
+ transitions->SetTarget(transition_index, transitions->GetTarget(i));
}
transition_index++;
}
}
-
// If there are no transitions to be cleared, return.
- // TODO(verwaest) Should be an assert, otherwise back pointers are not
- // properly cleared.
- if (transition_index == num_transitions) return;
-
- if (descriptors_owner_died) {
- if (number_of_own_descriptors > 0) {
- TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- map->set_owns_descriptors(true);
- } else {
- DCHECK(descriptors == heap_->empty_descriptor_array());
- }
+ if (transition_index == num_transitions) {
+ DCHECK(!descriptors_owner_died);
+ return false;
}
-
// Note that we never eliminate a transition array, though we might right-trim
// such that number_of_transitions() == 0. If this assumption changes,
// TransitionArray::Insert() will need to deal with the case that a transition
// array disappeared during GC.
int trim = TransitionArray::Capacity(transitions) - transition_index;
if (trim > 0) {
- // Non-full-TransitionArray cases can never reach this point.
- DCHECK(TransitionArray::IsFullTransitionArray(transitions));
- TransitionArray* t = TransitionArray::cast(transitions);
heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- t, trim * TransitionArray::kTransitionSize);
- t->SetNumberOfTransitions(transition_index);
- // The map still has a full transition array.
- DCHECK(TransitionArray::IsFullTransitionArray(map->raw_transitions()));
+ transitions, trim * TransitionArray::kTransitionSize);
+ transitions->SetNumberOfTransitions(transition_index);
}
+ return descriptors_owner_died;
}
void MarkCompactCollector::TrimDescriptorArray(Map* map,
- DescriptorArray* descriptors,
- int number_of_own_descriptors) {
+ DescriptorArray* descriptors) {
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) {
+ DCHECK(descriptors == heap_->empty_descriptor_array());
+ return;
+ }
+
int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
- if (to_trim == 0) return;
-
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- descriptors, to_trim * DescriptorArray::kDescriptorSize);
- descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+ if (to_trim > 0) {
+ heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ descriptors, to_trim * DescriptorArray::kDescriptorSize);
+ descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
- if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
- descriptors->Sort();
+ if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+ descriptors->Sort();
- if (FLAG_unbox_double_fields) {
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
- layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
- number_of_own_descriptors);
- SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ if (FLAG_unbox_double_fields) {
+ LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
+ number_of_own_descriptors);
+ SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ }
}
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ map->set_owns_descriptors(true);
}
@@ -2400,7 +2372,8 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
- live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+ live_enum =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -2419,8 +2392,6 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2447,7 +2418,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
void MarkCompactCollector::ClearWeakCollections() {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+ GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2470,8 +2441,6 @@ void MarkCompactCollector::ClearWeakCollections() {
void MarkCompactCollector::AbortWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2483,11 +2452,21 @@ void MarkCompactCollector::AbortWeakCollections() {
}
-void MarkCompactCollector::ProcessAndClearWeakCells() {
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCELL);
- Object* weak_cell_obj = heap()->encountered_weak_cells();
+void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
+ DependentCode** dependent_code_list) {
+ Heap* heap = this->heap();
+ GCTracer::Scope gc_scope(heap->tracer(),
+ GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
+ Object* weak_cell_obj = heap->encountered_weak_cells();
+ Object* the_hole_value = heap->the_hole_value();
+ DependentCode* dependent_code_head =
+ DependentCode::cast(heap->empty_fixed_array());
+ Object* non_live_map_head = Smi::FromInt(0);
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+ Object* next_weak_cell = weak_cell->next();
+ bool clear_value = true;
+ bool clear_next = true;
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
HeapObject* value = HeapObject::cast(weak_cell->value());
@@ -2508,34 +2487,71 @@ void MarkCompactCollector::ProcessAndClearWeakCells() {
RecordSlot(value, slot, *slot);
slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
RecordSlot(weak_cell, slot, *slot);
- } else {
- weak_cell->clear();
+ clear_value = false;
}
- } else {
- weak_cell->clear();
+ }
+ if (value->IsMap()) {
+ // The map is non-live.
+ Map* map = Map::cast(value);
+ // Add dependent code to the dependent_code_list.
+ DependentCode* candidate = map->dependent_code();
+ // We rely on the fact that the weak code group comes first.
+ STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
+ if (candidate->length() > 0 &&
+ candidate->group() == DependentCode::kWeakCodeGroup) {
+ candidate->set_next_link(dependent_code_head);
+ dependent_code_head = candidate;
+ }
+ // Add the weak cell to the non_live_map list.
+ weak_cell->set_next(non_live_map_head);
+ non_live_map_head = weak_cell;
+ clear_value = false;
+ clear_next = false;
}
} else {
+ // The value of the weak cell is alive.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
RecordSlot(weak_cell, slot, *slot);
+ clear_value = false;
}
- weak_cell_obj = weak_cell->next();
- weak_cell->clear_next(heap());
+ if (clear_value) {
+ weak_cell->clear();
+ }
+ if (clear_next) {
+ weak_cell->clear_next(the_hole_value);
+ }
+ weak_cell_obj = next_weak_cell;
}
- heap()->set_encountered_weak_cells(Smi::FromInt(0));
+ heap->set_encountered_weak_cells(Smi::FromInt(0));
+ *non_live_map_list = non_live_map_head;
+ *dependent_code_list = dependent_code_head;
}
void MarkCompactCollector::AbortWeakCells() {
+ Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
- weak_cell->clear_next(heap());
+ weak_cell->clear_next(the_hole_value);
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
+void MarkCompactCollector::AbortTransitionArrays() {
+ HeapObject* undefined = heap()->undefined_value();
+ Object* obj = heap()->encountered_transition_arrays();
+ while (obj != Smi::FromInt(0)) {
+ TransitionArray* array = TransitionArray::cast(obj);
+ obj = array->next_link();
+ array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+ }
+ heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
+
void MarkCompactCollector::RecordMigratedSlot(
Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
// When parallel compaction is in progress, store and slots buffer entries
@@ -2621,6 +2637,40 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
}
+class RecordMigratedSlotVisitor final : public ObjectVisitor {
+ public:
+ RecordMigratedSlotVisitor(MarkCompactCollector* collector,
+ SlotsBuffer** evacuation_slots_buffer)
+ : collector_(collector),
+ evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+ V8_INLINE void VisitPointer(Object** p) override {
+ collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
+ evacuation_slots_buffer_);
+ }
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ while (start < end) {
+ collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
+ evacuation_slots_buffer_);
+ ++start;
+ }
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ if (collector_->compacting_) {
+ Address code_entry = Memory::Address_at(code_entry_slot);
+ collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
+ evacuation_slots_buffer_);
+ }
+ }
+
+ private:
+ MarkCompactCollector* collector_;
+ SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
@@ -2646,26 +2696,10 @@ void MarkCompactCollector::MigrateObject(
DCHECK_OBJECT_SIZE(size);
DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
- switch (src->ContentType()) {
- case HeapObjectContents::kTaggedValues:
- MigrateObjectTagged(dst, src, size, evacuation_slots_buffer);
- break;
-
- case HeapObjectContents::kMixedValues:
- MigrateObjectMixed(dst, src, size, evacuation_slots_buffer);
- break;
-
- case HeapObjectContents::kRawValues:
- MigrateObjectRaw(dst, src, size);
- break;
- }
- if (compacting_ && dst->IsJSFunction()) {
- Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
- Address code_entry = Memory::Address_at(code_entry_slot);
- RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
- evacuation_slots_buffer);
- }
+ heap()->MoveBlock(dst->address(), src->address(), size);
+ RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+ dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
DCHECK(evacuation_slots_buffer != nullptr);
@@ -2684,101 +2718,16 @@ void MarkCompactCollector::MigrateObject(
}
-void MarkCompactCollector::MigrateObjectTagged(
- HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer) {
- Address src_slot = src->address();
- Address dst_slot = dst->address();
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
- Memory::Object_at(dst_slot) = value;
- RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
- }
-}
-
-
-void MarkCompactCollector::MigrateObjectMixed(
- HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer) {
- if (src->IsFixedTypedArrayBase()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
- Address base_pointer_slot =
- dst->address() + FixedTypedArrayBase::kBasePointerOffset;
- RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot,
- evacuation_slots_buffer);
- } else if (src->IsBytecodeArray()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
- Address constant_pool_slot =
- dst->address() + BytecodeArray::kConstantPoolOffset;
- RecordMigratedSlot(Memory::Object_at(constant_pool_slot),
- constant_pool_slot, evacuation_slots_buffer);
- } else if (src->IsJSArrayBuffer()) {
- heap()->MoveBlock(dst->address(), src->address(), size);
-
- // Visit inherited JSObject properties and byte length of ArrayBuffer
- Address regular_slot =
- dst->address() + JSArrayBuffer::BodyDescriptor::kStartOffset;
- Address regular_slots_end =
- dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize;
- while (regular_slot < regular_slots_end) {
- RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot,
- evacuation_slots_buffer);
- regular_slot += kPointerSize;
- }
-
- // Skip backing store and visit just internal fields
- Address internal_field_slot = dst->address() + JSArrayBuffer::kSize;
- Address internal_fields_end =
- dst->address() + JSArrayBuffer::kSizeWithInternalFields;
- while (internal_field_slot < internal_fields_end) {
- RecordMigratedSlot(Memory::Object_at(internal_field_slot),
- internal_field_slot, evacuation_slots_buffer);
- internal_field_slot += kPointerSize;
- }
- } else if (FLAG_unbox_double_fields) {
- Address dst_addr = dst->address();
- Address src_addr = src->address();
- Address src_slot = src_addr;
- Address dst_slot = dst_addr;
-
- LayoutDescriptorHelper helper(src->map());
- DCHECK(!helper.all_fields_tagged());
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
- if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
- RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
- }
-
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
-void MarkCompactCollector::MigrateObjectRaw(HeapObject* dst, HeapObject* src,
- int size) {
- heap()->MoveBlock(dst->address(), src->address(), size);
-}
-
-
static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
SlotsBuffer::SlotType slot_type, Address addr) {
switch (slot_type) {
case SlotsBuffer::CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::CELL_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CELL, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
@@ -2788,16 +2737,17 @@ static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
}
case SlotsBuffer::RELOCATED_CODE_OBJECT: {
HeapObject* obj = HeapObject::FromAddress(addr);
- Code::cast(obj)->CodeIterateBody(v);
+ Code::BodyDescriptor::IterateBody(obj, v);
break;
}
case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
+ NULL);
if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
break;
}
case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
rinfo.Visit(isolate, v);
break;
}
@@ -2958,28 +2908,6 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
}
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
- int object_size) {
- OldSpace* old_space = heap()->old_space();
-
- HeapObject* target = nullptr;
- AllocationAlignment alignment = object->RequiredAlignment();
- AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
- if (allocation.To(&target)) {
- MigrateObject(target, object, object_size, old_space->identity(),
- &migration_slots_buffer_);
- // If we end up needing more special cases, we should factor this out.
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
- }
- heap()->IncrementPromotedObjectsSize(object_size);
- return true;
- }
-
- return false;
-}
-
-
bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
HeapObject** out_object) {
Space* owner = p->owner();
@@ -2997,68 +2925,77 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
}
uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
- unsigned int start_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType index_in_cell = 1U
- << (mark_bit_index & Bitmap::kBitIndexMask);
+ unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
MarkBit::CellType* cells = p->markbits()->cells();
- Address cell_base = p->area_start();
- unsigned int cell_base_start_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(cell_base)));
+ Address base_address = p->area_start();
+ unsigned int base_address_cell_index = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
// Check if the slot points to the start of an object. This can happen e.g.
// when we left trim a fixed array. Such slots are invalid and we can remove
// them.
- if ((cells[start_index] & index_in_cell) != 0) {
- return false;
+ if (index_mask > 1) {
+ if ((cells[cell_index] & index_mask) != 0 &&
+ (cells[cell_index] & (index_mask >> 1)) == 0) {
+ return false;
+ }
+ } else {
+ // Left trimming moves the mark bits so we cannot be in the very first cell.
+ DCHECK(cell_index != base_address_cell_index);
+ if ((cells[cell_index] & index_mask) != 0 &&
+ (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
+ return false;
+ }
}
// Check if the object is in the current cell.
MarkBit::CellType slot_mask;
- if ((cells[start_index] == 0) ||
- (base::bits::CountTrailingZeros32(cells[start_index]) >
- base::bits::CountTrailingZeros32(cells[start_index] | index_in_cell))) {
+ if ((cells[cell_index] == 0) ||
+ (base::bits::CountTrailingZeros32(cells[cell_index]) >
+ base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
// If we are already in the first cell, there is no live object.
- if (start_index == cell_base_start_index) return false;
+ if (cell_index == base_address_cell_index) return false;
// If not, find a cell in a preceding cell slot that has a mark bit set.
do {
- start_index--;
- } while (start_index > cell_base_start_index && cells[start_index] == 0);
+ cell_index--;
+ } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
// The slot must be in a dead object if there are no preceding cells that
// have mark bits set.
- if (cells[start_index] == 0) {
+ if (cells[cell_index] == 0) {
return false;
}
// The object is in a preceding cell. Set the mask to find any object.
- slot_mask = 0xffffffff;
+ slot_mask = ~0u;
} else {
- // The object start is before the the slot index. Hence, in this case the
- // slot index can not be at the beginning of the cell.
- CHECK(index_in_cell > 1);
// We are interested in object mark bits right before the slot.
- slot_mask = index_in_cell - 1;
+ slot_mask = index_mask + (index_mask - 1);
}
- MarkBit::CellType current_cell = cells[start_index];
+ MarkBit::CellType current_cell = cells[cell_index];
CHECK(current_cell != 0);
// Find the last live object in the cell.
unsigned int leading_zeros =
base::bits::CountLeadingZeros32(current_cell & slot_mask);
- CHECK(leading_zeros != 32);
- unsigned int offset = Bitmap::kBitIndexMask - leading_zeros;
+ CHECK(leading_zeros != Bitmap::kBitsPerCell);
+ int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
- cell_base += (start_index - cell_base_start_index) * 32 * kPointerSize;
- Address address = cell_base + offset * kPointerSize;
+ base_address += (cell_index - base_address_cell_index) *
+ Bitmap::kBitsPerCell * kPointerSize;
+ Address address = base_address + offset * kPointerSize;
HeapObject* object = HeapObject::FromAddress(address);
CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
CHECK(object->address() < reinterpret_cast<Address>(slot));
- if (object->address() <= slot &&
+ if ((object->address() + kPointerSize) <= slot &&
(object->address() + object->Size()) > slot) {
// If the slot is within the last found object in the cell, the slot is
// in a live object.
+ // Slots pointing to the first word of an object are invalid and removed.
+ // This can happen when we move the object header while left trimming.
*out_object = object;
return true;
}
@@ -3069,32 +3006,26 @@ bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
// This function does not support large objects right now.
Space* owner = p->owner();
- if (owner == heap_->lo_space() || owner == NULL) return true;
-
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
-
- MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
-
- int offset = 0;
- while (current_cell != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
- current_cell >>= trailing_zeros;
- offset += trailing_zeros;
- Address address = cell_base + offset * kPointerSize;
-
- HeapObject* object = HeapObject::FromAddress(address);
- int size = object->Size();
+ if (owner == heap_->lo_space() || owner == NULL) {
+ Object* large_object = heap_->lo_space()->FindObject(slot);
+ // This object has to exist, otherwise we would not have recorded a slot
+ // for it.
+ CHECK(large_object->IsHeapObject());
+ HeapObject* large_heap_object = HeapObject::cast(large_object);
+ if (IsMarked(large_heap_object)) {
+ return true;
+ }
+ return false;
+ }
- if (object->address() > slot) return false;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return true;
- }
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ int size = object->Size();
- offset++;
- current_cell >>= 1;
+ if (object->address() > slot) return false;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return true;
}
}
return false;
@@ -3111,45 +3042,8 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
}
DCHECK(object != NULL);
-
- switch (object->ContentType()) {
- case HeapObjectContents::kTaggedValues:
- return true;
-
- case HeapObjectContents::kRawValues: {
- InstanceType type = object->map()->instance_type();
- // Slots in maps and code can't be invalid because they are never
- // shrunk.
- if (type == MAP_TYPE || type == CODE_TYPE) return true;
-
- // Consider slots in objects that contain ONLY raw data as invalid.
- return false;
- }
-
- case HeapObjectContents::kMixedValues: {
- if (object->IsFixedTypedArrayBase()) {
- return static_cast<int>(slot - object->address()) ==
- FixedTypedArrayBase::kBasePointerOffset;
- } else if (object->IsBytecodeArray()) {
- return static_cast<int>(slot - object->address()) ==
- BytecodeArray::kConstantPoolOffset;
- } else if (object->IsJSArrayBuffer()) {
- int off = static_cast<int>(slot - object->address());
- return (off >= JSArrayBuffer::BodyDescriptor::kStartOffset &&
- off <= JSArrayBuffer::kByteLengthOffset) ||
- (off >= JSArrayBuffer::kSize &&
- off < JSArrayBuffer::kSizeWithInternalFields);
- } else if (FLAG_unbox_double_fields) {
- // Filter out slots that happen to point to unboxed double fields.
- LayoutDescriptorHelper helper(object->map());
- DCHECK(!helper.all_fields_tagged());
- return helper.IsTagged(static_cast<int>(slot - object->address()));
- }
- break;
- }
- }
- UNREACHABLE();
- return true;
+ int offset = static_cast<int>(slot - object->address());
+ return object->IsValidSlot(offset);
}
@@ -3165,7 +3059,7 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
}
-void MarkCompactCollector::EvacuateNewSpace() {
+void MarkCompactCollector::EvacuateNewSpacePrologue() {
// There are soft limits in the allocation code, designed trigger a mark
// sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
@@ -3182,20 +3076,38 @@ void MarkCompactCollector::EvacuateNewSpace() {
new_space->Flip();
new_space->ResetAllocationInfo();
- int survivors_size = 0;
+ newspace_evacuation_candidates_.Clear();
+ NewSpacePageIterator it(from_bottom, from_top);
+ while (it.has_next()) {
+ newspace_evacuation_candidates_.Add(it.next());
+ }
+}
+
+HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
+ HashMap* local_pretenuring_feedback = new HashMap(
+ HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
+ EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
+ local_pretenuring_feedback);
// First pass: traverse all objects in inactive semispace, remove marks,
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
- NewSpacePageIterator it(from_bottom, from_top);
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+ for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
+ NewSpacePage* p =
+ reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
+ bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
+ USE(ok);
+ DCHECK(ok);
}
-
- heap_->IncrementYoungSurvivorsCounter(survivors_size);
- new_space->set_age_mark(new_space->top());
+ heap_->IncrementPromotedObjectsSize(
+ static_cast<int>(new_space_visitor.promoted_size()));
+ heap_->IncrementSemiSpaceCopiedObjectSize(
+ static_cast<int>(new_space_visitor.semispace_copied_size()));
+ heap_->IncrementYoungSurvivorsCounter(
+ static_cast<int>(new_space_visitor.promoted_size()) +
+ static_cast<int>(new_space_visitor.semispace_copied_size()));
+ return local_pretenuring_feedback;
}
@@ -3206,51 +3118,6 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
}
-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
- Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
- AlwaysAllocateScope always_allocate(isolate());
- DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-
- int offsets[16];
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
-
- if (*cell == 0) continue;
-
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- for (int i = 0; i < live_objects; i++) {
- Address object_addr = cell_base + offsets[i] * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(object_addr);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
- int size = object->Size();
- AllocationAlignment alignment = object->RequiredAlignment();
- HeapObject* target_object = nullptr;
- AllocationResult allocation = target_space->AllocateRaw(size, alignment);
- if (!allocation.To(&target_object)) {
- // We need to abort compaction for this page. Make sure that we reset
- // the mark bits for objects that have already been migrated.
- if (i > 0) {
- p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
- p->AddressToMarkbitIndex(object_addr));
- }
- return false;
- }
-
- MigrateObject(target_object, object, size, target_space->identity(),
- evacuation_slots_buffer);
- DCHECK(object->map_word().IsForwardingAddress());
- }
-
- // Clear marking bits for current cell.
- *cell = 0;
- }
- p->ResetLiveBytes();
- return true;
-}
-
-
int MarkCompactCollector::NumberOfParallelCompactionTasks() {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
@@ -3298,7 +3165,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
const int num_tasks = NumberOfParallelCompactionTasks();
-
// Set up compaction spaces.
CompactionSpaceCollection** compaction_spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
@@ -3311,19 +3177,12 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
num_tasks);
- compaction_in_progress_ = true;
+ uint32_t* task_ids = new uint32_t[num_tasks - 1];
// Kick off parallel tasks.
- for (int i = 1; i < num_tasks; i++) {
- concurrent_compaction_tasks_active_++;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
- v8::Platform::kShortRunningTask);
- }
-
- // Contribute in main thread. Counter and signal are in principal not needed.
- EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
-
- WaitUntilCompactionCompleted();
+ StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
+ // Wait for unfinished and not-yet-started tasks.
+ WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
+ delete[] task_ids;
double compaction_duration = 0.0;
intptr_t compacted_memory = 0;
@@ -3357,8 +3216,13 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// happens upon moving (which we potentially didn't do).
// - Leave the page in the list of pages of a space since we could not
// fully evacuate it.
+ // - Mark them for rescanning for store buffer entries as we otherwise
+ // might have stale store buffer entries that become "valid" again
+ // after reusing the memory. Note that all existing store buffer
+ // entries of such pages are filtered before rescanning.
DCHECK(p->IsEvacuationCandidate());
p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ p->set_scan_on_scavenge(true);
abandoned_pages++;
break;
case MemoryChunk::kCompactingFinalize:
@@ -3389,10 +3253,32 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
-void MarkCompactCollector::WaitUntilCompactionCompleted() {
- while (concurrent_compaction_tasks_active_ > 0) {
- pending_compaction_tasks_semaphore_.Wait();
- concurrent_compaction_tasks_active_--;
+void MarkCompactCollector::StartParallelCompaction(
+ CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
+ int len) {
+ compaction_in_progress_ = true;
+ for (int i = 1; i < len; i++) {
+ CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
+ task_ids[i - 1] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+
+ // Contribute in main thread.
+ EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
+}
+
+
+void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+ int len) {
+ // Try to cancel compaction tasks that have not been run (as they might be
+ // stuck in a worker queue). Tasks that cannot be canceled have either
+ // already completed or are still running, hence we need to wait for their
+ // semaphore signal.
+ for (int i = 0; i < len; i++) {
+ if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+ pending_compaction_tasks_semaphore_.Wait();
+ }
}
compaction_in_progress_ = false;
}
@@ -3401,6 +3287,8 @@ void MarkCompactCollector::WaitUntilCompactionCompleted() {
void MarkCompactCollector::EvacuatePages(
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer) {
+ EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+ evacuation_slots_buffer);
for (int i = 0; i < evacuation_candidates_.length(); i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
@@ -3414,9 +3302,9 @@ void MarkCompactCollector::EvacuatePages(
MemoryChunk::kCompactingInProgress);
double start = heap()->MonotonicallyIncreasingTimeInMs();
intptr_t live_bytes = p->LiveBytes();
- if (EvacuateLiveObjectsFromPage(
- p, compaction_spaces->Get(p->owner()->identity()),
- evacuation_slots_buffer)) {
+ AlwaysAllocateScope always_allocate(isolate());
+ if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
+ p->ResetLiveBytes();
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
compaction_spaces->ReportCompactionProgress(
@@ -3490,7 +3378,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- int offsets[16];
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
@@ -3504,42 +3391,39 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
intptr_t max_freed_bytes = 0;
int curr_region = -1;
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- int live_index = 0;
- for (; live_objects != 0; live_objects--) {
- Address free_end = cell_base + offsets[live_index++] * kPointerSize;
- if (free_end != free_start) {
- int size = static_cast<int>(free_end - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
- }
- freed_bytes = Free<parallelism>(space, free_list, free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
- }
- HeapObject* live_object = HeapObject::FromAddress(free_end);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
- Map* map = live_object->synchronized_map();
- int size = live_object->SizeFromMap(map);
- if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
- live_object->IterateBody(map->instance_type(), size, v);
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ int size = static_cast<int>(free_end - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
}
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
- int new_region_start = SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region || new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
+ freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ }
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+ object->IterateBody(map->instance_type(), size, v);
+ }
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+ int new_region_start = SkipList::RegionNumber(free_end);
+ int new_region_end =
+ SkipList::RegionNumber(free_end + size - kPointerSize);
+ if (new_region_start != curr_region || new_region_end != curr_region) {
+ skip_list->AddObject(free_end, size);
+ curr_region = new_region_end;
}
- free_start = free_end + size;
}
- // Clear marking bits for current cell.
- *cell = 0;
+ free_start = free_end + size;
}
+
+ // Clear the mark bits of that page and reset live bytes count.
+ Bitmap::Clear(p);
+
if (free_start != p->area_end()) {
int size = static_cast<int>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
@@ -3548,7 +3432,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
- p->ResetLiveBytes();
if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
// When concurrent sweeping is active, the page will be marked after
@@ -3602,24 +3485,70 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
}
-void MarkCompactCollector::VisitLiveObjects(Page* page,
- ObjectVisitor* visitor) {
- // First pass on aborted pages.
- int offsets[16];
- for (MarkBitCellIterator it(page); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
- if (*cell == 0) continue;
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- for (int i = 0; i < live_objects; i++) {
- Address object_addr = cell_base + offsets[i] * kPointerSize;
- HeapObject* live_object = HeapObject::FromAddress(object_addr);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
- Map* map = live_object->synchronized_map();
- int size = live_object->SizeFromMap(map);
- live_object->IterateBody(map->instance_type(), size, visitor);
+#ifdef VERIFY_HEAP
+static void VerifyAllBlackObjects(MemoryChunk* page) {
+ LiveObjectIterator<kAllLiveObjects> it(page);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ }
+}
+#endif // VERIFY_HEAP
+
+
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+ HeapObjectVisitor* visitor,
+ IterationMode mode) {
+#ifdef VERIFY_HEAP
+ VerifyAllBlackObjects(page);
+#endif // VERIFY_HEAP
+
+ LiveObjectIterator<kBlackObjects> it(page);
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ if (!visitor->Visit(object)) {
+ if (mode == kClearMarkbits) {
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(page->area_start()),
+ page->AddressToMarkbitIndex(object->address()));
+ RecomputeLiveBytes(page);
+ }
+ return false;
}
}
+ if (mode == kClearMarkbits) {
+ Bitmap::Clear(page);
+ }
+ return true;
+}
+
+
+void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
+ LiveObjectIterator<kBlackObjects> it(page);
+ int new_live_size = 0;
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ new_live_size += object->Size();
+ }
+ page->SetLiveBytes(new_live_size);
+}
+
+
+void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
+ ObjectVisitor* visitor) {
+#ifdef VERIFY_HEAP
+ VerifyAllBlackObjects(page);
+#endif // VERIFY_HEAP
+
+ LiveObjectIterator<kBlackObjects> it(page);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, visitor);
+ }
}
@@ -3654,25 +3583,64 @@ void MarkCompactCollector::SweepAbortedPages() {
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
+ HashMap* local_pretenuring_feedback = nullptr;
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_NEWSPACE);
+ GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
EvacuationScope evacuation_scope(this);
- EvacuateNewSpace();
+ EvacuateNewSpacePrologue();
+ local_pretenuring_feedback = EvacuateNewSpaceInParallel();
+ heap_->new_space()->set_age_mark(heap_->new_space()->top());
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_PAGES);
+ GCTracer::Scope::MC_EVACUATE_CANDIDATES);
EvacuationScope evacuation_scope(this);
EvacuatePagesInParallel();
}
{
+ heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
+ delete local_pretenuring_feedback;
+ }
+
+ UpdatePointersAfterEvacuation();
+
+ {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+ GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+ // After updating all pointers, we can finally sweep the aborted pages,
+ // effectively overriding any forward pointers.
+ SweepAbortedPages();
+
+ // EvacuateNewSpaceAndCandidates iterates over new space objects and for
+ // ArrayBuffers either re-registers them as live or promotes them. This is
+ // needed to properly free them.
+ heap()->array_buffer_tracker()->FreeDead(false);
+
+ // Deallocate evacuated candidate pages.
+ ReleaseEvacuationCandidates();
+ }
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap && !sweeping_in_progress_) {
+ VerifyEvacuation(heap());
+ }
+#endif
+}
+
+
+void MarkCompactCollector::UpdatePointersAfterEvacuation() {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+ {
+ GCTracer::Scope gc_scope(
+ heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
UpdateSlotsRecordedIn(migration_slots_buffer_);
if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
@@ -3696,8 +3664,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
PointersUpdatingVisitor updating_visitor(heap());
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+ GCTracer::Scope gc_scope(
+ heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
// Update pointers in to space.
SemiSpaceIterator to_it(heap()->new_space());
for (HeapObject* object = to_it.Next(); object != NULL;
@@ -3706,18 +3674,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
object->IterateBody(map->instance_type(), object->SizeFromMap(map),
&updating_visitor);
}
- }
-
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- }
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
@@ -3727,7 +3686,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
GCTracer::Scope gc_scope(
heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
@@ -3749,11 +3708,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
// First pass on aborted pages, fixing up all live objects.
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- // Clearing the evacuation candidate flag here has the effect of
- // stopping recording of slots for it in the following pointer
- // update phases.
p->ClearEvacuationCandidate();
- VisitLiveObjects(p, &updating_visitor);
+ VisitLiveObjectsBody(p, &updating_visitor);
}
}
@@ -3792,7 +3748,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
heap_->string_table()->Iterate(&updating_visitor);
// Update pointers from external string table.
@@ -3802,20 +3758,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuationWeakObjectRetainer evacuation_object_retainer;
heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
}
-
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_ABORTED);
- // After updating all pointers, we can finally sweep the aborted pages,
- // effectively overriding any forward pointers.
- SweepAbortedPages();
- }
-
- heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-
- // The hashing of weak_object_to_code_table is no longer valid.
- heap()->weak_object_to_code_table()->Rehash(
- heap()->isolate()->factory()->undefined_value());
}
@@ -3850,400 +3792,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
-
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte. The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
- 0, _, _,
- _, _, // 0
- 1, 0, _,
- _, _, // 1
- 1, 1, _,
- _, _, // 2
- X, _, _,
- _, _, // 3
- 1, 2, _,
- _, _, // 4
- 2, 0, 2,
- _, _, // 5
- X, _, _,
- _, _, // 6
- X, _, _,
- _, _, // 7
- 1, 3, _,
- _, _, // 8
- 2, 0, 3,
- _, _, // 9
- 2, 1, 3,
- _, _, // 10
- X, _, _,
- _, _, // 11
- X, _, _,
- _, _, // 12
- X, _, _,
- _, _, // 13
- X, _, _,
- _, _, // 14
- X, _, _,
- _, _, // 15
- 1, 4, _,
- _, _, // 16
- 2, 0, 4,
- _, _, // 17
- 2, 1, 4,
- _, _, // 18
- X, _, _,
- _, _, // 19
- 2, 2, 4,
- _, _, // 20
- 3, 0, 2,
- 4, _, // 21
- X, _, _,
- _, _, // 22
- X, _, _,
- _, _, // 23
- X, _, _,
- _, _, // 24
- X, _, _,
- _, _, // 25
- X, _, _,
- _, _, // 26
- X, _, _,
- _, _, // 27
- X, _, _,
- _, _, // 28
- X, _, _,
- _, _, // 29
- X, _, _,
- _, _, // 30
- X, _, _,
- _, _, // 31
- 1, 5, _,
- _, _, // 32
- 2, 0, 5,
- _, _, // 33
- 2, 1, 5,
- _, _, // 34
- X, _, _,
- _, _, // 35
- 2, 2, 5,
- _, _, // 36
- 3, 0, 2,
- 5, _, // 37
- X, _, _,
- _, _, // 38
- X, _, _,
- _, _, // 39
- 2, 3, 5,
- _, _, // 40
- 3, 0, 3,
- 5, _, // 41
- 3, 1, 3,
- 5, _, // 42
- X, _, _,
- _, _, // 43
- X, _, _,
- _, _, // 44
- X, _, _,
- _, _, // 45
- X, _, _,
- _, _, // 46
- X, _, _,
- _, _, // 47
- X, _, _,
- _, _, // 48
- X, _, _,
- _, _, // 49
- X, _, _,
- _, _, // 50
- X, _, _,
- _, _, // 51
- X, _, _,
- _, _, // 52
- X, _, _,
- _, _, // 53
- X, _, _,
- _, _, // 54
- X, _, _,
- _, _, // 55
- X, _, _,
- _, _, // 56
- X, _, _,
- _, _, // 57
- X, _, _,
- _, _, // 58
- X, _, _,
- _, _, // 59
- X, _, _,
- _, _, // 60
- X, _, _,
- _, _, // 61
- X, _, _,
- _, _, // 62
- X, _, _,
- _, _, // 63
- 1, 6, _,
- _, _, // 64
- 2, 0, 6,
- _, _, // 65
- 2, 1, 6,
- _, _, // 66
- X, _, _,
- _, _, // 67
- 2, 2, 6,
- _, _, // 68
- 3, 0, 2,
- 6, _, // 69
- X, _, _,
- _, _, // 70
- X, _, _,
- _, _, // 71
- 2, 3, 6,
- _, _, // 72
- 3, 0, 3,
- 6, _, // 73
- 3, 1, 3,
- 6, _, // 74
- X, _, _,
- _, _, // 75
- X, _, _,
- _, _, // 76
- X, _, _,
- _, _, // 77
- X, _, _,
- _, _, // 78
- X, _, _,
- _, _, // 79
- 2, 4, 6,
- _, _, // 80
- 3, 0, 4,
- 6, _, // 81
- 3, 1, 4,
- 6, _, // 82
- X, _, _,
- _, _, // 83
- 3, 2, 4,
- 6, _, // 84
- 4, 0, 2,
- 4, 6, // 85
- X, _, _,
- _, _, // 86
- X, _, _,
- _, _, // 87
- X, _, _,
- _, _, // 88
- X, _, _,
- _, _, // 89
- X, _, _,
- _, _, // 90
- X, _, _,
- _, _, // 91
- X, _, _,
- _, _, // 92
- X, _, _,
- _, _, // 93
- X, _, _,
- _, _, // 94
- X, _, _,
- _, _, // 95
- X, _, _,
- _, _, // 96
- X, _, _,
- _, _, // 97
- X, _, _,
- _, _, // 98
- X, _, _,
- _, _, // 99
- X, _, _,
- _, _, // 100
- X, _, _,
- _, _, // 101
- X, _, _,
- _, _, // 102
- X, _, _,
- _, _, // 103
- X, _, _,
- _, _, // 104
- X, _, _,
- _, _, // 105
- X, _, _,
- _, _, // 106
- X, _, _,
- _, _, // 107
- X, _, _,
- _, _, // 108
- X, _, _,
- _, _, // 109
- X, _, _,
- _, _, // 110
- X, _, _,
- _, _, // 111
- X, _, _,
- _, _, // 112
- X, _, _,
- _, _, // 113
- X, _, _,
- _, _, // 114
- X, _, _,
- _, _, // 115
- X, _, _,
- _, _, // 116
- X, _, _,
- _, _, // 117
- X, _, _,
- _, _, // 118
- X, _, _,
- _, _, // 119
- X, _, _,
- _, _, // 120
- X, _, _,
- _, _, // 121
- X, _, _,
- _, _, // 122
- X, _, _,
- _, _, // 123
- X, _, _,
- _, _, // 124
- X, _, _,
- _, _, // 125
- X, _, _,
- _, _, // 126
- X, _, _,
- _, _, // 127
- 1, 7, _,
- _, _, // 128
- 2, 0, 7,
- _, _, // 129
- 2, 1, 7,
- _, _, // 130
- X, _, _,
- _, _, // 131
- 2, 2, 7,
- _, _, // 132
- 3, 0, 2,
- 7, _, // 133
- X, _, _,
- _, _, // 134
- X, _, _,
- _, _, // 135
- 2, 3, 7,
- _, _, // 136
- 3, 0, 3,
- 7, _, // 137
- 3, 1, 3,
- 7, _, // 138
- X, _, _,
- _, _, // 139
- X, _, _,
- _, _, // 140
- X, _, _,
- _, _, // 141
- X, _, _,
- _, _, // 142
- X, _, _,
- _, _, // 143
- 2, 4, 7,
- _, _, // 144
- 3, 0, 4,
- 7, _, // 145
- 3, 1, 4,
- 7, _, // 146
- X, _, _,
- _, _, // 147
- 3, 2, 4,
- 7, _, // 148
- 4, 0, 2,
- 4, 7, // 149
- X, _, _,
- _, _, // 150
- X, _, _,
- _, _, // 151
- X, _, _,
- _, _, // 152
- X, _, _,
- _, _, // 153
- X, _, _,
- _, _, // 154
- X, _, _,
- _, _, // 155
- X, _, _,
- _, _, // 156
- X, _, _,
- _, _, // 157
- X, _, _,
- _, _, // 158
- X, _, _,
- _, _, // 159
- 2, 5, 7,
- _, _, // 160
- 3, 0, 5,
- 7, _, // 161
- 3, 1, 5,
- 7, _, // 162
- X, _, _,
- _, _, // 163
- 3, 2, 5,
- 7, _, // 164
- 4, 0, 2,
- 5, 7, // 165
- X, _, _,
- _, _, // 166
- X, _, _,
- _, _, // 167
- 3, 3, 5,
- 7, _, // 168
- 4, 0, 3,
- 5, 7, // 169
- 4, 1, 3,
- 5, 7 // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits. Returns the number of objects that start in the
-// range. Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
- int objects = 0;
- int offset = 0;
-
- // No consecutive 1 bits.
- DCHECK((mark_bits & 0x180) != 0x180);
- DCHECK((mark_bits & 0x18000) != 0x18000);
- DCHECK((mark_bits & 0x1800000) != 0x1800000);
-
- while (mark_bits != 0) {
- int byte = (mark_bits & 0xff);
- mark_bits >>= 8;
- if (byte != 0) {
- DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
- char* table = kStartTable + byte * kStartTableEntriesPerLine;
- int objects_in_these_8_words = table[0];
- DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
- DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
- for (int i = 0; i < objects_in_these_8_words; i++) {
- starts[objects++] = offset + table[1 + i];
- }
- }
- offset += 8;
- }
- return objects;
-}
-
-
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int required_freed_bytes) {
int max_freed = 0;
@@ -4298,7 +3846,7 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
}
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4325,12 +3873,21 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
continue;
}
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ // We need to sweep the page to get it into an iterable state again. Note
+ // that this adds unusable memory into the free list that is later on
+ // (in the free list) dropped again. Since we only use the flag for
+ // testing this is fine.
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ continue;
+ }
+
// One unused page is kept, all further are released before sweeping them.
if (p->LiveBytes() == 0) {
if (unused_page_present) {
if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
- reinterpret_cast<intptr_t>(p));
+ PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
space->ReleasePage(p);
continue;
@@ -4338,64 +3895,38 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
unused_page_present = true;
}
- switch (sweeper) {
- case CONCURRENT_SWEEPING:
- if (!parallel_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
- reinterpret_cast<intptr_t>(p));
- }
- if (space->identity() == CODE_SPACE) {
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
- pages_swept++;
- parallel_sweeping_active = true;
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
- reinterpret_cast<intptr_t>(p));
- }
- p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
- int to_sweep = p->area_size() - p->LiveBytes();
- space->accounting_stats_.ShrinkSpace(to_sweep);
- }
- space->set_end_of_unswept_pages(p);
- break;
- case SEQUENTIAL_SWEEPING: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
- }
- if (space->identity() == CODE_SPACE) {
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
+ if (!parallel_sweeping_active) {
+ if (FLAG_gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: %p", p);
+ }
+ if (space->identity() == CODE_SPACE) {
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, NULL);
} else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
- pages_swept++;
- break;
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+ }
+ pages_swept++;
+ parallel_sweeping_active = true;
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
}
- default: { UNREACHABLE(); }
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
+ int to_sweep = p->area_size() - p->LiveBytes();
+ space->accounting_stats_.ShrinkSpace(to_sweep);
}
+ space->set_end_of_unswept_pages(p);
}
if (FLAG_gc_verbose) {
- PrintF("SweepSpace: %s (%d pages swept)\n",
- AllocationSpaceName(space->identity()), pages_swept);
+ PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
+ AllocationSpaceName(space->identity()), pages_swept);
}
}
@@ -4404,7 +3935,7 @@ void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
+ start_time = heap_->MonotonicallyIncreasingTimeInMs();
}
#ifdef DEBUG
@@ -4414,23 +3945,23 @@ void MarkCompactCollector::SweepSpaces() {
MoveEvacuationCandidatesToEndOfPagesList();
{
+ sweeping_in_progress_ = true;
{
GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_OLDSPACE);
- SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING);
+ GCTracer::Scope::MC_SWEEP_OLD);
+ StartSweepSpace(heap()->old_space());
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CODE);
- SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING);
+ StartSweepSpace(heap()->code_space());
}
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_MAP);
- SweepSpace(heap()->map_space(), CONCURRENT_SWEEPING);
+ StartSweepSpace(heap()->map_space());
}
- sweeping_in_progress_ = true;
- if (heap()->concurrent_sweeping_enabled()) {
+ if (FLAG_concurrent_sweeping) {
StartSweeperThreads();
}
}
@@ -4442,29 +3973,10 @@ void MarkCompactCollector::SweepSpaces() {
// buffer entries are already filter out. We can just release the memory.
heap()->FreeQueuedChunks();
- EvacuateNewSpaceAndCandidates();
-
- // EvacuateNewSpaceAndCandidates iterates over new space objects and for
- // ArrayBuffers either re-registers them as live or promotes them. This is
- // needed to properly free them.
- heap()->array_buffer_tracker()->FreeDead(false);
-
- // Clear the marking state of live large objects.
- heap_->lo_space()->ClearMarkingStateOfLiveObjects();
-
- // Deallocate evacuated candidate pages.
- ReleaseEvacuationCandidates();
-
if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+ heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
start_time);
}
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !sweeping_in_progress_) {
- VerifyEvacuation(heap());
- }
-#endif
}
@@ -4489,25 +4001,6 @@ void MarkCompactCollector::ParallelSweepSpacesComplete() {
}
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (isolate()->debug()->is_active()) enable = false;
-
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(isolate());
- } else {
- if (code_flusher_ == NULL) return;
- code_flusher_->EvictAllCandidates();
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
- }
-}
-
-
// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects. We should either reenable it or change our tools.
@@ -4574,7 +4067,7 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
pc);
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordRelocSlot(&rinfo, target);
}
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index c489eaf3f4..cfb2d9d270 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -45,10 +45,10 @@ class Marking : public AllStatic {
return !mark_bit.Get() && mark_bit.Next().Get();
}
- // Black markbits: 10 - this is required by the sweeper.
+ // Black markbits: 11
static const char* kBlackBitPattern;
INLINE(static bool IsBlack(MarkBit mark_bit)) {
- return mark_bit.Get() && !mark_bit.Next().Get();
+ return mark_bit.Get() && mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
@@ -58,10 +58,10 @@ class Marking : public AllStatic {
return !mark_bit.Get();
}
- // Grey markbits: 11
+ // Grey markbits: 10
static const char* kGreyBitPattern;
INLINE(static bool IsGrey(MarkBit mark_bit)) {
- return mark_bit.Get() && mark_bit.Next().Get();
+ return mark_bit.Get() && !mark_bit.Next().Get();
}
// IsBlackOrGrey assumes that the first bit is set for black or grey
@@ -70,7 +70,7 @@ class Marking : public AllStatic {
INLINE(static void MarkBlack(MarkBit mark_bit)) {
mark_bit.Set();
- mark_bit.Next().Clear();
+ mark_bit.Next().Set();
}
INLINE(static void MarkWhite(MarkBit mark_bit)) {
@@ -81,6 +81,7 @@ class Marking : public AllStatic {
INLINE(static void BlackToWhite(MarkBit markbit)) {
DCHECK(IsBlack(markbit));
markbit.Clear();
+ markbit.Next().Clear();
}
INLINE(static void GreyToWhite(MarkBit markbit)) {
@@ -91,23 +92,23 @@ class Marking : public AllStatic {
INLINE(static void BlackToGrey(MarkBit markbit)) {
DCHECK(IsBlack(markbit));
- markbit.Next().Set();
+ markbit.Next().Clear();
}
INLINE(static void WhiteToGrey(MarkBit markbit)) {
DCHECK(IsWhite(markbit));
markbit.Set();
- markbit.Next().Set();
}
INLINE(static void WhiteToBlack(MarkBit markbit)) {
DCHECK(IsWhite(markbit));
markbit.Set();
+ markbit.Next().Set();
}
INLINE(static void GreyToBlack(MarkBit markbit)) {
DCHECK(IsGrey(markbit));
- markbit.Next().Clear();
+ markbit.Next().Set();
}
INLINE(static void BlackToGrey(HeapObject* obj)) {
@@ -116,7 +117,7 @@ class Marking : public AllStatic {
INLINE(static void AnyToGrey(MarkBit markbit)) {
markbit.Set();
- markbit.Next().Set();
+ markbit.Next().Clear();
}
static void TransferMark(Heap* heap, Address old_start, Address new_start);
@@ -160,16 +161,15 @@ class Marking : public AllStatic {
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
MarkBit from_mark_bit = MarkBitFrom(from);
MarkBit to_mark_bit = MarkBitFrom(to);
- bool is_black = false;
+ DCHECK(Marking::IsWhite(to_mark_bit));
if (from_mark_bit.Get()) {
to_mark_bit.Set();
- is_black = true; // Looks black so far.
- }
- if (from_mark_bit.Next().Get()) {
- to_mark_bit.Next().Set();
- is_black = false; // Was actually gray.
+ if (from_mark_bit.Next().Get()) {
+ to_mark_bit.Next().Set();
+ return true;
+ }
}
- return is_black;
+ return false;
}
private:
@@ -287,18 +287,11 @@ class CodeFlusher {
ProcessJSFunctionCandidates();
}
- void EvictAllCandidates() {
- EvictJSFunctionCandidates();
- EvictSharedFunctionInfoCandidates();
- }
-
void IteratePointersToFromSpace(ObjectVisitor* v);
private:
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
- void EvictJSFunctionCandidates();
- void EvictSharedFunctionInfoCandidates();
static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
static inline JSFunction* GetNextCandidate(JSFunction* candidate);
@@ -329,6 +322,11 @@ class ThreadLocalTop;
// Mark-Compact collector
class MarkCompactCollector {
public:
+ enum IterationMode {
+ kKeepMarking,
+ kClearMarkbits,
+ };
+
static void Initialize();
void SetUp();
@@ -374,12 +372,6 @@ class MarkCompactCollector {
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
- void EnableCodeFlushing(bool enable);
-
- enum SweeperType {
- CONCURRENT_SWEEPING,
- SEQUENTIAL_SWEEPING
- };
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
@@ -416,14 +408,6 @@ class MarkCompactCollector {
AllocationSpace to_old_space,
SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size,
- SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectRaw(HeapObject* dst, HeapObject* src, int size);
-
- bool TryPromoteObject(HeapObject* object, int object_size);
-
void InvalidateCode(Code* code);
void ClearMarkbits();
@@ -521,10 +505,15 @@ class MarkCompactCollector {
private:
class CompactionTask;
+ class EvacuateNewSpaceVisitor;
+ class EvacuateOldSpaceVisitor;
+ class EvacuateVisitorBase;
+ class HeapObjectVisitor;
class SweeperTask;
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+
explicit MarkCompactCollector(Heap* heap);
- ~MarkCompactCollector();
bool WillBeDeoptimized(Code* code);
void EvictPopularEvacuationCandidate(Page* page);
@@ -532,6 +521,10 @@ class MarkCompactCollector {
void StartSweeperThreads();
+ void ComputeEvacuationHeuristics(int area_size,
+ int* target_fragmentation_percent,
+ int* max_evacuated_bytes);
+
#ifdef DEBUG
enum CollectorState {
IDLE,
@@ -570,11 +563,12 @@ class MarkCompactCollector {
// After: Live objects are marked and non-live objects are unmarked.
friend class CodeMarkingVisitor;
+ friend class IncrementalMarkingMarkingVisitor;
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
+ friend class RecordMigratedSlotVisitor;
friend class RootMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
- friend class IncrementalMarkingMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
// from being flushed.
@@ -585,8 +579,6 @@ class MarkCompactCollector {
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
- void AfterMarking();
-
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
INLINE(void PushBlack(HeapObject* obj));
@@ -628,9 +620,8 @@ class MarkCompactCollector {
// otherwise a map can die and deoptimize the code.
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
- // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
- // increase chances of reusing of map transition tree in future.
- void RetainMaps();
+ // Collects a list of dependent code from maps embedded in optimize code.
+ DependentCode* DependentCodeListFromNonLiveMaps();
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
@@ -655,15 +646,20 @@ class MarkCompactCollector {
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
- // Map transitions from a live map to a dead map must be killed.
- // We replace them with a null descriptor, with the same key.
+ // Clear non-live references in weak cells, transition and descriptor arrays,
+ // and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences();
- void ClearNonLivePrototypeTransitions(Map* map);
- void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearMapTransitions(Map* map, Map* dead_transition);
- bool ClearMapBackPointer(Map* map);
- void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
- int number_of_own_descriptors);
+ void MarkDependentCodeForDeoptimization(DependentCode* list);
+ // Find non-live targets of simple transitions in the given list. Clear
+ // transitions to non-live targets and if needed trim descriptors arrays.
+ void ClearSimpleMapTransitions(Object* non_live_map_list);
+ void ClearSimpleMapTransition(Map* map, Map* dead_transition);
+ // Compact every array in the global list of transition arrays and
+ // trim the corresponding descriptor array if a transition target is non-live.
+ void ClearFullMapTransitions();
+ bool CompactTransitionArray(Map* map, TransitionArray* transitions,
+ DescriptorArray* descriptors);
+ void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
void TrimEnumCache(Map* map, DescriptorArray* descriptors);
// Mark all values associated with reachable keys in weak collections
@@ -680,13 +676,11 @@ class MarkCompactCollector {
// collections when incremental marking is aborted.
void AbortWeakCollections();
- void ProcessAndClearWeakCells();
+ void ClearWeakCells(Object** non_live_map_list,
+ DependentCode** dependent_code_list);
void AbortWeakCells();
- // After all reachable objects have been marked, those entries within
- // optimized code maps that became unreachable are removed, potentially
- // trimming or clearing out the entire optimized code map.
- void ProcessAndClearOptimizedCodeMaps();
+ void AbortTransitionArrays();
// -----------------------------------------------------------------------
// Phase 2: Sweeping to clear mark bits and free non-live objects for
@@ -704,13 +698,10 @@ class MarkCompactCollector {
// regions to each space's free list.
void SweepSpaces();
- int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
- NewSpacePage* p);
-
- void EvacuateNewSpace();
+ void EvacuateNewSpacePrologue();
- bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
- SlotsBuffer** evacuation_slots_buffer);
+ // Returns local pretenuring feedback.
+ HashMap* EvacuateNewSpaceInParallel();
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
@@ -723,11 +714,23 @@ class MarkCompactCollector {
// The number of parallel compaction tasks, including the main thread.
int NumberOfParallelCompactionTasks();
- void WaitUntilCompactionCompleted();
+
+ void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
+ uint32_t* task_ids, int len);
+ void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
void EvacuateNewSpaceAndCandidates();
- void VisitLiveObjects(Page* page, ObjectVisitor* visitor);
+ void UpdatePointersAfterEvacuation();
+
+ // Iterates through all live objects on a page using marking information.
+ // Returns whether all objects have successfully been visited.
+ bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
+ IterationMode mode);
+
+ void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
+
+ void RecomputeLiveBytes(MemoryChunk* page);
void SweepAbortedPages();
@@ -737,7 +740,9 @@ class MarkCompactCollector {
// corresponding space pages list.
void MoveEvacuationCandidatesToEndOfPagesList();
- void SweepSpace(PagedSpace* space, SweeperType sweeper);
+ // Starts sweeping of a space by contributing on the main thread and setting
+ // up other pages for sweeping.
+ void StartSweepSpace(PagedSpace* space);
// Finalizes the parallel sweeping phase. Marks all the pages that were
// swept in parallel.
@@ -774,6 +779,8 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
+ List<MemoryChunk*> newspace_evacuation_candidates_;
+
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses
// AddEvacuationSlotsbufferSynchronized to adds its slots buffer to the
@@ -802,10 +809,8 @@ class MarkCompactCollector {
// Semaphore used to synchronize compaction tasks.
base::Semaphore pending_compaction_tasks_semaphore_;
- // Number of active compaction tasks (including main thread).
- intptr_t concurrent_compaction_tasks_active_;
-
friend class Heap;
+ friend class StoreBuffer;
};
@@ -841,6 +846,14 @@ class MarkBitCellIterator BASE_EMBEDDED {
cell_base_ += 32 * kPointerSize;
}
+ // Return the next mark bit cell. If there is no next it returns 0;
+ inline MarkBit::CellType PeekNext() {
+ if (HasNext()) {
+ return cells_[cell_index_ + 1];
+ }
+ return 0;
+ }
+
private:
MemoryChunk* chunk_;
MarkBit::CellType* cells_;
@@ -849,6 +862,26 @@ class MarkBitCellIterator BASE_EMBEDDED {
Address cell_base_;
};
+enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };
+
+template <LiveObjectIterationMode T>
+class LiveObjectIterator BASE_EMBEDDED {
+ public:
+ explicit LiveObjectIterator(MemoryChunk* chunk)
+ : chunk_(chunk),
+ it_(chunk_),
+ cell_base_(it_.CurrentCellBase()),
+ current_cell_(*it_.CurrentCell()) {}
+
+ HeapObject* Next();
+
+ private:
+ MemoryChunk* chunk_;
+ MarkBitCellIterator it_;
+ Address cell_base_;
+ MarkBit::CellType current_cell_;
+};
+
class EvacuationScope BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 9fadd08dca..33e624978f 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -24,7 +24,7 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
void MemoryReducer::TimerTask::RunInternal() {
- const double kJsCallsPerMsThreshold = 0.25;
+ const double kJsCallsPerMsThreshold = 0.5;
Heap* heap = memory_reducer_->heap();
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 195723e86d..c1566abfc5 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -186,12 +186,6 @@ void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
heap->object_stats_->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
fixed_array_size);
}
- if (TransitionArray::IsFullTransitionArray(map_obj->raw_transitions())) {
- int fixed_array_size =
- TransitionArray::cast(map_obj->raw_transitions())->Size();
- heap->object_stats_->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
if (map_obj->has_code_cache()) {
CodeCache* cache = CodeCache::cast(map_obj->code_cache());
heap->object_stats_->RecordFixedArraySubTypeStats(
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index d6a189a98d..a29ba4b08c 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -9,6 +9,7 @@
#include "src/heap/objects-visiting.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
+#include "src/objects-body-descriptors-inl.h"
namespace v8 {
namespace internal {
@@ -43,8 +44,15 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
FixedArray::BodyDescriptor, int>::Visit);
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
- table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
- table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
+ table_.Register(
+ kVisitFixedTypedArray,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ int>::Visit);
+
+ table_.Register(
+ kVisitFixedFloat64Array,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ int>::Visit);
table_.Register(
kVisitNativeContext,
@@ -71,10 +79,6 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
- table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
- table_.Register(kVisitJSDataView, &VisitJSDataView);
-
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
@@ -94,36 +98,14 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
template <typename StaticVisitor>
int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
+ typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor, int>
+ JSArrayBufferBodyVisitor;
- JSArrayBuffer::JSArrayBufferIterateBody<
- StaticNewSpaceVisitor<StaticVisitor> >(heap, object);
if (!JSArrayBuffer::cast(object)->is_external()) {
+ Heap* heap = map->GetHeap();
heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
}
- return JSArrayBuffer::kSizeWithInternalFields;
-}
-
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
- Map* map, HeapObject* object) {
- VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
- return JSTypedArray::kSizeWithInternalFields;
-}
-
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
- HeapObject* object) {
- VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
- return JSDataView::kSizeWithInternalFields;
+ return JSArrayBufferBodyVisitor::Visit(map, object);
}
@@ -160,9 +142,15 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+ table_.Register(
+ kVisitFixedTypedArray,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ void>::Visit);
- table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
+ table_.Register(
+ kVisitFixedFloat64Array,
+ &FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
+ void>::Visit);
table_.Register(kVisitNativeContext, &VisitNativeContext);
@@ -194,10 +182,6 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
- table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
- table_.Register(kVisitJSDataView, &VisitJSDataView);
-
// Registration for kVisitJSRegExp is done by StaticVisitor.
table_.Register(
@@ -208,6 +192,8 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitWeakCell, &VisitWeakCell);
+ table_.Register(kVisitTransitionArray, &VisitTransitionArray);
+
table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
kVisitDataObjectGeneric>();
@@ -303,13 +289,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
Map* map, HeapObject* object) {
FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
void>::Visit(map, object);
-
- MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
- for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS;
- ++idx) {
- Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
- collector->RecordSlot(object, slot, *slot);
- }
}
@@ -378,6 +357,31 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
+ Map* map, HeapObject* object) {
+ TransitionArray* array = TransitionArray::cast(object);
+ Heap* heap = array->GetHeap();
+ // Visit strong references.
+ if (array->HasPrototypeTransitions()) {
+ StaticVisitor::VisitPointer(heap, array,
+ array->GetPrototypeTransitionsSlot());
+ }
+ int num_transitions = TransitionArray::NumberOfTransitions(array);
+ for (int i = 0; i < num_transitions; ++i) {
+ StaticVisitor::VisitPointer(heap, array, array->GetKeySlot(i));
+ }
+ // Enqueue the array in linked list of encountered transition arrays if it is
+ // not already in the list.
+ if (array->next_link()->IsUndefined()) {
+ Heap* heap = map->GetHeap();
+ array->set_next_link(heap->encountered_transition_arrays(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ heap->set_encountered_transition_arrays(array);
+ }
+}
+
+
+template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
@@ -392,6 +396,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
Map* map, HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor,
+ JSWeakCollection::BodyDescriptorWeak,
+ void> JSWeakCollectionBodyVisitor;
Heap* heap = map->GetHeap();
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(object);
@@ -404,14 +411,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
// Skip visiting the backing hash table containing the mappings and the
// pointer to the other enqueued weak collections, both are post-processed.
- StaticVisitor::VisitPointers(
- heap, object,
- HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
- HeapObject::RawField(object, JSWeakCollection::kTableOffset));
- STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
- JSWeakCollection::kNextOffset);
- STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
- JSWeakCollection::kSize);
+ JSWeakCollectionBodyVisitor::Visit(map, object);
// Partially initialized weak collection is enqueued, but table is ignored.
if (!weak_collection->table()->IsHashTable()) return;
@@ -427,19 +427,14 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
HeapObject* object) {
+ typedef FlexibleBodyVisitor<StaticVisitor, Code::BodyDescriptor, void>
+ CodeBodyVisitor;
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
code->MakeOlder(heap->mark_compact_collector()->marking_parity());
}
- MarkCompactCollector* collector = heap->mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- // Visit all unoptimized code objects to prevent flushing them.
- MarkInlinedFunctionsCode(heap, code);
- }
- }
- code->CodeIterateBody<StaticVisitor>(heap);
+ CodeBodyVisitor::Visit(map, object);
}
@@ -455,18 +450,10 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
shared->ClearTypeFeedbackInfoAtGCTime();
}
if (FLAG_flush_optimized_code_cache) {
- if (!shared->optimized_code_map()->IsSmi()) {
+ if (!shared->OptimizedCodeMapIsCleared()) {
// Always flush the optimized code map if requested by flag.
shared->ClearOptimizedCodeMap();
}
- } else {
- if (!shared->optimized_code_map()->IsSmi()) {
- // Treat some references within the code map weakly by marking the
- // code map itself but not pushing it onto the marking deque. The
- // map will be processed after marking.
- FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
- MarkOptimizedCodeMap(heap, code_map);
- }
}
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
@@ -502,14 +489,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
// non-flushable, because it is required for bailing out from
// optimized code.
collector->code_flusher()->AddCandidate(function);
- // Visit shared function info immediately to avoid double checking
- // of its flushability later. This is just an optimization because
- // the shared function info would eventually be visited.
- SharedFunctionInfo* shared = function->shared();
- if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
- StaticVisitor::MarkObject(heap, shared->map());
- VisitSharedFunctionInfoWeakCode(heap, shared);
- }
// Treat the reference to the code object weakly.
VisitJSFunctionWeakCode(map, object);
return;
@@ -525,12 +504,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
HeapObject* object) {
- int last_property_offset =
- JSRegExp::kSize + kPointerSize * map->GetInObjectProperties();
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
- HeapObject::RawField(object, last_property_offset));
+ JSObjectVisitor::Visit(map, object);
}
@@ -539,7 +513,11 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
- JSArrayBuffer::JSArrayBufferIterateBody<StaticVisitor>(heap, object);
+ typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+ void> JSArrayBufferBodyVisitor;
+
+ JSArrayBufferBodyVisitor::Visit(map, object);
+
if (!JSArrayBuffer::cast(object)->is_external() &&
!heap->InNewSpace(object)) {
heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
@@ -548,26 +526,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
- Map* map, HeapObject* object) {
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
- HeapObject* object) {
- StaticVisitor::VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
- HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
-}
-
-
-template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
Map* map, HeapObject* object) {
StaticVisitor::VisitPointers(
@@ -580,11 +538,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
Map* map) {
- Object* raw_transitions = map->raw_transitions();
- if (TransitionArray::IsFullTransitionArray(raw_transitions)) {
- MarkTransitionArray(heap, TransitionArray::cast(raw_transitions));
- }
-
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a non-empty
// descriptor array is marked, its header is also visited. The slot holding
@@ -617,64 +570,6 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
}
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
- Heap* heap, TransitionArray* transitions) {
- if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
-
- if (transitions->HasPrototypeTransitions()) {
- StaticVisitor::VisitPointer(heap, transitions,
- transitions->GetPrototypeTransitionsSlot());
- }
-
- int num_transitions = TransitionArray::NumberOfTransitions(transitions);
- for (int i = 0; i < num_transitions; ++i) {
- StaticVisitor::VisitPointer(heap, transitions, transitions->GetKeySlot(i));
- }
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkOptimizedCodeMap(
- Heap* heap, FixedArray* code_map) {
- if (!StaticVisitor::MarkObjectWithoutPush(heap, code_map)) return;
-
- // Mark the context-independent entry in the optimized code map. Depending on
- // the age of the code object, we treat it as a strong or a weak reference.
- Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
- if (FLAG_turbo_preserve_shared_code && shared_object->IsCode() &&
- FLAG_age_code && !Code::cast(shared_object)->IsOld()) {
- StaticVisitor::VisitPointer(
- heap, code_map,
- code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex));
- }
-}
-
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
- Code* code) {
- // For optimized functions we should retain both non-optimized version
- // of its code and non-optimized version of all inlined functions.
- // This is required to support bailing out from inlined code.
- if (code->deoptimization_data() != heap->empty_fixed_array()) {
- DeoptimizationInputData* const data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- FixedArray* const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
- for (int i = 0; i < inlined_count; ++i) {
- StaticVisitor::MarkObject(
- heap, SharedFunctionInfo::cast(literals->get(i))->code());
- }
- }
-}
-
-
-inline static bool HasValidNonBuiltinContext(JSFunction* function) {
- return function->context()->IsContext() && !function->shared()->IsBuiltin();
-}
-
-
inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
Object* undefined = heap->undefined_value();
return (info->script() != undefined) &&
@@ -694,11 +589,6 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
return false;
}
- // The function must have a valid context and not be a builtin.
- if (!HasValidNonBuiltinContext(function)) {
- return false;
- }
-
// We do not (yet) flush code for optimized functions.
if (function->code() != shared_info->code()) {
return false;
@@ -756,6 +646,16 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
+ // The function must not be a builtin.
+ if (shared_info->IsBuiltin()) {
+ return false;
+ }
+
+ // Maintain debug break slots in the code.
+ if (shared_info->HasDebugCode()) {
+ return false;
+ }
+
// If this is a function initialized with %SetCode then the one-to-one
// relation between SharedFunctionInfo and Code is broken.
if (shared_info->dont_flush()) {
@@ -822,66 +722,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
}
-void Code::CodeIterateBody(ObjectVisitor* v) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::kDebugBreakSlotMask;
-
- // There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
- IteratePointer(v, kRelocationInfoOffset);
- IteratePointer(v, kHandlerTableOffset);
- IteratePointer(v, kDeoptimizationDataOffset);
- IteratePointer(v, kTypeFeedbackInfoOffset);
- IterateNextCodeLink(v, kNextCodeLinkOffset);
-
- RelocIterator it(this, mode_mask);
- Isolate* isolate = this->GetIsolate();
- for (; !it.done(); it.next()) {
- it.rinfo()->Visit(isolate, v);
- }
-}
-
-
-template <typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::CELL) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::kDebugBreakSlotMask;
-
- // There are two places where we iterate code bodies: here and the non-
- // templated CodeIterateBody (above). They should be kept in sync.
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
- StaticVisitor::VisitPointer(
- heap, this,
- reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
- StaticVisitor::VisitNextCodeLink(
- heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
-
-
- RelocIterator it(this, mode_mask);
- for (; !it.done(); it.next()) {
- it.rinfo()->template Visit<StaticVisitor>(heap);
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 3d6cb73095..315c897bec 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -79,13 +79,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case WEAK_CELL_TYPE:
return kVisitWeakCell;
- case JS_SET_TYPE:
- return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSSet::kSize, has_unboxed_fields);
-
- case JS_MAP_TYPE:
- return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSMap::kSize, has_unboxed_fields);
+ case TRANSITION_ARRAY_TYPE:
+ return kVisitTransitionArray;
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
@@ -99,31 +94,14 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_PROXY_TYPE:
return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSProxy::kSize, has_unboxed_fields);
-
- case JS_FUNCTION_PROXY_TYPE:
- return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
- JSFunctionProxy::kSize, has_unboxed_fields);
-
- case FOREIGN_TYPE:
- return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
- Foreign::kSize, has_unboxed_fields);
+ instance_size, has_unboxed_fields);
case SYMBOL_TYPE:
return kVisitSymbol;
- case FILLER_TYPE:
- return kVisitDataObjectGeneric;
-
case JS_ARRAY_BUFFER_TYPE:
return kVisitJSArrayBuffer;
- case JS_TYPED_ARRAY_TYPE:
- return kVisitJSTypedArray;
-
- case JS_DATA_VIEW_TYPE:
- return kVisitJSDataView;
-
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -134,15 +112,25 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_ITERATOR_RESULT_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
instance_size, has_unboxed_fields);
case JS_FUNCTION_TYPE:
return kVisitJSFunction;
+ case FILLER_TYPE:
+ if (instance_size == kPointerSize) return kVisitDataObjectGeneric;
+ // Fall through.
+ case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case SIMD128_VALUE_TYPE:
@@ -179,138 +167,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
}
-void HeapObject::IterateBody(InstanceType type, int object_size,
- ObjectVisitor* v) {
- // Avoiding <Type>::cast(this) because it accesses the map pointer field.
- // During GC, the map pointer field is encoded.
- if (type < FIRST_NONSTRING_TYPE) {
- switch (type & kStringRepresentationMask) {
- case kSeqStringTag:
- break;
- case kConsStringTag:
- ConsString::BodyDescriptor::IterateBody(this, v);
- break;
- case kSlicedStringTag:
- SlicedString::BodyDescriptor::IterateBody(this, v);
- break;
- case kExternalStringTag:
- if ((type & kStringEncodingMask) == kOneByteStringTag) {
- reinterpret_cast<ExternalOneByteString*>(this)
- ->ExternalOneByteStringIterateBody(v);
- } else {
- reinterpret_cast<ExternalTwoByteString*>(this)
- ->ExternalTwoByteStringIterateBody(v);
- }
- break;
- }
- return;
- }
-
- switch (type) {
- case FIXED_ARRAY_TYPE:
- FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case FIXED_DOUBLE_ARRAY_TYPE:
- break;
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_TYPED_ARRAY_TYPE:
- case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
- case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
- case JS_ITERATOR_RESULT_TYPE:
- case JS_WEAK_MAP_TYPE:
- case JS_WEAK_SET_TYPE:
- case JS_REGEXP_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
- JSObject::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::JSArrayBufferIterateBody(this, v);
- break;
- case JS_FUNCTION_TYPE:
- JSFunction::BodyDescriptor::IterateBody(this, object_size, v);
- break;
- case ODDBALL_TYPE:
- Oddball::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_PROXY_TYPE:
- JSProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::BodyDescriptor::IterateBody(this, v);
- break;
- case FOREIGN_TYPE:
- reinterpret_cast<Foreign*>(this)->ForeignIterateBody(v);
- break;
- case MAP_TYPE:
- Map::BodyDescriptor::IterateBody(this, v);
- break;
- case CODE_TYPE:
- reinterpret_cast<Code*>(this)->CodeIterateBody(v);
- break;
- case CELL_TYPE:
- Cell::BodyDescriptor::IterateBody(this, v);
- break;
- case PROPERTY_CELL_TYPE:
- PropertyCell::BodyDescriptor::IterateBody(this, v);
- break;
- case WEAK_CELL_TYPE:
- WeakCell::BodyDescriptor::IterateBody(this, v);
- break;
- case SYMBOL_TYPE:
- Symbol::BodyDescriptor::IterateBody(this, v);
- break;
- case BYTECODE_ARRAY_TYPE:
- reinterpret_cast<BytecodeArray*>(this)->BytecodeArrayIterateBody(v);
- break;
-
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- case SIMD128_VALUE_TYPE:
- case FILLER_TYPE:
- case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
- break;
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- reinterpret_cast<FixedTypedArrayBase*>(this) \
- ->FixedTypedArrayBaseIterateBody(v); \
- break;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- case SHARED_FUNCTION_INFO_TYPE: {
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
- break;
- }
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- if (type == ALLOCATION_SITE_TYPE) {
- AllocationSite::BodyDescriptor::IterateBody(this, v);
- } else {
- StructBodyDescriptor::IterateBody(this, object_size, v);
- }
- break;
- default:
- PrintF("Unknown type: %d\n", type);
- UNREACHABLE();
- }
-}
-
-
// We don't record weak slots during marking or scavenges. Instead we do it
// once when we complete mark-compact cycle. Note that write barrier has no
// effect if we are already in the middle of compacting mark-sweep cycle and we
@@ -439,9 +295,17 @@ struct WeakListVisitor<Context> {
DoWeakList<JSFunction>(heap, context, retainer,
Context::OPTIMIZED_FUNCTIONS_LIST);
- // Code objects are always allocated in Code space, we do not have to visit
- // them during scavenges.
if (heap->gc_state() == Heap::MARK_COMPACT) {
+ // Record the slots of the weak entries in the native context.
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ for (int idx = Context::FIRST_WEAK_SLOT;
+ idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
+ Object** slot = Context::cast(context)->RawFieldOfElementAt(idx);
+ collector->RecordSlot(context, slot, *slot);
+ }
+ // Code objects are always allocated in Code space, we do not have to
+ // visit
+ // them during scavenges.
DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
}
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 787410d76f..1fe8a1749a 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -9,6 +9,7 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
+#include "src/objects-body-descriptors.h"
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
@@ -75,12 +76,11 @@ class StaticVisitorBase : public AllStatic {
V(Cell) \
V(PropertyCell) \
V(WeakCell) \
+ V(TransitionArray) \
V(SharedFunctionInfo) \
V(JSFunction) \
V(JSWeakCollection) \
V(JSArrayBuffer) \
- V(JSTypedArray) \
- V(JSDataView) \
V(JSRegExp)
// For data objects, JS objects and structs along with generic visitor which
@@ -248,7 +248,9 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
Object** start, Object** end)) {
- for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
+ for (Object** p = start; p < end; p++) {
+ StaticVisitor::VisitPointer(heap, object, p);
+ }
}
// Although we are using the JSFunction body descriptor which does not
@@ -269,10 +271,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FixedDoubleArray::SizeFor(length);
}
- INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
- return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- }
-
INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
return JSObjectVisitor::Visit(map, object);
}
@@ -292,8 +290,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
}
INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
- INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
- INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
class DataObjectVisitor {
@@ -350,6 +346,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
INLINE(static void VisitWeakCell(Map* map, HeapObject* object));
+ INLINE(static void VisitTransitionArray(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
@@ -372,23 +369,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
- INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
- INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
- // Mark pointers in a Map and its TransitionArray together, possibly
- // treating transitions or back pointers weak.
+ // Mark pointers in a Map treating some elements of the descriptor array weak.
static void MarkMapContents(Heap* heap, Map* map);
- static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
-
- // Mark pointers in the optimized code map that should act as strong
- // references, possibly treating some entries weak.
- static void MarkOptimizedCodeMap(Heap* heap, FixedArray* code_map);
-
- // Mark non-optimized code for functions inlined into the given optimized
- // code. This will prevent it from being flushed.
- static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
// Code flushing support.
INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index faf90face5..52ba97a9c7 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -17,7 +17,7 @@ namespace internal {
const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
- Heap* heap = isolate_->heap();
+ Heap* heap = isolate()->heap();
double deadline_in_ms =
deadline_in_seconds *
static_cast<double>(base::Time::kMillisecondsPerSecond);
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 6ac64f2eb6..cd35c7d7e3 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -28,7 +28,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- Heap::UpdateAllocationSiteFeedback(object, Heap::IGNORE_SCRATCHPAD_SLOT);
+ object->GetHeap()->UpdateAllocationSite(
+ object, object->GetHeap()->global_pretenuring_feedback_);
// AllocationMementos are unrooted and shouldn't survive a scavenge
DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
@@ -38,7 +39,8 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, Object** p) {
+void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
+ Object** p) {
Object* object = *p;
if (!heap->InNewSpace(object)) return;
Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 31f1ee55b7..40aeb74aa9 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -67,23 +67,10 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitJSWeakCollection,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- table_.Register(kVisitJSTypedArray,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSDataView,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
- if (marks_handling == IGNORE_MARKS) {
- table_.Register(
- kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- JSFunction::kSize>);
- } else {
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
- }
+ table_.Register(kVisitJSFunction, &EvacuateJSFunction);
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject, kVisitDataObjectGeneric>();
@@ -199,12 +186,7 @@ class ScavengingVisitor : public StaticVisitorBase {
*slot = target;
if (object_contents == POINTER_OBJECT) {
- if (map->instance_type() == JS_FUNCTION_TYPE) {
- heap->promotion_queue()->insert(target,
- JSFunction::kNonWeakFieldsEndOffset);
- } else {
- heap->promotion_queue()->insert(target, object_size);
- }
+ heap->promotion_queue()->insert(target, object_size);
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
@@ -242,8 +224,9 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- JSFunction::kSize>(map, slot, object);
+ ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
+
+ if (marks_handling == IGNORE_MARKS) return;
MapWord map_word = object->map_word();
DCHECK(map_word.IsForwardingAddress());
@@ -266,7 +249,8 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
HeapObject* object) {
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
+ int object_size = FixedArray::SizeFor(length);
EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
object_size);
}
@@ -283,28 +267,16 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- FixedTypedArrayBase* target =
- reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- if (target->base_pointer() != Smi::FromInt(0))
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ object_size);
}
static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- FixedTypedArrayBase* target =
- reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- if (target->base_pointer() != Smi::FromInt(0))
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ EvacuateObject<POINTER_OBJECT, kDoubleAligned>(map, slot, object,
+ object_size);
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index b180879db2..5d0abf49d3 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -63,7 +63,7 @@ class ScavengeVisitor : public ObjectVisitor {
class StaticScavengeVisitor
: public StaticNewSpaceVisitor<StaticScavengeVisitor> {
public:
- static inline void VisitPointer(Heap* heap, Object** p);
+ static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
};
} // namespace internal
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 76011768fa..3023fbf51e 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_
+#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
@@ -49,20 +50,21 @@ Page* PageIterator::next() {
// SemiSpaceIterator
HeapObject* SemiSpaceIterator::Next() {
- if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- DCHECK(!page->is_anchor());
- current_ = page->area_start();
- if (current_ == limit_) return NULL;
+ while (current_ != limit_) {
+ if (NewSpacePage::IsAtEnd(current_)) {
+ NewSpacePage* page = NewSpacePage::FromLimit(current_);
+ page = page->next_page();
+ DCHECK(!page->is_anchor());
+ current_ = page->area_start();
+ if (current_ == limit_) return nullptr;
+ }
+ HeapObject* object = HeapObject::FromAddress(current_);
+ current_ += object->Size();
+ if (!object->IsFiller()) {
+ return object;
+ }
}
-
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = object->Size();
-
- current_ += size;
- return object;
+ return nullptr;
}
@@ -319,6 +321,24 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
}
+AllocationResult LocalAllocationBuffer::AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ Address current_top = allocation_info_.top();
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+ Address new_top = current_top + filler_size + size_in_bytes;
+ if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+
+ allocation_info_.set_top(new_top);
+ if (filler_size > 0) {
+ return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+ filler_size);
+ }
+
+ return AllocationResult(HeapObject::FromAddress(current_top));
+}
+
+
HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
@@ -482,6 +502,13 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
}
+MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return AllocateRaw(size_in_bytes, alignment);
+}
+
+
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
return static_cast<LargePage*>(chunk);
@@ -492,6 +519,34 @@ intptr_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
+
+LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
+ return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
+}
+
+
+LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
+ AllocationResult result,
+ intptr_t size) {
+ if (result.IsRetry()) return InvalidBuffer();
+ HeapObject* obj = nullptr;
+ bool ok = result.To(&obj);
+ USE(ok);
+ DCHECK(ok);
+ Address top = HeapObject::cast(obj)->address();
+ return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
+}
+
+
+bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
+ if (allocation_info_.top() == other->allocation_info_.limit()) {
+ allocation_info_.set_top(other->allocation_info_.top());
+ other->allocation_info_.Reset(nullptr, nullptr);
+ return true;
+ }
+ return false;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index a5e2760bb0..90d252abb5 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -958,8 +958,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
anchor_.InitializeAsAnchor(this);
}
@@ -1248,8 +1247,7 @@ void PagedSpace::ReleasePage(Page* page) {
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
}
// If page is still in a list, unlink it from that list.
@@ -1390,8 +1388,8 @@ void NewSpace::TearDown() {
}
start_ = NULL;
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
+
to_space_.TearDown();
from_space_.TearDown();
@@ -1480,10 +1478,50 @@ void NewSpace::Shrink() {
}
+void LocalAllocationBuffer::Close() {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
+ AllocationInfo allocation_info)
+ : heap_(heap), allocation_info_(allocation_info) {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(
+ const LocalAllocationBuffer& other) {
+ *this = other;
+}
+
+
+LocalAllocationBuffer& LocalAllocationBuffer::operator=(
+ const LocalAllocationBuffer& other) {
+ Close();
+ heap_ = other.heap_;
+ allocation_info_ = other.allocation_info_;
+
+ // This is needed since we (a) cannot yet use move-semantics, and (b) want
+ // to make the use of the class easy by it as value and (c) implicitly call
+ // {Close} upon copy.
+ const_cast<LocalAllocationBuffer&>(other)
+ .allocation_info_.Reset(nullptr, nullptr);
+ return *this;
+}
+
+
void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(to_space_.page_low());
- allocation_info_.set_limit(to_space_.page_high());
+ allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
UpdateInlineAllocationLimit(0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -1499,7 +1537,7 @@ void NewSpace::ResetAllocationInfo() {
while (it.has_next()) {
Bitmap::Clear(it.next());
}
- InlineAllocationStep(old_top, allocation_info_.top());
+ InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
@@ -1509,14 +1547,15 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (top_on_previous_step_ == 0) {
+ } else if (inline_allocation_observers_paused_ ||
+ top_on_previous_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
// Lower limit during incremental marking.
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
- Address new_limit = new_top + inline_allocation_limit_step_;
+ Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
allocation_info_.set_limit(Min(new_limit, high));
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -1565,6 +1604,12 @@ bool NewSpace::AddFreshPage() {
}
+bool NewSpace::AddFreshPageSynchronized() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return AddFreshPage();
+}
+
+
bool NewSpace::EnsureAllocation(int size_in_bytes,
AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
@@ -1578,7 +1623,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- InlineAllocationStep(old_top, allocation_info_.top());
+ InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
old_top = allocation_info_.top();
high = to_space_.page_high();
@@ -1594,28 +1639,38 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
// or because idle scavenge job wants to get a chance to post a task.
// Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
- InlineAllocationStep(new_top, new_top);
+ Address soon_object = old_top + filler_size;
+ InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
}
return true;
}
-void NewSpace::UpdateInlineAllocationLimitStep() {
- intptr_t step = 0;
+void NewSpace::StartNextInlineAllocationStep() {
+ if (!inline_allocation_observers_paused_) {
+ top_on_previous_step_ =
+ inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+ UpdateInlineAllocationLimit(0);
+ }
+}
+
+
+intptr_t NewSpace::GetNextInlineAllocationStepSize() {
+ intptr_t next_step = 0;
for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
- InlineAllocationObserver* observer = inline_allocation_observers_[i];
- step = step ? Min(step, observer->step_size()) : observer->step_size();
+ InlineAllocationObserver* o = inline_allocation_observers_[i];
+ next_step = next_step ? Min(next_step, o->bytes_to_next_step())
+ : o->bytes_to_next_step();
}
- inline_allocation_limit_step_ = step;
- top_on_previous_step_ = step ? allocation_info_.top() : 0;
- UpdateInlineAllocationLimit(0);
+ DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+ return next_step;
}
void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
inline_allocation_observers_.Add(observer);
- UpdateInlineAllocationLimitStep();
+ StartNextInlineAllocationStep();
}
@@ -1625,15 +1680,33 @@ void NewSpace::RemoveInlineAllocationObserver(
// Only used in assertion. Suppress unused variable warning.
static_cast<void>(removed);
DCHECK(removed);
- UpdateInlineAllocationLimitStep();
+ StartNextInlineAllocationStep();
}
-void NewSpace::InlineAllocationStep(Address top, Address new_top) {
+void NewSpace::PauseInlineAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ InlineAllocationStep(top(), top(), nullptr, 0);
+ inline_allocation_observers_paused_ = true;
+ top_on_previous_step_ = 0;
+ UpdateInlineAllocationLimit(0);
+}
+
+
+void NewSpace::ResumeInlineAllocationObservers() {
+ DCHECK(top_on_previous_step_ == 0);
+ inline_allocation_observers_paused_ = false;
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::InlineAllocationStep(Address top, Address new_top,
+ Address soon_object, size_t size) {
if (top_on_previous_step_) {
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
- inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated);
+ inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
+ soon_object, size);
}
top_on_previous_step_ = new_top;
}
@@ -2262,7 +2335,7 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
if (node == nullptr) return nullptr;
Page* page = Page::FromAddress(node->address());
- while ((node != nullptr) && page->IsEvacuationCandidate()) {
+ while ((node != nullptr) && !page->CanAllocate()) {
available_ -= node->size();
page->add_available_in_free_list(type_, -(node->Size()));
node = node->next();
@@ -2304,7 +2377,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
int size = cur_node->size();
Page* page_for_node = Page::FromAddress(cur_node->address());
- if ((size >= size_in_bytes) || page_for_node->IsEvacuationCandidate()) {
+ if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
// The node is either large enough or contained in an evacuation
// candidate. In both cases we need to unlink it from the list.
available_ -= size;
@@ -2318,7 +2391,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
prev_non_evac_node->set_next(cur_node->next());
}
// For evacuation candidates we continue.
- if (page_for_node->IsEvacuationCandidate()) {
+ if (!page_for_node->CanAllocate()) {
page_for_node->add_available_in_free_list(type_, -size);
continue;
}
@@ -2700,7 +2773,8 @@ void PagedSpace::PrepareForMarkCompact() {
intptr_t PagedSpace::SizeOfObjects() {
const intptr_t size = Size() - (limit() - top());
- DCHECK_GE(size, 0);
+ CHECK_GE(limit(), top());
+ CHECK_GE(size, 0);
USE(size);
return size;
}
@@ -2728,15 +2802,12 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top())
- ->IsEvacuationCandidate()) {
+ if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-
- allocation_info_.set_top(nullptr);
- allocation_info_.set_limit(nullptr);
+ allocation_info_.Reset(nullptr, nullptr);
}
}
@@ -2985,7 +3056,6 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
@@ -2994,7 +3064,7 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
size_(0),
page_count_(0),
objects_size_(0),
- chunk_map_(ComparePointers, 1024) {}
+ chunk_map_(HashMap::PointersMatch, 1024) {}
LargeObjectSpace::~LargeObjectSpace() {}
@@ -3029,8 +3099,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!heap()->always_allocate() &&
- !heap()->CanExpandOldGeneration(object_size)) {
+ if (!heap()->CanExpandOldGeneration(object_size)) {
return AllocationResult::Retry(identity());
}
@@ -3117,7 +3186,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
while (current != NULL) {
HeapObject* object = current->GetObject();
MarkBit mark_bit = Marking::MarkBitFrom(object);
- DCHECK(Marking::IsBlackOrGrey(mark_bit));
+ DCHECK(Marking::IsBlack(mark_bit));
Marking::BlackToWhite(mark_bit);
Page::FromAddress(object->address())->ResetProgressBar();
Page::FromAddress(object->address())->ResetLiveBytes();
@@ -3132,7 +3201,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
while (current != NULL) {
HeapObject* object = current->GetObject();
MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (Marking::IsBlackOrGrey(mark_bit)) {
+ DCHECK(!Marking::IsGrey(mark_bit));
+ if (Marking::IsBlack(mark_bit)) {
previous = current;
current = current->next_page();
} else {
@@ -3205,11 +3275,6 @@ void LargeObjectSpace::Verify() {
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map));
- // Double unboxing in LO space is not allowed. This would break the
- // lookup mechanism for store and slot buffer entries which use the
- // page header tag.
- CHECK(object->ContentType() != HeapObjectContents::kMixedValues);
-
// We have only code, sequential strings, external strings
// (sequential strings that have been morphed into external
// strings), fixed arrays, byte arrays, and constant pool arrays in the
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index df3adebe1d..a8102cabc7 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -323,6 +323,9 @@ class MemoryChunk {
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+ // This flag is inteded to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE,
+
// The memory chunk is already logically freed, however the actual freeing
// still has to be performed.
PRE_FREED,
@@ -592,6 +595,7 @@ class MemoryChunk {
}
live_byte_count_ = 0;
}
+
void IncrementLiveBytes(int by) {
if (FLAG_gc_verbose) {
printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
@@ -599,13 +603,21 @@ class MemoryChunk {
live_byte_count_ + by);
}
live_byte_count_ += by;
+ DCHECK_GE(live_byte_count_, 0);
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
}
+
int LiveBytes() {
- DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
+ DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
return live_byte_count_;
}
+ void SetLiveBytes(int live_bytes) {
+ DCHECK_GE(live_bytes, 0);
+ DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+ live_byte_count_ = live_bytes;
+ }
+
int write_barrier_counter() {
return static_cast<int>(write_barrier_counter_);
}
@@ -631,13 +643,6 @@ class MemoryChunk {
}
}
- bool IsLeftOfProgressBar(Object** slot) {
- Address slot_address = reinterpret_cast<Address>(slot);
- DCHECK(slot_address > this->address());
- return (slot_address - (this->address() + kObjectStartOffset)) <
- progress_bar();
- }
-
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -689,6 +694,10 @@ class MemoryChunk {
return IsFlagSet(EVACUATION_CANDIDATE);
}
+ bool CanAllocate() {
+ return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
+ }
+
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
@@ -1472,7 +1481,13 @@ class PageIterator BASE_EMBEDDED {
// space.
class AllocationInfo {
public:
- AllocationInfo() : top_(NULL), limit_(NULL) {}
+ AllocationInfo() : top_(nullptr), limit_(nullptr) {}
+ AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+
+ void Reset(Address top, Address limit) {
+ set_top(top);
+ set_limit(limit);
+ }
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
@@ -1489,15 +1504,10 @@ class AllocationInfo {
Address* top_address() { return &top_; }
INLINE(void set_limit(Address limit)) {
- SLOW_DCHECK(limit == NULL ||
- (reinterpret_cast<intptr_t>(limit) & kHeapObjectTagMask) == 0);
limit_ = limit;
}
INLINE(Address limit()) const {
- SLOW_DCHECK(limit_ == NULL ||
- (reinterpret_cast<intptr_t>(limit_) & kHeapObjectTagMask) ==
- 0);
return limit_;
}
@@ -1546,7 +1556,10 @@ class AllocationStats BASE_EMBEDDED {
// Accessors for the allocation statistics.
intptr_t Capacity() { return capacity_; }
intptr_t MaxCapacity() { return max_capacity_; }
- intptr_t Size() { return size_; }
+ intptr_t Size() {
+ CHECK_GE(size_, 0);
+ return size_;
+ }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
@@ -1557,7 +1570,7 @@ class AllocationStats BASE_EMBEDDED {
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Shrink the space by removing available bytes. Since shrinking is done
@@ -1566,19 +1579,19 @@ class AllocationStats BASE_EMBEDDED {
void ShrinkSpace(int size_in_bytes) {
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
- DCHECK(size_ >= 0);
+ CHECK(size_ >= 0);
}
// Free allocated bytes, making them available (size -> available).
void DeallocateBytes(intptr_t size_in_bytes) {
size_ -= size_in_bytes;
- DCHECK_GE(size_, 0);
+ CHECK_GE(size_, 0);
}
// Merge {other} into {this}.
@@ -1588,12 +1601,13 @@ class AllocationStats BASE_EMBEDDED {
if (other.max_capacity_ > max_capacity_) {
max_capacity_ = other.max_capacity_;
}
+ CHECK_GE(size_, 0);
}
void DecreaseCapacity(intptr_t size_in_bytes) {
capacity_ -= size_in_bytes;
- DCHECK_GE(capacity_, 0);
- DCHECK_GE(capacity_, size_);
+ CHECK_GE(capacity_, 0);
+ CHECK_GE(capacity_, size_);
}
void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
@@ -1870,6 +1884,60 @@ class AllocationResult {
STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
+// LocalAllocationBuffer represents a linear allocation area that is created
+// from a given {AllocationResult} and can be used to allocate memory without
+// synchronization.
+//
+// The buffer is properly closed upon destruction and reassignment.
+// Example:
+// {
+// AllocationResult result = ...;
+// LocalAllocationBuffer a(heap, result, size);
+// LocalAllocationBuffer b = a;
+// CHECK(!a.IsValid());
+// CHECK(b.IsValid());
+// // {a} is invalid now and cannot be used for further allocations.
+// }
+// // Since {b} went out of scope, the LAB is closed, resulting in creating a
+// // filler object for the remaining area.
+class LocalAllocationBuffer {
+ public:
+ // Indicates that a buffer cannot be used for allocations anymore. Can result
+ // from either reassigning a buffer, or trying to construct it from an
+ // invalid {AllocationResult}.
+ static inline LocalAllocationBuffer InvalidBuffer();
+
+ // Creates a new LAB from a given {AllocationResult}. Results in
+ // InvalidBuffer if the result indicates a retry.
+ static inline LocalAllocationBuffer FromResult(Heap* heap,
+ AllocationResult result,
+ intptr_t size);
+
+ ~LocalAllocationBuffer() { Close(); }
+
+ // Convert to C++11 move-semantics once allowed by the style guide.
+ LocalAllocationBuffer(const LocalAllocationBuffer& other);
+ LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
+
+ MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment);
+
+ inline bool IsValid() { return allocation_info_.top() != nullptr; }
+
+ // Try to merge LABs, which is only possible when they are adjacent in memory.
+ // Returns true if the merge was successful, false otherwise.
+ inline bool TryMerge(LocalAllocationBuffer* other);
+
+ private:
+ LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
+
+ void Close();
+
+ Heap* heap_;
+ AllocationInfo allocation_info_;
+};
+
+
class PagedSpace : public Space {
public:
static const intptr_t kCompactionMemoryWanted = 500 * KB;
@@ -2000,8 +2068,7 @@ class PagedSpace : public Space {
DCHECK(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(top);
- allocation_info_.set_limit(limit);
+ allocation_info_.Reset(top, limit);
}
// Empty space allocation info, returning unused area to free list.
@@ -2515,26 +2582,37 @@ class InlineAllocationObserver {
public:
explicit InlineAllocationObserver(intptr_t step_size)
: step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK(step_size >= kPointerSize && (step_size & kHeapObjectTagMask) == 0);
+ DCHECK(step_size >= kPointerSize);
}
virtual ~InlineAllocationObserver() {}
private:
intptr_t step_size() const { return step_size_; }
-
- // Pure virtual method provided by the subclasses that gets called when more
- // than step_size byte have been allocated.
- virtual void Step(int bytes_allocated) = 0;
+ intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
+
+ // Pure virtual method provided by the subclasses that gets called when at
+ // least step_size bytes have been allocated. soon_object is the address just
+ // allocated (but not yet initialized.) size is the size of the object as
+ // requested (i.e. w/o the alignment fillers). Some complexities to be aware
+ // of:
+ // 1) soon_object will be nullptr in cases where we end up observing an
+ // allocation that happens to be a filler space (e.g. page boundaries.)
+ // 2) size is the requested size at the time of allocation. Right-trimming
+ // may change the object size dynamically.
+ // 3) soon_object may actually be the first object in an allocation-folding
+ // group. In such a case size is the size of the group rather than the
+ // first object.
+ virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
// Called each time the new space does an inline allocation step. This may be
// more frequently than the step_size we are monitoring (e.g. when there are
- // multiple observers, or when page or space boundary is encountered.) The
- // Step method is only called once more than step_size bytes have been
- // allocated.
- void InlineAllocationStep(int bytes_allocated) {
+ // multiple observers, or when page or space boundary is encountered.)
+ void InlineAllocationStep(int bytes_allocated, Address soon_object,
+ size_t size) {
bytes_to_next_step_ -= bytes_allocated;
if (bytes_to_next_step_ <= 0) {
- Step(static_cast<int>(step_size_ - bytes_to_next_step_));
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
+ size);
bytes_to_next_step_ = step_size_;
}
}
@@ -2561,8 +2639,8 @@ class NewSpace : public Space {
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- inline_allocation_limit_step_(0),
- top_on_previous_step_(0) {}
+ top_on_previous_step_(0),
+ inline_allocation_observers_paused_(false) {}
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2731,11 +2809,13 @@ class NewSpace : public Space {
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment));
+ MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+ int size_in_bytes, AllocationAlignment alignment);
+
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
void UpdateInlineAllocationLimit(int size_in_bytes);
- void UpdateInlineAllocationLimitStep();
// Allows observation of inline allocation. The observer->Step() method gets
// called after every step_size bytes have been allocated (approximately).
@@ -2747,7 +2827,6 @@ class NewSpace : public Space {
void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
void DisableInlineAllocationSteps() {
- inline_allocation_limit_step_ = 0;
top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
}
@@ -2782,6 +2861,7 @@ class NewSpace : public Space {
// are no pages, or the current page is already empty), or true
// if successful.
bool AddFreshPage();
+ bool AddFreshPageSynchronized();
#ifdef VERIFY_HEAP
// Verify the active semispace.
@@ -2825,6 +2905,8 @@ class NewSpace : public Space {
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
+ base::Mutex mutex_;
+
Address chunk_base_;
uintptr_t chunk_size_;
@@ -2849,10 +2931,9 @@ class NewSpace : public Space {
// once in a while. This is done by setting allocation_info_.limit to be lower
// than the actual limit and and increasing it in steps to guarantee that the
// observers are notified periodically.
- intptr_t inline_allocation_limit_step_;
List<InlineAllocationObserver*> inline_allocation_observers_;
-
Address top_on_previous_step_;
+ bool inline_allocation_observers_paused_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2865,11 +2946,32 @@ class NewSpace : public Space {
// allocated since the last step.) new_top is the address of the bump pointer
// where the next byte is going to be allocated from. top and new_top may be
// different when we cross a page boundary or reset the space.
- void InlineAllocationStep(Address top, Address new_top);
-
+ void InlineAllocationStep(Address top, Address new_top, Address soon_object,
+ size_t size);
+ intptr_t GetNextInlineAllocationStepSize();
+ void StartNextInlineAllocationStep();
+ void PauseInlineAllocationObservers();
+ void ResumeInlineAllocationObservers();
+
+ friend class PauseInlineAllocationObserversScope;
friend class SemiSpaceIterator;
};
+class PauseInlineAllocationObserversScope {
+ public:
+ explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
+ : new_space_(new_space) {
+ new_space_->PauseInlineAllocationObservers();
+ }
+ ~PauseInlineAllocationObserversScope() {
+ new_space_->ResumeInlineAllocationObservers();
+ }
+
+ private:
+ NewSpace* new_space_;
+ DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+};
+
// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 2ed9deccff..a8a1e5bbf1 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -412,6 +412,26 @@ void StoreBuffer::VerifyValidStoreBufferEntries() {
}
+class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
+ public:
+ FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
+ ObjectSlotCallback callback)
+ : store_buffer_(store_buffer), callback_(callback) {}
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ store_buffer_->FindPointersToNewSpaceInRegion(
+ reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
+ callback_);
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
+
+ private:
+ StoreBuffer* store_buffer_;
+ ObjectSlotCallback callback_;
+};
+
+
void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
// We do not sort or remove duplicated entries from the store buffer because
// we expect that callback will rebuild the store buffer thus removing
@@ -438,6 +458,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
}
PointerChunkIterator it(heap_);
MemoryChunk* chunk;
+ FindPointersToNewSpaceVisitor visitor(this, slot_callback);
while ((chunk = it.next()) != NULL) {
if (chunk->scan_on_scavenge()) {
chunk->set_scan_on_scavenge(false);
@@ -469,69 +490,22 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
}
}
} else {
- heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
- page);
- HeapObjectIterator iterator(page);
- for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
- heap_object = iterator.Next()) {
- // We iterate over objects that contain new space pointers only.
- Address obj_address = heap_object->address();
- const int start_offset = HeapObject::kHeaderSize;
- const int end_offset = heap_object->Size();
-
- switch (heap_object->ContentType()) {
- case HeapObjectContents::kTaggedValues: {
- Address start_address = obj_address + start_offset;
- Address end_address = obj_address + end_offset;
- // Object has only tagged fields.
- FindPointersToNewSpaceInRegion(start_address, end_address,
- slot_callback);
- break;
- }
-
- case HeapObjectContents::kMixedValues: {
- if (heap_object->IsFixedTypedArrayBase()) {
- FindPointersToNewSpaceInRegion(
- obj_address + FixedTypedArrayBase::kBasePointerOffset,
- obj_address + FixedTypedArrayBase::kHeaderSize,
- slot_callback);
- } else if (heap_object->IsBytecodeArray()) {
- FindPointersToNewSpaceInRegion(
- obj_address + BytecodeArray::kConstantPoolOffset,
- obj_address + BytecodeArray::kHeaderSize,
- slot_callback);
- } else if (heap_object->IsJSArrayBuffer()) {
- FindPointersToNewSpaceInRegion(
- obj_address +
- JSArrayBuffer::BodyDescriptor::kStartOffset,
- obj_address + JSArrayBuffer::kByteLengthOffset +
- kPointerSize,
- slot_callback);
- FindPointersToNewSpaceInRegion(
- obj_address + JSArrayBuffer::kSize,
- obj_address + JSArrayBuffer::kSizeWithInternalFields,
- slot_callback);
- } else if (FLAG_unbox_double_fields) {
- LayoutDescriptorHelper helper(heap_object->map());
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset,
- &end_of_region_offset)) {
- FindPointersToNewSpaceInRegion(
- obj_address + offset,
- obj_address + end_of_region_offset, slot_callback);
- }
- offset = end_of_region_offset;
- }
- } else {
- UNREACHABLE();
- }
- break;
- }
-
- case HeapObjectContents::kRawValues:
- break;
+ if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ // Aborted pages require iterating using mark bits because they
+ // don't have an iterable object layout before sweeping (which can
+ // only happen later). Note that we can never reach an
+ // aborted page through the scavenger.
+ DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
+ heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
+ &visitor);
+ } else {
+ heap_->mark_compact_collector()
+ ->SweepOrWaitUntilSweepingCompleted(page);
+ HeapObjectIterator iterator(page);
+ for (HeapObject* heap_object = iterator.Next();
+ heap_object != nullptr; heap_object = iterator.Next()) {
+ // We iterate over objects that contain new space pointers only.
+ heap_object->IterateBody(&visitor);
}
}
}
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 08dcebfc08..9eeb00117b 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -19,9 +19,6 @@ class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
- ObjectSlotCallback slot_callback);
-
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
@@ -147,23 +144,15 @@ class StoreBuffer {
void FindPointersToNewSpaceInRegion(Address start, Address end,
ObjectSlotCallback slot_callback);
- // For each region of pointers on a page in use from an old space call
- // visit_pointer_region callback.
- // If either visit_pointer_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- void IteratePointersOnPage(PagedSpace* space, Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
#ifdef VERIFY_HEAP
void VerifyPointers(LargeObjectSpace* space);
#endif
- friend class StoreBufferRebuildScope;
friend class DontMoveStoreBufferEntriesScope;
+ friend class FindPointersToNewSpaceVisitor;
+ friend class StoreBufferRebuildScope;
};
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 7899f2937d..8de2d2998a 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -16,6 +16,7 @@
#include "unicode/decimfmt.h"
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
+#include "unicode/gregocal.h"
#include "unicode/locid.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
@@ -96,6 +97,16 @@ icu::SimpleDateFormat* CreateICUDateFormat(
icu::Calendar* calendar =
icu::Calendar::createInstance(tz, icu_locale, status);
+ if (calendar->getDynamicClassID() ==
+ icu::GregorianCalendar::getStaticClassID()) {
+ icu::GregorianCalendar* gc = (icu::GregorianCalendar*)calendar;
+ UErrorCode status = U_ZERO_ERROR;
+ // The beginning of ECMAScript time, namely -(2**53)
+ const double start_of_time = -9007199254740992;
+ gc->setGregorianChange(start_of_time, status);
+ DCHECK(U_SUCCESS(status));
+ }
+
// Make formatter from skeleton. Calendar and numbering system are added
// to the locale as Unicode extension (if they were specified at all).
icu::SimpleDateFormat* date_format = NULL;
@@ -134,7 +145,7 @@ void SetResolvedDateSettings(Isolate* isolate,
icu::UnicodeString pattern;
date_format->toPattern(pattern);
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("pattern"),
+ resolved, factory->intl_pattern_symbol(),
factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
@@ -356,7 +367,7 @@ void SetResolvedNumberSettings(Isolate* isolate,
icu::UnicodeString pattern;
number_format->toPattern(pattern);
JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("pattern"),
+ resolved, factory->intl_pattern_symbol(),
factory->NewStringFromTwoByte(
Vector<const uint16_t>(
reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 6c9c538cc9..d957872cab 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -104,7 +104,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
@@ -133,7 +134,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -199,7 +200,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -229,8 +230,8 @@ void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
}
@@ -244,7 +245,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
- Assembler::set_target_address_at(location, host_, target);
+ Assembler::set_target_address_at(isolate_, location, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -259,7 +260,8 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -280,7 +282,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -305,7 +307,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
@@ -453,13 +455,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, sizeof(int32_t));
+ Assembler::FlushICache(isolate, p, sizeof(int32_t));
}
}
@@ -499,7 +501,7 @@ void Assembler::emit_near_disp(Label* L) {
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 59d0025939..f120a6233e 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -187,37 +187,6 @@ bool RelocInfo::IsInConstantPool() {
}
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- DCHECK_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -338,6 +307,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -2194,6 +2164,19 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
}
+void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x0A);
+ emit_sse_operand(dst, src);
+ // Mask precision exeption.
+ EMIT(static_cast<byte>(mode) | 0x8);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2793,6 +2776,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2929,7 +2913,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 15092951d7..0b202529f9 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -486,19 +486,17 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) {
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target);
+ set_target_address_at(isolate, pc, constant_pool, target);
}
// Return the code target address at a call site from the return address
@@ -508,13 +506,14 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static const int kSpecialTargetSize = kPointerSize;
@@ -989,6 +988,7 @@ class Assembler : public AssemblerBase {
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
+ void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
@@ -1400,7 +1400,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 4da4cb1db2..a2aec74162 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -22,9 +22,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- edi : called function
+ // -- edi : target
+ // -- edx : new.target
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
@@ -37,37 +36,26 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ if (extra_args != BuiltinExtraArguments::kNone) {
+ __ PopReturnAddressTo(ecx);
+ if (extra_args & BuiltinExtraArguments::kTarget) {
+ ++num_extra_args;
+ __ Push(edi);
+ }
+ if (extra_args & BuiltinExtraArguments::kNewTarget) {
+ ++num_extra_args;
+ __ Push(edx);
+ }
+ __ PushReturnAddressFrom(ecx);
}
// JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments. But eax is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- Label argc, done_argc;
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(ebx);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &argc, Label::kNear);
- __ lea(eax, Operand(ebx, num_extra_args + 1));
- __ jmp(&done_argc, Label::kNear);
- __ bind(&argc);
+ // including the receiver and the extra arguments.
__ add(eax, Immediate(num_extra_args + 1));
- __ bind(&done_argc);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -75,14 +63,21 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
+ // Push a copy of the target function and the new target.
__ push(edi);
+ __ push(edx);
// Function is also the parameter to the runtime call.
__ push(edi);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ pop(edx);
__ pop(edi);
}
@@ -122,12 +117,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
// -- ebx: allocation site or undefined
- // -- edx: original constructor
+ // -- edx: new target
// -----------------------------------
// Enter a construct frame.
@@ -139,177 +135,166 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
__ SmiTag(eax);
__ push(eax);
- __ push(edi);
- __ push(edx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // edx: original constructor
- __ mov(eax, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (!is_api_function) {
- Label allocate;
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
- // Check if slack tracking is enabled.
- __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
- __ shr(esi, Map::Counter::kShift);
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &allocate);
- // Decrease generous allocation count.
- __ sub(FieldOperand(eax, Map::kBitField3Offset),
- Immediate(1 << Map::Counter::kShift));
-
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(not_equal, &allocate);
-
- __ push(eax);
- __ push(edx);
- __ push(edi);
-
- __ push(eax); // initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(edx);
- __ pop(eax);
- __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
-
- __ bind(&allocate);
- }
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
-
- __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
- Factory* factory = masm->isolate()->factory();
-
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // esi: slack tracking counter (non-API function case)
- __ mov(edx, factory->undefined_value());
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ movzx_b(
- esi,
- FieldOperand(
- eax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ sub(esi, eax);
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
+ if (create_implicit_receiver) {
+ __ push(edi);
+ __ push(edx);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // edx: new target
+ __ mov(eax,
+ FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject (not HeapObject tagged - the actual address).
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // ebx: JSObject (tagged)
+ // ecx: First in-object property of JSObject (not tagged)
+ __ mov(edx, factory->undefined_value());
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCounter::kShift);
+ __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
+ __ push(esi); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Allocate object with a slack.
+ __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ neg(esi);
+ __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(ecx, esi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ mov(edx, factory->one_pointer_filler_map());
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+ __ pop(esi); // Restore allocation count value before decreasing.
+ __ cmp(esi, Map::kSlackTrackingCounterEnd);
+ __ j(not_equal, &allocated);
+
+ // Push the object to the stack, and then the initial map as
+ // an argument to the runtime call.
+ __ push(ebx);
+ __ push(eax); // initial map
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ pop(ebx);
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(ecx, esi, edx);
- __ mov(edx, factory->one_pointer_filler_map());
- // Fill the remaining fields with one pointer filler map.
-
- __ bind(&no_inobject_slack_tracking);
- }
- __ InitializeFieldsWithFiller(ecx, edi, edx);
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- // ebx: JSObject (untagged)
- __ or_(ebx, Immediate(kHeapObjectTag));
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+ }
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
+ // Allocate the new receiver object using the runtime call.
+ // edx: new target
+ __ bind(&rt_call);
+ int offset = kPointerSize;
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi); // constructor function
+ __ push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(ebx, eax); // store result in ebx
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+
+ // Restore the parameters.
+ __ pop(edx); // new.target
+ __ pop(edi); // Constructor function.
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
}
- // Allocate the new receiver object using the runtime call.
- // edx: original constructor
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi); // constructor function
- __ push(edx); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
-
- // Restore the parameters.
- __ pop(edx); // new.target
- __ pop(edi); // Constructor function.
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
__ SmiUntag(eax);
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ push(edx);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -332,40 +317,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The arguments
- // count is stored below the reciever and the new.target.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and leave the construct frame. The
+ // arguments count is stored below the receiver.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ } else {
+ __ mov(ebx, Operand(esp, 0));
+ }
// Leave construct frame.
}
@@ -375,91 +364,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ }
__ ret(0);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -- ebx: allocation site or undefined
- // -- edx: original constructor
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve allocation site.
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(ebx);
-
- // Preserve actual arguments count.
- __ SmiTag(eax);
- __ push(eax);
- __ SmiUntag(eax);
-
- // Push new.target.
- __ push(edx);
-
- // receiver is the hole.
- __ push(Immediate(masm->isolate()->factory()->the_hole_value()));
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(equal, &skip_step_in);
-
- __ push(eax);
- __ push(edi);
- __ push(edi);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&skip_step_in);
-
- // Invoke function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Get arguments count, skipping over new.target.
- __ mov(ebx, Operand(esp, kPointerSize));
- }
- __ pop(ecx); // Return address.
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -492,7 +422,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -578,6 +508,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o edi: the JS function object being called
+// o edx: the new target
// o esi: our context
// o ebp: the caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -595,6 +526,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ __ push(edx); // Callee's new target.
+
+ // Push zero for bytecode array offset.
+ __ push(Immediate(0));
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
@@ -624,7 +559,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -656,7 +591,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -665,9 +600,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ mov(kInterpreterRegisterFileRegister, ebp);
- __ sub(
- kInterpreterRegisterFileRegister,
- Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
@@ -675,7 +609,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
__ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // Push context as a stack located parameter to the bytecode handler.
+ // Push dispatch table as a stack located parameter to the bytecode handler.
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
__ push(ebx);
@@ -691,6 +625,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ebx);
+ __ nop(); // Ensure that return address still counts as interpreter entry
+ // trampoline.
}
@@ -766,7 +702,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor
+ // -- edx : the new target
// -- edi : the constructor
// -- ebx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
@@ -799,39 +735,108 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Push(ecx);
// Call the constructor with unmodified eax, edi, ebi values.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Initialize register file register.
+ __ mov(kInterpreterRegisterFileRegister, ebp);
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+
+ // Get the bytecode array pointer from the frame.
+ __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ ebx);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ mov(
+ kInterpreterBytecodeOffsetRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Push dispatch table as a stack located parameter to the bytecode handler -
+ // overwrite the state slot (we don't use these for interpreter deopts).
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ mov(Operand(esp, kPointerSize), ebx);
+
+ // Dispatch to the target bytecode.
+ __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ mov(kContextRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(ebx);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
- __ push(edi);
- // Function is also the parameter to the runtime call.
- __ push(edi);
- // Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(edi);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -927,7 +932,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -954,7 +959,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
@@ -996,7 +1001,136 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into eax and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ JumpIfSmi(eax, &receiver_not_date);
+ __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+ __ j(not_equal, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(edx, Operand::StaticVariable(
+ ExternalReference::date_cache_stamp(masm->isolate())));
+ __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+ __ j(not_equal, &stamp_mismatch, Label::kNear);
+ __ mov(eax, FieldOperand(
+ eax, JSDate::kValueOffset + field_index * kPointerSize));
+ __ ret(1 * kPointerSize);
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 0), eax);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ ret(1 * kPointerSize);
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowNotDateError);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argArray
+ // -- esp[8] : thisArg
+ // -- esp[12] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into edi, argArray into eax (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg_array, no_this_arg;
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ mov(ebx, edx);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ {
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+ __ cmp(eax, Immediate(1));
+ __ j(equal, &no_arg_array, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+ __ bind(&no_arg_array);
+ }
+ __ bind(&no_this_arg);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
+
+ // ----------- S t a t e -------------
+ // -- eax : argArray
+ // -- edi : receiver
+ // -- esp[0] : return address
+ // -- esp[4] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &receiver_not_callable, Label::kNear);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ Label::kNear);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Set(eax, 0);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// esp[0] : Return address
// esp[8] : Argument n
@@ -1042,201 +1176,142 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ mov(key, Operand(ebp, indexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ mov(slot, Immediate(Smi::FromInt(slot_index)));
- __ mov(vector, Operand(ebp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register key.
- __ mov(key, Operand(ebp, indexOffset));
- __ add(key, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, indexOffset), key);
-
- __ bind(&entry);
- __ cmp(key, Operand(ebp, limitOffset));
- __ j(not_equal, &loop);
-
- // On exit, the pushed arguments count is in eax, untagged
- __ Move(eax, key);
- __ SmiUntag(eax);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
-
- // Stack at entry:
- // esp : return address
- // esp[4] : arguments
- // esp[8] : receiver ("this")
- // esp[12] : function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : function arguments
- // ebp[12] : receiver
- // ebp[16] : function
- static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argumentsList
+ // -- esp[8] : thisArgument
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ j(equal, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ Push(Immediate(0)); // index
- __ Push(Operand(ebp, kReceiverOffset)); // receiver
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : thisArgument
+ // -----------------------------------
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &target_not_callable, Label::kNear);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Leave internal frame.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
}
-// Used by ReflectConstruct
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : new.target (optional)
+ // -- esp[8] : argumentsList
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- // Stack at entry:
- // esp : return address
- // esp[4] : original constructor (new.target)
- // esp[8] : arguments
- // esp[16] : constructor
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // new.target into edx (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : original constructor (new.target)
- // ebp[12] : arguments
- // ebp[16] : constructor
- static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ mov(eax, Operand(ebp, kNewTargetOffset));
- __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &validate_arguments, Label::kNear);
- __ mov(eax, Operand(ebp, kFunctionOffset));
- __ mov(Operand(ebp, kNewTargetOffset), eax);
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ push(Operand(ebp, kFunctionOffset));
- __ push(Operand(ebp, kArgumentsOffset));
- __ push(Operand(ebp, kNewTargetOffset));
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
-
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ push(Immediate(0)); // index
- // Push the constructor function as callee.
- __ push(Operand(ebp, kFunctionOffset));
-
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ mov(ecx, Operand(ebp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ mov(edx, edi);
+ __ j(equal, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
}
- // remove this, target, arguments, and newTarget
- __ ret(kStackSize * kPointerSize);
-}
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edx : new.target
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &target_not_constructor, Label::kNear);
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &new_target_not_constructor, Label::kNear);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edx);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1300,6 +1375,113 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into eax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, ebx);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in eax).
+ __ bind(&no_arguments);
+ __ ret(1 * kPointerSize);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- edx : new target
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ Move(ebx, Smi::FromInt(0));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ }
+
+ // 3. Make sure ebx is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(ebx, &done_convert);
+ __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &done_convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Move(eax, ebx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(ebx, eax);
+ __ Pop(edx);
+ __ Pop(edi);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ }
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
@@ -1354,7 +1536,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -1364,13 +1546,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
- // -- edx : original constructor
+ // -- edx : new target
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into ebx and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -1386,7 +1571,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ PushReturnAddressFrom(ecx);
}
- // 2. Make sure ebx is a string.
+ // 3. Make sure ebx is a string.
{
Label convert, done_convert;
__ JumpIfSmi(ebx, &convert, Label::kNear);
@@ -1407,62 +1592,26 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- ebx : the first argument
- // -- edi : constructor function
- // -- edx : original constructor
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
-
- // Fall back to runtime if the original constructor and constructor differ.
- __ cmp(edx, edi);
- __ j(not_equal, &rt_call);
-
- __ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edi);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(edi);
- __ Pop(ebx);
- }
- __ jmp(&done_allocate);
-
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edi);
- __ Push(edi); // constructor function
- __ Push(edx); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(edi);
- __ Pop(ebx);
- }
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
+ __ Ret();
}
@@ -1471,24 +1620,24 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
- // -- edi : function (passed through to callee)
+ // -- edx : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
__ mov(ecx, esp);
- __ sub(ecx, edx);
- // Make edx the space we need for the array when it is unrolled onto the
+ __ sub(ecx, edi);
+ // Make edi the space we need for the array when it is unrolled onto the
// stack.
- __ mov(edx, ebx);
- __ shl(edx, kPointerSizeLog2);
+ __ mov(edi, ebx);
+ __ shl(edi, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
+ __ cmp(ecx, edi);
__ j(less_equal, stack_overflow); // Signed comparison.
}
@@ -1528,6 +1677,139 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(eax, &create_runtime);
+
+ // Load the map of argumentsList into ecx.
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Load native context into ebx.
+ __ mov(ebx, NativeContextOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+ __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+ __ j(equal, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(edx);
+ __ Pop(edi);
+ __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ mov(ebx,
+ FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+ __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ j(not_equal, &create_runtime);
+ __ SmiUntag(ebx);
+ __ mov(eax, ecx);
+ __ jmp(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(ecx);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(above, &create_runtime);
+ __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+ __ j(equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ neg(ecx);
+ __ add(ecx, esp);
+ __ sar(ecx, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, ebx);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- edi : target
+ // -- eax : args (a FixedArray built from argumentsList)
+ // -- ebx : len (number of elements to push from args)
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ movd(xmm0, edx);
+ __ PopReturnAddressTo(edx);
+ __ Move(ecx, Immediate(0));
+ Label done, loop;
+ __ bind(&loop);
+ __ cmp(ecx, ebx);
+ __ j(equal, &done, Label::kNear);
+ __ Push(
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ inc(ecx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(edx);
+ __ movd(edx, xmm0);
+ __ Move(eax, ebx);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1623,15 +1905,126 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
- actual, JUMP_FUNCTION, NullCallWrapper());
-
+ __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into ecx and length of that into ebx.
+ Label no_bound_arguments;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ test(ebx, ebx);
+ __ j(zero, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- ebx : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+ __ sub(esp, ecx);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ // Restore the stack pointer.
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Adjust effective number of arguments to include return address.
+ __ inc(eax);
+
+ // Relocate arguments and return address down the stack.
+ {
+ Label loop;
+ __ Set(ecx, 0);
+ __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+ __ bind(&loop);
+ __ movd(xmm0, Operand(ebx, ecx, times_pointer_size, 0));
+ __ movd(Operand(esp, ecx, times_pointer_size, 0), xmm0);
+ __ inc(ecx);
+ __ cmp(ecx, eax);
+ __ j(less, &loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ bind(&loop);
+ __ dec(ebx);
+ __ movd(xmm0, FieldOperand(ecx, ebx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ movd(Operand(esp, eax, times_pointer_size, 0), xmm0);
+ __ lea(eax, Operand(eax, 1));
+ __ j(greater, &loop);
+ }
+
+ // Adjust effective number of arguments (eax contains the number of
+ // arguments from the call plus return address plus the number of
+ // [[BoundArguments]]), so we need to subtract one for the return address.
+ __ dec(eax);
}
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Patch the receiver to [[BoundThis]].
+ __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(ExternalReference(
+ Builtins::kCall_ReceiverIsAny, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
}
@@ -1648,14 +2041,22 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(edi);
- __ jmp(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ PushReturnAddressFrom(ecx);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ add(eax, Immediate(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1676,7 +2077,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1685,10 +2086,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (checked to be a JSFunction)
+ // -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(edx);
__ AssertFunction(edi);
// Calling convention for function specific ConstructStubs require
@@ -1705,17 +2105,54 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target (checked to be a constructor)
+ // -- edi : the constructor to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ cmp(edi, edx);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(
+ ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edi : the constructor to call (checked to be a JSProxy)
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (checked to be a JSFunctionProxy)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ // Include the pushed new_target, constructor and the receiver.
+ __ add(eax, Immediate(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1723,23 +2160,32 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(edi, &non_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
- __ j(zero, &non_constructor, Label::kNear);
// Dispatch based on instance type.
- __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(equal, masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+
+ // Check if target has a [[Construct]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(equal, masm->isolate()->builtins()->ConstructProxy(),
RelocInfo::CODE_TARGET);
@@ -1756,11 +2202,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1768,17 +2211,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
+ // -- edx : new target (passed through to callee)
// -- edi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
- Label stack_overflow;
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
Label enough, too_few;
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1787,6 +2227,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1823,11 +2264,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Remember expected arguments in ecx.
__ mov(ecx, ebx);
@@ -1866,8 +2308,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// eax : expected number of arguments
+ // edx : new target (passed through to callee)
// edi : function (passed through to callee)
- __ call(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ call(ecx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1880,18 +2324,128 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ jmp(ecx);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
}
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Label* receiver_check_failed) {
+ // If there is no signature, return the holder.
+ __ CompareRoot(FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset),
+ Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // Walk the prototype chain.
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(scratch0, scratch0, scratch1);
+ __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+ Label next_prototype;
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Get the constructor's signature.
+ __ mov(scratch0,
+ FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(scratch0,
+ FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(scratch0, FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+ __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Otherwise load the parent function template and iterate.
+ __ mov(scratch0,
+ FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+ __ jmp(&function_template_loop, Label::kNear);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, receiver_check_failed);
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ test(FieldOperand(scratch0, Map::kBitField3Offset),
+ Immediate(Map::IsHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+ // Iterate.
+ __ jmp(&prototype_loop_start, Label::kNear);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments (not including the receiver)
+ // -- edi : callee
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[eax * 4] : first argument
+ // -- esp[(eax + 1) * 4] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+ __ Push(eax);
+ CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+ __ Pop(eax);
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+ __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(edx);
+
+ // Compatible receiver check failed: pop return address, arguments and
+ // receiver and throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ __ Pop(eax);
+ __ PopReturnAddressTo(ebx);
+ __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+ __ add(esp, eax);
+ __ PushReturnAddressFrom(ebx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+ }
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1899,7 +2453,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
Label skip;
@@ -1938,7 +2492,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 215a194d04..6e597e2814 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -617,7 +617,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -688,7 +688,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ push(scratch); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -787,7 +787,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -819,7 +819,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -892,8 +892,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// esp[8] = parameter count (tagged)
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
+ __ mov(edi, NativeContextOperand());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -1055,7 +1054,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1096,10 +1095,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
- const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
- __ mov(edi, Operand(edi, offset));
+ __ mov(edi, NativeContextOperand());
+ __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -1151,7 +1148,35 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // ebx : rest parameter index (tagged)
+ // esp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ pop(eax); // Save return address.
+ __ push(ecx); // Push number of parameters.
+ __ push(edx); // Push parameters pointer.
+ __ push(ebx); // Push rest parameter index.
+ __ push(eax); // Push return address.
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1160,7 +1185,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1438,7 +1463,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(equal, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure to match, return null.
@@ -1530,7 +1555,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1685,7 +1710,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
@@ -1753,8 +1778,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
@@ -1768,7 +1793,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1852,9 +1877,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
__ j(below, &runtime_call, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
@@ -1883,8 +1908,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
if (cc == equal) {
__ push(ecx);
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
__ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
@@ -1893,9 +1917,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -1903,16 +1926,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
- if (is_super) {
- __ pop(ecx);
- }
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1923,29 +1941,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
__ push(edi);
__ push(edx);
__ push(ebx);
- if (is_super) {
- __ push(ecx);
- }
__ CallStub(stub);
- if (is_super) {
- __ pop(ecx);
- }
__ pop(ebx);
__ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
-
- if (is_super) {
- __ push(ecx);
- }
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -1953,7 +1961,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -2019,12 +2026,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -2032,14 +2039,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
- if (IsSuperConstructorCall()) {
- __ push(ecx);
- }
-
Label non_function;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function);
@@ -2047,29 +2049,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map =
- isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(ebx);
- }
+ __ AssertUndefinedOrAllocationSite(ebx);
- if (IsSuperConstructorCall()) {
- __ pop(edx);
- } else {
- // Pass original constructor to construct stub.
- __ mov(edx, edi);
- }
+ // Pass new target to construct stub.
+ __ mov(edx, edi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2079,7 +2074,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ jmp(ecx);
__ bind(&non_function);
- if (IsSuperConstructorCall()) __ Drop(1);
__ mov(edx, edi);
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -2117,11 +2111,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2155,9 +2145,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
- __ bind(&call);
+ __ bind(&call_function);
__ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2192,10 +2183,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ mov(
FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- // We have to update statistics for runtime profiling.
- __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
- __ jmp(&call);
+
+ __ bind(&call);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2212,8 +2204,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(edi, ecx);
__ j(equal, &miss);
- // Update stats.
- __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+ // Make sure the function belongs to the same native context.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
+ __ cmp(ecx, NativeContextOperand());
+ __ j(not_equal, &miss);
// Initialize the call counter.
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
@@ -2232,7 +2227,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
}
- __ jmp(&call);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2255,7 +2250,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2562,14 +2557,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ mov(shared_info,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ BooleanBitTest(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- __ j(not_zero, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ mov(function_prototype,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2595,28 +2582,48 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
- Label done, loop;
+ Label done, loop, fast_runtime_fallback;
__ mov(eax, isolate()->factory()->true_value());
__ bind(&loop);
- __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+ // Check if the current object is a Proxy.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ __ j(equal, &fast_runtime_fallback, Label::kNear);
+
+ __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ j(equal, &done, Label::kNear);
- __ cmp(object_prototype, isolate()->factory()->null_value());
- __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ cmp(object, isolate()->factory()->null_value());
__ j(not_equal, &loop);
__ mov(eax, isolate()->factory()->false_value());
+
__ bind(&done);
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(0);
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function_prototype);
+ __ PushReturnAddressFrom(scratch);
+ // Invalidate the instanceof cache.
+ __ Move(eax, Immediate(Smi::FromInt(0)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
- __ pop(scratch); // Pop return address.
- __ push(object); // Push {object}.
- __ push(function); // Push {function}.
- __ push(scratch); // Push return address.
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function);
+ __ PushReturnAddressFrom(scratch);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -2677,11 +2684,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -2711,7 +2718,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2757,7 +2764,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -3007,7 +3014,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// eax: string
@@ -3052,7 +3059,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3065,7 +3072,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3084,7 +3091,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3119,7 +3126,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3282,7 +3289,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Push(edx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3327,14 +3334,16 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
__ AssertSmi(eax);
__ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
__ AssertSmi(edx);
- __ xchg(eax, edx);
+ __ push(eax);
+ __ mov(eax, edx);
+ __ pop(edx);
}
__ sub(eax, edx);
__ Ret();
@@ -3623,9 +3632,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ push(right);
__ push(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3633,19 +3642,20 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
- DCHECK(GetCondition() == equal);
+ DCHECK_EQ(equal, GetCondition());
__ sub(eax, edx);
__ ret(0);
@@ -3654,7 +3664,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ mov(ecx, edx);
@@ -3671,14 +3681,14 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(eax, edx);
__ ret(0);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(eax);
__ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3695,7 +3705,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op())));
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
@@ -4086,11 +4096,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
__ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object, Label::kNear);
__ pop(regs_.object());
regs_.Restore(masm);
@@ -4110,91 +4119,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : element value to store
- // -- ecx : element index as smi
- // -- esp[0] : return address
- // -- esp[4] : array literal index in function
- // -- esp[8] : array literal
- // clobbers ebx, edx, edi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label slow_elements_from_double;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-
- __ CheckFastElements(edi, &double_elements);
-
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(edi); // Pop return address and remember to put back later for tail
- // call.
- __ push(ebx);
- __ push(ecx);
- __ push(eax);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(edx);
- __ push(edi); // Return return address so that tail call returns to right
- // place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- __ bind(&slow_elements_from_double);
- __ pop(edx);
- __ jmp(&slow_elements);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize), eax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ push(edx);
- __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(eax,
- edx,
- ecx,
- edi,
- xmm0,
- &slow_elements_from_double);
- __ pop(edx);
- __ ret(0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5076,6 +5000,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label subclassing;
+ // Enter the context of the Array function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
__ cmp(edx, edi);
__ j(not_equal, &subclassing);
@@ -5097,27 +5024,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ pop(ecx); // return address.
- __ push(edi);
- __ push(edx);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(eax, Immediate(2));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ add(eax, Immediate(3));
break;
case NONE:
- __ mov(eax, Immediate(2));
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ mov(eax, Immediate(3));
break;
case ONE:
- __ mov(eax, Immediate(3));
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ __ mov(eax, Immediate(4));
break;
}
-
- __ push(ecx);
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(ebx);
+ __ PushReturnAddressFrom(ecx);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5234,7 +5160,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Pop(result_reg); // Pop return address.
__ Push(slot_reg);
__ Push(result_reg); // Push return address.
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5357,8 +5283,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(cell_reg); // Push return address.
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5495,7 +5420,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
__ cmp(map, isolate->factory()->heap_number_map());
@@ -5529,7 +5454,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 93f4cee636..2f94f35665 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -34,15 +34,15 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
{
@@ -65,19 +65,20 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ if (buffer == nullptr) return nullptr;
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
@@ -94,9 +95,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -186,13 +187,14 @@ class LabelConverter {
};
-MemMoveFunction CreateMemMoveFunction() {
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return NULL;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ if (buffer == nullptr) return nullptr;
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
@@ -505,7 +507,7 @@ MemMoveFunction CreateMemMoveFunction() {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
@@ -986,9 +988,11 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
- CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ CodePatcher patcher(isolate, young_sequence_.start(),
+ young_sequence_.length());
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
@@ -1035,7 +1039,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length);
+ CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 03bb128dd6..133b1adbdf 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -5,7 +5,7 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index c644ffa60f..efe6476203 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -75,7 +75,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@@ -101,14 +101,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->int3();
}
}
@@ -137,14 +138,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(call_address, patch_size());
+ CodePatcher patcher(isolate, call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo rinfo(isolate, call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
+ reinterpret_cast<intptr_t>(deopt_entry), NULL);
reloc_info_writer.Write(&rinfo);
DCHECK_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@@ -157,18 +157,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
// Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- DCHECK(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
+ const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
+
+ // Right trim the relocation info to free up remaining space.
+ const int delta = reloc_info->length() - new_reloc_length;
+ if (delta > 0) {
+ isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ reloc_info, delta);
+ }
}
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 2077dd76e6..ad381c7eb2 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -68,6 +68,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
+
+
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -131,6 +136,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi, eax, ecx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx, ecx};
@@ -191,7 +203,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
+ // ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
@@ -210,6 +222,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ // ebx : allocation site or undefined
+ Register registers[] = {edi, edx, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ Register registers[] = {edi, edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, ebx, eax};
@@ -348,6 +381,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // JSFunction
+ edx, // the new target
eax, // actual number of arguments
ebx, // expected number of arguments
};
@@ -380,27 +414,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
- ebx // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -416,7 +429,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
- edx, // original constructor
+ edx, // new target
edi, // constructor
ebx, // address of first argument
};
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 4a595783e2..5f80b4d52f 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -19,11 +19,12 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -820,6 +821,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -1068,10 +1081,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch1, FieldOperand(scratch1, offset));
- mov(scratch1, FieldOperand(scratch1, JSGlobalObject::kNativeContextOffset));
+ mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1666,6 +1676,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ mov(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
@@ -1733,16 +1764,16 @@ void MacroAssembler::CopyBytes(Register source,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
+ mov(Operand(current_address, 0), filler);
+ add(current_address, Immediate(kPointerSize));
bind(&entry);
- cmp(start_offset, end_offset);
+ cmp(current_address, end_address);
j(below, &loop);
}
@@ -1890,24 +1921,27 @@ void MacroAssembler::CallExternalReference(ExternalReference ref,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Move(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[8] : argument num_arguments - 1
+ // ...
+ // -- esp[8 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- eax : number of arguments
+ // -----------------------------------
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(eax, Immediate(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -1921,8 +1955,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1973,13 +2005,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call(adaptor, RelocInfo::CODE_TARGET);
@@ -1995,20 +2020,76 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ cmpb(Operand::StaticVariable(step_in_enabled), 0);
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(edi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ mov(edx, isolate()->factory()->undefined_value());
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ Label::kNear, call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -2023,6 +2104,7 @@ void MacroAssembler::InvokeCode(const Operand& code,
void MacroAssembler::InvokeFunction(Register fun,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -2030,14 +2112,13 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
}
@@ -2052,8 +2133,7 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
}
@@ -2072,35 +2152,21 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinFunction(edi, native_context_index);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, GlobalObjectOperand());
- mov(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
+ mov(target, NativeContextOperand());
mov(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, native_context_index);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -2128,8 +2194,8 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
- mov(dst, GlobalObjectOperand());
- mov(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
+ mov(dst, NativeContextOperand());
+ mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}
@@ -2139,34 +2205,26 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
+ mov(scratch, NativeContextOperand());
+ cmp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
+ mov(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
+ // Load the native context from the current context.
+ mov(function, NativeContextOperand());
// Load the function from the native context.
- mov(function, Operand(function, Context::SlotOffset(index)));
+ mov(function, ContextOperand(function, index));
}
@@ -2790,10 +2848,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -2803,7 +2861,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -2863,10 +2921,9 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch1,
Label* on_black,
Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
+ 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -2920,110 +2977,22 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, isolate()->factory()->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- DCHECK(SeqOneByteString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (emit_debug_code()) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, kLiveBytesCountOverflowChunkSize);
- }
-
- bind(&done);
+ j(zero, value_is_white, Label::kNear);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index bff3c041a4..76c4890027 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -23,6 +23,7 @@ const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
@@ -40,33 +41,20 @@ enum PointersToHereCheck {
kPointersToHereAreAlwaysInteresting
};
-
-enum RegisterValueType {
- REGISTER_VALUE_IS_SMI,
- REGISTER_VALUE_IS_INT32
-};
-
+enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
@@ -93,7 +81,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kNear) {
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
@@ -101,84 +95,64 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kNear) {
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
- Register addr,
- Register scratch,
+ Register addr, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
+ Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, Label::Distance has_color_distance,
+ int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -186,10 +160,7 @@ class MacroAssembler: public Assembler {
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
- Register object,
- int offset,
- Register value,
- Register scratch,
+ Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
@@ -199,22 +170,14 @@ class MacroAssembler: public Assembler {
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
void RecordWriteContextSlot(
- Register context,
- int offset,
- Register value,
- Register scratch,
+ Register context, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting) {
- RecordWriteField(context,
- offset + kHeapObjectTag,
- value,
- scratch,
- save_fp,
- remembered_set_action,
- smi_check,
+ RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
+ remembered_set_action, smi_check,
pointers_to_here_check_for_value);
}
@@ -225,10 +188,7 @@ class MacroAssembler: public Assembler {
// filters out smis so it does not update the write barrier if the
// value is a smi.
void RecordWriteArray(
- Register array,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
+ Register array, Register value, Register index, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -240,10 +200,7 @@ class MacroAssembler: public Assembler {
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(
- Register object,
- Register address,
- Register value,
- SaveFPRegsMode save_fp,
+ Register object, Register address, Register value, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK,
PointersToHereCheck pointers_to_here_check_for_value =
@@ -252,12 +209,8 @@ class MacroAssembler: public Assembler {
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
- void RecordWriteForMap(
- Register object,
- Handle<Map> map,
- Register scratch1,
- Register scratch2,
- SaveFPRegsMode save_fp);
+ void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
+ Register scratch2, SaveFPRegsMode save_fp);
// ---------------------------------------------------------------------------
// Debugger Support
@@ -295,12 +248,11 @@ class MacroAssembler: public Assembler {
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
+ void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -353,37 +305,29 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
- }
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function.
@@ -393,9 +337,6 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, int native_context_index);
-
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -417,31 +358,25 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
+ void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
+ void CheckFastObjectElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
+ void CheckFastSmiElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
+ void StoreNumberToDoubleElements(Register maybe_number, Register elements,
+ Register key, Register scratch1,
+ XMMRegister scratch2, Label* fail,
int offset = 0);
// Compare an object's map with the specified map.
@@ -451,9 +386,7 @@ class MacroAssembler: public Assembler {
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
+ void CheckMap(Register obj, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
@@ -468,8 +401,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
+ Condition IsObjectStringType(Register heap_object, Register map,
Register instance_type);
// Check if the object in register heap_object is a name. Afterwards the
@@ -477,8 +409,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectNameType(Register heap_object,
- Register map,
+ Condition IsObjectNameType(Register heap_object, Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
@@ -487,8 +418,7 @@ class MacroAssembler: public Assembler {
void ClampUint8(Register reg);
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister scratch_reg,
+ void ClampDoubleToUint8(XMMRegister input_reg, XMMRegister scratch_reg,
Register result_reg);
void SlowTruncateToI(Register result_reg, Register input_reg,
@@ -526,22 +456,19 @@ class MacroAssembler: public Assembler {
void LoadUint32(XMMRegister dst, const Operand& src);
// Jump the register contains a smi.
- inline void JumpIfSmi(Register value,
- Label* smi_label,
+ inline void JumpIfSmi(Register value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value,
- Label* smi_label,
+ inline void JumpIfSmi(Operand value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if register contain a non-smi.
- inline void JumpIfNotSmi(Register value,
- Label* not_smi_label,
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
@@ -597,6 +524,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -616,22 +547,15 @@ class MacroAssembler: public Assembler {
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss);
+ void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
+ Register scratch2, Label* miss);
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
+ void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+ Register r0, Register r1, Register r2,
Register result);
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -645,48 +569,29 @@ class MacroAssembler: public Assembler {
// result is known to be the allocation top on entry (could be result_end
// from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
+
+ void Allocate(int header_size, ScaleFactor element_size,
+ Register element_count, RegisterValueType element_count_type,
+ Register result, Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags);
+
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Label* gc_required, MutableMode mode = IMMUTABLE);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -695,36 +600,34 @@ class MacroAssembler: public Assembler {
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteConsString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
+ void CopyBytes(Register source, Register destination, Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -771,35 +674,31 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -841,6 +740,7 @@ class MacroAssembler: public Assembler {
void Push(const Operand& src) { push(src); }
void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
+ void Pop(const Operand& dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
@@ -878,9 +778,11 @@ class MacroAssembler: public Assembler {
void Move(XMMRegister dst, uint64_t src);
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+ void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+ void Push(Smi* smi) { Push(Immediate(smi)); }
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -900,7 +802,6 @@ class MacroAssembler: public Assembler {
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
// ---------------------------------------------------------------------------
// Debugging
@@ -951,10 +852,8 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
+ void EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value, uint32_t encoding_mask);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
@@ -1004,14 +903,10 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
+ const ParameterCount& actual, Label* done,
+ bool* definitely_mismatches, InvokeFlag flag,
Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -1019,18 +914,14 @@ class MacroAssembler: public Assembler {
void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
+ void LoadAllocationTopHelper(Register result, Register scratch,
AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end,
- Register scratch,
+ void UpdateAllocationTopHelper(Register result_end, Register scratch,
AllocationFlags flags);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
+ void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
@@ -1038,8 +929,7 @@ class MacroAssembler: public Assembler {
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
// unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
+ inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
@@ -1051,7 +941,6 @@ class MacroAssembler: public Assembler {
friend class StandardFrame;
};
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. Is not legal to emit
@@ -1059,19 +948,18 @@ class MacroAssembler: public Assembler {
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
+ CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1080,39 +968,30 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
-
// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
+inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-
-inline Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
+inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
-
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand NativeContextOperand() {
+ return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
-
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1134,7 +1013,6 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index af7ee3c71b..076bde83e6 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -12,7 +12,7 @@ namespace internal {
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
@@ -21,7 +21,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
@@ -36,11 +37,15 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
} // namespace internal
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index 951966e7de..0f1b7b9bf1 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -55,8 +55,7 @@ Register PropertyAccessCompiler::slot() const {
if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
return LoadDescriptor::SlotRegister();
}
- DCHECK(FLAG_vector_stores &&
- (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
return VectorStoreICDescriptor::SlotRegister();
}
@@ -65,8 +64,7 @@ Register PropertyAccessCompiler::vector() const {
if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
return LoadWithVectorDescriptor::VectorRegister();
}
- DCHECK(FLAG_vector_stores &&
- (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
return VectorStoreICDescriptor::VectorRegister();
}
} // namespace internal
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index 223bde479a..50c2cc7303 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -40,7 +40,7 @@ class PropertyAccessCompiler BASE_EMBEDDED {
kind_(kind),
cache_holder_(cache_holder),
isolate_(isolate),
- masm_(isolate, NULL, 256) {
+ masm_(isolate, NULL, 256, CodeObjectRequired::kYes) {
// TODO(yangguo): remove this once we can serialize IC stubs.
masm_.enable_serializer();
}
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 62f554792f..d360f5a62b 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || r3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r3, r4, r5};
return registers;
}
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 1b6b51538e..e293965e6f 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(r1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -143,7 +145,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmp(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, miss_label);
// Load properties array.
@@ -169,10 +171,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ldr(result, MemOperand(cp, offset));
- __ ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- __ ldr(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ ldr(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -223,8 +222,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -293,6 +294,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -307,15 +315,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -324,7 +327,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -333,8 +336,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -707,8 +709,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -733,7 +734,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(ip, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -745,7 +746,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 89b3cc38d4..f59ac074be 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -309,8 +309,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -323,8 +322,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -339,8 +337,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -353,8 +350,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Perform tail call to the entry.
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -462,23 +458,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -493,8 +483,11 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = r4;
+ Register scratch = r4;
Register address = r5;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, address));
+
if (check_map == kCheckMap) {
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
@@ -507,12 +500,10 @@ static void KeyedStoreGenerateMegamorphicHelper(
// there may be a callback on the element
Label holecheck_passed1;
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch_value,
- MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
- __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
+ __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
+ __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
__ b(ne, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -522,8 +513,8 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, key, Operand(Smi::FromInt(1)));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -532,22 +523,21 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, key, Operand(Smi::FromInt(1)));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand::PointerOffsetFromSmiKey(key));
__ str(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ __ mov(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -565,33 +555,31 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ add(address, elements,
Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
kHeapObjectTag));
- __ ldr(scratch_value,
- MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
- __ cmp(scratch_value, Operand(kHoleNanUpper32));
+ __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
__ b(ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ add(scratch_value, key, Operand(Smi::FromInt(1)));
- __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ add(scratch, key, Operand(Smi::FromInt(1)));
+ __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+ __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -602,7 +590,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -614,7 +602,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -690,27 +678,24 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
- // We use register r8 when FLAG_vector_stores is enabled, because otherwise
- // probing the megamorphic stub cache would require pushing temporaries on
- // the stack.
+ // We use register r8, because otherwise probing the megamorphic stub cache
+ // would require pushing temporaries on the stack.
// TODO(mvstanton): quit using register r8 when
// FLAG_enable_embedded_constant_pool is turned on.
- DCHECK(!FLAG_vector_stores || !FLAG_enable_embedded_constant_pool);
- Register temporary2 = FLAG_vector_stores ? r8 : r4;
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
-
- DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
- }
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ Register temporary2 = r8;
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+
+ DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -788,8 +773,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -853,7 +837,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
@@ -892,7 +877,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// tst rx, #kSmiTagMask
// b ne/eq, <target>
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsCmpRegister(instr_at_patch));
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index 9b8abd3298..318523199a 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -22,109 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(r0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ cmp(this->name(), Operand(name));
- __ b(ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ b(ne, &next_map);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 13b0887a82..892ce85dfb 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -38,7 +38,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || x3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, x3, x4, x5};
return registers;
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 576d333428..7cfef6a1b4 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -59,7 +59,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ Cmp(scratch0, FIRST_JS_RECEIVER_TYPE);
__ B(lt, miss_label);
// Load properties array.
@@ -78,9 +78,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- __ Ldr(result, GlobalObjectMemOperand());
- __ Ldr(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- __ Ldr(result, ContextMemOperand(result, index));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ Ldr(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -132,9 +130,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -207,6 +206,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -248,7 +254,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(x1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -285,7 +292,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(x1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(x1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(x1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -300,15 +308,10 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -317,7 +320,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -327,8 +330,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -378,7 +380,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -767,8 +769,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -796,7 +797,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 90b89018fe..eb933c78ec 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -293,8 +293,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadWithVectorDescriptor::NameRegister(),
LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -305,8 +304,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -324,8 +322,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadWithVectorDescriptor::VectorRegister());
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -336,8 +333,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -470,24 +466,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
ASM_LOCATION("KeyedStoreIC::GenerateMiss");
StoreIC_PushArgs(masm);
-
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -690,19 +679,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(x10, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ Mov(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -778,8 +765,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -839,7 +825,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The patch information is encoded in the instruction stream using
// instructions which have no side effects, so we can safely execute them.
// The patch information is encoded directly after the call to the helper
@@ -864,7 +851,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// to
// tb(!n)z test_reg, #0, <target>
Instruction* to_patch = info.SmiCheck();
- PatchingAssembler patcher(to_patch, 1);
+ PatchingAssembler patcher(isolate, to_patch, 1);
DCHECK(to_patch->IsTestBranch());
DCHECK(to_patch->ImmTestBranchBit5() == 0);
DCHECK(to_patch->ImmTestBranchBit40() == 0);
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index b4a4163fed..c99c637ab1 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -23,115 +23,9 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(x10);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- Label try_next;
- __ B(ne, &try_next);
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ Bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ Bind(&try_next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
-
- ASM_LOCATION("PropertyICCompiler::CompileStorePolymorphic");
-
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; i++) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- Label skip;
- __ B(&skip, ne);
- if (!transitioned_maps->at(i).is_null()) {
- // This argument is used by the handler stub. For example, see
- // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- }
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ Bind(&skip);
- }
-
- __ Bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 2b6f88ac95..b353628053 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -427,7 +427,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss;
- if (FLAG_vector_stores) PushVectorAndSlot();
+ PushVectorAndSlot();
// Check that we are allowed to write this.
bool is_nonexistent = holder()->map() == transition->GetBackPointer();
@@ -471,7 +471,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
if (virtual_args) {
// This will move the map from tmp into map_reg.
RearrangeVectorAndSlot(tmp, map_reg);
- } else if (FLAG_vector_stores) {
+ } else {
PopVectorAndSlot();
}
GenerateRestoreName(name);
@@ -493,7 +493,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
GenerateRestoreMap(transition, tmp, scratch2(), &miss);
if (virtual_args) {
RearrangeVectorAndSlot(tmp, map_reg);
- } else if (FLAG_vector_stores) {
+ } else {
PopVectorAndSlot();
}
GenerateRestoreName(name);
@@ -504,7 +504,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
}
GenerateRestoreName(&miss, name);
- if (FLAG_vector_stores) PopVectorAndSlot();
+ PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index acb3526d9d..1825202366 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -30,8 +30,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores ||
- ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index d5011fb7e9..0b380b3ee2 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -92,7 +92,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -114,10 +114,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(result, Operand(esi, offset));
- __ mov(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset));
- __ mov(result, Operand(result, Context::SlotOffset(index)));
+ __ LoadGlobalFunction(index, result);
// Load its initial map. The global functions all have initial maps.
__ mov(result,
FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -206,6 +203,12 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the code.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ mov(api_function_address, Immediate(function_address));
@@ -261,7 +264,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -294,8 +297,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -303,25 +308,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // which contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
}
@@ -330,7 +325,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -339,8 +334,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -732,8 +726,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ push(scratch2()); // restore old return address
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -758,7 +751,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -774,7 +767,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index d0a2e0bd54..d93b67bffc 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -27,104 +27,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ push(ebx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
- __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
- Register map_reg = scratch1();
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 1754d5a6fc..88947e47e7 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -561,26 +561,22 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, key, edi, no_reg);
- if (FLAG_vector_stores) {
- __ pop(VectorStoreICDescriptor::VectorRegister());
- __ pop(VectorStoreICDescriptor::SlotRegister());
- }
+ __ pop(VectorStoreICDescriptor::VectorRegister());
+ __ pop(VectorStoreICDescriptor::SlotRegister());
// Cache miss.
__ jmp(&miss);
@@ -676,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -695,8 +690,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -707,8 +701,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -726,27 +719,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- // This shouldn't be called.
- __ int3();
- return;
- }
-
- // Return address is on the stack.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
+ // This shouldn't be called.
+ // TODO(mvstanton): remove this method.
+ __ int3();
+ return;
}
@@ -754,25 +735,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // Contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // Contains the return address.
}
@@ -781,8 +752,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -798,25 +768,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
- if (FLAG_vector_stores) {
- __ push(vector);
- __ push(slot);
- }
+ __ push(vector);
+ __ push(slot);
Register dictionary = ebx;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(FLAG_vector_stores ? 3 : 1);
+ __ Drop(3);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
- if (FLAG_vector_stores) {
- __ pop(slot);
- __ pop(vector);
- }
+ __ pop(slot);
+ __ pop(vector);
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
@@ -828,8 +794,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -867,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index 20e4fedc23..ae4b2a5d58 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -33,60 +33,6 @@ bool PropertyICCompiler::IncludesNumberMap(MapHandleList* maps) {
}
-Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<Map> map,
- Handle<Code> handler,
- Handle<Name> name,
- IcCheckType check) {
- MapHandleList maps(1);
- CodeHandleList handlers(1);
- maps.Add(map);
- handlers.Add(handler);
- Code::StubType stub_type = handler->type();
- return CompilePolymorphic(&maps, &handlers, name, stub_type, check);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeMonomorphic(
- Code::Kind kind, Handle<Name> name, Handle<Map> map, Handle<Code> handler,
- ExtraICState extra_ic_state) {
- Isolate* isolate = name->GetIsolate();
- if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
- handler.is_identical_to(isolate->builtins()->LoadIC_Normal_Strong()) ||
- handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
- name = isolate->factory()->normal_ic_symbol();
- }
-
- CacheHolderFlag flag;
- Handle<Map> stub_holder = IC::GetICCacheHolder(map, isolate, &flag);
- if (kind == Code::KEYED_STORE_IC) {
- // Always set the "property" bit.
- extra_ic_state =
- KeyedStoreIC::IcCheckTypeField::update(extra_ic_state, PROPERTY);
- DCHECK(STANDARD_STORE ==
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
- } else if (kind == Code::KEYED_LOAD_IC) {
- extra_ic_state = KeyedLoadIC::IcCheckTypeField::update(extra_ic_state,
- PROPERTY);
- }
-
- Handle<Code> ic;
- // There are multiple string maps that all use the same prototype. That
- // prototype cannot hold multiple handlers, one for each of the string maps,
- // for a single name. Hence, turn off caching of the IC.
- bool can_be_cached = map->instance_type() >= FIRST_NONSTRING_TYPE;
- if (can_be_cached) {
- ic = Find(name, stub_holder, kind, extra_ic_state, flag);
- if (!ic.is_null()) return ic;
- }
-
- PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
- ic = ic_compiler.CompileMonomorphic(map, handler, name, PROPERTY);
-
- if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
- return ic;
-}
-
-
Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
Handle<Map> receiver_map, ExtraICState extra_ic_state) {
Isolate* isolate = receiver_map->GetIsolate();
@@ -138,35 +84,6 @@ Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
}
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode) {
- Isolate* isolate = receiver_map->GetIsolate();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
-
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
- Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
-
- Map::UpdateCodeCache(receiver_map, name, code);
- DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
- store_mode);
- return code;
-}
-
-
Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
ExtraICState state) {
Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
@@ -239,17 +156,6 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
}
-Handle<Code> PropertyICCompiler::ComputePolymorphic(
- Code::Kind kind, MapHandleList* maps, CodeHandleList* handlers,
- int valid_maps, Handle<Name> name, ExtraICState extra_ic_state) {
- Handle<Code> handler = handlers->at(0);
- Code::StubType type = valid_maps == 1 ? handler->type() : Code::NORMAL;
- DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
- PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
- return ic_compiler.CompilePolymorphic(maps, handlers, name, type, PROPERTY);
-}
-
-
void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
@@ -267,31 +173,6 @@ void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
}
-Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- LanguageMode language_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
- Code::Flags flags =
- Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code =
- compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
LoadIC::GenerateInitialize(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
@@ -394,22 +275,6 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- CompileKeyedStorePolymorphicHandlers(receiver_maps, &transitioned_maps,
- &handlers, store_mode);
-
- Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
- &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
- return code;
-}
-
-
#define __ ACCESS_MASM(masm())
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index ff32404afa..08444df654 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -21,15 +21,6 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
ExtraICState extra_state);
- static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
- Handle<Map> map, Handle<Code> handler,
- ExtraICState extra_ic_state);
- static Handle<Code> ComputePolymorphic(Code::Kind kind, MapHandleList* maps,
- CodeHandleList* handlers,
- int number_of_valid_maps,
- Handle<Name> name,
- ExtraICState extra_ic_state);
-
// Keyed
static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
Handle<Map> receiver_map, ExtraICState extra_ic_state);
@@ -37,16 +28,10 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedStoreMonomorphic(
- Handle<Map> receiver_map, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode);
static void ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
LanguageMode language_mode);
- static Handle<Code> ComputeKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
- LanguageMode language_mode);
// Compare nil
static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
@@ -77,25 +62,14 @@ class PropertyICCompiler : public PropertyAccessCompiler {
Handle<Code> CompileStoreGeneric(Code::Flags flags);
Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
- Handle<Code> CompileMonomorphic(Handle<Map> map, Handle<Code> handler,
- Handle<Name> name, IcCheckType check);
- Handle<Code> CompilePolymorphic(MapHandleList* maps, CodeHandleList* handlers,
- Handle<Name> name, Code::StubType type,
- IcCheckType check);
-
Handle<Code> CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- KeyedAccessStoreMode store_mode);
void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
MapHandleList* transitioned_maps,
CodeHandleList* handlers,
KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps);
bool IncludesNumberMap(MapHandleList* maps);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 65a5a2ddec..6dab006ad5 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -60,9 +60,8 @@ void IC::SetTargetAtAddress(Address address, Code* target,
DCHECK(!target->is_inline_cache_stub() ||
(target->kind() != Code::LOAD_IC &&
target->kind() != Code::KEYED_LOAD_IC &&
- target->kind() != Code::CALL_IC &&
- (!FLAG_vector_stores || (target->kind() != Code::STORE_IC &&
- target->kind() != Code::KEYED_STORE_IC))));
+ target->kind() != Code::CALL_IC && target->kind() != Code::STORE_IC &&
+ target->kind() != Code::KEYED_STORE_IC));
Heap* heap = target->GetHeap();
Code* old_target = GetTargetAtAddress(address, constant_pool);
@@ -75,7 +74,7 @@ void IC::SetTargetAtAddress(Address address, Code* target,
StoreICState::GetLanguageMode(target->extra_ic_state()));
}
#endif
- Assembler::set_target_address_at(address, constant_pool,
+ Assembler::set_target_address_at(heap->isolate(), address, constant_pool,
target->instruction_start());
if (heap->gc_state() == Heap::MARK_COMPACT) {
heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 298eaa2707..4bdaf3ff03 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -191,17 +191,17 @@ void BinaryOpICState::GenerateAheadOfTime(
}
-Type* BinaryOpICState::GetResultType(Zone* zone) const {
+Type* BinaryOpICState::GetResultType() const {
Kind result_kind = result_kind_;
if (HasSideEffects()) {
result_kind = NONE;
} else if (result_kind == GENERIC && op_ == Token::ADD) {
- return Type::Union(Type::Number(zone), Type::String(zone), zone);
+ return Type::NumberOrString();
} else if (result_kind == NUMBER && op_ == Token::SHR) {
- return Type::Unsigned32(zone);
+ return Type::Unsigned32();
}
DCHECK_NE(GENERIC, result_kind);
- return KindToType(result_kind, zone);
+ return KindToType(result_kind);
}
@@ -320,20 +320,20 @@ const char* BinaryOpICState::KindToString(Kind kind) {
// static
-Type* BinaryOpICState::KindToType(Kind kind, Zone* zone) {
+Type* BinaryOpICState::KindToType(Kind kind) {
switch (kind) {
case NONE:
- return Type::None(zone);
+ return Type::None();
case SMI:
- return Type::SignedSmall(zone);
+ return Type::SignedSmall();
case INT32:
- return Type::Signed32(zone);
+ return Type::Signed32();
case NUMBER:
- return Type::Number(zone);
+ return Type::Number();
case STRING:
- return Type::String(zone);
+ return Type::String();
case GENERIC:
- return Type::Any(zone);
+ return Type::Any();
}
UNREACHABLE();
return NULL;
@@ -356,10 +356,10 @@ const char* CompareICState::GetStateName(State state) {
return "STRING";
case UNIQUE_NAME:
return "UNIQUE_NAME";
- case OBJECT:
- return "OBJECT";
- case KNOWN_OBJECT:
- return "KNOWN_OBJECT";
+ case RECEIVER:
+ return "RECEIVER";
+ case KNOWN_RECEIVER:
+ return "KNOWN_RECEIVER";
case GENERIC:
return "GENERIC";
}
@@ -384,9 +384,9 @@ Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
return Type::InternalizedString(zone);
case UNIQUE_NAME:
return Type::UniqueName(zone);
- case OBJECT:
+ case RECEIVER:
return Type::Receiver(zone);
- case KNOWN_OBJECT:
+ case KNOWN_RECEIVER:
return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
case GENERIC:
return Type::Any(zone);
@@ -406,7 +406,7 @@ CompareICState::State CompareICState::NewInputState(State old_state,
if (value->IsInternalizedString()) return INTERNALIZED_STRING;
if (value->IsString()) return STRING;
if (value->IsSymbol()) return UNIQUE_NAME;
- if (value->IsJSObject()) return OBJECT;
+ if (value->IsJSReceiver()) return RECEIVER;
break;
case BOOLEAN:
if (value->IsBoolean()) return BOOLEAN;
@@ -429,12 +429,12 @@ CompareICState::State CompareICState::NewInputState(State old_state,
case UNIQUE_NAME:
if (value->IsUniqueName()) return UNIQUE_NAME;
break;
- case OBJECT:
- if (value->IsJSObject()) return OBJECT;
+ case RECEIVER:
+ if (value->IsJSReceiver()) return RECEIVER;
break;
case GENERIC:
break;
- case KNOWN_OBJECT:
+ case KNOWN_RECEIVER:
UNREACHABLE();
break;
}
@@ -465,12 +465,12 @@ CompareICState::State CompareICState::TargetState(
return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING;
}
if (x->IsString() && y->IsString()) return STRING;
- if (x->IsJSObject() && y->IsJSObject()) {
- if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map()) {
- return KNOWN_OBJECT;
+ if (x->IsJSReceiver() && y->IsJSReceiver()) {
+ if (Handle<JSReceiver>::cast(x)->map() ==
+ Handle<JSReceiver>::cast(y)->map()) {
+ return KNOWN_RECEIVER;
} else {
- return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
+ return Token::IsEqualityOp(op) ? RECEIVER : GENERIC;
}
}
if (!Token::IsEqualityOp(op)) return GENERIC;
@@ -490,15 +490,15 @@ CompareICState::State CompareICState::TargetState(
if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
return GENERIC;
- case KNOWN_OBJECT:
- if (x->IsJSObject() && y->IsJSObject()) {
- return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
+ case KNOWN_RECEIVER:
+ if (x->IsJSReceiver() && y->IsJSReceiver()) {
+ return Token::IsEqualityOp(op) ? RECEIVER : GENERIC;
}
return GENERIC;
case BOOLEAN:
case STRING:
case UNIQUE_NAME:
- case OBJECT:
+ case RECEIVER:
case GENERIC:
return GENERIC;
}
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index ebc686b738..1982fbe08b 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -120,9 +120,9 @@ class BinaryOpICState final BASE_EMBEDDED {
Token::Value op() const { return op_; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
- Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); }
- Type* GetRightType(Zone* zone) const { return KindToType(right_kind_, zone); }
- Type* GetResultType(Zone* zone) const;
+ Type* GetLeftType() const { return KindToType(left_kind_); }
+ Type* GetRightType() const { return KindToType(right_kind_); }
+ Type* GetResultType() const;
void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
@@ -136,7 +136,7 @@ class BinaryOpICState final BASE_EMBEDDED {
Kind UpdateKind(Handle<Object> object, Kind kind) const;
static const char* KindToString(Kind kind);
- static Type* KindToType(Kind kind, Zone* zone);
+ static Type* KindToType(Kind kind);
static bool KindMaybeSmi(Kind kind) {
return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
}
@@ -174,7 +174,7 @@ class CompareICState {
// SMI < NUMBER
// INTERNALIZED_STRING < STRING
// INTERNALIZED_STRING < UNIQUE_NAME
- // KNOWN_OBJECT < OBJECT
+ // KNOWN_RECEIVER < RECEIVER
enum State {
UNINITIALIZED,
BOOLEAN,
@@ -182,9 +182,9 @@ class CompareICState {
NUMBER,
STRING,
INTERNALIZED_STRING,
- UNIQUE_NAME, // Symbol or InternalizedString
- OBJECT, // JSObject
- KNOWN_OBJECT, // JSObject with specific map (faster check)
+ UNIQUE_NAME, // Symbol or InternalizedString
+ RECEIVER, // JSReceiver
+ KNOWN_RECEIVER, // JSReceiver with specific map (faster check)
GENERIC
};
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 3dc3029300..73ac666a41 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -117,13 +117,10 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
stdout, true);
}
- ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier = "";
if (new_target->kind() == Code::KEYED_STORE_IC) {
KeyedAccessStoreMode mode =
- FLAG_vector_stores
- ? casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode()
- : KeyedStoreIC::GetKeyedAccessStoreMode(extra_state);
+ casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
modifier = GetTransitionMarkModifier(mode);
}
PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
@@ -418,19 +415,9 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
// static
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host,
- TypeFeedbackVector* vector, State old_state,
- State new_state) {
+void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
if (host->kind() != Code::FUNCTION) return;
- if (FLAG_type_info_threshold > 0) {
- int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
- int generic_delta = 0; // "Generic" here includes megamorphic.
- ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
- &generic_delta);
- vector->change_ic_with_type_info_count(polymorphic_delta);
- vector->change_ic_generic_count(generic_delta);
- }
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
info->change_own_type_change_checksum();
host->set_profiler_ticks(0);
@@ -470,13 +457,9 @@ void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
switch (target->kind()) {
case Code::LOAD_IC:
case Code::KEYED_LOAD_IC:
- return;
case Code::STORE_IC:
- if (FLAG_vector_stores) return;
- return StoreIC::Clear(isolate, address, target, constant_pool);
case Code::KEYED_STORE_IC:
- if (FLAG_vector_stores) return;
- return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+ return;
case Code::COMPARE_IC:
return CompareIC::Clear(isolate, address, target, constant_pool);
case Code::COMPARE_NIL_IC:
@@ -498,9 +481,8 @@ void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -512,16 +494,15 @@ void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
if (state != UNINITIALIZED && !feedback->IsAllocationSite()) {
nexus->ConfigureUninitialized();
// The change in state must be processed.
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, UNINITIALIZED);
+ OnTypeFeedbackChanged(isolate, host);
}
}
void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
if (IsCleared(nexus)) return;
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -536,9 +517,8 @@ void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
if (IsCleared(nexus)) return;
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -554,9 +534,8 @@ void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
KeyedStoreICNexus* nexus) {
if (IsCleared(nexus)) return;
- State state = nexus->StateFromFeedback();
nexus->ConfigurePremonomorphic();
- OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+ OnTypeFeedbackChanged(isolate, host);
}
@@ -565,11 +544,11 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
CompareICStub stub(target->stub_key(), isolate);
// Only clear CompareICs that can retain objects.
- if (stub.state() != CompareICState::KNOWN_OBJECT) return;
+ if (stub.state() != CompareICState::KNOWN_RECEIVER) return;
SetTargetAtAddress(address,
GetRawUninitialized(isolate, stub.op(), stub.strength()),
constant_pool);
- PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
}
@@ -606,8 +585,7 @@ void IC::ConfigureVectorState(IC::State new_state) {
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- new_state);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -630,8 +608,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- MONOMORPHIC);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -654,8 +631,7 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
}
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- POLYMORPHIC);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -668,8 +644,7 @@ void IC::ConfigureVectorState(MapHandleList* maps,
nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
vector_set_ = true;
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
- POLYMORPHIC);
+ OnTypeFeedbackChanged(isolate(), get_host());
}
@@ -810,12 +785,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
if (number_of_valid_maps > 1 && target()->is_keyed_stub()) return false;
Handle<Code> ic;
if (number_of_valid_maps == 1) {
- if (UseVector()) {
- ConfigureVectorState(name, receiver_map(), code);
- } else {
- ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, map, code,
- extra_ic_state());
- }
+ ConfigureVectorState(name, receiver_map(), code);
} else {
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, code);
@@ -827,13 +797,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
handlers.Add(code);
}
- if (UseVector()) {
- ConfigureVectorState(name, &maps, &handlers);
- } else {
- ic = PropertyICCompiler::ComputePolymorphic(kind(), &maps, &handlers,
- number_of_valid_maps, name,
- extra_ic_state());
- }
+ ConfigureVectorState(name, &maps, &handlers);
}
if (!UseVector()) set_target(*ic);
@@ -843,13 +807,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
DCHECK(handler->is_handler());
- if (UseVector()) {
- ConfigureVectorState(name, receiver_map(), handler);
- } else {
- Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
- kind(), name, receiver_map(), handler, extra_ic_state());
- set_target(*ic);
- }
+ ConfigureVectorState(name, receiver_map(), handler);
}
@@ -973,7 +931,7 @@ static Handle<Code> KeyedStoreICInitializeStubHelper(
Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state) {
- if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ if (initialization_state != MEGAMORPHIC) {
VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -985,7 +943,7 @@ Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
Isolate* isolate, LanguageMode language_mode, State initialization_state) {
- if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ if (initialization_state != MEGAMORPHIC) {
VectorKeyedStoreICStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -1638,13 +1596,8 @@ Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
DCHECK(initialization_state == UNINITIALIZED ||
initialization_state == PREMONOMORPHIC ||
initialization_state == MEGAMORPHIC);
- if (FLAG_vector_stores) {
- VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
- return stub.GetCode();
- }
-
- return StoreICInitializeStubHelper(
- isolate, ComputeExtraICState(language_mode), initialization_state);
+ VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+ return stub.GetCode();
}
@@ -1653,7 +1606,7 @@ Handle<Code> StoreIC::initialize_stub_in_optimized_code(
DCHECK(initialization_state == UNINITIALIZED ||
initialization_state == PREMONOMORPHIC ||
initialization_state == MEGAMORPHIC);
- if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
+ if (initialization_state != MEGAMORPHIC) {
VectorStoreICStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -1700,11 +1653,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
- if (FLAG_vector_stores) {
- ConfigureVectorState(PREMONOMORPHIC);
- } else {
- set_target(*pre_monomorphic_stub());
- }
+ ConfigureVectorState(PREMONOMORPHIC);
TRACE_IC("StoreIC", lookup->name());
return;
}
@@ -1811,8 +1760,6 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
break;
}
- // When debugging we need to go the slow path to flood the accessor.
- if (GetSharedFunctionInfo()->HasDebugInfo()) break;
Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
CallOptimization call_optimization(function);
NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
@@ -1900,25 +1847,18 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
- if (FLAG_vector_stores) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- monomorphic_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- monomorphic_map, language_mode(), store_mode);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ monomorphic_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
+ return null_handle;
}
// There are several special cases where an IC that is MONOMORPHIC can still
// transition to a different GetNonTransitioningStoreMode IC that handles a
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
- KeyedAccessStoreMode old_store_mode =
- FLAG_vector_stores
- ? GetKeyedAccessStoreMode()
- : KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ KeyedAccessStoreMode old_store_mode = GetKeyedAccessStoreMode();
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1934,16 +1874,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
- if (FLAG_vector_stores) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- transitioned_receiver_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
- handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- transitioned_receiver_map, language_mode(), store_mode);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ transitioned_receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
+ handler);
+ return null_handle;
} else if (receiver_map.is_identical_to(previous_receiver_map) &&
old_store_mode == STANDARD_STORE &&
(store_mode == STORE_AND_GROW_NO_TRANSITION ||
@@ -1952,15 +1888,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
- if (FLAG_vector_stores) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- receiver_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
- }
- return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
- receiver_map, language_mode(), store_mode);
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
+ return null_handle;
}
}
@@ -2019,18 +1951,13 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
}
}
- if (FLAG_vector_stores) {
- MapHandleList transitioned_maps(target_receiver_maps.length());
- CodeHandleList handlers(target_receiver_maps.length());
- PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
- &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
- language_mode());
- ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
- return null_handle;
- }
-
- return PropertyICCompiler::ComputeKeyedStorePolymorphic(
- &target_receiver_maps, store_mode, language_mode());
+ MapHandleList transitioned_maps(target_receiver_maps.length());
+ CodeHandleList handlers(target_receiver_maps.length());
+ PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
+ &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
+ language_mode());
+ ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
+ return null_handle;
}
@@ -2124,44 +2051,6 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
}
-void KeyedStoreIC::ValidateStoreMode(Handle<Code> stub) {
-#ifdef DEBUG
- DCHECK(!FLAG_vector_stores);
- if (stub.is_null() || *stub == *megamorphic_stub() || *stub == *slow_stub()) {
- return;
- }
-
- // Query the keyed store mode.
- ExtraICState state = stub->extra_ic_state();
- KeyedAccessStoreMode stub_mode = GetKeyedAccessStoreMode(state);
-
- MapHandleList map_list;
- stub->FindAllMaps(&map_list);
- CodeHandleList list;
- stub->FindHandlers(&list, map_list.length());
- for (int i = 0; i < list.length(); i++) {
- Handle<Code> handler = list.at(i);
- CHECK(handler->is_handler());
- CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
- uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
- // Ensure that we only see handlers we know have the store mode embedded.
- CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
- major_key == CodeStub::StoreFastElement ||
- major_key == CodeStub::StoreElement ||
- major_key == CodeStub::ElementsTransitionAndStore ||
- *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
- // Ensure that the store mode matches that of the IC.
- CHECK(major_key == CodeStub::NoCache ||
- stub_mode == CommonStoreModeBits::decode(minor_key));
- // The one exception is the keyed store slow builtin, which doesn't include
- // store mode.
- CHECK(major_key != CodeStub::NoCache ||
- *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
- }
-#endif // DEBUG
-}
-
-
MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -2192,20 +2081,11 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
StoreIC::Store(object, Handle<Name>::cast(key), value,
JSReceiver::MAY_BE_STORE_FROM_KEYED),
Object);
- if (FLAG_vector_stores) {
- if (!is_vector_set()) {
- ConfigureVectorState(MEGAMORPHIC);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unhandled internalized string key");
- TRACE_IC("StoreIC", key);
- }
- } else {
- if (!is_target_set()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- "unhandled internalized string key");
- TRACE_IC("StoreIC", key);
- set_target(*stub);
- }
+ if (!is_vector_set()) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ "unhandled internalized string key");
+ TRACE_IC("StoreIC", key);
}
return store_handle;
}
@@ -2262,10 +2142,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// from fast path keyed stores.
if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
stub = StoreElementStub(old_receiver_map, store_mode);
-
- // Validate that the store_mode in the stub can also be derived
- // from peeking in the code bits of the handlers.
- if (!FLAG_vector_stores) ValidateStoreMode(stub);
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
"dictionary or proxy prototype");
@@ -2278,27 +2154,12 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
- if (FLAG_vector_stores) {
- if (!is_vector_set() || stub.is_null()) {
- Code* megamorphic = *megamorphic_stub();
- if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
- ConfigureVectorState(MEGAMORPHIC);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- *stub == megamorphic ? "set generic" : "slow stub");
- }
- }
- } else {
- DCHECK(!is_target_set());
+ if (!is_vector_set() || stub.is_null()) {
Code* megamorphic = *megamorphic_stub();
- if (*stub == megamorphic) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
- } else if (*stub == *slow_stub()) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
- }
-
- DCHECK(!stub.is_null());
- if (!AddressIsDeoptimizedCode()) {
- set_target(*stub);
+ if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
+ ConfigureVectorState(MEGAMORPHIC);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+ *stub == megamorphic ? "set generic" : "slow stub");
}
}
TRACE_IC("StoreIC", key);
@@ -2328,6 +2189,12 @@ void CallIC::HandleMiss(Handle<Object> function) {
if (array_function.is_identical_to(js_function)) {
// Alter the slot.
nexus->ConfigureMonomorphicArray();
+ } else if (js_function->context()->native_context() !=
+ *isolate()->native_context()) {
+ // Don't collect cross-native context feedback for the CallIC.
+ // TODO(bmeurer): We should collect the SharedFunctionInfo as
+ // feedback in this case instead.
+ nexus->ConfigureMegamorphic();
} else {
nexus->ConfigureMonomorphic(js_function);
}
@@ -2338,8 +2205,7 @@ void CallIC::HandleMiss(Handle<Object> function) {
name = handle(js_function->shared()->name(), isolate());
}
- IC::State new_state = nexus->StateFromFeedback();
- OnTypeFeedbackChanged(isolate(), get_host(), *vector(), state(), new_state);
+ OnTypeFeedbackChanged(isolate(), get_host());
TRACE_IC("CallIC", name);
}
@@ -2448,29 +2314,21 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5 || args.length() == 6);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- }
+ DCHECK(args.length() == 5 || args.length() == 6);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
} else {
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
@@ -2487,49 +2345,41 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- int length = args.length();
- DCHECK(length == 5 || length == 6);
- // We might have slot and vector, for a normal miss (slot(3), vector(4)).
- // Or, map and vector for a transitioning store miss (map(3), vector(4)).
- // In this case, we need to recover the slot from a virtual register.
- // If length == 6, then a map is included (map(3), slot(4), vector(5)).
- Handle<Smi> slot;
- Handle<TypeFeedbackVector> vector;
- if (length == 5) {
- if (args.at<Object>(3)->IsMap()) {
- vector = args.at<TypeFeedbackVector>(4);
- slot = handle(
- *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
- isolate);
- } else {
- vector = args.at<TypeFeedbackVector>(4);
- slot = args.at<Smi>(3);
- }
+ int length = args.length();
+ DCHECK(length == 5 || length == 6);
+ // We might have slot and vector, for a normal miss (slot(3), vector(4)).
+ // Or, map and vector for a transitioning store miss (map(3), vector(4)).
+ // In this case, we need to recover the slot from a virtual register.
+ // If length == 6, then a map is included (map(3), slot(4), vector(5)).
+ Handle<Smi> slot;
+ Handle<TypeFeedbackVector> vector;
+ if (length == 5) {
+ if (args.at<Object>(3)->IsMap()) {
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = handle(
+ *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
+ isolate);
} else {
- vector = args.at<TypeFeedbackVector>(5);
- slot = args.at<Smi>(4);
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = args.at<Smi>(3);
}
+ } else {
+ vector = args.at<TypeFeedbackVector>(5);
+ slot = args.at<Smi>(4);
+ }
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
- StoreICNexus nexus(vector, vector_slot);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
- vector->GetKind(vector_slot));
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- }
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
} else {
- DCHECK(args.length() == 3 || args.length() == 4);
- StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
@@ -2547,23 +2397,15 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- }
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
return *result;
}
@@ -2576,42 +2418,29 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
Handle<Object> value = args.at<Object>(2);
Handle<Object> result;
- if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- KeyedStoreICNexus nexus(vector, vector_slot);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- } else {
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
- }
+ DCHECK(args.length() == 5);
+ Handle<Smi> slot = args.at<Smi>(3);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ ic.Store(receiver, key, value));
return *result;
}
RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
+ DCHECK(args.length() == 5);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
LanguageMode language_mode;
- if (FLAG_vector_stores) {
- StoreICNexus nexus(isolate);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- } else {
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- language_mode = ic.language_mode();
- }
+ StoreICNexus nexus(isolate);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2622,19 +2451,14 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
+ DCHECK(args.length() == 5);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
LanguageMode language_mode;
- if (FLAG_vector_stores) {
- KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- } else {
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
- language_mode = ic.language_mode();
- }
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2646,23 +2470,17 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- // Without vector stores, length == 4.
- // With vector stores, length == 5 or 6, depending on whether the vector slot
+ // Length == 5 or 6, depending on whether the vector slot
// is passed in a virtual register or not.
- DCHECK(!FLAG_vector_stores || args.length() == 5 || args.length() == 6);
+ DCHECK(args.length() == 5 || args.length() == 6);
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
Handle<Map> map = args.at<Map>(3);
LanguageMode language_mode;
- if (FLAG_vector_stores) {
- KeyedStoreICNexus nexus(isolate);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- } else {
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
- language_mode = ic.language_mode();
- }
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
@@ -2795,9 +2613,9 @@ MaybeHandle<Object> BinaryOpIC::Transition(
// Patch the inlined smi code as necessary.
if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK);
} else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
- PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate(), address(), DISABLE_INLINED_SMI_CHECK);
}
return result;
@@ -2868,9 +2686,9 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HasInlinedSmiCode(address()), x, y);
CompareICStub stub(isolate(), op_, old_stub.strength(), new_left, new_right,
state);
- if (state == CompareICState::KNOWN_OBJECT) {
+ if (state == CompareICState::KNOWN_RECEIVER) {
stub.set_known_map(
- Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
+ Handle<Map>(Handle<JSReceiver>::cast(x)->map(), isolate()));
}
Handle<Code> new_target = stub.GetCode();
set_target(*new_target);
@@ -2890,7 +2708,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (old_stub.state() == CompareICState::UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ PatchInlinedSmiCode(isolate(), address(), ENABLE_INLINED_SMI_CHECK);
}
return *new_target;
@@ -2980,7 +2798,7 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode();
set_target(*code);
- return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
+ return isolate()->factory()->ToBoolean(to_boolean_value);
}
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 47883b46af..a3265d70b9 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -77,9 +77,8 @@ class IC {
static bool ICUseVector(Code::Kind kind) {
return kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
- kind == Code::CALL_IC ||
- (FLAG_vector_stores &&
- (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC));
+ kind == Code::CALL_IC || kind == Code::STORE_IC ||
+ kind == Code::KEYED_STORE_IC;
}
protected:
@@ -144,9 +143,7 @@ class IC {
State old_state, State new_state,
bool target_remains_ic_stub);
// As a vector-based IC, type feedback must be updated differently.
- static void OnTypeFeedbackChanged(Isolate* isolate, Code* host,
- TypeFeedbackVector* vector, State old_state,
- State new_state);
+ static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
@@ -532,22 +529,10 @@ class KeyedStoreIC : public StoreIC {
IcCheckTypeField::encode(ELEMENT);
}
- static KeyedAccessStoreMode GetKeyedAccessStoreMode(
- ExtraICState extra_state) {
- DCHECK(!FLAG_vector_stores);
- return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
- }
-
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
- DCHECK(FLAG_vector_stores);
return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
}
- static IcCheckType GetKeyType(ExtraICState extra_state) {
- DCHECK(!FLAG_vector_stores);
- return IcCheckTypeField::decode(extra_state);
- }
-
KeyedStoreIC(FrameDepth depth, Isolate* isolate,
KeyedStoreICNexus* nexus = NULL)
: StoreIC(depth, isolate, nexus) {
@@ -604,8 +589,6 @@ class KeyedStoreIC : public StoreIC {
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
- void ValidateStoreMode(Handle<Code> stub);
-
friend class IC;
};
@@ -679,7 +662,8 @@ class ToBooleanIC : public IC {
// Helper for BinaryOpIC and CompareIC.
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/mips/OWNERS b/deps/v8/src/ic/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/ic/mips/OWNERS
+++ b/deps/v8/src/ic/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index f2f6c62c71..b122946577 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, t0, t1};
return registers;
}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 200d1f6ebe..554d0c56ff 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
@@ -165,10 +167,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ lw(result, MemOperand(cp, offset));
- __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- __ lw(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ lw(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -216,8 +215,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -283,6 +284,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
__ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -297,15 +305,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -314,7 +317,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -323,8 +326,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -698,8 +700,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -722,7 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(at, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -734,7 +735,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index 64f1662880..86a602b3ec 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -10,114 +10,6 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Branch(&miss, ne, this->name(), Operand(name));
- }
- }
-
- Label number_case;
- Register match = scratch2();
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- // Separate compare from branch, to provide path for above JumpIfSmi().
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ GetWeakValue(match, cell);
- __ Subu(match, match, Operand(map_reg));
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
- Operand(zero_reg));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- Register match = scratch2();
- __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ GetWeakValue(match, cell);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
- Operand(map_reg));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, match, Operand(map_reg));
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
#define __ ACCESS_MASM(masm)
@@ -130,7 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(a0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 60c06a3eb4..a27d6b56f7 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -316,8 +316,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -330,8 +329,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -346,8 +344,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -359,8 +356,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -477,8 +473,13 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = t0;
+ Register scratch = t0;
+ Register scratch2 = t4;
+ Register scratch3 = t5;
Register address = t1;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, scratch2, scratch3, address));
+
if (check_map == kCheckMap) {
__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(fast_double, ne, elements_map,
@@ -492,11 +493,10 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
__ addu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
- __ Branch(&holecheck_passed1, ne, scratch_value,
+ __ lw(scratch, MemOperand(address));
+ __ Branch(&holecheck_passed1, ne, scratch,
Operand(masm->isolate()->factory()->the_hole_value()));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -506,35 +506,34 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch);
__ sw(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(address, address, scratch);
__ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+ __ mov(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -553,34 +552,31 @@ static void KeyedStoreGenerateMegamorphicHelper(
kHoleNanUpper32Offset - kHeapObjectTag));
__ sll(at, key, kPointerSizeLog2);
__ addu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
- __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ __ lw(scratch, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch,
Operand(kHoleNanUpper32));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key,
- elements, // Overwritten.
- a3, // Scratch regs...
- t0, t1, &transition_double_elements);
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
+ scratch3, &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, t0, Operand(at));
+ __ Branch(&non_double_value, ne, scratch, Operand(at));
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -591,7 +587,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, t0, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -603,7 +599,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, t0, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -675,19 +671,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(t0, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -741,23 +735,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -783,8 +771,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -849,7 +836,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address andi_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -887,7 +875,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// andi at, rx, #kSmiTagMask
// Branch <target>, ne, at, Operand(zero_reg)
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
diff --git a/deps/v8/src/ic/mips64/OWNERS b/deps/v8/src/ic/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/ic/mips64/OWNERS
+++ b/deps/v8/src/ic/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index 500a6d65c7..96e921c7c6 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, a4, a5};
return registers;
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 942c42c221..d94a292228 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(a1, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(a1, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
@@ -165,11 +167,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- // Check we're still in the same context.
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ ld(result, MemOperand(cp, offset));
- __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- __ ld(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ ld(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -217,8 +215,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -284,6 +284,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
__ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -298,15 +305,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -315,7 +317,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -324,8 +326,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -699,8 +700,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -723,7 +723,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(at, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -735,7 +735,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index 8cdd8f03bc..276f3afd38 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -10,114 +10,6 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Branch(&miss, ne, this->name(), Operand(name));
- }
- }
-
- Label number_case;
- Register match = scratch2();
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi.
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- // Check map and tail call if there's a match.
- // Separate compare from branch, to provide path for above JumpIfSmi().
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ GetWeakValue(match, cell);
- __ Dsubu(match, match, Operand(map_reg));
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
- Operand(zero_reg));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- Register match = scratch2();
- __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ GetWeakValue(match, cell);
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
- Operand(map_reg));
- } else {
- Label next_map;
- __ Branch(&next_map, ne, match, Operand(map_reg));
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
#define __ ACCESS_MASM(masm)
@@ -130,7 +22,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ Push(a0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index e73921a317..c5da5fbb42 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -313,8 +313,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -327,8 +326,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -343,8 +341,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -356,8 +353,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -474,8 +470,12 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = a4;
+ Register scratch = a4;
+ Register scratch2 = t0;
Register address = a5;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, scratch2, address));
+
if (check_map == kCheckMap) {
__ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(fast_double, ne, elements_map,
@@ -489,12 +489,11 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
- __ ld(scratch_value, MemOperand(address));
+ __ ld(scratch, MemOperand(address));
- __ Branch(&holecheck_passed1, ne, scratch_value,
+ __ Branch(&holecheck_passed1, ne, scratch,
Operand(masm->isolate()->factory()->the_hole_value()));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -504,37 +503,36 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ Daddu(address, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiScale(scratch_value, key, kPointerSizeLog2);
- __ Daddu(address, address, scratch_value);
+ __ SmiScale(scratch, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch);
__ sd(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Daddu(address, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiScale(scratch_value, key, kPointerSizeLog2);
- __ Daddu(address, address, scratch_value);
+ __ SmiScale(scratch, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch);
__ sd(value, MemOperand(address));
// Update write barrier for the elements array address.
- __ mov(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+ __ mov(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -554,34 +552,31 @@ static void KeyedStoreGenerateMegamorphicHelper(
kHeapObjectTag));
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
- __ Branch(&fast_double_without_map_check, ne, scratch_value,
+ __ lw(scratch, MemOperand(address));
+ __ Branch(&fast_double_without_map_check, ne, scratch,
Operand(static_cast<int32_t>(kHoleNanUpper32)));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key,
- elements, // Overwritten.
- a3, // Scratch regs...
- a4, &transition_double_elements);
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
+ &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, a4, Operand(at));
+ __ Branch(&non_double_value, ne, scratch, Operand(at));
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -592,7 +587,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, a4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -604,7 +599,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, a4, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -673,20 +668,18 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(a4, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
-
- DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+
+ DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ li(slot, Operand(Smi::FromInt(slot_index)));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -740,23 +733,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -782,8 +769,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -846,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address andi_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -876,8 +863,6 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
- Instr branch_instr =
- Assembler::instr_at(patch_address + Instruction::kInstrSize);
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
// andi at, rx, 0
@@ -886,7 +871,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// andi at, rx, #kSmiTagMask
// Branch <target>, ne, at, Operand(zero_reg)
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
@@ -897,13 +882,44 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
DCHECK(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
+ Instr branch_instr =
+ Assembler::instr_at(patch_address + Instruction::kInstrSize);
DCHECK(Assembler::IsBranch(branch_instr));
- if (Assembler::IsBeq(branch_instr)) {
- patcher.ChangeBranchCondition(ne);
- } else {
- DCHECK(Assembler::IsBne(branch_instr));
- patcher.ChangeBranchCondition(eq);
+
+ uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
+ // Currently only the 'eq' and 'ne' cond values are supported and the simple
+ // branch instructions and their r6 variants (with opcode being the branch
+ // type). There are some special cases (see Assembler::IsBranch()) so
+ // extending this would be tricky.
+ DCHECK(opcode == BEQ || // BEQ
+ opcode == BNE || // BNE
+ opcode == POP10 || // BEQC
+ opcode == POP30 || // BNEC
+ opcode == POP66 || // BEQZC
+ opcode == POP76); // BNEZC
+ switch (opcode) {
+ case BEQ:
+ opcode = BNE; // change BEQ to BNE.
+ break;
+ case POP10:
+ opcode = POP30; // change BEQC to BNEC.
+ break;
+ case POP66:
+ opcode = POP76; // change BEQZC to BNEZC.
+ break;
+ case BNE:
+ opcode = BEQ; // change BNE to BEQ.
+ break;
+ case POP30:
+ opcode = POP10; // change BNEC to BEQC.
+ break;
+ case POP76:
+ opcode = POP66; // change BNEZC to BEQZC.
+ break;
+ default:
+ UNIMPLEMENTED();
}
+ patcher.ChangeBranchCondition(branch_instr, opcode);
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index fcbbc66121..b1e06e16e1 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -31,7 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores || r6.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r6, r7, r8};
return registers;
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 0335362fbb..8b48755bbf 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -40,7 +40,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r4, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -81,7 +82,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(r4, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(r4, expected, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -141,7 +143,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Check that receiver is a JSObject.
__ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmpi(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
__ blt(miss_label);
// Load properties array.
@@ -167,11 +169,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ LoadP(result, MemOperand(cp, offset));
- __ LoadP(result,
- FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- __ LoadP(result, MemOperand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ LoadP(result,
FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -222,8 +220,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -293,6 +293,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
ApiFunction fun(function_address);
@@ -307,15 +314,10 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
@@ -324,7 +326,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -333,8 +335,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -705,8 +706,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -730,7 +730,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(ip, value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -742,7 +742,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index 578b73d40e..c6b36f29f4 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -20,112 +20,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
StoreDescriptor::ValueRegister(), r0);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
- __ lbz(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Cmpi(this->name(), Operand(name), r0);
- __ bne(&miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
-
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- Label next;
- __ bne(&next);
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
- __ bind(&next);
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- int receiver_count = receiver_maps->length();
- Register map_reg = scratch1();
- __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
- } else {
- Label next_map;
- __ bne(&next_map);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index ea8239a3e2..78daac2657 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -319,8 +319,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -333,8 +332,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -349,8 +347,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -362,8 +359,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
@@ -472,23 +468,17 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
- } else {
- __ Push(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
- }
+ __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -503,13 +493,15 @@ static void KeyedStoreGenerateMegamorphicHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = r7;
+ Register scratch = r7;
Register address = r8;
+ DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+ scratch, address));
+
if (check_map == kCheckMap) {
__ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ mov(scratch_value,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ cmp(elements_map, scratch_value);
+ __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
+ __ cmp(elements_map, scratch);
__ bne(fast_double);
}
@@ -518,13 +510,11 @@ static void KeyedStoreGenerateMegamorphicHelper(
// there may be a callback on the element
Label holecheck_passed1;
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch_value, key);
- __ LoadPX(scratch_value, MemOperand(address, scratch_value));
- __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()),
- r0);
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ LoadPX(scratch, MemOperand(address, scratch));
+ __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
__ bne(&holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&holecheck_passed1);
@@ -534,35 +524,32 @@ static void KeyedStoreGenerateMegamorphicHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
- __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
- r0);
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch_value, key);
- __ StorePX(value, MemOperand(address, scratch_value));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ StorePX(value, MemOperand(address, scratch));
__ Ret();
__ bind(&non_smi_value);
// Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch_value,
- &transition_smi_elements);
+ __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
- __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
- r0);
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
}
__ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch_value, key);
- __ StorePUX(value, MemOperand(address, scratch_value));
+ __ SmiToPtrArrayOffset(scratch, key);
+ __ StorePUX(value, MemOperand(address, scratch));
// Update write barrier for the elements array address.
- __ mr(scratch_value, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+ __ mr(scratch, value); // Preserve the value which is returned.
+ __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
@@ -580,34 +567,32 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ addi(address, elements,
Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
kHeapObjectTag)));
- __ SmiToDoubleArrayOffset(scratch_value, key);
- __ lwzx(scratch_value, MemOperand(address, scratch_value));
- __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0);
+ __ SmiToDoubleArrayOffset(scratch, key);
+ __ lwzx(scratch, MemOperand(address, scratch));
+ __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
__ bne(&fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
- slow);
+ __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r6, d0,
+ __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
- __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
- r0);
+ __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
+ __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex);
+ __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ bne(&non_double_value);
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow);
+ FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
AllocationSiteMode mode =
AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -618,7 +603,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r7, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -630,7 +615,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, r7, slow);
+ receiver_map, scratch, slow);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(
masm, receiver, key, value, receiver_map, mode, slow);
@@ -704,19 +689,17 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r7, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+ __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -794,8 +777,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -862,7 +844,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
//
// This code is paired with the JumpPatchSite class in full-codegen-ppc.cc
//
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
@@ -900,7 +883,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// rlwinm(r0, value, 0, 31, 31, SetRC);
// bc(label, BT/BF, 2)
// and vice-versa to be disabled again.
- CodePatcher patcher(patch_address, 2);
+ CodePatcher patcher(isolate, patch_address, 2);
Register reg = Assembler::GetRA(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
DCHECK(Assembler::IsCmpRegister(instr_at_patch));
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 85b44ef475..b8d50b3d2c 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -31,8 +31,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores ||
- rbx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, rbx, rdi, r8};
return registers;
}
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 6bc3aafa89..c09eca68dd 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -56,7 +56,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -78,10 +78,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ movp(result, Operand(rsi, offset));
- __ movp(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset));
- __ movp(result, Operand(result, Context::SlotOffset(index)));
+ __ LoadNativeContextSlot(index, result);
// Load its initial map. The global functions all have initial maps.
__ movp(result,
FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -115,8 +112,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -188,6 +187,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the fast handler if present.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
+
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ Move(api_function_address, function_address,
@@ -241,8 +247,8 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount actual(1);
ParameterCount expected(expected_arguments);
__ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_SETTER);
- __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -284,8 +290,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount actual(0);
ParameterCount expected(expected_arguments);
__ LoadAccessor(rdi, holder, accessor_index, ACCESSOR_GETTER);
- __ InvokeFunction(rdi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(rdi, no_reg, expected, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -304,26 +310,16 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ PopReturnAddressTo(r11);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ Push(slot);
- __ Push(vector);
- __ PushReturnAddressFrom(r11);
- } else {
- DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ PushReturnAddressFrom(rbx);
- }
+ __ PopReturnAddressTo(r11);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ Push(slot);
+ __ Push(vector);
+ __ PushReturnAddressFrom(r11);
}
@@ -332,7 +328,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -341,8 +337,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -722,8 +717,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ PushReturnAddressFrom(scratch2());
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -748,7 +742,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -764,7 +758,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index fd92cca570..9d734338bb 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -28,111 +28,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss);
-
- Register map_reg = scratch1();
- __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = receiver_maps->length();
- for (int i = 0; i < receiver_count; ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- // Check map and tail call if there's a match
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
-
- __ bind(&miss);
-
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
- __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ Cmp(this->name(), name);
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- // Check map and tail call if there's a match
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
- }
- }
- DCHECK(number_of_handled_maps > 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 3fc8747c66..bf4ad96f69 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -564,18 +564,16 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
- if (FLAG_vector_stores) {
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ Move(vector, dummy_vector);
- __ Move(slot, Smi::FromInt(slot_index));
- }
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot_index = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ Move(vector, dummy_vector);
+ __ Move(slot, Smi::FromInt(slot_index));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -674,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -694,8 +691,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -707,8 +703,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -727,28 +722,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- // This shouldn't be called.
- __ int3();
- return;
- }
-
- // The return address is on the stack.
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), rbx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
+ // This shouldn't be called.
+ __ int3();
}
@@ -763,13 +743,11 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(receiver);
__ Push(name);
__ Push(value);
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
- DCHECK(!temp.is(slot) && !temp.is(vector));
- __ Push(slot);
- __ Push(vector);
- }
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(!temp.is(slot) && !temp.is(vector));
+ __ Push(slot);
+ __ Push(vector);
__ PushReturnAddressFrom(temp);
}
@@ -779,8 +757,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -789,8 +766,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Register dictionary = r11;
- DCHECK(!FLAG_vector_stores ||
- !AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
+ DCHECK(!AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
VectorStoreICDescriptor::SlotRegister()));
Label miss;
@@ -812,8 +788,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -851,7 +826,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index a80c649e45..2c1b942756 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -30,8 +30,6 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(FLAG_vector_stores ||
- ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index bb3b25a47f..cc43ed298d 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -36,7 +36,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_GETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -92,7 +92,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(scratch0, FIRST_JS_RECEIVER_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -114,10 +114,7 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register result, Label* miss) {
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
- __ mov(result, Operand(esi, offset));
- __ mov(result, FieldOperand(result, JSGlobalObject::kNativeContextOffset));
- __ mov(result, Operand(result, Context::SlotOffset(index)));
+ __ LoadGlobalFunction(index, result);
// Load its initial map. The global functions all have initial maps.
__ mov(result,
FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
@@ -206,6 +203,12 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
+ if (api_call_info->fast_handler()->IsCode()) {
+ // Just tail call into the code.
+ __ Jump(handle(Code::cast(api_call_info->fast_handler())),
+ RelocInfo::CODE_TARGET);
+ return;
+ }
// Put api_function_address in place.
Address function_address = v8::ToCData<Address>(api_call_info->callback());
__ mov(api_function_address, Immediate(function_address));
@@ -261,7 +264,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
ParameterCount expected(expected_arguments);
__ LoadAccessor(edi, holder, accessor_index, ACCESSOR_SETTER);
__ InvokeFunction(edi, expected, actual, CALL_FUNCTION,
- NullCallWrapper());
+ CheckDebugStepCallWrapper());
} else {
// If we generate a global code snippet for deoptimization only, remember
// the place to continue after deoptimization.
@@ -294,8 +297,10 @@ static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
static void CompileCallLoadPropertyWithInterceptor(
MacroAssembler* masm, Register receiver, Register holder, Register name,
Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+ DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+ Runtime::FunctionForId(id)->nargs);
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
- __ CallRuntime(id, NamedLoadHandlerCompiler::kInterceptorArgsLength);
+ __ CallRuntime(id);
}
@@ -303,25 +308,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // which contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
}
@@ -330,7 +325,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow);
}
@@ -339,8 +334,7 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
- 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
@@ -732,8 +726,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
holder());
__ push(scratch2()); // restore old return address
- __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor,
- NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+ __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
}
@@ -758,7 +751,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStoreCallbackProperty, 5, 1);
+ __ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
@@ -774,7 +767,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
- __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor, 3, 1);
+ __ TailCallRuntime(Runtime::kStorePropertyWithInterceptor);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index d29e32108b..9edf63b722 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -27,104 +27,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
__ push(ebx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers,
- Handle<Name> name,
- Code::StubType type,
- IcCheckType check) {
- Label miss;
-
- if (check == PROPERTY &&
- (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
- // In case we are compiling an IC for dictionary loads or stores, just
- // check whether the name is unique.
- if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
- // Keyed loads with dictionaries shouldn't be here, they go generic.
- // The DCHECK is to protect assumptions when --vector-ics is on.
- DCHECK(kind() != Code::KEYED_LOAD_IC);
- Register tmp = scratch1();
- __ JumpIfSmi(this->name(), &miss);
- __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
- __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
- } else {
- __ cmp(this->name(), Immediate(name));
- __ j(not_equal, &miss);
- }
- }
-
- Label number_case;
- Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
- __ JumpIfSmi(receiver(), smi_target);
-
- // Polymorphic keyed stores may use the map register
- Register map_reg = scratch1();
- DCHECK(kind() != Code::KEYED_STORE_IC ||
- map_reg.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- int receiver_count = maps->length();
- int number_of_handled_maps = 0;
- for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
- if (!map->is_deprecated()) {
- number_of_handled_maps++;
- Handle<WeakCell> cell = Map::WeakCellForMap(map);
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (map->instance_type() == HEAP_NUMBER_TYPE) {
- DCHECK(!number_case.is_unused());
- __ bind(&number_case);
- }
- __ j(equal, handlers->at(current));
- }
- }
- DCHECK(number_of_handled_maps != 0);
-
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- InlineCacheState state =
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
- return GetCode(kind(), type, name, state);
-}
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
- MapHandleList* transitioned_maps) {
- Label miss;
- __ JumpIfSmi(receiver(), &miss, Label::kNear);
- Register map_reg = scratch1();
- __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
- __ CmpWeakValue(map_reg, cell, scratch2());
- if (transitioned_maps->at(i).is_null()) {
- __ j(equal, handler_stubs->at(i));
- } else {
- Label next_map;
- __ j(not_equal, &next_map, Label::kNear);
- Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- Register transition_map = scratch1();
- DCHECK(!FLAG_vector_stores &&
- transition_map.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadWeakValue(transition_map, cell, &miss);
- __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
- __ bind(&next_map);
- }
- }
- __ bind(&miss);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- // Return the generated code.
- return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+ __ TailCallRuntime(Runtime::kSetProperty);
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index 6ef5b635c7..d4cc3ce80a 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -561,26 +561,22 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfNotUniqueNameInstanceType(ebx, &slow);
- if (FLAG_vector_stores) {
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
- }
+ // The handlers in the stub cache expect a vector and slot. Since we won't
+ // change the IC from any downstream misses, a dummy vector can be used.
+ Handle<TypeFeedbackVector> dummy_vector =
+ TypeFeedbackVector::DummyVector(masm->isolate());
+ int slot = dummy_vector->GetIndex(
+ FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+ __ push(Immediate(Smi::FromInt(slot)));
+ __ push(Immediate(dummy_vector));
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, key, edi, no_reg);
- if (FLAG_vector_stores) {
- __ pop(VectorStoreICDescriptor::VectorRegister());
- __ pop(VectorStoreICDescriptor::SlotRegister());
- }
+ __ pop(VectorStoreICDescriptor::VectorRegister());
+ __ pop(VectorStoreICDescriptor::SlotRegister());
// Cache miss.
__ jmp(&miss);
@@ -676,8 +672,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kLoadIC_Miss);
}
@@ -695,8 +690,7 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty,
- 2, 1);
+ : Runtime::kGetProperty);
}
@@ -707,8 +701,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
- int arg_count = 4;
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, arg_count, 1);
+ __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
@@ -726,27 +719,15 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
// Do tail-call to runtime routine.
__ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty,
- 2, 1);
+ : Runtime::kKeyedGetProperty);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- if (FLAG_vector_stores) {
- // This shouldn't be called.
- __ int3();
- return;
- }
-
- // Return address is on the stack.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister(), ebx, no_reg);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
+ // This shouldn't be called.
+ // TODO(mvstanton): remove this method.
+ __ int3();
+ return;
}
@@ -754,25 +735,15 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- if (FLAG_vector_stores) {
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
-
- __ xchg(receiver, Operand(esp, 0));
- __ push(name);
- __ push(value);
- __ push(slot);
- __ push(vector);
- __ push(receiver); // Contains the return address.
- } else {
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
- }
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // Contains the return address.
}
@@ -781,8 +752,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Perform tail call to the entry.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Miss);
}
@@ -798,25 +768,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
- if (FLAG_vector_stores) {
- __ push(vector);
- __ push(slot);
- }
+ __ push(vector);
+ __ push(slot);
Register dictionary = ebx;
__ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(FLAG_vector_stores ? 3 : 1);
+ __ Drop(3);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
- if (FLAG_vector_stores) {
- __ pop(slot);
- __ pop(vector);
- }
+ __ pop(slot);
+ __ pop(vector);
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
@@ -828,8 +794,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- int args = FLAG_vector_stores ? 5 : 3;
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, args, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
@@ -867,7 +832,8 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+ InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 297722c255..94ed7020c3 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -81,6 +81,12 @@ void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific(
}
+void VoidDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
+}
+
+
Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
@@ -92,6 +98,7 @@ Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
+
void LoadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
@@ -363,6 +370,27 @@ void ArgumentsAccessNewDescriptor::InitializePlatformSpecific(
}
+Type::FunctionType*
+RestParamAccessDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, SmiType(zone));
+ function->InitParameter(1, ExternalPointer(zone));
+ function->InitParameter(2, SmiType(zone));
+ return function;
+}
+
+
+void RestParamAccessDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {parameter_count(), parameter_pointer(),
+ rest_parameter_index()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ContextOnlyDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);
@@ -377,6 +405,20 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
Type::FunctionType*
+FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone)); // closure
+ function->InitParameter(1, SmiType(zone)); // literal_index
+ function->InitParameter(2, AnyTagged(zone)); // pattern
+ function->InitParameter(3, AnyTagged(zone)); // flags
+ return function;
+}
+
+
+Type::FunctionType*
FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
@@ -420,9 +462,35 @@ CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
- function->InitParameter(0, AnyTagged(zone)); // target
- function->InitParameter(
- 1, UntaggedIntegral32(zone)); // actual number of arguments
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(1, UntaggedIntegral32(zone)); // actual #arguments
+ return function;
+}
+
+
+Type::FunctionType*
+ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(1, AnyTagged(zone)); // new.target
+ function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
+ function->InitParameter(3, AnyTagged(zone)); // opt. allocation site
+ return function;
+}
+
+
+Type::FunctionType*
+ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int paramater_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(1, AnyTagged(zone)); // new.target
+ function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
return function;
}
@@ -482,13 +550,11 @@ ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(
- 1, UntaggedIntegral32(zone)); // actual number of arguments
- function->InitParameter(
- 2,
- UntaggedIntegral32(zone)); // expected number of arguments
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, Type::Receiver()); // JSFunction
+ function->InitParameter(1, AnyTagged(zone)); // the new target
+ function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
+ function->InitParameter(3, UntaggedIntegral32(zone)); // expected #arguments
return function;
}
@@ -499,12 +565,11 @@ ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Zone* zone = isolate->interface_descriptor_zone();
Type::FunctionType* function =
Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
- function->InitParameter(0, AnyTagged(zone)); // callee
- function->InitParameter(1, AnyTagged(zone)); // call_data
- function->InitParameter(2, AnyTagged(zone)); // holder
- function->InitParameter(3, ExternalPointer(zone)); // api_function_address
- function->InitParameter(
- 4, UntaggedIntegral32(zone)); // actual number of arguments
+ function->InitParameter(0, AnyTagged(zone)); // callee
+ function->InitParameter(1, AnyTagged(zone)); // call_data
+ function->InitParameter(2, AnyTagged(zone)); // holder
+ function->InitParameter(3, ExternalPointer(zone)); // api_function_address
+ function->InitParameter(4, UntaggedIntegral32(zone)); // actual #arguments
return function;
}
@@ -523,32 +588,5 @@ ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
}
-Type::FunctionType* MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
- function->InitParameter(0, Type::Receiver());
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
- function->InitParameter(3, AnyTagged(zone));
- return function;
-}
-
-
-Type::FunctionType* MathRoundVariantCallFromOptimizedCodeDescriptor::
- BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
- function->InitParameter(0, Type::Receiver());
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
- function->InitParameter(3, AnyTagged(zone));
- function->InitParameter(4, AnyTagged(zone));
- return function;
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 2c5ac4b052..2814daeded 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -14,6 +14,7 @@ namespace internal {
class PlatformInterfaceDescriptor;
#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Void) \
V(Load) \
V(Store) \
V(StoreTransition) \
@@ -30,6 +31,7 @@ class PlatformInterfaceDescriptor;
V(ToObject) \
V(NumberToString) \
V(Typeof) \
+ V(FastCloneRegExp) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
@@ -39,6 +41,8 @@ class PlatformInterfaceDescriptor;
V(CallFunctionWithFeedbackAndVector) \
V(CallConstruct) \
V(CallTrampoline) \
+ V(ConstructStub) \
+ V(ConstructTrampoline) \
V(RegExpConstructResult) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
@@ -64,6 +68,7 @@ class PlatformInterfaceDescriptor;
V(ApiGetter) \
V(ArgumentsAccessRead) \
V(ArgumentsAccessNew) \
+ V(RestParamAccess) \
V(StoreArrayLiteralElement) \
V(LoadGlobalViaContext) \
V(StoreGlobalViaContext) \
@@ -71,8 +76,6 @@ class PlatformInterfaceDescriptor;
V(MathPowInteger) \
V(ContextOnly) \
V(GrowArrayElements) \
- V(MathRoundVariantCallFromUnoptimizedCode) \
- V(MathRoundVariantCallFromOptimizedCode) \
V(InterpreterPushArgsAndCall) \
V(InterpreterPushArgsAndConstruct) \
V(InterpreterCEntry)
@@ -231,6 +234,14 @@ class CallInterfaceDescriptor {
Isolate* isolate, int register_param_count) override; \
\
public:
+
+
+class VoidDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
+};
+
+
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
public:
@@ -417,6 +428,13 @@ class TypeofDescriptor : public CallInterfaceDescriptor {
};
+class FastCloneRegExpDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneRegExpDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneShallowArrayDescriptor,
@@ -458,6 +476,20 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
};
+class ConstructStubDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructStubDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
+class ConstructTrampolineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructTrampolineDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class CallFunctionDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
@@ -680,6 +712,16 @@ class ArgumentsAccessNewDescriptor : public CallInterfaceDescriptor {
};
+class RestParamAccessDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RestParamAccessDescriptor,
+ CallInterfaceDescriptor)
+ static const Register parameter_count();
+ static const Register parameter_pointer();
+ static const Register rest_parameter_index();
+};
+
+
class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
@@ -703,23 +745,6 @@ class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
};
-class MathRoundVariantCallFromOptimizedCodeDescriptor
- : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- MathRoundVariantCallFromOptimizedCodeDescriptor, CallInterfaceDescriptor)
-};
-
-
-class MathRoundVariantCallFromUnoptimizedCodeDescriptor
- : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- MathRoundVariantCallFromUnoptimizedCodeDescriptor,
- CallInterfaceDescriptor)
-};
-
-
class ContextOnlyDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index f2f5c07251..1b15fc6668 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -8,16 +8,72 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class BytecodeArrayBuilder::PreviousBytecodeHelper {
+ public:
+ explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
+ : array_builder_(array_builder),
+ previous_bytecode_start_(array_builder_.last_bytecode_start_) {
+ // This helper is expected to be instantiated only when the last bytecode is
+ // in the same basic block.
+ DCHECK(array_builder_.LastBytecodeInSameBlock());
+ }
+
+ // Returns the previous bytecode in the same basic block.
+ MUST_USE_RESULT Bytecode GetBytecode() const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ return Bytecodes::FromByte(
+ array_builder_.bytecodes()->at(previous_bytecode_start_));
+ }
+
+ // Returns the operand at operand_index for the previous bytecode in the
+ // same basic block.
+ MUST_USE_RESULT uint32_t GetOperand(int operand_index) const {
+ DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
+ Bytecode bytecode = GetBytecode();
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode));
+ size_t operand_offset =
+ previous_bytecode_start_ +
+ Bytecodes::GetOperandOffset(bytecode, operand_index);
+ OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
+ switch (size) {
+ default:
+ case OperandSize::kNone:
+ UNREACHABLE();
+ case OperandSize::kByte:
+ return static_cast<uint32_t>(
+ array_builder_.bytecodes()->at(operand_offset));
+ case OperandSize::kShort:
+ uint16_t operand =
+ (array_builder_.bytecodes()->at(operand_offset) << 8) +
+ array_builder_.bytecodes()->at(operand_offset + 1);
+ return static_cast<uint32_t>(operand);
+ }
+ }
+
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const {
+ return array_builder_.constant_array_builder()->At(
+ GetOperand(operand_index));
+ }
+
+ private:
+ const BytecodeArrayBuilder& array_builder_;
+ size_t previous_bytecode_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
+};
+
+
BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
zone_(zone),
bytecodes_(zone),
bytecode_generated_(false),
+ constant_array_builder_(isolate, zone),
last_block_end_(0),
last_bytecode_start_(~0),
exit_seen_in_block_(false),
- constants_map_(isolate->heap(), zone),
- constants_(zone),
+ unbound_jumps_(0),
parameter_count_(-1),
local_register_count_(-1),
context_register_count_(-1),
@@ -25,6 +81,9 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
free_temporaries_(zone) {}
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
+
+
void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
local_register_count_ = number_of_locals;
DCHECK_LE(context_register_count_, 0);
@@ -85,21 +144,14 @@ bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK_EQ(bytecode_generated_, false);
-
EnsureReturn();
int bytecode_size = static_cast<int>(bytecodes_.size());
int register_count = fixed_register_count() + temporary_register_count_;
int frame_size = register_count * kPointerSize;
-
Factory* factory = isolate_->factory();
- int constants_count = static_cast<int>(constants_.size());
Handle<FixedArray> constant_pool =
- factory->NewFixedArray(constants_count, TENURED);
- for (int i = 0; i < constants_count; i++) {
- constant_pool->set(i, *constants_[i]);
- }
-
+ constant_array_builder()->ToFixedArray(factory);
Handle<BytecodeArray> output =
factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
parameter_count(), constant_pool);
@@ -137,6 +189,14 @@ void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3) {
+ uint32_t operands[] = {operand0, operand1, operand2, operand3};
+ Output(bytecode, operands);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2) {
uint32_t operands[] = {operand0, operand1, operand2};
Output(bytecode, operands);
@@ -269,11 +329,21 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
+ if (value) {
+ LoadTrue();
+ } else {
+ LoadFalse();
+ }
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- // TODO(oth): Avoid loading the accumulator with the register if the
- // previous bytecode stored the accumulator with the same register.
- Output(Bytecode::kLdar, reg.ToOperand());
+ if (!IsRegisterInAccumulator(reg)) {
+ Output(Bytecode::kLdar, reg.ToOperand());
+ }
return *this;
}
@@ -282,17 +352,47 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
// TODO(oth): Avoid storing the accumulator in the register if the
// previous bytecode loaded the accumulator with the same register.
+ //
+ // TODO(oth): If the previous bytecode is a MOV into this register,
+ // the previous instruction can be removed. The logic for determining
+ // these redundant MOVs appears complex.
Output(Bytecode::kStar, reg.ToOperand());
+ if (!IsRegisterInAccumulator(reg)) {
+ Output(Bytecode::kStar, reg.ToOperand());
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
+ Register to) {
+ DCHECK(from != to);
+ Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
+ Register reg1) {
+ DCHECK(reg0 != reg1);
+ if (FitsInReg8Operand(reg0)) {
+ Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
+ } else if (FitsInReg8Operand(reg1)) {
+ Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+ } else {
+ Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
- size_t name_index, int feedback_slot, LanguageMode language_mode,
+ const Handle<String> name, int feedback_slot, LanguageMode language_mode,
TypeofMode typeof_mode) {
// TODO(rmcilroy): Potentially store language and typeof information in an
// operand rather than having extra bytecodes.
Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -308,8 +408,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
- size_t name_index, int feedback_slot, LanguageMode language_mode) {
+ const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -330,6 +431,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
if (FitsInIdx8Operand(slot_index)) {
Output(Bytecode::kLdaContextSlot, context.ToOperand(),
static_cast<uint8_t>(slot_index));
+ } else if (FitsInIdx16Operand(slot_index)) {
+ Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+ static_cast<uint16_t>(slot_index));
} else {
UNIMPLEMENTED();
}
@@ -343,6 +447,43 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
if (FitsInIdx8Operand(slot_index)) {
Output(Bytecode::kStaContextSlot, context.ToOperand(),
static_cast<uint8_t>(slot_index));
+ } else if (FitsInIdx16Operand(slot_index)) {
+ Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+ static_cast<uint16_t>(slot_index));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
+ const Handle<String> name, TypeofMode typeof_mode) {
+ Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
+ ? Bytecode::kLdaLookupSlotInsideTypeof
+ : Bytecode::kLdaLookupSlot;
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index)) {
+ Output(bytecode, static_cast<uint8_t>(name_index));
+ } else if (FitsInIdx16Operand(name_index)) {
+ Output(BytecodeForWideOperands(bytecode),
+ static_cast<uint16_t>(name_index));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
+ const Handle<String> name, LanguageMode language_mode) {
+ Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
+ if (FitsInIdx8Operand(name_index)) {
+ Output(bytecode, static_cast<uint8_t>(name_index));
+ } else if (FitsInIdx16Operand(name_index)) {
+ Output(BytecodeForWideOperands(bytecode),
+ static_cast<uint16_t>(name_index));
} else {
UNIMPLEMENTED();
}
@@ -351,9 +492,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
- Register object, size_t name_index, int feedback_slot,
+ Register object, const Handle<String> name, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForLoadIC(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -385,9 +527,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
- Register object, size_t name_index, int feedback_slot,
+ Register object, const Handle<String> name, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreIC(language_mode);
+ size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
@@ -421,9 +564,18 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
- PretenureFlag tenured) {
+ Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
+ size_t entry = GetConstantPoolEntry(shared_info);
DCHECK(FitsInImm8Operand(tenured));
- Output(Bytecode::kCreateClosure, static_cast<uint8_t>(tenured));
+ if (FitsInIdx8Operand(entry)) {
+ Output(Bytecode::kCreateClosure, static_cast<uint8_t>(entry),
+ static_cast<uint8_t>(tenured));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(Bytecode::kCreateClosureWide, static_cast<uint16_t>(entry),
+ static_cast<uint8_t>(tenured));
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
@@ -440,10 +592,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
- int literal_index, Register flags) {
- if (FitsInIdx8Operand(literal_index)) {
- Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(literal_index),
- flags.ToOperand());
+ Handle<String> pattern, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t pattern_entry = GetConstantPoolEntry(pattern);
+ if (FitsInIdx8Operand(literal_index) && FitsInIdx8Operand(pattern_entry)) {
+ Output(Bytecode::kCreateRegExpLiteral, static_cast<uint8_t>(pattern_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(pattern_entry)) {
+ Output(Bytecode::kCreateRegExpLiteralWide,
+ static_cast<uint16_t>(pattern_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -452,11 +611,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
- int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bytes.
- if (FitsInIdx8Operand(literal_index)) {
- Output(Bytecode::kCreateArrayLiteral, static_cast<uint8_t>(literal_index),
- static_cast<uint8_t>(flags));
+ Handle<FixedArray> constant_elements, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+ if (FitsInIdx8Operand(literal_index) &&
+ FitsInIdx8Operand(constant_elements_entry)) {
+ Output(Bytecode::kCreateArrayLiteral,
+ static_cast<uint8_t>(constant_elements_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(constant_elements_entry)) {
+ Output(Bytecode::kCreateArrayLiteralWide,
+ static_cast<uint16_t>(constant_elements_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -465,11 +632,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- int literal_index, int flags) {
- DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bytes.
- if (FitsInIdx8Operand(literal_index)) {
- Output(Bytecode::kCreateObjectLiteral, static_cast<uint8_t>(literal_index),
- static_cast<uint8_t>(flags));
+ Handle<FixedArray> constant_properties, int literal_index, int flags) {
+ DCHECK(FitsInImm8Operand(flags)); // Flags should fit in 8 bits.
+ size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
+ if (FitsInIdx8Operand(literal_index) &&
+ FitsInIdx8Operand(constant_properties_entry)) {
+ Output(Bytecode::kCreateObjectLiteral,
+ static_cast<uint8_t>(constant_properties_entry),
+ static_cast<uint8_t>(literal_index), static_cast<uint8_t>(flags));
+ } else if (FitsInIdx16Operand(literal_index) &&
+ FitsInIdx16Operand(constant_properties_entry)) {
+ Output(Bytecode::kCreateObjectLiteralWide,
+ static_cast<uint16_t>(constant_properties_entry),
+ static_cast<uint16_t>(literal_index), static_cast<uint8_t>(flags));
} else {
UNIMPLEMENTED();
}
@@ -491,14 +666,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
bool BytecodeArrayBuilder::NeedToBooleanCast() {
if (!LastBytecodeInSameBlock()) {
- // If the previous bytecode was from a different block return false.
return true;
}
-
- // If the previous bytecode puts a boolean in the accumulator return true.
- switch (Bytecodes::FromByte(bytecodes()->at(last_bytecode_start_))) {
- case Bytecode::kToBoolean:
- UNREACHABLE();
+ PreviousBytecodeHelper previous_bytecode(*this);
+ switch (previous_bytecode.GetBytecode()) {
+ // If the previous bytecode puts a boolean in the accumulator return true.
case Bytecode::kLdaTrue:
case Bytecode::kLdaFalse:
case Bytecode::kLogicalNot:
@@ -520,16 +692,6 @@ bool BytecodeArrayBuilder::NeedToBooleanCast() {
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToBoolean() {
- // If the previous bytecode puts a boolean in the accumulator
- // there is no need to emit an instruction.
- if (NeedToBooleanCast()) {
- Output(Bytecode::kToBoolean);
- }
- return *this;
-}
-
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
Output(Bytecode::kToObject);
return *this;
@@ -537,6 +699,22 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
+ if (LastBytecodeInSameBlock()) {
+ PreviousBytecodeHelper previous_bytecode(*this);
+ switch (previous_bytecode.GetBytecode()) {
+ case Bytecode::kToName:
+ case Bytecode::kTypeOf:
+ return *this;
+ case Bytecode::kLdaConstantWide:
+ case Bytecode::kLdaConstant: {
+ Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
+ if (object->IsName()) return *this;
+ break;
+ }
+ default:
+ break;
+ }
+ }
Output(Bytecode::kToName);
return *this;
}
@@ -594,42 +772,32 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
return Bytecode::kJumpIfUndefinedConstant;
default:
UNREACHABLE();
- return Bytecode::kJumpConstant;
+ return static_cast<Bytecode>(-1);
}
}
-void BytecodeArrayBuilder::PatchJump(
- const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
- int delta = static_cast<int>(jump_target - jump_location);
-
- DCHECK(Bytecodes::IsJump(jump_bytecode));
- DCHECK_EQ(Bytecodes::Size(jump_bytecode), 2);
- DCHECK_NE(delta, 0);
-
- if (FitsInImm8Operand(delta)) {
- // Just update the operand
- jump_location++;
- *jump_location = static_cast<uint8_t>(delta);
- } else {
- // Update the jump type and operand
- size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdx8Operand(entry)) {
- jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- *jump_location++ = Bytecodes::ToByte(jump_bytecode);
- *jump_location = static_cast<uint8_t>(entry);
- } else {
- // TODO(oth): OutputJump should reserve a constant pool entry
- // when jump is written. The reservation should be used here if
- // needed, or cancelled if not. This is due to the patch needing
- // to match the size of the code it's replacing. In future,
- // there will probably be a jump with 32-bit operand for cases
- // when constant pool is full, but that needs to be emitted in
- // OutputJump too.
- UNIMPLEMENTED();
- }
+// static
+Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
+ Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ return Bytecode::kJumpConstantWide;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfTrueConstantWide;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfFalseConstantWide;
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfToBooleanTrueConstantWide;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfToBooleanFalseConstantWide;
+ case Bytecode::kJumpIfNull:
+ return Bytecode::kJumpIfNullConstantWide;
+ case Bytecode::kJumpIfUndefined:
+ return Bytecode::kJumpIfUndefinedConstantWide;
+ default:
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
}
}
@@ -652,6 +820,66 @@ Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
}
+void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ DCHECK_EQ(*operand_location, 0);
+ if (FitsInImm8Operand(delta)) {
+ // The jump fits within the range of an Imm8 operand, so cancel
+ // the reservation and jump directly.
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+ *operand_location = static_cast<uint8_t>(delta);
+ } else {
+ // The jump does not fit within the range of an Imm8 operand, so
+ // commit reservation putting the offset into the constant pool,
+ // and update the jump instruction and operand.
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+ DCHECK(FitsInIdx8Operand(entry));
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ *jump_location = Bytecodes::ToByte(jump_bytecode);
+ *operand_location = static_cast<uint8_t>(entry);
+ }
+}
+
+
+void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
+ DCHECK(Bytecodes::IsJumpConstantWide(Bytecodes::FromByte(*jump_location)));
+ ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+ DCHECK(FitsInIdx16Operand(entry));
+ uint8_t operand_bytes[2];
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+ DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
+ *operand_location++ = operand_bytes[0];
+ *operand_location = operand_bytes[1];
+}
+
+
+void BytecodeArrayBuilder::PatchJump(
+ const ZoneVector<uint8_t>::iterator& jump_target,
+ const ZoneVector<uint8_t>::iterator& jump_location) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ int delta = static_cast<int>(jump_target - jump_location);
+ DCHECK(Bytecodes::IsJump(jump_bytecode));
+ switch (Bytecodes::GetOperandSize(jump_bytecode, 0)) {
+ case OperandSize::kByte:
+ PatchIndirectJumpWith8BitOperand(jump_location, delta);
+ break;
+ case OperandSize::kShort:
+ PatchIndirectJumpWith16BitOperand(jump_location, delta);
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ unbound_jumps_--;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
// Don't emit dead code.
@@ -663,29 +891,48 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
}
- int delta;
if (label->is_bound()) {
// Label has been bound already so this is a backwards jump.
CHECK_GE(bytecodes()->size(), label->offset());
CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
size_t abs_delta = bytecodes()->size() - label->offset();
- delta = -static_cast<int>(abs_delta);
- } else {
- // Label has not yet been bound so this is a forward reference
- // that will be patched when the label is bound.
- label->set_referrer(bytecodes()->size());
- delta = 0;
- }
+ int delta = -static_cast<int>(abs_delta);
- if (FitsInImm8Operand(delta)) {
- Output(jump_bytecode, static_cast<uint8_t>(delta));
- } else {
- size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
- if (FitsInIdx8Operand(entry)) {
- Output(GetJumpWithConstantOperand(jump_bytecode),
- static_cast<uint8_t>(entry));
+ if (FitsInImm8Operand(delta)) {
+ Output(jump_bytecode, static_cast<uint8_t>(delta));
} else {
- UNIMPLEMENTED();
+ size_t entry =
+ GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
+ if (FitsInIdx8Operand(entry)) {
+ Output(GetJumpWithConstantOperand(jump_bytecode),
+ static_cast<uint8_t>(entry));
+ } else if (FitsInIdx16Operand(entry)) {
+ Output(GetJumpWithConstantWideOperand(jump_bytecode),
+ static_cast<uint16_t>(entry));
+ } else {
+ UNREACHABLE();
+ }
+ }
+ } else {
+ // The label has not yet been bound so this is a forward reference
+ // that will be patched when the label is bound. We create a
+ // reservation in the constant pool so the jump can be patched
+ // when the label is bound. The reservation means the maximum size
+ // of the operand for the constant is known and the jump can
+ // be emitted into the bytecode stream with space for the operand.
+ label->set_referrer(bytecodes()->size());
+ unbound_jumps_++;
+ OperandSize reserved_operand_size =
+ constant_array_builder()->CreateReservedEntry();
+ switch (reserved_operand_size) {
+ case OperandSize::kByte:
+ Output(jump_bytecode, 0);
+ break;
+ case OperandSize::kShort:
+ Output(GetJumpWithConstantWideOperand(jump_bytecode), 0);
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
}
}
LeaveBasicBlock();
@@ -733,21 +980,33 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(Register receiver) {
- Output(Bytecode::kForInPrepare, receiver.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
+ Register cache_type, Register cache_array, Register cache_length) {
+ Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
+ cache_array.ToOperand(), cache_length.ToOperand());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register for_in_state,
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
+ Register cache_length) {
+ Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
+ Register cache_type,
+ Register cache_array,
Register index) {
- Output(Bytecode::kForInNext, for_in_state.ToOperand(), index.ToOperand());
+ Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
+ cache_array.ToOperand(), index.ToOperand());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register for_in_state) {
- Output(Bytecode::kForInDone, for_in_state.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
+ Output(Bytecode::kForInStep, index.ToOperand());
return *this;
}
@@ -768,10 +1027,17 @@ void BytecodeArrayBuilder::EnsureReturn() {
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
Register receiver,
- size_t arg_count) {
- if (FitsInIdx8Operand(arg_count)) {
+ size_t arg_count,
+ int feedback_slot) {
+ if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
- static_cast<uint8_t>(arg_count));
+ static_cast<uint8_t>(arg_count),
+ static_cast<uint8_t>(feedback_slot));
+ } else if (FitsInIdx16Operand(arg_count) &&
+ FitsInIdx16Operand(feedback_slot)) {
+ Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
+ static_cast<uint16_t>(arg_count),
+ static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -795,6 +1061,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
+ DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
DCHECK(FitsInIdx16Operand(function_id));
DCHECK(FitsInIdx8Operand(arg_count));
if (!first_arg.is_valid()) {
@@ -807,6 +1074,23 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
+ Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
+ Register first_return) {
+ DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
+ DCHECK(FitsInIdx16Operand(function_id));
+ DCHECK(FitsInIdx8Operand(arg_count));
+ if (!first_arg.is_valid()) {
+ DCHECK_EQ(0u, arg_count);
+ first_arg = Register(0);
+ }
+ Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+ first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
+ first_return.ToOperand());
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
Register receiver,
size_t arg_count) {
@@ -825,23 +1109,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
}
-size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
- // These constants shouldn't be added to the constant pool, the should use
- // specialzed bytecodes instead.
- DCHECK(!object.is_identical_to(isolate_->factory()->undefined_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->null_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->the_hole_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->true_value()));
- DCHECK(!object.is_identical_to(isolate_->factory()->false_value()));
+BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
+ Output(Bytecode::kDeleteLookupSlot);
+ return *this;
+}
- size_t* entry = constants_map_.Find(object);
- if (!entry) {
- entry = constants_map_.Get(object);
- *entry = constants_.size();
- constants_.push_back(object);
- }
- DCHECK(constants_[*entry].is_identical_to(object));
- return *entry;
+
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
+ return constant_array_builder()->Insert(object);
}
@@ -858,6 +1133,28 @@ int BytecodeArrayBuilder::BorrowTemporaryRegister() {
}
+int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
+ int end_index) {
+ auto index = free_temporaries_.lower_bound(start_index);
+ if (index == free_temporaries_.begin()) {
+ // If start_index is the first free register, check for a register
+ // greater than end_index.
+ index = free_temporaries_.upper_bound(end_index);
+ if (index == free_temporaries_.end()) {
+ temporary_register_count_ += 1;
+ return last_temporary_register().index();
+ }
+ } else {
+ // If there is a free register < start_index
+ index--;
+ }
+
+ int retval = *index;
+ free_temporaries_.erase(index);
+ return retval;
+}
+
+
void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
free_temporaries_.erase(reg_index);
@@ -917,12 +1214,28 @@ bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
}
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
+ if (reg.is_function_context() || reg.is_function_closure() ||
+ reg.is_new_target()) {
+ return true;
+ } else if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count_);
+ return parameter_index >= 0 && parameter_index < parameter_count_;
+ } else if (reg.index() < fixed_register_count()) {
+ return true;
+ } else {
+ return TemporaryRegisterIsLive(reg);
+ }
+}
+
+
bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
uint32_t operand_value) const {
OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
switch (operand_type) {
case OperandType::kNone:
return false;
+ case OperandType::kCount16:
case OperandType::kIdx16:
return static_cast<uint16_t>(operand_value) == operand_value;
case OperandType::kCount8:
@@ -934,30 +1247,47 @@ bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
return true;
}
// Fall-through to kReg8 case.
- case OperandType::kReg8: {
- Register reg = Register::FromOperand(static_cast<uint8_t>(operand_value));
- if (reg.is_function_context() || reg.is_function_closure()) {
- return true;
- } else if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count_);
- return parameter_index >= 0 && parameter_index < parameter_count_;
- } else if (reg.index() < fixed_register_count()) {
- return true;
- } else {
- return TemporaryRegisterIsLive(reg);
- }
+ case OperandType::kReg8:
+ return RegisterIsValid(
+ Register::FromOperand(static_cast<uint8_t>(operand_value)));
+ case OperandType::kRegPair8: {
+ Register reg0 =
+ Register::FromOperand(static_cast<uint8_t>(operand_value));
+ Register reg1 = Register(reg0.index() + 1);
+ return RegisterIsValid(reg0) && RegisterIsValid(reg1);
}
+ case OperandType::kReg16:
+ if (bytecode != Bytecode::kExchange &&
+ bytecode != Bytecode::kExchangeWide) {
+ return false;
+ }
+ return RegisterIsValid(
+ Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
}
UNREACHABLE();
return false;
}
+
bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
return last_bytecode_start_ < bytecodes()->size() &&
last_bytecode_start_ >= last_block_end_;
}
+bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
+ if (LastBytecodeInSameBlock()) {
+ PreviousBytecodeHelper previous_bytecode(*this);
+ Bytecode bytecode = previous_bytecode.GetBytecode();
+ if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
+ (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
// static
Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
switch (op) {
@@ -1065,6 +1395,14 @@ Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
return Bytecode::kStaGlobalSloppyWide;
case Bytecode::kStaGlobalStrict:
return Bytecode::kStaGlobalStrictWide;
+ case Bytecode::kLdaLookupSlot:
+ return Bytecode::kLdaLookupSlotWide;
+ case Bytecode::kLdaLookupSlotInsideTypeof:
+ return Bytecode::kLdaLookupSlotInsideTypeofWide;
+ case Bytecode::kStaLookupSlotStrict:
+ return Bytecode::kStaLookupSlotStrictWide;
+ case Bytecode::kStaLookupSlotSloppy:
+ return Bytecode::kStaLookupSlotSloppyWide;
default:
UNREACHABLE();
return static_cast<Bytecode>(-1);
@@ -1177,6 +1515,23 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
// static
+Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
+ LanguageMode language_mode) {
+ switch (language_mode) {
+ case SLOPPY:
+ return Bytecode::kStaLookupSlotSloppy;
+ case STRICT:
+ return Bytecode::kStaLookupSlotStrict;
+ case STRONG:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
+
+
+// static
Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
CreateArgumentsType type) {
switch (type) {
@@ -1221,7 +1576,7 @@ bool BytecodeArrayBuilder::FitsInIdx8Operand(size_t value) {
// static
bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
- return kMinInt8 <= value && value < kMaxInt8;
+ return kMinInt8 <= value && value <= kMaxInt8;
}
@@ -1237,53 +1592,15 @@ bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
}
-TemporaryRegisterScope::TemporaryRegisterScope(BytecodeArrayBuilder* builder)
- : builder_(builder),
- allocated_(builder->zone()),
- next_consecutive_register_(-1),
- next_consecutive_count_(-1) {}
-
-
-TemporaryRegisterScope::~TemporaryRegisterScope() {
- for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
- builder_->ReturnTemporaryRegister(*i);
- }
- allocated_.clear();
-}
-
-
-Register TemporaryRegisterScope::NewRegister() {
- int allocated = builder_->BorrowTemporaryRegister();
- allocated_.push_back(allocated);
- return Register(allocated);
-}
-
-
-bool TemporaryRegisterScope::RegisterIsAllocatedInThisScope(
- Register reg) const {
- for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
- if (*i == reg.index()) return true;
- }
- return false;
-}
-
-
-void TemporaryRegisterScope::PrepareForConsecutiveAllocations(size_t count) {
- if (static_cast<int>(count) > next_consecutive_count_) {
- next_consecutive_register_ =
- builder_->PrepareForConsecutiveTemporaryRegisters(count);
- next_consecutive_count_ = static_cast<int>(count);
- }
+// static
+bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
+ return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
}
-Register TemporaryRegisterScope::NextConsecutiveRegister() {
- DCHECK_GE(next_consecutive_register_, 0);
- DCHECK_GT(next_consecutive_count_, 0);
- builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
- allocated_.push_back(next_consecutive_register_);
- next_consecutive_count_--;
- return Register(next_consecutive_register_++);
+// static
+bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
+ return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index b766ccd4a6..7c23dc3f22 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -5,12 +5,9 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
-#include <vector>
-
-#include "src/ast.h"
-#include "src/identity-map.h"
+#include "src/ast/ast.h"
#include "src/interpreter/bytecodes.h"
-#include "src/zone.h"
+#include "src/interpreter/constant-array-builder.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -21,15 +18,18 @@ class Isolate;
namespace interpreter {
class BytecodeLabel;
+class ConstantArrayBuilder;
class Register;
// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
// when rest parameters implementation has settled down.
enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
-class BytecodeArrayBuilder {
+class BytecodeArrayBuilder final {
public:
BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+ ~BytecodeArrayBuilder();
+
Handle<BytecodeArray> ToBytecodeArray();
// Set the number of parameters expected by function.
@@ -68,9 +68,6 @@ class BytecodeArrayBuilder {
// Return true if the register |reg| represents a temporary register.
bool RegisterIsTemporary(Register reg) const;
- // Gets a constant pool entry for the |object|.
- size_t GetConstantPoolEntry(Handle<Object> object);
-
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
@@ -79,12 +76,14 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& LoadTheHole();
BytecodeArrayBuilder& LoadTrue();
BytecodeArrayBuilder& LoadFalse();
+ BytecodeArrayBuilder& LoadBooleanConstant(bool value);
// Global loads to the accumulator and stores from the accumulator.
- BytecodeArrayBuilder& LoadGlobal(size_t name_index, int feedback_slot,
+ BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
LanguageMode language_mode,
TypeofMode typeof_mode);
- BytecodeArrayBuilder& StoreGlobal(size_t name_index, int feedback_slot,
+ BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
+ int feedback_slot,
LanguageMode language_mode);
// Load the object at |slot_index| in |context| into the accumulator.
@@ -97,8 +96,13 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
+ // Register-register transfer.
+ BytecodeArrayBuilder& MoveRegister(Register from, Register to);
+ BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
+
// Named load property.
- BytecodeArrayBuilder& LoadNamedProperty(Register object, size_t name_index,
+ BytecodeArrayBuilder& LoadNamedProperty(Register object,
+ const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
// Keyed load property. The key should be in the accumulator.
@@ -106,23 +110,36 @@ class BytecodeArrayBuilder {
LanguageMode language_mode);
// Store properties. The value to be stored should be in the accumulator.
- BytecodeArrayBuilder& StoreNamedProperty(Register object, size_t name_index,
+ BytecodeArrayBuilder& StoreNamedProperty(Register object,
+ const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
int feedback_slot,
LanguageMode language_mode);
- // Create a new closure for the SharedFunctionInfo in the accumulator.
- BytecodeArrayBuilder& CreateClosure(PretenureFlag tenured);
+ // Lookup the variable with |name|.
+ BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
+ TypeofMode typeof_mode);
+
+ // Store value in the accumulator into the variable with |name|.
+ BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
+ LanguageMode language_mode);
+
+ // Create a new closure for the SharedFunctionInfo.
+ BytecodeArrayBuilder& CreateClosure(Handle<SharedFunctionInfo> shared_info,
+ PretenureFlag tenured);
// Create a new arguments object in the accumulator.
BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
// Literals creation. Constant elements should be in the accumulator.
- BytecodeArrayBuilder& CreateRegExpLiteral(int literal_index, Register flags);
- BytecodeArrayBuilder& CreateArrayLiteral(int literal_index, int flags);
- BytecodeArrayBuilder& CreateObjectLiteral(int literal_index, int flags);
+ BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
+ int literal_index, int flags);
+ BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+ int literal_index, int flags);
+ BytecodeArrayBuilder& CreateObjectLiteral(
+ Handle<FixedArray> constant_properties, int literal_index, int flags);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -136,7 +153,7 @@ class BytecodeArrayBuilder {
// arguments should be in registers <receiver + 1> to
// <receiver + 1 + arg_count>.
BytecodeArrayBuilder& Call(Register callable, Register receiver,
- size_t arg_count);
+ size_t arg_count, int feedback_slot);
// Call the new operator. The |constructor| register is followed by
// |arg_count| consecutive registers containing arguments to be
@@ -150,6 +167,14 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
Register first_arg, size_t arg_count);
+ // Call the runtime function with |function_id| that returns a pair of values.
+ // The first argument should be in |first_arg| and all subsequent arguments
+ // should be in registers <first_arg + 1> to <first_arg + 1 + arg_count>. The
+ // return values will be returned in <first_return> and <first_return + 1>.
+ BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
+ Register first_arg, size_t arg_count,
+ Register first_return);
+
// Call the JS runtime function with |context_index|. The the receiver should
// be in |receiver| and all subsequent arguments should be in registers
// <receiver + 1> to <receiver + 1 + arg_count>.
@@ -170,6 +195,7 @@ class BytecodeArrayBuilder {
// Deletes property from an object. This expects that accumulator contains
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
+ BytecodeArrayBuilder& DeleteLookupSlot();
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
@@ -195,9 +221,12 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& Return();
// Complex flow control.
- BytecodeArrayBuilder& ForInPrepare(Register receiver);
- BytecodeArrayBuilder& ForInNext(Register for_in_state, Register index);
- BytecodeArrayBuilder& ForInDone(Register for_in_state);
+ BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
+ Register cache_length);
+ BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
+ BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
+ Register cache_array, Register index);
+ BytecodeArrayBuilder& ForInStep(Register index);
// Accessors
Zone* zone() const { return zone_; }
@@ -206,6 +235,12 @@ class BytecodeArrayBuilder {
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
Isolate* isolate() const { return isolate_; }
+ ConstantArrayBuilder* constant_array_builder() {
+ return &constant_array_builder_;
+ }
+ const ConstantArrayBuilder* constant_array_builder() const {
+ return &constant_array_builder_;
+ }
static Bytecode BytecodeForBinaryOperation(Token::Value op);
static Bytecode BytecodeForCountOperation(Token::Value op);
@@ -218,6 +253,7 @@ class BytecodeArrayBuilder {
static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
TypeofMode typeof_mode);
static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
static Bytecode BytecodeForDelete(LanguageMode language_mode);
@@ -226,12 +262,20 @@ class BytecodeArrayBuilder {
static bool FitsInImm8Operand(int value);
static bool FitsInIdx16Operand(int value);
static bool FitsInIdx16Operand(size_t value);
+ static bool FitsInReg8Operand(Register value);
+ static bool FitsInReg16Operand(Register value);
- static Bytecode GetJumpWithConstantOperand(Bytecode jump_with_smi8_operand);
- static Bytecode GetJumpWithToBoolean(Bytecode jump);
+ static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
+ static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
+ static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
+
+ Register MapRegister(Register reg);
+ Register MapRegisters(Register reg, Register args_base, int args_length = 1);
template <size_t N>
- INLINE(void Output(Bytecode bytecode, uint32_t(&oprands)[N]));
+ INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3);
void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2);
void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
@@ -241,7 +285,11 @@ class BytecodeArrayBuilder {
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
- ZoneVector<uint8_t>::iterator jump_location);
+ const ZoneVector<uint8_t>::iterator& jump_location);
+ void PatchIndirectJumpWith8BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+ void PatchIndirectJumpWith16BitOperand(
+ const ZoneVector<uint8_t>::iterator& jump_location, int delta);
void LeaveBasicBlock();
void EnsureReturn();
@@ -251,8 +299,13 @@ class BytecodeArrayBuilder {
bool LastBytecodeInSameBlock() const;
bool NeedToBooleanCast();
+ bool IsRegisterInAccumulator(Register reg);
+ bool RegisterIsValid(Register reg) const;
+
+ // Temporary register management.
int BorrowTemporaryRegister();
+ int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
void ReturnTemporaryRegister(int reg_index);
int PrepareForConsecutiveTemporaryRegisters(size_t count);
void BorrowConsecutiveTemporaryRegister(int reg_index);
@@ -261,25 +314,28 @@ class BytecodeArrayBuilder {
Register first_temporary_register() const;
Register last_temporary_register() const;
+ // Gets a constant pool entry for the |object|.
+ size_t GetConstantPoolEntry(Handle<Object> object);
+
Isolate* isolate_;
Zone* zone_;
ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
+ ConstantArrayBuilder constant_array_builder_;
size_t last_block_end_;
size_t last_bytecode_start_;
bool exit_seen_in_block_;
-
- IdentityMap<size_t> constants_map_;
- ZoneVector<Handle<Object>> constants_;
+ int unbound_jumps_;
int parameter_count_;
int local_register_count_;
int context_register_count_;
int temporary_register_count_;
-
ZoneSet<int> free_temporaries_;
- friend class TemporaryRegisterScope;
+ class PreviousBytecodeHelper;
+ friend class BytecodeRegisterAllocator;
+
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
@@ -292,22 +348,24 @@ class BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
- INLINE(bool is_bound() const) { return bound_; }
+ bool is_bound() const { return bound_; }
+ size_t offset() const { return offset_; }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
- INLINE(void bind_to(size_t offset)) {
+ void bind_to(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset);
offset_ = offset;
bound_ = true;
}
- INLINE(void set_referrer(size_t offset)) {
+
+ void set_referrer(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
offset_ = offset;
}
- INLINE(size_t offset() const) { return offset_; }
- INLINE(bool is_forward_target() const) {
+
+ bool is_forward_target() const {
return offset() != kInvalidOffset && !is_bound();
}
@@ -322,36 +380,6 @@ class BytecodeLabel final {
friend class BytecodeArrayBuilder;
};
-
-// A stack-allocated class than allows the instantiator to allocate
-// temporary registers that are cleaned up when scope is closed.
-// TODO(oth): Deprecate TemporaryRegisterScope use. Code should be
-// using result scopes as far as possible.
-class TemporaryRegisterScope {
- public:
- explicit TemporaryRegisterScope(BytecodeArrayBuilder* builder);
- ~TemporaryRegisterScope();
- Register NewRegister();
-
- void PrepareForConsecutiveAllocations(size_t count);
- Register NextConsecutiveRegister();
-
- bool RegisterIsAllocatedInThisScope(Register reg) const;
-
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
-
- BytecodeArrayBuilder* builder_;
- const TemporaryRegisterScope* outer_;
- ZoneVector<int> allocated_;
- int next_consecutive_register_;
- int next_consecutive_count_;
-
- DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterScope);
-};
-
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index b84215660e..d09d72f01a 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -32,6 +32,11 @@ Bytecode BytecodeArrayIterator::current_bytecode() const {
}
+int BytecodeArrayIterator::current_bytecode_size() const {
+ return Bytecodes::Size(current_bytecode());
+}
+
+
uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
OperandType operand_type) const {
DCHECK_GE(operand_index, 0);
@@ -60,17 +65,21 @@ int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
- uint32_t operand = GetRawOperand(operand_index, OperandType::kCount8);
+ OperandSize size =
+ Bytecodes::GetOperandSize(current_bytecode(), operand_index);
+ OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
+ : OperandType::kCount16;
+ uint32_t operand = GetRawOperand(operand_index, type);
return static_cast<int>(operand);
}
int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
- OperandSize size =
- Bytecodes::GetOperandSize(current_bytecode(), operand_index);
- OperandType type =
- (size == OperandSize::kByte) ? OperandType::kIdx8 : OperandType::kIdx16;
- uint32_t operand = GetRawOperand(operand_index, type);
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kIdx8 ||
+ operand_type == OperandType::kIdx16);
+ uint32_t operand = GetRawOperand(operand_index, operand_type);
return static_cast<int>(operand);
}
@@ -79,7 +88,9 @@ Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
DCHECK(operand_type == OperandType::kReg8 ||
- operand_type == OperandType::kMaybeReg8);
+ operand_type == OperandType::kRegPair8 ||
+ operand_type == OperandType::kMaybeReg8 ||
+ operand_type == OperandType::kReg16);
uint32_t operand = GetRawOperand(operand_index, operand_type);
return Register::FromOperand(operand);
}
@@ -91,6 +102,22 @@ Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
return FixedArray::get(constants, GetIndexOperand(operand_index));
}
+
+int BytecodeArrayIterator::GetJumpTargetOffset() const {
+ Bytecode bytecode = current_bytecode();
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+ int relative_offset = GetImmediateOperand(0);
+ return current_offset() + relative_offset;
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode) ||
+ interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+ Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+ return current_offset() + smi->value();
+ } else {
+ UNREACHABLE();
+ return kMinInt;
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 31e237f098..e67fa974bd 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -20,6 +20,7 @@ class BytecodeArrayIterator {
void Advance();
bool done() const;
Bytecode current_bytecode() const;
+ int current_bytecode_size() const;
int current_offset() const { return bytecode_offset_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
@@ -35,6 +36,11 @@ class BytecodeArrayIterator {
// typed versions above which cast the return to an appropriate type.
uint32_t GetRawOperand(int operand_index, OperandType operand_type) const;
+ // Returns the absolute offset of the branch target at the current
+ // bytecode. It is an error to call this method if the bytecode is
+ // not for a jump or conditional jump.
+ int GetJumpTargetOffset() const;
+
private:
Handle<BytecodeArray> bytecode_array_;
int bytecode_offset_;
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 02061a7514..959e155149 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,12 +4,13 @@
#include "src/interpreter/bytecode-generator.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
+#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
-#include "src/parser.h"
-#include "src/scopes.h"
-#include "src/token.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -104,65 +105,65 @@ class BytecodeGenerator::ControlScope BASE_EMBEDDED {
};
-// Scoped class for enabling 'break' and 'continue' in iteration
-// constructs, e.g. do...while, while..., for...
-class BytecodeGenerator::ControlScopeForIteration
+// Scoped class for enabling break inside blocks and switch blocks.
+class BytecodeGenerator::ControlScopeForBreakable final
: public BytecodeGenerator::ControlScope {
public:
- ControlScopeForIteration(BytecodeGenerator* generator,
- IterationStatement* statement,
- LoopBuilder* loop_builder)
+ ControlScopeForBreakable(BytecodeGenerator* generator,
+ BreakableStatement* statement,
+ BreakableControlFlowBuilder* control_builder)
: ControlScope(generator),
statement_(statement),
- loop_builder_(loop_builder) {}
+ control_builder_(control_builder) {}
protected:
virtual bool Execute(Command command, Statement* statement) {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
- loop_builder_->Break();
+ control_builder_->Break();
return true;
case CMD_CONTINUE:
- loop_builder_->Continue();
- return true;
+ break;
}
return false;
}
private:
Statement* statement_;
- LoopBuilder* loop_builder_;
+ BreakableControlFlowBuilder* control_builder_;
};
-// Scoped class for enabling 'break' in switch statements.
-class BytecodeGenerator::ControlScopeForSwitch
+// Scoped class for enabling 'break' and 'continue' in iteration
+// constructs, e.g. do...while, while..., for...
+class BytecodeGenerator::ControlScopeForIteration final
: public BytecodeGenerator::ControlScope {
public:
- ControlScopeForSwitch(BytecodeGenerator* generator,
- SwitchStatement* statement,
- SwitchBuilder* switch_builder)
+ ControlScopeForIteration(BytecodeGenerator* generator,
+ IterationStatement* statement,
+ LoopBuilder* loop_builder)
: ControlScope(generator),
statement_(statement),
- switch_builder_(switch_builder) {}
+ loop_builder_(loop_builder) {}
protected:
virtual bool Execute(Command command, Statement* statement) {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
- switch_builder_->Break();
+ loop_builder_->Break();
return true;
case CMD_CONTINUE:
- break;
+ loop_builder_->Continue();
+ return true;
}
return false;
}
private:
Statement* statement_;
- SwitchBuilder* switch_builder_;
+ LoopBuilder* loop_builder_;
};
@@ -177,6 +178,63 @@ void BytecodeGenerator::ControlScope::PerformCommand(Command command,
}
+class BytecodeGenerator::RegisterAllocationScope {
+ public:
+ explicit RegisterAllocationScope(BytecodeGenerator* generator)
+ : generator_(generator),
+ outer_(generator->register_allocator()),
+ allocator_(builder()) {
+ generator_->set_register_allocator(this);
+ }
+
+ virtual ~RegisterAllocationScope() {
+ generator_->set_register_allocator(outer_);
+ }
+
+ Register NewRegister() {
+ RegisterAllocationScope* current_scope = generator()->register_allocator();
+ if ((current_scope == this) ||
+ (current_scope->outer() == this &&
+ !current_scope->allocator_.HasConsecutiveAllocations())) {
+ // Regular case - Allocating registers in current or outer context.
+ // VisitForRegisterValue allocates register in outer context.
+ return allocator_.NewRegister();
+ } else {
+ // If it is required to allocate a register other than current or outer
+ // scopes, allocate a new temporary register. It might be expensive to
+ // walk the full context chain and compute the list of consecutive
+ // reservations in the inner scopes.
+ UNIMPLEMENTED();
+ return Register(-1);
+ }
+ }
+
+ void PrepareForConsecutiveAllocations(size_t count) {
+ allocator_.PrepareForConsecutiveAllocations(count);
+ }
+
+ Register NextConsecutiveRegister() {
+ return allocator_.NextConsecutiveRegister();
+ }
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const {
+ return allocator_.RegisterIsAllocatedInThisScope(reg);
+ }
+
+ RegisterAllocationScope* outer() const { return outer_; }
+
+ private:
+ BytecodeGenerator* generator() const { return generator_; }
+ BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+ BytecodeGenerator* generator_;
+ RegisterAllocationScope* outer_;
+ BytecodeRegisterAllocator allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
+};
+
+
// Scoped base class for determining where the result of an expression
// is stored.
class BytecodeGenerator::ExpressionResultScope {
@@ -185,7 +243,7 @@ class BytecodeGenerator::ExpressionResultScope {
: generator_(generator),
kind_(kind),
outer_(generator->execution_result()),
- allocator_(builder()),
+ allocator_(generator),
result_identified_(false) {
generator_->set_execution_result(this);
}
@@ -201,21 +259,11 @@ class BytecodeGenerator::ExpressionResultScope {
virtual void SetResultInAccumulator() = 0;
virtual void SetResultInRegister(Register reg) = 0;
- BytecodeGenerator* generator() const { return generator_; }
- BytecodeArrayBuilder* builder() const { return generator()->builder(); }
+ protected:
ExpressionResultScope* outer() const { return outer_; }
+ BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+ const RegisterAllocationScope* allocator() const { return &allocator_; }
- Register NewRegister() { return allocator_.NewRegister(); }
-
- void PrepareForConsecutiveAllocations(size_t count) {
- allocator_.PrepareForConsecutiveAllocations(count);
- }
-
- Register NextConsecutiveRegister() {
- return allocator_.NextConsecutiveRegister();
- }
-
- protected:
void set_result_identified() {
DCHECK(!result_identified());
result_identified_ = true;
@@ -223,13 +271,11 @@ class BytecodeGenerator::ExpressionResultScope {
bool result_identified() const { return result_identified_; }
- const TemporaryRegisterScope* allocator() const { return &allocator_; }
-
private:
BytecodeGenerator* generator_;
Expression::Context kind_;
ExpressionResultScope* outer_;
- TemporaryRegisterScope allocator_;
+ RegisterAllocationScope allocator_;
bool result_identified_;
DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
@@ -277,7 +323,7 @@ class BytecodeGenerator::RegisterResultScope final
: ExpressionResultScope(generator, Expression::kValue) {}
virtual void SetResultInAccumulator() {
- result_register_ = outer()->NewRegister();
+ result_register_ = allocator()->outer()->NewRegister();
builder()->StoreAccumulatorInRegister(result_register_);
set_result_identified();
}
@@ -307,15 +353,11 @@ BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
- binary_expression_depth_(0),
- binary_expression_hazard_set_(zone) {
+ register_allocator_(nullptr) {
InitializeAstVisitor(isolate);
}
-BytecodeGenerator::~BytecodeGenerator() {}
-
-
Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
set_info(info);
set_scope(info->scope());
@@ -348,6 +390,12 @@ void BytecodeGenerator::MakeBytecodeBody() {
// Build the arguments object if it is used.
VisitArgumentsObject(scope()->arguments());
+ // TODO(mythria): Build rest arguments array if it is used.
+ int rest_index;
+ if (scope()->rest_parameter(&rest_index)) {
+ UNIMPLEMENTED();
+ }
+
// Build assignment to {.this_function} variable if it is used.
VisitThisFunctionVariable(scope()->this_function_var());
@@ -374,6 +422,9 @@ void BytecodeGenerator::MakeBytecodeBody() {
void BytecodeGenerator::VisitBlock(Block* stmt) {
+ BlockBuilder block_builder(this->builder());
+ ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+
if (stmt->scope() == NULL) {
// Visit statements in the same scope, no declarations.
VisitStatements(stmt->statements());
@@ -389,6 +440,7 @@ void BytecodeGenerator::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
}
+ if (stmt->labels() != nullptr) block_builder.EndBlock();
}
@@ -479,6 +531,7 @@ void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
void BytecodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
+ RegisterAllocationScope register_scope(this);
DCHECK(globals()->empty());
AstVisitor::VisitDeclarations(declarations);
if (globals()->empty()) return;
@@ -490,12 +543,11 @@ void BytecodeGenerator::VisitDeclarations(
DeclareGlobalsNativeFlag::encode(info()->is_native()) |
DeclareGlobalsLanguageMode::encode(language_mode());
- TemporaryRegisterScope temporary_register_scope(builder());
- Register pairs = temporary_register_scope.NewRegister();
+ Register pairs = register_allocator()->NewRegister();
builder()->LoadLiteral(data);
builder()->StoreAccumulatorInRegister(pairs);
- Register flags = temporary_register_scope.NewRegister();
+ Register flags = register_allocator()->NewRegister();
builder()->LoadLiteral(Smi::FromInt(encoded_flags));
builder()->StoreAccumulatorInRegister(flags);
DCHECK(flags.index() == pairs.index() + 1);
@@ -505,9 +557,18 @@ void BytecodeGenerator::VisitDeclarations(
}
+void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ // Allocate an outer register allocations scope for the statement.
+ RegisterAllocationScope allocation_scope(this);
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
+ }
+}
+
+
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
- // TODO(rmcilroy): Replace this with a StatementResultScope when it exists.
- EffectResultScope effect_scope(this);
VisitForEffect(stmt->expression());
}
@@ -519,14 +580,17 @@ void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
BytecodeLabel else_label, end_label;
if (stmt->condition()->ToBooleanIsTrue()) {
- // Generate only then block.
+ // Generate then block unconditionally as always true.
Visit(stmt->then_statement());
} else if (stmt->condition()->ToBooleanIsFalse()) {
- // Generate only else block if it exists.
+ // Generate else block unconditionally if it exists.
if (stmt->HasElseStatement()) {
Visit(stmt->else_statement());
}
} else {
+ // TODO(oth): If then statement is BreakStatement or
+ // ContinueStatement we can reduce number of generated
+ // jump/jump_ifs here. See BasicLoops test.
VisitForAccumulatorValue(stmt->condition());
builder()->JumpIfFalse(&else_label);
Visit(stmt->then_statement());
@@ -559,7 +623,6 @@ void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- EffectResultScope effect_scope(this);
VisitForAccumulatorValue(stmt->expression());
builder()->Return();
}
@@ -571,9 +634,11 @@ void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ // We need this scope because we visit for register values. We have to
+ // maintain an execution result scope where registers can be allocated.
ZoneList<CaseClause*>* clauses = stmt->cases();
SwitchBuilder switch_builder(builder(), clauses->length());
- ControlScopeForSwitch scope(this, stmt, &switch_builder);
+ ControlScopeForBreakable scope(this, stmt, &switch_builder);
int default_index = -1;
// Keep the switch value in a register until a case matches.
@@ -627,96 +692,70 @@ void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder());
ControlScopeForIteration execution_control(this, stmt, &loop_builder);
- BytecodeLabel body_label, condition_label, done_label;
-
+ loop_builder.LoopHeader();
if (stmt->cond()->ToBooleanIsFalse()) {
Visit(stmt->body());
- // Bind condition_label and done_label for processing continue and break.
- builder()->Bind(&condition_label);
- builder()->Bind(&done_label);
+ loop_builder.Condition();
+ } else if (stmt->cond()->ToBooleanIsTrue()) {
+ loop_builder.Condition();
+ Visit(stmt->body());
+ loop_builder.JumpToHeader();
} else {
- builder()->Bind(&body_label);
Visit(stmt->body());
-
- builder()->Bind(&condition_label);
- if (stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&body_label);
- } else {
- VisitForAccumulatorValue(stmt->cond());
- builder()->JumpIfTrue(&body_label);
- }
- builder()->Bind(&done_label);
+ loop_builder.Condition();
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.JumpToHeaderIfTrue();
}
- loop_builder.SetBreakTarget(done_label);
- loop_builder.SetContinueTarget(condition_label);
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
- LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
- BytecodeLabel body_label, condition_label, done_label;
if (stmt->cond()->ToBooleanIsFalse()) {
- // If the condition is false there is no need to generating the loop.
+ // If the condition is false there is no need to generate the loop.
return;
}
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
if (!stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&condition_label);
- }
- builder()->Bind(&body_label);
- Visit(stmt->body());
-
- builder()->Bind(&condition_label);
- if (stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&body_label);
- } else {
VisitForAccumulatorValue(stmt->cond());
- builder()->JumpIfTrue(&body_label);
+ loop_builder.BreakIfFalse();
}
- builder()->Bind(&done_label);
-
- loop_builder.SetBreakTarget(done_label);
- loop_builder.SetContinueTarget(condition_label);
+ Visit(stmt->body());
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
- LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
if (stmt->init() != nullptr) {
Visit(stmt->init());
}
-
if (stmt->cond() && stmt->cond()->ToBooleanIsFalse()) {
// If the condition is known to be false there is no need to generate
// body, next or condition blocks. Init block should be generated.
return;
}
- BytecodeLabel body_label, condition_label, next_label, done_label;
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration execution_control(this, stmt, &loop_builder);
+
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
- builder()->Jump(&condition_label);
+ VisitForAccumulatorValue(stmt->cond());
+ loop_builder.BreakIfFalse();
}
- builder()->Bind(&body_label);
Visit(stmt->body());
- builder()->Bind(&next_label);
if (stmt->next() != nullptr) {
+ loop_builder.Next();
Visit(stmt->next());
}
- if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
- builder()->Bind(&condition_label);
- VisitForAccumulatorValue(stmt->cond());
- builder()->JumpIfTrue(&body_label);
- } else {
- builder()->Jump(&body_label);
- }
- builder()->Bind(&done_label);
-
- loop_builder.SetBreakTarget(done_label);
- loop_builder.SetContinueTarget(next_label);
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
}
@@ -735,19 +774,19 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- TemporaryRegisterScope temporary_register_scope(builder());
- Register value = temporary_register_scope.NewRegister();
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
Register object = VisitForRegisterValue(property->obj());
- size_t name_index = builder()->GetConstantPoolEntry(
- property->key()->AsLiteral()->AsPropertyName());
- builder()->StoreNamedProperty(object, name_index, feedback_index(slot),
+ Handle<String> name = property->key()->AsLiteral()->AsPropertyName();
+ builder()->LoadAccumulatorWithRegister(value);
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
break;
}
case KEYED_PROPERTY: {
- TemporaryRegisterScope temporary_register_scope(builder());
- Register value = temporary_register_scope.NewRegister();
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
Register object = VisitForRegisterValue(property->obj());
Register key = VisitForRegisterValue(property->key());
@@ -764,12 +803,6 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
- // TODO(oth): For now we need a parent scope for paths that end up
- // in VisitLiteral which can allocate in the parent scope. A future
- // CL in preparation will add a StatementResultScope that will
- // remove the need for this EffectResultScope.
- EffectResultScope result_scope(this);
-
if (stmt->subject()->IsNullLiteral() ||
stmt->subject()->IsUndefinedLiteral(isolate())) {
// ForIn generates lots of code, skip if it wouldn't produce any effects.
@@ -778,58 +811,43 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
LoopBuilder loop_builder(builder());
ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+ BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
// Prepare the state for executing ForIn.
VisitForAccumulatorValue(stmt->subject());
- loop_builder.BreakIfUndefined();
- loop_builder.BreakIfNull();
-
- Register receiver = execution_result()->NewRegister();
+ builder()->JumpIfUndefined(&subject_undefined_label);
+ builder()->JumpIfNull(&subject_null_label);
+ Register receiver = register_allocator()->NewRegister();
builder()->CastAccumulatorToJSObject();
+ builder()->JumpIfNull(&not_object_label);
builder()->StoreAccumulatorInRegister(receiver);
- builder()->CallRuntime(Runtime::kGetPropertyNamesFast, receiver, 1);
- builder()->ForInPrepare(receiver);
- loop_builder.BreakIfUndefined();
+ Register cache_type = register_allocator()->NewRegister();
+ Register cache_array = register_allocator()->NewRegister();
+ Register cache_length = register_allocator()->NewRegister();
+ builder()->ForInPrepare(cache_type, cache_array, cache_length);
- Register for_in_state = execution_result()->NewRegister();
- builder()->StoreAccumulatorInRegister(for_in_state);
-
- // The loop.
- BytecodeLabel condition_label, break_label, continue_label;
- Register index = receiver; // Re-using register as receiver no longer used.
+ // Set up loop counter
+ Register index = register_allocator()->NewRegister();
builder()->LoadLiteral(Smi::FromInt(0));
+ builder()->StoreAccumulatorInRegister(index);
- // Check loop termination (accumulator holds index).
- builder()
- ->Bind(&condition_label)
- .StoreAccumulatorInRegister(index)
- .ForInDone(for_in_state);
+ // The loop
+ loop_builder.LoopHeader();
+ loop_builder.Condition();
+ builder()->ForInDone(index, cache_length);
loop_builder.BreakIfTrue();
-
- // Get the next item.
- builder()->ForInNext(for_in_state, index);
-
- // Start again if the item, currently in the accumulator, is undefined.
+ builder()->ForInNext(receiver, cache_type, cache_array, index);
loop_builder.ContinueIfUndefined();
-
- // Store the value in the each variable.
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
- // NB the user's loop variable will be assigned the value of each so
- // even an empty body will have this assignment.
Visit(stmt->body());
-
- // Increment the index and start loop again.
- builder()
- ->Bind(&continue_label)
- .LoadAccumulatorWithRegister(index)
- .CountOperation(Token::Value::ADD, language_mode_strength())
- .Jump(&condition_label);
-
- // End of the loop.
- builder()->Bind(&break_label);
-
- loop_builder.SetBreakTarget(break_label);
- loop_builder.SetContinueTarget(continue_label);
+ loop_builder.Next();
+ builder()->ForInStep(index);
+ builder()->StoreAccumulatorInRegister(index);
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
+ builder()->Bind(&not_object_label);
+ builder()->Bind(&subject_null_label);
+ builder()->Bind(&subject_undefined_label);
}
@@ -867,10 +885,8 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Handle<SharedFunctionInfo> shared_info =
Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
CHECK(!shared_info.is_null()); // TODO(rmcilroy): Set stack overflow?
-
- builder()
- ->LoadLiteral(shared_info)
- .CreateClosure(expr->pretenure() ? TENURED : NOT_TENURED);
+ builder()->CreateClosure(shared_info,
+ expr->pretenure() ? TENURED : NOT_TENURED);
execution_result()->SetResultInAccumulator();
}
@@ -937,24 +953,17 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Materialize a regular expression literal.
- TemporaryRegisterScope temporary_register_scope(builder());
- Register flags = temporary_register_scope.NewRegister();
- builder()
- ->LoadLiteral(expr->flags())
- .StoreAccumulatorInRegister(flags)
- .LoadLiteral(expr->pattern())
- .CreateRegExpLiteral(expr->literal_index(), flags);
+ builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
+ expr->flags());
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Deep-copy the literal boilerplate.
- builder()
- ->LoadLiteral(expr->constant_properties())
- .CreateObjectLiteral(expr->literal_index(), expr->ComputeFlags(true));
-
- TemporaryRegisterScope temporary_register_scope(builder());
+ builder()->CreateObjectLiteral(expr->constant_properties(),
+ expr->literal_index(),
+ expr->ComputeFlags(true));
Register literal;
// Store computed values into the literal.
@@ -962,17 +971,17 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
int property_index = 0;
AccessorTable accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
- TemporaryRegisterScope inner_temporary_register_scope(builder());
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
if (literal_in_accumulator) {
- literal = temporary_register_scope.NewRegister();
+ literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
+ RegisterAllocationScope inner_register_scope(this);
Literal* literal_key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -985,26 +994,21 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// contains computed properties with an uninitialized value.
if (literal_key->value()->IsInternalizedString()) {
if (property->emit_store()) {
- size_t name_index =
- builder()->GetConstantPoolEntry(literal_key->AsPropertyName());
VisitForAccumulatorValue(property->value());
- builder()->StoreNamedProperty(literal, name_index,
- feedback_index(property->GetSlot(0)),
- language_mode());
+ builder()->StoreNamedProperty(
+ literal, literal_key->AsPropertyName(),
+ feedback_index(property->GetSlot(0)), language_mode());
} else {
VisitForEffect(property->value());
}
} else {
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(3);
- Register key =
- inner_temporary_register_scope.NextConsecutiveRegister();
- Register value =
- inner_temporary_register_scope.NextConsecutiveRegister();
- Register language =
- inner_temporary_register_scope.NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register language = register_allocator()->NextConsecutiveRegister();
// TODO(oth): This is problematic - can't assume contiguous here.
- // literal is allocated in temporary_register_scope, whereas
- // key, value, language are in another.
+ // literal is allocated in outer register scope, whereas key, value,
+ // language are in another.
DCHECK(Register::AreContiguous(literal, key, value, language));
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key);
@@ -1021,10 +1025,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(1);
+ register_allocator()->PrepareForConsecutiveAllocations(1);
DCHECK(property->emit_store());
- Register value =
- inner_temporary_register_scope.NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
DCHECK(Register::AreContiguous(literal, value));
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value).CallRuntime(
@@ -1048,12 +1051,12 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
- TemporaryRegisterScope inner_temporary_register_scope(builder());
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(4);
- Register name = inner_temporary_register_scope.NextConsecutiveRegister();
- Register getter = inner_temporary_register_scope.NextConsecutiveRegister();
- Register setter = inner_temporary_register_scope.NextConsecutiveRegister();
- Register attr = inner_temporary_register_scope.NextConsecutiveRegister();
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register getter = register_allocator()->NextConsecutiveRegister();
+ Register setter = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
VisitForAccumulatorValue(it->first);
builder()->StoreAccumulatorInRegister(name);
@@ -1075,19 +1078,17 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// compile them into a series of "SetOwnProperty" runtime calls. This will
// preserve insertion order.
for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
if (literal_in_accumulator) {
- temporary_register_scope.PrepareForConsecutiveAllocations(4);
- literal = temporary_register_scope.NextConsecutiveRegister();
+ literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ RegisterAllocationScope inner_register_scope(this);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(property->emit_store());
- TemporaryRegisterScope inner_temporary_register_scope(builder());
- Register value = inner_temporary_register_scope.NewRegister();
+ Register value = register_allocator()->NewRegister();
DCHECK(Register::AreContiguous(literal, value));
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value).CallRuntime(
@@ -1095,11 +1096,10 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
continue;
}
- TemporaryRegisterScope inner_temporary_register_scope(builder());
- inner_temporary_register_scope.PrepareForConsecutiveAllocations(3);
- Register key = inner_temporary_register_scope.NextConsecutiveRegister();
- Register value = inner_temporary_register_scope.NextConsecutiveRegister();
- Register attr = inner_temporary_register_scope.NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
DCHECK(Register::AreContiguous(literal, key, value, attr));
VisitForAccumulatorValue(property->key());
@@ -1144,11 +1144,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
- builder()
- ->LoadLiteral(expr->constant_elements())
- .CreateArrayLiteral(expr->literal_index(), expr->ComputeFlags(true));
-
- TemporaryRegisterScope temporary_register_scope(builder());
+ builder()->CreateArrayLiteral(expr->constant_elements(),
+ expr->literal_index(),
+ expr->ComputeFlags(true));
Register index, literal;
// Evaluate all the non-constant subexpressions and store them into the
@@ -1164,8 +1162,8 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (literal_in_accumulator) {
- index = temporary_register_scope.NewRegister();
- literal = temporary_register_scope.NewRegister();
+ index = register_allocator()->NewRegister();
+ literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
literal_in_accumulator = false;
}
@@ -1198,21 +1196,22 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
switch (variable->location()) {
case VariableLocation::LOCAL: {
Register source(Register(variable->index()));
- execution_result()->SetResultInRegister(source);
+ builder()->LoadAccumulatorWithRegister(source);
+ execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::PARAMETER: {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register source = builder()->Parameter(variable->index() + 1);
- execution_result()->SetResultInRegister(source);
+ builder()->LoadAccumulatorWithRegister(source);
+ execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- size_t name_index = builder()->GetConstantPoolEntry(variable->name());
- builder()->LoadGlobal(name_index, feedback_index(slot), language_mode(),
- typeof_mode);
+ builder()->LoadGlobal(variable->name(), feedback_index(slot),
+ language_mode(), typeof_mode);
execution_result()->SetResultInAccumulator();
break;
}
@@ -1223,10 +1222,12 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
if (context) {
context_reg = context->reg();
} else {
- context_reg = execution_result()->NewRegister();
+ context_reg = register_allocator()->NewRegister();
// Walk the context chain to find the context at the given depth.
// TODO(rmcilroy): Perform this work in a bytecode handler once we have
// a generic mechanism for performing jumps in interpreter.cc.
+ // TODO(mythria): Also update bytecode graph builder with correct depth
+ // when this changes.
builder()
->LoadAccumulatorWithRegister(execution_context()->reg())
.StoreAccumulatorInRegister(context_reg);
@@ -1242,8 +1243,11 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// let variables.
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ builder()->LoadLookupSlot(variable->name(), typeof_mode);
+ execution_result()->SetResultInAccumulator();
+ break;
+ }
}
}
@@ -1270,7 +1274,6 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
// TODO(rmcilroy): support const mode initialization.
Register destination(variable->index());
builder()->StoreAccumulatorInRegister(destination);
- RecordStoreToRegister(destination);
break;
}
case VariableLocation::PARAMETER: {
@@ -1278,13 +1281,12 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register destination(builder()->Parameter(variable->index() + 1));
builder()->StoreAccumulatorInRegister(destination);
- RecordStoreToRegister(destination);
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- size_t name_index = builder()->GetConstantPoolEntry(variable->name());
- builder()->StoreGlobal(name_index, feedback_index(slot), language_mode());
+ builder()->StoreGlobal(variable->name(), feedback_index(slot),
+ language_mode());
break;
}
case VariableLocation::CONTEXT: {
@@ -1295,11 +1297,13 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
if (context) {
context_reg = context->reg();
} else {
- Register value_temp = execution_result()->NewRegister();
- context_reg = execution_result()->NewRegister();
+ Register value_temp = register_allocator()->NewRegister();
+ context_reg = register_allocator()->NewRegister();
// Walk the context chain to find the context at the given depth.
// TODO(rmcilroy): Perform this work in a bytecode handler once we have
// a generic mechanism for performing jumps in interpreter.cc.
+ // TODO(mythria): Also update bytecode graph builder with correct depth
+ // when this changes.
builder()
->StoreAccumulatorInRegister(value_temp)
.LoadAccumulatorWithRegister(execution_context()->reg())
@@ -1314,8 +1318,10 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ builder()->StoreLookupSlot(variable->name(), language_mode());
+ break;
+ }
}
}
@@ -1323,7 +1329,7 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Register object, key;
- size_t name_index = kMaxUInt32;
+ Handle<String> name;
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
@@ -1336,8 +1342,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
case NAMED_PROPERTY: {
object = VisitForRegisterValue(property->obj());
- name_index = builder()->GetConstantPoolEntry(
- property->key()->AsLiteral()->AsPropertyName());
+ name = property->key()->AsLiteral()->AsPropertyName();
break;
}
case KEYED_PROPERTY: {
@@ -1345,7 +1350,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// Use VisitForAccumulator and store to register so that the key is
// still in the accumulator for loading the old value below.
- key = execution_result()->NewRegister();
+ key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key);
} else {
@@ -1371,9 +1376,9 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
case NAMED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- old_value = execution_result()->NewRegister();
+ old_value = register_allocator()->NewRegister();
builder()
- ->LoadNamedProperty(object, name_index, feedback_index(slot),
+ ->LoadNamedProperty(object, name, feedback_index(slot),
language_mode())
.StoreAccumulatorInRegister(old_value);
break;
@@ -1382,7 +1387,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// Key is already in accumulator at this point due to evaluating the
// LHS above.
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- old_value = execution_result()->NewRegister();
+ old_value = register_allocator()->NewRegister();
builder()
->LoadKeyedProperty(object, feedback_index(slot), language_mode())
.StoreAccumulatorInRegister(old_value);
@@ -1411,7 +1416,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
}
case NAMED_PROPERTY:
- builder()->StoreNamedProperty(object, name_index, feedback_index(slot),
+ builder()->StoreNamedProperty(object, name, feedback_index(slot),
language_mode());
break;
case KEYED_PROPERTY:
@@ -1442,10 +1447,9 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
case VARIABLE:
UNREACHABLE();
case NAMED_PROPERTY: {
- size_t name_index = builder()->GetConstantPoolEntry(
- expr->key()->AsLiteral()->AsPropertyName());
- builder()->LoadNamedProperty(obj, name_index, feedback_index(slot),
- language_mode());
+ builder()->LoadNamedProperty(obj,
+ expr->key()->AsLiteral()->AsPropertyName(),
+ feedback_index(slot), language_mode());
break;
}
case KEYED_PROPERTY: {
@@ -1489,16 +1493,16 @@ Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
// less calls to NextConsecutiveRegister(). Otherwise, the arguments
// here will be consecutive, but they will not be consecutive with
// earlier consecutive allocations made by the caller.
- execution_result()->PrepareForConsecutiveAllocations(args->length());
+ register_allocator()->PrepareForConsecutiveAllocations(args->length());
// Visit for first argument that goes into returned register
- Register first_arg = execution_result()->NextConsecutiveRegister();
+ Register first_arg = register_allocator()->NextConsecutiveRegister();
VisitForAccumulatorValue(args->at(0));
builder()->StoreAccumulatorInRegister(first_arg);
// Visit remaining arguments
for (int i = 1; i < static_cast<int>(args->length()); i++) {
- Register ith_arg = execution_result()->NextConsecutiveRegister();
+ Register ith_arg = register_allocator()->NextConsecutiveRegister();
VisitForAccumulatorValue(args->at(i));
builder()->StoreAccumulatorInRegister(ith_arg);
DCHECK(ith_arg.index() - i == first_arg.index());
@@ -1513,14 +1517,15 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
- Register callee = execution_result()->NewRegister();
// The receiver and arguments need to be allocated consecutively for
- // Call(). Future optimizations could avoid this there are no
+ // Call(). We allocate the callee and receiver consecutively for calls to
+ // kLoadLookupSlot. Future optimizations could avoid this there are no
// arguments or the receiver and arguments are already consecutive.
ZoneList<Expression*>* args = expr->arguments();
- execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
- Register receiver = execution_result()->NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
+ Register callee = register_allocator()->NextConsecutiveRegister();
+ Register receiver = register_allocator()->NextConsecutiveRegister();
switch (call_type) {
case Call::NAMED_PROPERTY_CALL:
@@ -1542,6 +1547,27 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->StoreAccumulatorInRegister(callee);
break;
}
+ case Call::LOOKUP_SLOT_CALL:
+ case Call::POSSIBLY_EVAL_CALL: {
+ if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register context = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NextConsecutiveRegister();
+
+ // Call LoadLookupSlot to get the callee and receiver.
+ DCHECK(Register::AreContiguous(callee, receiver));
+ Variable* variable = callee_expr->AsVariableProxy()->var();
+ builder()
+ ->MoveRegister(Register::function_context(), context)
+ .LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+ break;
+ }
+ // Fall through.
+ DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+ }
case Call::OTHER_CALL: {
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
VisitForAccumulatorValue(callee_expr);
@@ -1550,9 +1576,7 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::NAMED_SUPER_PROPERTY_CALL:
case Call::KEYED_SUPER_PROPERTY_CALL:
- case Call::LOOKUP_SLOT_CALL:
case Call::SUPER_CALL:
- case Call::POSSIBLY_EVAL_CALL:
UNIMPLEMENTED();
}
@@ -1561,15 +1585,45 @@ void BytecodeGenerator::VisitCall(Call* expr) {
Register arg = VisitArguments(args);
CHECK(args->length() == 0 || arg.index() == receiver.index() + 1);
- // TODO(rmcilroy): Deal with possible direct eval here?
+ // Resolve callee for a potential direct eval call. This block will mutate the
+ // callee value.
+ if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
+ RegisterAllocationScope inner_register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(5);
+ Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
+ Register source = register_allocator()->NextConsecutiveRegister();
+ Register function = register_allocator()->NextConsecutiveRegister();
+ Register language = register_allocator()->NextConsecutiveRegister();
+ Register position = register_allocator()->NextConsecutiveRegister();
+
+ // Set up arguments for ResolvePossiblyDirectEval by copying callee, source
+ // strings and function closure, and loading language and
+ // position.
+ builder()
+ ->MoveRegister(callee, callee_for_eval)
+ .MoveRegister(arg, source)
+ .MoveRegister(Register::function_closure(), function)
+ .LoadLiteral(Smi::FromInt(language_mode()))
+ .StoreAccumulatorInRegister(language)
+ .LoadLiteral(
+ Smi::FromInt(execution_context()->scope()->start_position()))
+ .StoreAccumulatorInRegister(position);
+
+ // Call ResolvePossiblyDirectEval and modify the callee.
+ builder()
+ ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+ .StoreAccumulatorInRegister(callee);
+ }
+
// TODO(rmcilroy): Use CallIC to allow call type feedback.
- builder()->Call(callee, receiver, args->length());
+ builder()->Call(callee, receiver, args->length(),
+ feedback_index(expr->CallFeedbackICSlot()));
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
- Register constructor = execution_result()->NewRegister();
+ Register constructor = register_allocator()->NewRegister();
VisitForAccumulatorValue(expr->expression());
builder()->StoreAccumulatorInRegister(constructor);
@@ -1585,8 +1639,8 @@ void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Register receiver;
if (expr->is_jsruntime()) {
// Allocate a register for the receiver and load it with undefined.
- execution_result()->PrepareForConsecutiveAllocations(args->length() + 1);
- receiver = execution_result()->NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
+ receiver = register_allocator()->NextConsecutiveRegister();
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
}
// Evaluate all arguments to the runtime call.
@@ -1596,8 +1650,6 @@ void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
} else {
- // TODO(rmcilroy): support multiple return values.
- DCHECK_LE(expr->function()->result_size, 1);
Runtime::FunctionId function_id = expr->function()->function_id;
builder()->CallRuntime(function_id, first_arg, args->length());
}
@@ -1678,10 +1730,13 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
// Global var, let, const or variables not explicitly declared.
- Register global_object = execution_result()->NewRegister();
+ Register native_context = register_allocator()->NewRegister();
+ Register global_object = register_allocator()->NewRegister();
builder()
->LoadContextSlot(execution_context()->reg(),
- Context::GLOBAL_OBJECT_INDEX)
+ Context::NATIVE_CONTEXT_INDEX)
+ .StoreAccumulatorInRegister(native_context)
+ .LoadContextSlot(native_context, Context::EXTENSION_INDEX)
.StoreAccumulatorInRegister(global_object)
.LoadLiteral(variable->name())
.Delete(global_object, language_mode());
@@ -1700,7 +1755,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
break;
}
case VariableLocation::LOOKUP: {
- UNIMPLEMENTED();
+ builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
break;
}
default:
@@ -1727,7 +1782,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Evaluate LHS expression and get old value.
Register obj, key, old_value;
- size_t name_index = kMaxUInt32;
+ Handle<String> name;
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -1738,9 +1793,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
obj = VisitForRegisterValue(property->obj());
- name_index = builder()->GetConstantPoolEntry(
- property->key()->AsLiteral()->AsPropertyName());
- builder()->LoadNamedProperty(obj, name_index, feedback_index(slot),
+ name = property->key()->AsLiteral()->AsPropertyName();
+ builder()->LoadNamedProperty(obj, name, feedback_index(slot),
language_mode());
break;
}
@@ -1749,7 +1803,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
obj = VisitForRegisterValue(property->obj());
// Use visit for accumulator here since we need the key in the accumulator
// for the LoadKeyedProperty.
- key = execution_result()->NewRegister();
+ key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
obj, feedback_index(slot), language_mode());
@@ -1767,7 +1821,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
// Save result for postfix expressions.
if (is_postfix) {
- old_value = execution_result()->outer()->NewRegister();
+ old_value = register_allocator()->outer()->NewRegister();
builder()->StoreAccumulatorInRegister(old_value);
}
@@ -1783,8 +1837,8 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case NAMED_PROPERTY: {
- builder()->StoreNamedProperty(
- obj, name_index, feedback_index(feedback_slot), language_mode());
+ builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+ language_mode());
break;
}
case KEYED_PROPERTY: {
@@ -1825,37 +1879,17 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
- // TODO(oth): Remove PrepareForBinaryExpression/CompleteBinaryExpression
- // once we have StatementScope that tracks hazardous loads/stores.
- PrepareForBinaryExpression();
Register lhs = VisitForRegisterValue(expr->left());
- if (builder()->RegisterIsParameterOrLocal(lhs)) {
- // Result was returned in an existing local or parameter. See if
- // it needs to be moved to a temporary.
- // TODO(oth) LoadFromAliasedRegister call into VisitVariableLoad().
- lhs = LoadFromAliasedRegister(lhs);
- }
VisitForAccumulatorValue(expr->right());
builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
- CompleteBinaryExpression();
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
- // TODO(oth): Remove PrepareForBinaryExpression/CompleteBinaryExpression
- // once we have StatementScope that tracks hazardous loads/stores.
- PrepareForBinaryExpression();
Register lhs = VisitForRegisterValue(expr->left());
- if (builder()->RegisterIsParameterOrLocal(lhs)) {
- // Result was returned in an existing local or parameter. See if
- // it needs to be moved to a temporary.
- // TODO(oth) LoadFromAliasedRegister call into VisitVariableLoad().
- lhs = LoadFromAliasedRegister(lhs);
- }
VisitForAccumulatorValue(expr->right());
builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
- CompleteBinaryExpression();
execution_result()->SetResultInAccumulator();
}
@@ -1928,15 +1962,21 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
}
+void BytecodeGenerator::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ Visit(expr->expression());
+}
+
+
void BytecodeGenerator::VisitNewLocalFunctionContext() {
AccumulatorResultScope accumulator_execution_result(this);
Scope* scope = this->scope();
// Allocate a new local context.
if (scope->is_script_scope()) {
- TemporaryRegisterScope temporary_register_scope(builder());
- Register closure = temporary_register_scope.NewRegister();
- Register scope_info = temporary_register_scope.NewRegister();
+ RegisterAllocationScope register_scope(this);
+ Register closure = register_allocator()->NewRegister();
+ Register scope_info = register_allocator()->NewRegister();
DCHECK(Register::AreContiguous(closure, scope_info));
builder()
->LoadAccumulatorWithRegister(Register::function_closure())
@@ -1956,7 +1996,12 @@ void BytecodeGenerator::VisitBuildLocalActivationContext() {
Scope* scope = this->scope();
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
- UNIMPLEMENTED();
+ Variable* variable = scope->receiver();
+ Register receiver(builder()->Parameter(0));
+ // Context variable (at bottom of the context chain).
+ DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
+ builder()->LoadAccumulatorWithRegister(receiver).StoreContextSlot(
+ execution_context()->reg(), variable->index());
}
// Copy parameters into context if necessary.
@@ -1981,10 +2026,10 @@ void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
DCHECK(scope->is_block_scope());
// Allocate a new local block context.
- TemporaryRegisterScope temporary_register_scope(builder());
- Register scope_info = temporary_register_scope.NewRegister();
- Register closure = temporary_register_scope.NewRegister();
- DCHECK(Register::AreContiguous(scope_info, closure));
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register scope_info = register_allocator()->NextConsecutiveRegister();
+ Register closure = register_allocator()->NextConsecutiveRegister();
+
builder()
->LoadLiteral(scope->GetScopeInfo(isolate()))
.StoreAccumulatorInRegister(scope_info);
@@ -2041,7 +2086,7 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
// TODO(rmcilroy): Remove once we have tests which exercise this code path.
UNIMPLEMENTED();
- // Store the closure we were called with in the this_function_var.
+ // Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
}
@@ -2050,8 +2095,8 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
if (variable == nullptr) return;
- // Store the closure we were called with in the this_function_var.
- builder()->CallRuntime(Runtime::kGetOriginalConstructor, Register(), 0);
+ // Store the new target we were called with in the given variable.
+ builder()->LoadAccumulatorWithRegister(Register::new_target());
VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
}
@@ -2063,8 +2108,12 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
- // Pass a SMI sentinel and let the runtime look up the empty function.
- builder()->LoadLiteral(Smi::FromInt(0));
+ Register native_context = register_allocator()->NewRegister();
+ builder()
+ ->LoadContextSlot(execution_context()->reg(),
+ Context::NATIVE_CONTEXT_INDEX)
+ .StoreAccumulatorInRegister(native_context)
+ .LoadContextSlot(native_context, Context::CLOSURE_INDEX);
} else {
DCHECK(closure_scope->is_function_scope());
builder()->LoadAccumulatorWithRegister(Register::function_closure());
@@ -2073,13 +2122,6 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
}
-void BytecodeGenerator::PrepareForBinaryExpression() {
- if (binary_expression_depth_++ == 0) {
- binary_expression_hazard_set_.clear();
- }
-}
-
-
// Visits the expression |expr| and places the result in the accumulator.
void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
AccumulatorResultScope accumulator_scope(this);
@@ -2103,35 +2145,6 @@ Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
}
-Register BytecodeGenerator::LoadFromAliasedRegister(Register reg) {
- // TODO(oth): Follow on CL to load from re-map here.
- DCHECK(builder()->RegisterIsParameterOrLocal(reg));
- if (binary_expression_depth_ > 0) {
- binary_expression_hazard_set_.insert(reg.index());
- }
- return reg;
-}
-
-
-void BytecodeGenerator::RecordStoreToRegister(Register reg) {
- DCHECK(builder()->RegisterIsParameterOrLocal(reg));
- if (binary_expression_depth_ > 0) {
- // TODO(oth): a store to a register that's be loaded needs to be
- // remapped.
- DCHECK(binary_expression_hazard_set_.find(reg.index()) ==
- binary_expression_hazard_set_.end());
- }
-}
-
-
-void BytecodeGenerator::CompleteBinaryExpression() {
- DCHECK(binary_expression_depth_ > 0);
- binary_expression_depth_ -= 1;
- // TODO(oth): spill remapped registers into origins.
- // TODO(oth): make statement/top-level.
-}
-
-
Register BytecodeGenerator::NextContextRegister() const {
if (execution_context() == nullptr) {
// Return the incoming function context for the outermost execution context.
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 7284cfe9e1..8bda7be301 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_GENERATOR_H_
#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecodes.h"
@@ -13,10 +13,9 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeGenerator : public AstVisitor {
+class BytecodeGenerator final : public AstVisitor {
public:
BytecodeGenerator(Isolate* isolate, Zone* zone);
- virtual ~BytecodeGenerator();
Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
@@ -24,18 +23,20 @@ class BytecodeGenerator : public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- // Visiting function for declarations list is overridden.
+ // Visiting function for declarations list and statements are overridden.
void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statments) override;
private:
class ContextScope;
class ControlScope;
+ class ControlScopeForBreakable;
class ControlScopeForIteration;
- class ControlScopeForSwitch;
class ExpressionResultScope;
class EffectResultScope;
class AccumulatorResultScope;
class RegisterResultScope;
+ class RegisterAllocationScope;
void MakeBytecodeBody();
Register NextContextRegister() const;
@@ -54,6 +55,9 @@ class BytecodeGenerator : public AstVisitor {
void VisitNot(UnaryOperation* expr);
void VisitDelete(UnaryOperation* expr);
+ // Used by flow control routines to evaluate loop condition.
+ void VisitCondition(Expression* expr);
+
// Helper visitors which perform common operations.
Register VisitArguments(ZoneList<Expression*>* arguments);
@@ -84,17 +88,12 @@ class BytecodeGenerator : public AstVisitor {
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
-
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect.
void VisitForAccumulatorValue(Expression* expression);
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
void VisitForEffect(Expression* node);
- // Methods marking the start and end of binary expressions.
- void PrepareForBinaryExpression();
- void CompleteBinaryExpression();
-
// Methods for tracking and remapping register.
void RecordStoreToRegister(Register reg);
Register LoadFromAliasedRegister(Register reg);
@@ -121,6 +120,13 @@ class BytecodeGenerator : public AstVisitor {
execution_result_ = execution_result;
}
ExpressionResultScope* execution_result() const { return execution_result_; }
+ inline void set_register_allocator(
+ RegisterAllocationScope* register_allocator) {
+ register_allocator_ = register_allocator;
+ }
+ RegisterAllocationScope* register_allocator() const {
+ return register_allocator_;
+ }
ZoneVector<Handle<Object>>* globals() { return &globals_; }
inline LanguageMode language_mode() const;
@@ -136,9 +142,7 @@ class BytecodeGenerator : public AstVisitor {
ControlScope* execution_control_;
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
-
- int binary_expression_depth_;
- ZoneSet<int> binary_expression_hazard_set_;
+ RegisterAllocationScope* register_allocator_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.cc b/deps/v8/src/interpreter/bytecode-register-allocator.cc
new file mode 100644
index 0000000000..4efb612db5
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.cc
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-allocator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeRegisterAllocator::BytecodeRegisterAllocator(
+ BytecodeArrayBuilder* builder)
+ : builder_(builder),
+ allocated_(builder->zone()),
+ next_consecutive_register_(-1),
+ next_consecutive_count_(-1) {}
+
+
+BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
+ for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
+ builder_->ReturnTemporaryRegister(*i);
+ }
+ allocated_.clear();
+}
+
+
+Register BytecodeRegisterAllocator::NewRegister() {
+ int allocated = -1;
+ if (next_consecutive_count_ <= 0) {
+ allocated = builder_->BorrowTemporaryRegister();
+ } else {
+ allocated = builder_->BorrowTemporaryRegisterNotInRange(
+ next_consecutive_register_,
+ next_consecutive_register_ + next_consecutive_count_ - 1);
+ }
+ allocated_.push_back(allocated);
+ return Register(allocated);
+}
+
+
+bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
+ Register reg) const {
+ for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
+ if (*i == reg.index()) return true;
+ }
+ return false;
+}
+
+
+void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
+ if (static_cast<int>(count) > next_consecutive_count_) {
+ next_consecutive_register_ =
+ builder_->PrepareForConsecutiveTemporaryRegisters(count);
+ next_consecutive_count_ = static_cast<int>(count);
+ }
+}
+
+
+Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
+ DCHECK_GE(next_consecutive_register_, 0);
+ DCHECK_GT(next_consecutive_count_, 0);
+ builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+ allocated_.push_back(next_consecutive_register_);
+ next_consecutive_count_--;
+ return Register(next_consecutive_register_++);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
new file mode 100644
index 0000000000..74ab3a4272
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilder;
+class Register;
+
+// A class than allows the instantiator to allocate temporary registers that are
+// cleaned up when scope is closed.
+class BytecodeRegisterAllocator {
+ public:
+ explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+ ~BytecodeRegisterAllocator();
+ Register NewRegister();
+
+ void PrepareForConsecutiveAllocations(size_t count);
+ Register NextConsecutiveRegister();
+
+ bool RegisterIsAllocatedInThisScope(Register reg) const;
+
+ bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
+
+ private:
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ BytecodeArrayBuilder* builder_;
+ ZoneVector<int> allocated_;
+ int next_consecutive_register_;
+ int next_consecutive_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterAllocator);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+
+#endif // V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index df2a1dd4f1..2d4406cc1b 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -159,8 +159,8 @@ OperandSize Bytecodes::SizeOfOperand(OperandType operand_type) {
// static
-bool Bytecodes::IsJump(Bytecode bytecode) {
- return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpIfTrue ||
+bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrue ||
bytecode == Bytecode::kJumpIfFalse ||
bytecode == Bytecode::kJumpIfToBooleanTrue ||
bytecode == Bytecode::kJumpIfToBooleanFalse ||
@@ -170,18 +170,69 @@ bool Bytecodes::IsJump(Bytecode bytecode) {
// static
-bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpConstant ||
- bytecode == Bytecode::kJumpIfTrueConstant ||
+bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstant ||
bytecode == Bytecode::kJumpIfFalseConstant ||
bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
- bytecode == Bytecode::kJumpIfNull ||
+ bytecode == Bytecode::kJumpIfNullConstant ||
bytecode == Bytecode::kJumpIfUndefinedConstant;
}
// static
+bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfTrueConstantWide ||
+ bytecode == Bytecode::kJumpIfFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfNullConstantWide ||
+ bytecode == Bytecode::kJumpIfUndefinedConstantWide;
+}
+
+
+// static
+bool Bytecodes::IsConditionalJump(Bytecode bytecode) {
+ return IsConditionalJumpImmediate(bytecode) ||
+ IsConditionalJumpConstant(bytecode) ||
+ IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpImmediate(Bytecode bytecode) {
+ return bytecode == Bytecode::kJump || IsConditionalJumpImmediate(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpConstant(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstant ||
+ IsConditionalJumpConstant(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpConstantWide(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpConstantWide ||
+ IsConditionalJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJump(Bytecode bytecode) {
+ return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode) ||
+ IsJumpConstantWide(bytecode);
+}
+
+
+// static
+bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn || IsJump(bytecode);
+}
+
+
+// static
std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
int parameter_count) {
Vector<char> buf = Vector<char>::New(50);
@@ -209,13 +260,15 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
case interpreter::OperandType::kCount8:
os << "#" << static_cast<unsigned int>(*operand_start);
break;
+ case interpreter::OperandType::kCount16:
+ os << '#' << ReadUnalignedUInt16(operand_start);
+ break;
case interpreter::OperandType::kIdx8:
os << "[" << static_cast<unsigned int>(*operand_start) << "]";
break;
- case interpreter::OperandType::kIdx16: {
+ case interpreter::OperandType::kIdx16:
os << "[" << ReadUnalignedUInt16(operand_start) << "]";
break;
- }
case interpreter::OperandType::kImm8:
os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
break;
@@ -226,6 +279,8 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
os << "<context>";
} else if (reg.is_function_closure()) {
os << "<closure>";
+ } else if (reg.is_new_target()) {
+ os << "<new.target>";
} else if (reg.is_parameter()) {
int parameter_index = reg.ToParameterIndex(parameter_count);
if (parameter_index == 0) {
@@ -238,6 +293,29 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
}
break;
}
+ case interpreter::OperandType::kRegPair8: {
+ Register reg = Register::FromOperand(*operand_start);
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ DCHECK_NE(parameter_index, 0);
+ os << "a" << parameter_index - 1 << "-" << parameter_index;
+ } else {
+ os << "r" << reg.index() << "-" << reg.index() + 1;
+ }
+ break;
+ }
+ case interpreter::OperandType::kReg16: {
+ Register reg =
+ Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ DCHECK_NE(parameter_index, 0);
+ os << "a" << parameter_index - 1;
+ } else {
+ os << "r" << reg.index();
+ }
+ break;
+ }
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -271,6 +349,8 @@ static const int kFunctionClosureRegisterIndex =
-InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
static const int kFunctionContextRegisterIndex =
-InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
+static const int kNewTargetRegisterIndex =
+ -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
@@ -285,7 +365,7 @@ Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
DCHECK_LT(register_index, 0);
- DCHECK_GE(register_index, Register::kMinRegisterIndex);
+ DCHECK_GE(register_index, kMinInt8);
return Register(register_index);
}
@@ -316,10 +396,22 @@ bool Register::is_function_context() const {
}
+Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
+
+
+bool Register::is_new_target() const {
+ return index() == kNewTargetRegisterIndex;
+}
+
+
int Register::MaxParameterIndex() { return kMaxParameterIndex; }
-uint8_t Register::ToOperand() const { return static_cast<uint8_t>(-index_); }
+uint8_t Register::ToOperand() const {
+ DCHECK_GE(index_, kMinInt8);
+ DCHECK_LE(index_, kMaxInt8);
+ return static_cast<uint8_t>(-index_);
+}
Register Register::FromOperand(uint8_t operand) {
@@ -327,6 +419,18 @@ Register Register::FromOperand(uint8_t operand) {
}
+uint16_t Register::ToWideOperand() const {
+ DCHECK_GE(index_, kMinInt16);
+ DCHECK_LE(index_, kMaxInt16);
+ return static_cast<uint16_t>(-index_);
+}
+
+
+Register Register::FromWideOperand(uint16_t operand) {
+ return Register(-static_cast<int16_t>(operand));
+}
+
+
bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5) {
if (reg1.index() + 1 != reg2.index()) {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 8eaf920d1b..a9beb6c918 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -25,11 +25,14 @@ namespace interpreter {
V(Count8, OperandSize::kByte) \
V(Imm8, OperandSize::kByte) \
V(Idx8, OperandSize::kByte) \
- V(Reg8, OperandSize::kByte) \
V(MaybeReg8, OperandSize::kByte) \
+ V(Reg8, OperandSize::kByte) \
+ V(RegPair8, OperandSize::kByte) \
\
/* Short operands. */ \
- V(Idx16, OperandSize::kShort)
+ V(Count16, OperandSize::kShort) \
+ V(Idx16, OperandSize::kShort) \
+ V(Reg16, OperandSize::kShort)
// The list of bytecodes which are interpreted by the interpreter.
#define BYTECODE_LIST(V) \
@@ -64,11 +67,28 @@ namespace interpreter {
V(PopContext, OperandType::kReg8) \
V(LdaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
V(StaContextSlot, OperandType::kReg8, OperandType::kIdx8) \
+ V(LdaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
+ V(StaContextSlotWide, OperandType::kReg8, OperandType::kIdx16) \
+ \
+ /* Load-Store lookup slots */ \
+ V(LdaLookupSlot, OperandType::kIdx8) \
+ V(LdaLookupSlotInsideTypeof, OperandType::kIdx8) \
+ V(LdaLookupSlotWide, OperandType::kIdx16) \
+ V(LdaLookupSlotInsideTypeofWide, OperandType::kIdx16) \
+ V(StaLookupSlotSloppy, OperandType::kIdx8) \
+ V(StaLookupSlotStrict, OperandType::kIdx8) \
+ V(StaLookupSlotSloppyWide, OperandType::kIdx16) \
+ V(StaLookupSlotStrictWide, OperandType::kIdx16) \
\
/* Register-accumulator transfers */ \
V(Ldar, OperandType::kReg8) \
V(Star, OperandType::kReg8) \
\
+ /* Register-register transfers */ \
+ V(Mov, OperandType::kReg8, OperandType::kReg8) \
+ V(Exchange, OperandType::kReg8, OperandType::kReg16) \
+ V(ExchangeWide, OperandType::kReg16, OperandType::kReg16) \
+ \
/* LoadIC operations */ \
V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
@@ -119,11 +139,17 @@ namespace interpreter {
V(TypeOf, OperandType::kNone) \
V(DeletePropertyStrict, OperandType::kReg8) \
V(DeletePropertySloppy, OperandType::kReg8) \
+ V(DeleteLookupSlot, OperandType::kNone) \
\
/* Call operations */ \
- V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8) \
+ V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8, \
+ OperandType::kIdx8) \
+ V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16, \
+ OperandType::kIdx16) \
V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8, \
OperandType::kCount8) \
+ V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8, \
+ OperandType::kCount8, OperandType::kRegPair8) \
V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8, \
OperandType::kCount8) \
\
@@ -143,18 +169,27 @@ namespace interpreter {
V(TestIn, OperandType::kReg8) \
\
/* Cast operators */ \
- V(ToBoolean, OperandType::kNone) \
V(ToName, OperandType::kNone) \
V(ToNumber, OperandType::kNone) \
V(ToObject, OperandType::kNone) \
\
/* Literals */ \
- V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kReg8) \
- V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kImm8) \
- V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kImm8) \
+ V(CreateRegExpLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateArrayLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateObjectLiteral, OperandType::kIdx8, OperandType::kIdx8, \
+ OperandType::kImm8) \
+ V(CreateRegExpLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ V(CreateArrayLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
+ V(CreateObjectLiteralWide, OperandType::kIdx16, OperandType::kIdx16, \
+ OperandType::kImm8) \
\
/* Closure allocation */ \
- V(CreateClosure, OperandType::kImm8) \
+ V(CreateClosure, OperandType::kIdx8, OperandType::kImm8) \
+ V(CreateClosureWide, OperandType::kIdx16, OperandType::kImm8) \
\
/* Arguments allocation */ \
V(CreateMappedArguments, OperandType::kNone) \
@@ -163,23 +198,32 @@ namespace interpreter {
/* Control Flow */ \
V(Jump, OperandType::kImm8) \
V(JumpConstant, OperandType::kIdx8) \
+ V(JumpConstantWide, OperandType::kIdx16) \
V(JumpIfTrue, OperandType::kImm8) \
V(JumpIfTrueConstant, OperandType::kIdx8) \
+ V(JumpIfTrueConstantWide, OperandType::kIdx16) \
V(JumpIfFalse, OperandType::kImm8) \
V(JumpIfFalseConstant, OperandType::kIdx8) \
+ V(JumpIfFalseConstantWide, OperandType::kIdx16) \
V(JumpIfToBooleanTrue, OperandType::kImm8) \
V(JumpIfToBooleanTrueConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanTrueConstantWide, OperandType::kIdx16) \
V(JumpIfToBooleanFalse, OperandType::kImm8) \
V(JumpIfToBooleanFalseConstant, OperandType::kIdx8) \
+ V(JumpIfToBooleanFalseConstantWide, OperandType::kIdx16) \
V(JumpIfNull, OperandType::kImm8) \
V(JumpIfNullConstant, OperandType::kIdx8) \
+ V(JumpIfNullConstantWide, OperandType::kIdx16) \
V(JumpIfUndefined, OperandType::kImm8) \
V(JumpIfUndefinedConstant, OperandType::kIdx8) \
+ V(JumpIfUndefinedConstantWide, OperandType::kIdx16) \
\
/* Complex flow control For..in */ \
- V(ForInPrepare, OperandType::kReg8) \
- V(ForInNext, OperandType::kReg8, OperandType::kReg8) \
- V(ForInDone, OperandType::kReg8) \
+ V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInDone, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8, \
+ OperandType::kReg8) \
+ V(ForInStep, OperandType::kReg8) \
\
/* Non-local flow control */ \
V(Throw, OperandType::kNone) \
@@ -224,15 +268,9 @@ enum class Bytecode : uint8_t {
// in its stack-frame. Register hold parameters, this, and expression values.
class Register {
public:
- static const int kMaxRegisterIndex = 127;
- static const int kMinRegisterIndex = -128;
-
Register() : index_(kIllegalIndex) {}
- explicit Register(int index) : index_(index) {
- DCHECK_LE(index_, kMaxRegisterIndex);
- DCHECK_GE(index_, kMinRegisterIndex);
- }
+ explicit Register(int index) : index_(index) {}
int index() const {
DCHECK(index_ != kIllegalIndex);
@@ -253,9 +291,16 @@ class Register {
static Register function_context();
bool is_function_context() const;
+ // Returns the register for the incoming new target value.
+ static Register new_target();
+ bool is_new_target() const;
+
static Register FromOperand(uint8_t operand);
uint8_t ToOperand() const;
+ static Register FromWideOperand(uint16_t operand);
+ uint16_t ToWideOperand() const;
+
static bool AreContiguous(Register reg1, Register reg2,
Register reg3 = Register(),
Register reg4 = Register(),
@@ -320,14 +365,41 @@ class Bytecodes {
// Returns the size of |operand|.
static OperandSize SizeOfOperand(OperandType operand);
+ // Return true if the bytecode is a conditional jump taking
+ // an immediate byte operand (OperandType::kImm8).
+ static bool IsConditionalJumpImmediate(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // a constant pool entry (OperandType::kIdx8).
+ static bool IsConditionalJumpConstant(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // a constant pool entry (OperandType::kIdx16).
+ static bool IsConditionalJumpConstantWide(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump taking
+ // any kind of operand.
+ static bool IsConditionalJump(Bytecode bytecode);
+
// Return true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm8).
- static bool IsJump(Bytecode bytecode);
+ static bool IsJumpImmediate(Bytecode bytecode);
// Return true if the bytecode is a jump or conditional jump taking a
- // constant pool entry (OperandType::kIdx).
+ // constant pool entry (OperandType::kIdx8).
static bool IsJumpConstant(Bytecode bytecode);
+ // Return true if the bytecode is a jump or conditional jump taking a
+ // constant pool entry (OperandType::kIdx16).
+ static bool IsJumpConstantWide(Bytecode bytecode);
+
+ // Return true if the bytecode is a jump or conditional jump taking
+ // any kind of operand.
+ static bool IsJump(Bytecode bytecode);
+
+ // Return true if the bytecode is a conditional jump, a jump, or a return.
+ static bool IsJumpOrReturn(Bytecode bytecode);
+
// Decode a single bytecode and operands to |os|.
static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
int number_of_parameters);
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
new file mode 100644
index 0000000000..2586e1ff4d
--- /dev/null
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/constant-array-builder.h"
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+ConstantArrayBuilder::ConstantArraySlice::ConstantArraySlice(Zone* zone,
+ size_t start_index,
+ size_t capacity)
+ : start_index_(start_index),
+ capacity_(capacity),
+ reserved_(0),
+ constants_(zone) {}
+
+
+void ConstantArrayBuilder::ConstantArraySlice::Reserve() {
+ DCHECK_GT(available(), 0u);
+ reserved_++;
+ DCHECK_LE(reserved_, capacity() - constants_.size());
+}
+
+
+void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
+ DCHECK_GT(reserved_, 0u);
+ reserved_--;
+}
+
+
+size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
+ Handle<Object> object) {
+ DCHECK_GT(available(), 0u);
+ size_t index = constants_.size();
+ DCHECK_LT(index, capacity());
+ constants_.push_back(object);
+ return index + start_index();
+}
+
+
+Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
+ size_t index) const {
+ return constants_[index - start_index()];
+}
+
+
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kMaxCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::kLowCapacity;
+
+
+ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
+ : isolate_(isolate),
+ idx8_slice_(zone, 0, kLowCapacity),
+ idx16_slice_(zone, kLowCapacity, kHighCapacity),
+ constants_map_(isolate->heap(), zone) {
+ STATIC_ASSERT(kMaxCapacity == static_cast<size_t>(kMaxUInt16 + 1));
+ DCHECK_EQ(idx8_slice_.start_index(), 0u);
+ DCHECK_EQ(idx8_slice_.capacity(), kLowCapacity);
+ DCHECK_EQ(idx16_slice_.start_index(), kLowCapacity);
+ DCHECK_EQ(idx16_slice_.capacity(), kMaxCapacity - kLowCapacity);
+}
+
+
+size_t ConstantArrayBuilder::size() const {
+ if (idx16_slice_.size() > 0) {
+ return idx16_slice_.start_index() + idx16_slice_.size();
+ } else {
+ return idx8_slice_.size();
+ }
+}
+
+
+Handle<Object> ConstantArrayBuilder::At(size_t index) const {
+ if (index >= idx16_slice_.start_index()) {
+ return idx16_slice_.At(index);
+ } else if (index < idx8_slice_.size()) {
+ return idx8_slice_.At(index);
+ } else {
+ return isolate_->factory()->the_hole_value();
+ }
+}
+
+
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+ for (int i = 0; i < fixed_array->length(); i++) {
+ fixed_array->set(i, *At(static_cast<size_t>(i)));
+ }
+ return fixed_array;
+}
+
+
+size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
+ index_t* entry = constants_map_.Find(object);
+ return (entry == nullptr) ? AllocateEntry(object) : *entry;
+}
+
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
+ Handle<Object> object) {
+ DCHECK(!object->IsOddball());
+ size_t index;
+ index_t* entry = constants_map_.Get(object);
+ if (idx8_slice_.available() > 0) {
+ index = idx8_slice_.Allocate(object);
+ } else {
+ index = idx16_slice_.Allocate(object);
+ }
+ CHECK_LT(index, kMaxCapacity);
+ *entry = static_cast<index_t>(index);
+ return *entry;
+}
+
+
+OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+ if (idx8_slice_.available() > 0) {
+ idx8_slice_.Reserve();
+ return OperandSize::kByte;
+ } else if (idx16_slice_.available() > 0) {
+ idx16_slice_.Reserve();
+ return OperandSize::kShort;
+ } else {
+ UNREACHABLE();
+ return OperandSize::kNone;
+ }
+}
+
+
+size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
+ Handle<Object> object) {
+ DiscardReservedEntry(operand_size);
+ size_t index;
+ index_t* entry = constants_map_.Find(object);
+ if (nullptr == entry) {
+ index = AllocateEntry(object);
+ } else {
+ if (operand_size == OperandSize::kByte &&
+ *entry >= idx8_slice_.capacity()) {
+ // The object is already in the constant array, but has an index
+ // outside the range of an idx8 operand so we need to create a
+ // duplicate entry in the idx8 operand range to satisfy the
+ // commitment.
+ *entry = static_cast<index_t>(idx8_slice_.Allocate(object));
+ }
+ index = *entry;
+ }
+ DCHECK(operand_size == OperandSize::kShort || index < idx8_slice_.capacity());
+ DCHECK_LT(index, kMaxCapacity);
+ return index;
+}
+
+
+void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
+ switch (operand_size) {
+ case OperandSize::kByte:
+ idx8_slice_.Unreserve();
+ return;
+ case OperandSize::kShort:
+ idx16_slice_.Unreserve();
+ return;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
new file mode 100644
index 0000000000..c882b1d540
--- /dev/null
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+
+#include "src/identity-map.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Factory;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing constant arrays for the interpreter.
+class ConstantArrayBuilder final : public ZoneObject {
+ public:
+ // Capacity of the 8-bit operand slice.
+ static const size_t kLowCapacity = 1u << kBitsPerByte;
+
+ // Capacity of the combined 8-bit and 16-bit operand slices.
+ static const size_t kMaxCapacity = 1u << (2 * kBitsPerByte);
+
+ // Capacity of the 16-bit operand slice.
+ static const size_t kHighCapacity = kMaxCapacity - kLowCapacity;
+
+ ConstantArrayBuilder(Isolate* isolate, Zone* zone);
+
+ // Generate a fixed array of constants based on inserted objects.
+ Handle<FixedArray> ToFixedArray(Factory* factory) const;
+
+ // Returns the object in the constant pool array that at index
+ // |index|.
+ Handle<Object> At(size_t index) const;
+
+ // Returns the number of elements in the array.
+ size_t size() const;
+
+ // Insert an object into the constants array if it is not already
+ // present. Returns the array index associated with the object.
+ size_t Insert(Handle<Object> object);
+
+ // Creates a reserved entry in the constant pool and returns
+ // the size of the operand that'll be required to hold the entry
+ // when committed.
+ OperandSize CreateReservedEntry();
+
+ // Commit reserved entry and returns the constant pool index for the
+ // object.
+ size_t CommitReservedEntry(OperandSize operand_size, Handle<Object> object);
+
+ // Discards constant pool reservation.
+ void DiscardReservedEntry(OperandSize operand_size);
+
+ private:
+ typedef uint16_t index_t;
+
+ index_t AllocateEntry(Handle<Object> object);
+
+ struct ConstantArraySlice final {
+ ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity);
+ void Reserve();
+ void Unreserve();
+ size_t Allocate(Handle<Object> object);
+ Handle<Object> At(size_t index) const;
+
+ inline size_t available() const { return capacity() - reserved() - size(); }
+ inline size_t reserved() const { return reserved_; }
+ inline size_t capacity() const { return capacity_; }
+ inline size_t size() const { return constants_.size(); }
+ inline size_t start_index() const { return start_index_; }
+
+ private:
+ const size_t start_index_;
+ const size_t capacity_;
+ size_t reserved_;
+ ZoneVector<Handle<Object>> constants_;
+
+ DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
+ };
+
+ Isolate* isolate_;
+ ConstantArraySlice idx8_slice_;
+ ConstantArraySlice idx16_slice_;
+ IdentityMap<index_t> constants_map_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 3ecabe4351..99066e8c7e 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -32,6 +32,13 @@ void BreakableControlFlowBuilder::EmitJumpIfTrue(
}
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+ ZoneVector<BytecodeLabel>* sites) {
+ sites->push_back(BytecodeLabel());
+ builder()->JumpIfFalse(&sites->back());
+}
+
+
void BreakableControlFlowBuilder::EmitJumpIfUndefined(
ZoneVector<BytecodeLabel>* sites) {
sites->push_back(BytecodeLabel());
@@ -58,6 +65,12 @@ void BreakableControlFlowBuilder::EmitJumpIfTrue(
}
+void BreakableControlFlowBuilder::EmitJumpIfFalse(
+ ZoneVector<BytecodeLabel>* sites, int index) {
+ builder()->JumpIfFalse(&sites->at(index));
+}
+
+
void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
ZoneVector<BytecodeLabel>* sites) {
for (size_t i = 0; i < sites->size(); i++) {
@@ -68,9 +81,43 @@ void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
}
+void BlockBuilder::EndBlock() {
+ builder()->Bind(&block_end_);
+ SetBreakTarget(block_end_);
+}
+
+
LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
+void LoopBuilder::LoopHeader() {
+ // Jumps from before the loop header into the loop violate ordering
+ // requirements of bytecode basic blocks. The only entry into a loop
+ // must be the loop header. Surely breaks is okay? Not if nested
+ // and misplaced between the headers.
+ DCHECK(break_sites_.empty() && continue_sites_.empty());
+ builder()->Bind(&loop_header_);
+}
+
+
+void LoopBuilder::EndLoop() {
+ // Loop must have closed form, i.e. all loop elements are within the loop,
+ // the loop header precedes the body and next elements in the loop.
+ DCHECK(loop_header_.is_bound());
+ builder()->Bind(&loop_end_);
+ SetBreakTarget(loop_end_);
+ if (next_.is_bound()) {
+ DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
+ SetContinueTarget(next_);
+ } else {
+ DCHECK(condition_.is_bound());
+ DCHECK_GE(condition_.offset(), loop_header_.offset());
+ DCHECK_LE(condition_.offset(), loop_end_.offset());
+ SetContinueTarget(condition_);
+ }
+}
+
+
void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
BindLabels(target, &continue_sites_);
}
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index c9be6dcdc7..24a7dfe3e5 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -44,6 +44,7 @@ class BreakableControlFlowBuilder : public ControlFlowBuilder {
// SetBreakTarget is called.
void Break() { EmitJump(&break_sites_); }
void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
+ void BreakIfFalse() { EmitJumpIfFalse(&break_sites_); }
void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
@@ -52,19 +53,33 @@ class BreakableControlFlowBuilder : public ControlFlowBuilder {
void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
+ void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels);
+ void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels, int index);
void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
- private:
// Unbound labels that identify jumps for break statements in the code.
ZoneVector<BytecodeLabel> break_sites_;
};
+
+// Class to track control flow for block statements (which can break in JS).
+class BlockBuilder final : public BreakableControlFlowBuilder {
+ public:
+ explicit BlockBuilder(BytecodeArrayBuilder* builder)
+ : BreakableControlFlowBuilder(builder) {}
+
+ void EndBlock();
+
+ private:
+ BytecodeLabel block_end_;
+};
+
+
// A class to help with co-ordinating break and continue statements with
// their loop.
-// TODO(oth): add support for TF branch/merge info.
class LoopBuilder final : public BreakableControlFlowBuilder {
public:
explicit LoopBuilder(BytecodeArrayBuilder* builder)
@@ -72,9 +87,12 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
continue_sites_(builder->zone()) {}
~LoopBuilder();
- // This methods should be called by the LoopBuilder owner before
- // destruction to update sites that emit jumps for continue.
- void SetContinueTarget(const BytecodeLabel& continue_target);
+ void LoopHeader();
+ void Condition() { builder()->Bind(&condition_); }
+ void Next() { builder()->Bind(&next_); }
+ void JumpToHeader() { builder()->Jump(&loop_header_); }
+ void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+ void EndLoop();
// This method is called when visiting continue statements in the AST.
// Inserts a jump to a unbound label that is patched when the corresponding
@@ -85,12 +103,19 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
private:
+ void SetContinueTarget(const BytecodeLabel& continue_target);
+
+ BytecodeLabel loop_header_;
+ BytecodeLabel condition_;
+ BytecodeLabel next_;
+ BytecodeLabel loop_end_;
+
// Unbound labels that identify jumps for continue statements in the code.
ZoneVector<BytecodeLabel> continue_sites_;
};
+
// A class to help with co-ordinating break statements with their switch.
-// TODO(oth): add support for TF branch/merge info.
class SwitchBuilder final : public BreakableControlFlowBuilder {
public:
explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index e089a5d475..574602b0ed 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -17,6 +17,7 @@ namespace internal {
namespace interpreter {
using compiler::Node;
+
#define __ assembler->
@@ -200,11 +201,47 @@ void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
}
+// Exchange <reg8> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
+ Node* reg0_index = __ BytecodeOperandReg(0);
+ Node* reg1_index = __ BytecodeOperandReg(1);
+ Node* reg0_value = __ LoadRegister(reg0_index);
+ Node* reg1_value = __ LoadRegister(reg1_index);
+ __ StoreRegister(reg1_value, reg0_index);
+ __ StoreRegister(reg0_value, reg1_index);
+ __ Dispatch();
+}
+
+
+// ExchangeWide <reg16> <reg16>
+//
+// Exchange two registers.
+void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
+ return DoExchange(assembler);
+}
+
+
+// Mov <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+ Node* src_index = __ BytecodeOperandReg(0);
+ Node* src_value = __ LoadRegister(src_index);
+ Node* dst_index = __ BytecodeOperandReg(1);
+ __ StoreRegister(src_value, dst_index);
+ __ Dispatch();
+}
+
+
void Interpreter::DoLoadGlobal(Callable ic,
compiler::InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
- Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
// Load the global via the LoadIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -216,7 +253,6 @@ void Interpreter::DoLoadGlobal(Callable ic,
Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
type_feedback_vector);
__ SetAccumulator(result);
-
__ Dispatch();
}
@@ -319,7 +355,9 @@ void Interpreter::DoStoreGlobal(Callable ic,
compiler::InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
- Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
+ Node* native_context =
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
+ Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
// Store the global via the StoreIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -395,6 +433,15 @@ void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
}
+// LdaContextSlotWide <context> <slot_index>
+//
+// Load the object in |slot_index| of |context| into the accumulator.
+void Interpreter::DoLdaContextSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaContextSlot(assembler);
+}
+
+
// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
@@ -408,6 +455,120 @@ void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
}
+// StaContextSlot <context> <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of |context|.
+void Interpreter::DoStaContextSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaContextSlot(assembler);
+}
+
+
+void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+ Node* result_pair = __ CallRuntime(function_id, context, name);
+ Node* result = __ Projection(0, result_pair);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// LdaLookupSlot <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+}
+
+
+// LdaLookupSlotInsideTypeof <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeof(
+ compiler::InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+}
+
+
+// LdaLookupSlotWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically.
+void Interpreter::DoLdaLookupSlotWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaLookupSlot(assembler);
+}
+
+
+// LdaLookupSlotInsideTypeofWide <name_index>
+//
+// Lookup the object with the name in constant pool entry |name_index|
+// dynamically without causing a NoReferenceError.
+void Interpreter::DoLdaLookupSlotInsideTypeofWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoLdaLookupSlotInsideTypeof(assembler);
+}
+
+
+void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
+ compiler::InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+ Node* language_mode_node = __ NumberConstant(language_mode);
+ Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
+ language_mode_node);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// StaLookupSlotSloppy <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppy(
+ compiler::InterpreterAssembler* assembler) {
+ DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+}
+
+
+// StaLookupSlotStrict <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrict(
+ compiler::InterpreterAssembler* assembler) {
+ DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+}
+
+
+// StaLookupSlotSloppyWide <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in sloppy mode.
+void Interpreter::DoStaLookupSlotSloppyWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaLookupSlotSloppy(assembler);
+}
+
+
+// StaLookupSlotStrictWide <name_index>
+//
+// Store the object in accumulator to the object with the name in constant
+// pool entry |name_index| in strict mode.
+void Interpreter::DoStaLookupSlotStrictWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoStaLookupSlotStrict(assembler);
+}
+
+
void Interpreter::DoLoadIC(Callable ic,
compiler::InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
@@ -882,22 +1043,51 @@ void Interpreter::DoDeletePropertySloppy(
}
-// Call <callable> <receiver> <arg_count>
+// DeleteLookupSlot
//
-// Call a JSfunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+// Delete the variable with the name specified in the accumulator by dynamically
+// looking it up.
+void Interpreter::DoDeleteLookupSlot(
+ compiler::InterpreterAssembler* assembler) {
+ Node* name = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
Node* function_reg = __ BytecodeOperandReg(0);
Node* function = __ LoadRegister(function_reg);
Node* receiver_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(receiver_reg);
Node* args_count = __ BytecodeOperandCount(2);
+ // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
Node* result = __ CallJS(function, first_arg, args_count);
__ SetAccumulator(result);
__ Dispatch();
}
+// Call <callable> <receiver> <arg_count>
+//
+// Call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+ DoJSCall(assembler);
+}
+
+
+// CallWide <callable> <receiver> <arg_count>
+//
+// Call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
+ DoJSCall(assembler);
+}
+
+
// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
@@ -914,6 +1104,33 @@ void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
}
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPair(
+ compiler::InterpreterAssembler* assembler) {
+ // Call the runtime function.
+ Node* function_id = __ BytecodeOperandIdx(0);
+ Node* first_arg_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
+
+ // Store the results in <first_return> and <first_return + 1>
+ Node* first_return_reg = __ BytecodeOperandReg(3);
+ Node* second_return_reg = __ NextRegister(first_return_reg);
+ Node* result0 = __ Projection(0, result_pair);
+ Node* result1 = __ Projection(1, result_pair);
+ __ StoreRegister(result0, first_return_reg);
+ __ StoreRegister(result1, second_return_reg);
+
+ __ Dispatch();
+}
+
+
// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
@@ -926,9 +1143,8 @@ void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
// Get the function to call from the native context.
Node* context = __ GetContext();
- Node* global = __ LoadContextSlot(context, Context::GLOBAL_OBJECT_INDEX);
Node* native_context =
- __ LoadObjectField(global, JSGlobalObject::kNativeContextOffset);
+ __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
Node* function = __ LoadContextSlot(native_context, context_index);
// Call the function.
@@ -938,7 +1154,7 @@ void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
}
-// New <constructor> <arg_count>
+// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
// register |first_arg| and |arg_count| arguments in subsequent
@@ -1045,17 +1261,6 @@ void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
}
-// ToBoolean
-//
-// Cast the object referenced by the accumulator to a boolean.
-void Interpreter::DoToBoolean(compiler::InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-
// ToName
//
// Cast the object referenced by the accumulator to a name.
@@ -1098,9 +1303,9 @@ void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
}
-// JumpConstant <idx>
+// JumpConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
@@ -1109,6 +1314,16 @@ void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
}
+// JumpConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the
+// constant pool.
+void Interpreter::DoJumpConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpConstant(assembler);
+}
+
+
// JumpIfTrue <imm8>
//
// Jump by number of bytes represented by an immediate operand if the
@@ -1121,9 +1336,9 @@ void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfTrueConstant <idx>
+// JumpIfTrueConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains true.
void Interpreter::DoJumpIfTrueConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1136,6 +1351,16 @@ void Interpreter::DoJumpIfTrueConstant(
}
+// JumpIfTrueConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the accumulator contains true.
+void Interpreter::DoJumpIfTrueConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfTrueConstant(assembler);
+}
+
+
// JumpIfFalse <imm8>
//
// Jump by number of bytes represented by an immediate operand if the
@@ -1148,9 +1373,9 @@ void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfFalseConstant <idx>
+// JumpIfFalseConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains false.
void Interpreter::DoJumpIfFalseConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1163,6 +1388,16 @@ void Interpreter::DoJumpIfFalseConstant(
}
+// JumpIfFalseConstant <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the accumulator contains false.
+void Interpreter::DoJumpIfFalseConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfFalseConstant(assembler);
+}
+
+
// JumpIfToBooleanTrue <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -1170,17 +1405,17 @@ void Interpreter::DoJumpIfFalseConstant(
void Interpreter::DoJumpIfToBooleanTrue(
compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
Node* to_boolean_value =
__ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
}
-// JumpIfToBooleanTrueConstant <idx>
+// JumpIfToBooleanTrueConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
@@ -1196,6 +1431,17 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
}
+// JumpIfToBooleanTrueConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is true when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanTrueConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfToBooleanTrueConstant(assembler);
+}
+
+
// JumpIfToBooleanFalse <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -1203,17 +1449,17 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
void Interpreter::DoJumpIfToBooleanFalse(
compiler::InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
Node* to_boolean_value =
__ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
}
-// JumpIfToBooleanFalseConstant <idx>
+// JumpIfToBooleanFalseConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
@@ -1229,6 +1475,17 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
}
+// JumpIfToBooleanFalseConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is false when the object is cast
+// to boolean.
+void Interpreter::DoJumpIfToBooleanFalseConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfToBooleanFalseConstant(assembler);
+}
+
+
// JumpIfNull <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -1241,9 +1498,9 @@ void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfNullConstant <idx>
+// JumpIfNullConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
void Interpreter::DoJumpIfNullConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1256,7 +1513,17 @@ void Interpreter::DoJumpIfNullConstant(
}
-// JumpIfUndefined <imm8>
+// JumpIfNullConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the null constant.
+void Interpreter::DoJumpIfNullConstantWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoJumpIfNullConstant(assembler);
+}
+
+
+// jumpifundefined <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
@@ -1269,9 +1536,9 @@ void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
}
-// JumpIfUndefinedConstant <idx>
+// JumpIfUndefinedConstant <idx8>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstant(
compiler::InterpreterAssembler* assembler) {
@@ -1285,73 +1552,102 @@ void Interpreter::DoJumpIfUndefinedConstant(
}
-// CreateRegExpLiteral <idx> <flags_reg>
+// JumpIfUndefinedConstantWide <idx16>
//
-// Creates a regular expression literal for literal index <idx> with flags held
-// in <flags_reg> and the pattern in the accumulator.
-void Interpreter::DoCreateRegExpLiteral(
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is the undefined constant.
+void Interpreter::DoJumpIfUndefinedConstantWide(
compiler::InterpreterAssembler* assembler) {
- Node* pattern = __ GetAccumulator();
- Node* literal_index_raw = __ BytecodeOperandIdx(0);
- Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_reg = __ BytecodeOperandReg(1);
- Node* flags = __ LoadRegister(flags_reg);
- Node* closure = __ LoadRegister(Register::function_closure());
- Node* literals_array =
- __ LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* result = __ CallRuntime(Runtime::kMaterializeRegExpLiteral,
- literals_array, literal_index, pattern, flags);
- __ SetAccumulator(result);
- __ Dispatch();
+ DoJumpIfUndefinedConstant(assembler);
}
void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler) {
- Node* constant_elements = __ GetAccumulator();
- Node* literal_index_raw = __ BytecodeOperandIdx(0);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandImm(1);
+ Node* flags_raw = __ BytecodeOperandImm(2);
Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
- Node* literals_array =
- __ LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* result = __ CallRuntime(function_id, literals_array, literal_index,
+ Node* result = __ CallRuntime(function_id, closure, literal_index,
constant_elements, flags);
__ SetAccumulator(result);
__ Dispatch();
}
-// CreateArrayLiteral <idx> <flags>
+// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
-// Creates an array literal for literal index <idx> with flags <flags> and
-// constant elements in the accumulator.
+// Creates a regular expression literal for literal index <literal_idx> with
+// <flags> and the pattern in <pattern_idx>.
+void Interpreter::DoCreateRegExpLiteral(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+}
+
+
+// CreateRegExpLiteralWide <pattern_idx> <literal_idx> <flags>
+//
+// Creates a regular expression literal for literal index <literal_idx> with
+// <flags> and the pattern in <pattern_idx>.
+void Interpreter::DoCreateRegExpLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
+}
+
+
+// CreateArrayLiteral <element_idx> <literal_idx> <flags>
+//
+// Creates an array literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(
compiler::InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
-// CreateObjectLiteral <idx> <flags>
+// CreateArrayLiteralWide <element_idx> <literal_idx> <flags>
+//
+// Creates an array literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateArrayLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+}
+
+
+// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
-// Creates an object literal for literal index <idx> with flags <flags> and
-// constant elements in the accumulator.
+// Creates an object literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(
compiler::InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
-// CreateClosure <tenured>
+// CreateObjectLiteralWide <element_idx> <literal_idx> <flags>
+//
+// Creates an object literal for literal index <literal_idx> with flags <flags>
+// and constant elements in <element_idx>.
+void Interpreter::DoCreateObjectLiteralWide(
+ compiler::InterpreterAssembler* assembler) {
+ DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+}
+
+
+// CreateClosure <index> <tenured>
//
-// Creates a new closure for SharedFunctionInfo in the accumulator with the
-// PretenureFlag <tenured>.
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
// TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
// calling into the runtime.
- Node* shared = __ GetAccumulator();
- Node* tenured_raw = __ BytecodeOperandImm(0);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* shared = __ LoadConstantPoolEntry(index);
+ Node* tenured_raw = __ BytecodeOperandImm(1);
Node* tenured = __ SmiTag(tenured_raw);
Node* result =
__ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
@@ -1360,6 +1656,16 @@ void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
}
+// CreateClosureWide <index> <tenured>
+//
+// Creates a new closure for SharedFunctionInfo at position |index| in the
+// constant pool and with the PretenureFlag <tenured>.
+void Interpreter::DoCreateClosureWide(
+ compiler::InterpreterAssembler* assembler) {
+ return DoCreateClosure(assembler);
+}
+
+
// CreateMappedArguments
//
// Creates a new mapped arguments object.
@@ -1403,33 +1709,36 @@ void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
}
-// ForInPrepare <receiver>
+// ForInPrepare <cache_type> <cache_array> <cache_length>
//
-// Returns state for for..in loop execution based on the |receiver| and
-// the property names in the accumulator.
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The registers |cache_type|, |cache_array|, and
+// |cache_length| represent output parameters.
void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
- Node* receiver_reg = __ BytecodeOperandReg(0);
- Node* receiver = __ LoadRegister(receiver_reg);
- Node* property_names = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, receiver,
- property_names);
+ Node* object = __ GetAccumulator();
+ Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+ for (int i = 0; i < 3; i++) {
+ // 0 == cache_type, 1 == cache_array, 2 == cache_length
+ Node* cache_info = __ LoadFixedArrayElement(result, i);
+ Node* cache_info_reg = __ BytecodeOperandReg(i);
+ __ StoreRegister(cache_info, cache_info_reg);
+ }
__ SetAccumulator(result);
__ Dispatch();
}
-// ForInNext <for_in_state> <index>
+// ForInNext <receiver> <cache_type> <cache_array> <index>
//
-// Returns the next key in a for..in loop. The state associated with the
-// iteration is contained in |for_in_state| and |index| is the current
-// zero-based iteration count.
+// Returns the next enumerable property in the the accumulator.
void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
- Node* for_in_state_reg = __ BytecodeOperandReg(0);
- Node* for_in_state = __ LoadRegister(for_in_state_reg);
- Node* receiver = __ LoadFixedArrayElement(for_in_state, 0);
- Node* cache_array = __ LoadFixedArrayElement(for_in_state, 1);
- Node* cache_type = __ LoadFixedArrayElement(for_in_state, 2);
- Node* index_reg = __ BytecodeOperandReg(1);
+ Node* receiver_reg = __ BytecodeOperandReg(0);
+ Node* receiver = __ LoadRegister(receiver_reg);
+ Node* cache_type_reg = __ BytecodeOperandReg(1);
+ Node* cache_type = __ LoadRegister(cache_type_reg);
+ Node* cache_array_reg = __ BytecodeOperandReg(2);
+ Node* cache_array = __ LoadRegister(cache_array_reg);
+ Node* index_reg = __ BytecodeOperandReg(3);
Node* index = __ LoadRegister(index_reg);
Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
cache_type, index);
@@ -1438,22 +1747,34 @@ void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
}
-// ForInDone <for_in_state>
+// ForInDone <index> <cache_length>
//
-// Returns the next key in a for..in loop. The accumulator contains the current
-// zero-based iteration count and |for_in_state| is the state returned by an
-// earlier invocation of ForInPrepare.
+// Returns true if the end of the enumerable properties has been reached.
void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
- Node* index = __ GetAccumulator();
- Node* for_in_state_reg = __ BytecodeOperandReg(0);
- Node* for_in_state = __ LoadRegister(for_in_state_reg);
- Node* cache_length = __ LoadFixedArrayElement(for_in_state, 3);
+ // TODO(oth): Implement directly rather than making a runtime call.
+ Node* index_reg = __ BytecodeOperandReg(0);
+ Node* index = __ LoadRegister(index_reg);
+ Node* cache_length_reg = __ BytecodeOperandReg(1);
+ Node* cache_length = __ LoadRegister(cache_length_reg);
Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
__ SetAccumulator(result);
__ Dispatch();
}
+// ForInStep <index>
+//
+// Increments the loop counter in register |index| and stores the result
+// in the accumulator.
+void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+ // TODO(oth): Implement directly rather than making a runtime call.
+ Node* index_reg = __ BytecodeOperandReg(0);
+ Node* index = __ LoadRegister(index_reg);
+ Node* result = __ CallRuntime(Runtime::kForInStep, index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 560aba19d7..ef9b5d1fe3 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -11,8 +11,8 @@
#include "src/base/macros.h"
#include "src/builtins.h"
#include "src/interpreter/bytecodes.h"
+#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
-#include "src/token.h"
namespace v8 {
namespace internal {
@@ -84,6 +84,9 @@ class Interpreter {
// Generates code to perform a keyed property store via |ic|.
void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ // Generates code to perform a JS call.
+ void DoJSCall(compiler::InterpreterAssembler* assembler);
+
// Generates code ro create a literal via |function_id|.
void DoCreateLiteral(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
@@ -92,6 +95,14 @@ class Interpreter {
void DoDelete(Runtime::FunctionId function_id,
compiler::InterpreterAssembler* assembler);
+ // Generates code to perform a lookup slot load via |function_id|.
+ void DoLoadLookupSlot(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a lookup slot store depending on |language_mode|.
+ void DoStoreLookupSlot(LanguageMode language_mode,
+ compiler::InterpreterAssembler* assembler);
+
bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
Isolate* isolate_;
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 89f285b2ee..c27b7a700d 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -73,7 +73,7 @@ bool Isolate::is_catchable_by_javascript(Object* exception) {
Handle<JSGlobalObject> Isolate::global_object() {
- return Handle<JSGlobalObject>(context()->global_object());
+ return Handle<JSGlobalObject>(context()->global_object(), this);
}
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 04198bb79c..4e42b436b1 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -9,7 +9,8 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
@@ -32,7 +33,6 @@
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
-#include "src/scopeinfo.h"
#include "src/simulator.h"
#include "src/snapshot/serialize.h"
#include "src/v8.h"
@@ -135,6 +135,22 @@ Isolate::PerIsolateThreadData*
}
+void Isolate::DiscardPerThreadDataForThisThread() {
+ int thread_id_int = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+ if (thread_id_int) {
+ ThreadId thread_id = ThreadId(thread_id_int);
+ DCHECK(!thread_manager_->mutex_owner_.Equals(thread_id));
+ base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
+ PerIsolateThreadData* per_thread =
+ thread_data_table_->Lookup(this, thread_id);
+ if (per_thread) {
+ DCHECK(!per_thread->thread_state_);
+ thread_data_table_->Remove(per_thread);
+ }
+ }
+}
+
+
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
return FindPerThreadDataForThread(thread_id);
@@ -772,14 +788,6 @@ bool Isolate::IsInternallyUsedPropertyName(Handle<Object> name) {
}
-bool Isolate::IsInternallyUsedPropertyName(Object* name) {
- if (name->IsSymbol()) {
- return Symbol::cast(name)->is_private();
- }
- return name == heap()->hidden_string();
-}
-
-
bool Isolate::MayAccess(Handle<Context> accessing_context,
Handle<JSObject> receiver) {
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
@@ -787,10 +795,10 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
+ // During bootstrapping, callback functions are not enabled yet.
+ if (bootstrapper()->IsActive()) return true;
{
DisallowHeapAllocation no_gc;
- // During bootstrapping, callback functions are not enabled yet.
- if (bootstrapper()->IsActive()) return true;
if (receiver->IsJSGlobalProxy()) {
Object* receiver_context =
@@ -1356,29 +1364,11 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
}
-// Traverse prototype chain to find out whether the object is derived from
-// the Error object.
-bool Isolate::IsErrorObject(Handle<Object> obj) {
- if (!obj->IsJSObject()) return false;
- Handle<Object> error_constructor = error_function();
- DisallowHeapAllocation no_gc;
- for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) return false;
- if (iter.GetCurrent<JSObject>()->map()->GetConstructor() ==
- *error_constructor) {
- return true;
- }
- }
- return false;
-}
-
-
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
- if (IsErrorObject(exception)) {
+ if (Object::IsErrorObject(this, exception)) {
// We fetch the stack trace that corresponds to this error object.
// If the lookup fails, the exception is probably not a valid Error
// object. In that case, we fall through and capture the stack trace
@@ -1797,6 +1787,7 @@ Isolate::Isolate(bool enable_serializer)
#endif
use_counter_callback_(NULL),
basic_block_profiler_(NULL),
+ cancelable_task_manager_(new CancelableTaskManager()),
abort_on_uncaught_exception_callback_(NULL) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
@@ -1835,6 +1826,8 @@ Isolate::Isolate(bool enable_serializer)
InitializeLoggingAndCounters();
debug_ = new Debug(this);
+
+ init_memcopy_functions(this);
}
@@ -1846,7 +1839,9 @@ void Isolate::TearDown() {
// direct pointer. We don't use Enter/Exit here to avoid
// initializing the thread data.
PerIsolateThreadData* saved_data = CurrentPerIsolateThreadData();
- Isolate* saved_isolate = UncheckedCurrent();
+ DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
+ Isolate* saved_isolate =
+ reinterpret_cast<Isolate*>(base::Thread::GetThreadLocal(isolate_key_));
SetIsolateThreadLocals(this, NULL);
Deinit();
@@ -1924,14 +1919,11 @@ void Isolate::Deinit() {
delete basic_block_profiler_;
basic_block_profiler_ = NULL;
- for (Cancelable* task : cancelable_tasks_) {
- task->Cancel();
- }
- cancelable_tasks_.clear();
-
heap_.TearDown();
logger_->TearDown();
+ cancelable_task_manager()->CancelAndWait();
+
delete heap_profiler_;
heap_profiler_ = NULL;
delete cpu_profiler_;
@@ -2032,6 +2024,9 @@ Isolate::~Isolate() {
delete debug_;
debug_ = NULL;
+ delete cancelable_task_manager_;
+ cancelable_task_manager_ = nullptr;
+
#if USE_SIMULATOR
Simulator::TearDown(simulator_i_cache_, simulator_redirection_);
simulator_i_cache_ = nullptr;
@@ -2152,7 +2147,7 @@ bool Isolate::Init(Deserializer* des) {
#endif
#endif
- code_aging_helper_ = new CodeAgingHelper();
+ code_aging_helper_ = new CodeAgingHelper(this);
{ // NOLINT
// Ensure that the thread has a valid stack guard. The v8::Locker object
@@ -2201,12 +2196,6 @@ bool Isolate::Init(Deserializer* des) {
// occur, clearing/updating ICs.
runtime_profiler_ = new RuntimeProfiler(this);
- if (create_heap_objects) {
- if (!bootstrapper_->CreateCodeStubContext(this)) {
- return false;
- }
- }
-
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize(this);
@@ -2246,7 +2235,7 @@ bool Isolate::Init(Deserializer* des) {
heap_.amount_of_external_allocated_memory_at_last_global_gc_)),
Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
- time_millis_at_init_ = base::OS::TimeCurrentMillis();
+ time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
heap_.NotifyDeserializationComplete();
@@ -2416,18 +2405,15 @@ CodeTracer* Isolate::GetCodeTracer() {
Map* Isolate::get_initial_js_array_map(ElementsKind kind, Strength strength) {
- Context* native_context = context()->native_context();
- Object* maybe_map_array = is_strong(strength)
- ? native_context->js_array_strong_maps()
- : native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- return Map::cast(maybe_transitioned_map);
+ if (IsFastElementsKind(kind)) {
+ DisallowHeapAllocation no_gc;
+ Object* const initial_js_array_map = context()->native_context()->get(
+ Context::ArrayMapIndex(kind, strength));
+ if (!initial_js_array_map->IsUndefined()) {
+ return Map::cast(initial_js_array_map);
}
}
- return NULL;
+ return nullptr;
}
@@ -2802,18 +2788,6 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
-void Isolate::RegisterCancelableTask(Cancelable* task) {
- cancelable_tasks_.insert(task);
-}
-
-
-void Isolate::RemoveCancelableTask(Cancelable* task) {
- auto removed = cancelable_tasks_.erase(task);
- USE(removed);
- DCHECK(removed == 1);
-}
-
-
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 79988f8f27..40c8157165 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -481,14 +481,9 @@ class Isolate {
return isolate;
}
- INLINE(static Isolate* UncheckedCurrent()) {
- DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
- return reinterpret_cast<Isolate*>(
- base::Thread::GetThreadLocal(isolate_key_));
- }
-
- // Like UncheckedCurrent, but skips the check that |isolate_key_| was
- // initialized. Callers have to ensure that themselves.
+ // Like Current, but skips the check that |isolate_key_| was initialized.
+ // Callers have to ensure that themselves.
+ // DO NOT USE. The only remaining callsite will be deleted soon.
INLINE(static Isolate* UnsafeCurrent()) {
return reinterpret_cast<Isolate*>(
base::Thread::GetThreadLocal(isolate_key_));
@@ -523,6 +518,10 @@ class Isolate {
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
+ // Discard the PerThread for this particular (isolate, thread) combination
+ // If one does not yet exist, no-op.
+ void DiscardPerThreadDataForThisThread();
+
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
@@ -685,7 +684,6 @@ class Isolate {
bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
bool IsInternallyUsedPropertyName(Handle<Object> name);
- bool IsInternallyUsedPropertyName(Object* name);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(Handle<JSObject> receiver);
@@ -939,7 +937,7 @@ class Isolate {
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
double time_millis_since_init() {
- return base::OS::TimeCurrentMillis() - time_millis_at_init_;
+ return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
}
DateCache* date_cache() {
@@ -953,10 +951,6 @@ class Isolate {
date_cache_ = date_cache;
}
- ErrorToStringHelper* error_tostring_helper() {
- return &error_tostring_helper_;
- }
-
Map* get_initial_js_array_map(ElementsKind kind,
Strength strength = Strength::WEAK);
@@ -1093,8 +1087,9 @@ class Isolate {
FutexWaitListNode* futex_wait_list_node() { return &futex_wait_list_node_; }
- void RegisterCancelableTask(Cancelable* task);
- void RemoveCancelableTask(Cancelable* task);
+ CancelableTaskManager* cancelable_task_manager() {
+ return cancelable_task_manager_;
+ }
interpreter::Interpreter* interpreter() const { return interpreter_; }
@@ -1206,10 +1201,6 @@ class Isolate {
// the frame.
void RemoveMaterializedObjectsOnUnwind(StackFrame* frame);
- // Traverse prototype chain to find out whether the object is derived from
- // the Error object.
- bool IsErrorObject(Handle<Object> obj);
-
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
@@ -1254,7 +1245,6 @@ class Isolate {
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
- ErrorToStringHelper error_tostring_helper_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
@@ -1338,7 +1328,7 @@ class Isolate {
FutexWaitListNode futex_wait_list_node_;
- std::set<Cancelable*> cancelable_tasks_;
+ CancelableTaskManager* cancelable_task_manager_;
v8::Isolate::AbortOnUncaughtExceptionCallback
abort_on_uncaught_exception_callback_;
diff --git a/deps/v8/src/js/OWNERS b/deps/v8/src/js/OWNERS
new file mode 100644
index 0000000000..f7002c723b
--- /dev/null
+++ b/deps/v8/src/js/OWNERS
@@ -0,0 +1,11 @@
+set noparent
+
+adamk@chromium.org
+bmeurer@chromium.org
+cbruni@chromium.org
+ishell@chromium.org
+jkummerow@chromium.org
+littledan@chromium.org
+rossberg@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/js/array-iterator.js b/deps/v8/src/js/array-iterator.js
index f0754ad093..2609ebdd73 100644
--- a/deps/v8/src/js/array-iterator.js
+++ b/deps/v8/src/js/array-iterator.js
@@ -22,24 +22,7 @@ var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-macro TYPED_ARRAYS(FUNCTION)
- FUNCTION(Uint8Array)
- FUNCTION(Int8Array)
- FUNCTION(Uint16Array)
- FUNCTION(Int16Array)
- FUNCTION(Uint32Array)
- FUNCTION(Int32Array)
- FUNCTION(Float32Array)
- FUNCTION(Float64Array)
- FUNCTION(Uint8ClampedArray)
-endmacro
-
-macro COPY_FROM_GLOBAL(NAME)
- var GlobalNAME = global.NAME;
-endmacro
-
-TYPED_ARRAYS(COPY_FROM_GLOBAL)
+var GlobalTypedArray = global.Uint8Array.__proto__;
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
@@ -78,7 +61,7 @@ function ArrayIteratorNext() {
var value = UNDEFINED;
var done = true;
- if (!IS_SPEC_OBJECT(iterator) ||
+ if (!IS_RECEIVER(iterator) ||
!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'Array Iterator.prototype.next', this);
@@ -152,15 +135,12 @@ utils.SetFunctionName(ArrayValues, 'values');
%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
DONT_ENUM);
-macro EXTEND_TYPED_ARRAY(NAME)
- %AddNamedProperty(GlobalNAME.prototype, 'entries', ArrayEntries, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, 'values', ArrayValues, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, 'keys', ArrayKeys, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, iteratorSymbol, ArrayValues,
- DONT_ENUM);
-endmacro
-
-TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
+%AddNamedProperty(GlobalTypedArray.prototype,
+ 'entries', ArrayEntries, DONT_ENUM);
+%AddNamedProperty(GlobalTypedArray.prototype, 'values', ArrayValues, DONT_ENUM);
+%AddNamedProperty(GlobalTypedArray.prototype, 'keys', ArrayKeys, DONT_ENUM);
+%AddNamedProperty(GlobalTypedArray.prototype,
+ iteratorSymbol, ArrayValues, DONT_ENUM);
// -------------------------------------------------------------------
// Exports
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 294e474be6..f9cf161191 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -12,8 +12,8 @@
// Imports
var AddIndexedProperty;
-var Delete;
var FLAG_harmony_tolength;
+var FLAG_harmony_species;
var GetIterator;
var GetMethod;
var GlobalArray = global.Array;
@@ -24,18 +24,16 @@ var MaxSimple;
var MinSimple;
var ObjectDefineProperty;
var ObjectHasOwnProperty;
-var ObjectIsFrozen;
-var ObjectIsSealed;
-var ObjectToString;
+var ObjectToString = utils.ImportNow("object_to_string");
var ObserveBeginPerformSplice;
var ObserveEndPerformSplice;
var ObserveEnqueueSpliceRecord;
+var SameValueZero;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
AddIndexedProperty = from.AddIndexedProperty;
- Delete = from.Delete;
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
MakeTypeError = from.MakeTypeError;
@@ -43,20 +41,43 @@ utils.Import(function(from) {
MinSimple = from.MinSimple;
ObjectDefineProperty = from.ObjectDefineProperty;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- ObjectIsFrozen = from.ObjectIsFrozen;
- ObjectIsSealed = from.ObjectIsSealed;
- ObjectToString = from.ObjectToString;
ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
ObserveEndPerformSplice = from.ObserveEndPerformSplice;
ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
+ SameValueZero = from.SameValueZero;
});
utils.ImportFromExperimental(function(from) {
FLAG_harmony_tolength = from.FLAG_harmony_tolength;
+ FLAG_harmony_species = from.FLAG_harmony_species;
});
// -------------------------------------------------------------------
+
+function ArraySpeciesCreate(array, length) {
+ var constructor;
+ if (FLAG_harmony_species) {
+ constructor = %ArraySpeciesConstructor(array);
+ } else {
+ constructor = GlobalArray;
+ }
+ return new constructor(length);
+}
+
+
+function DefineIndexedProperty(array, i, value) {
+ if (FLAG_harmony_species) {
+ var result = ObjectDefineProperty(array, i, {
+ value: value, writable: true, configurable: true, enumerable: true
+ });
+ if (!result) throw MakeTypeError(kStrictCannotAssign, i);
+ } else {
+ AddIndexedProperty(array, i, value);
+ }
+}
+
+
// Global list of arrays visited during toString, toLocaleString and
// join invocations.
var visited_arrays = new InternalArray();
@@ -256,7 +277,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
- AddIndexedProperty(deleted_elements, i - start_i, current);
+ DefineIndexedProperty(deleted_elements, i - start_i, current);
}
}
} else {
@@ -267,7 +288,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
- AddIndexedProperty(deleted_elements, key - start_i, current);
+ DefineIndexedProperty(deleted_elements, key - start_i, current);
}
}
}
@@ -347,9 +368,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
var index = start_i + i;
if (HAS_INDEX(array, index, is_array)) {
var current = array[index];
- // The spec requires [[DefineOwnProperty]] here, AddIndexedProperty is
- // close enough (in that it ignores the prototype).
- AddIndexedProperty(deleted_elements, i, current);
+ DefineIndexedProperty(deleted_elements, i, current);
}
}
}
@@ -490,7 +509,7 @@ function ArrayPop() {
n--;
var value = array[n];
- Delete(array, n, true);
+ %DeleteProperty_Strict(array, n);
array.length = n;
return value;
}
@@ -673,7 +692,7 @@ function ArrayShift() {
return;
}
- if (ObjectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
+ if (%object_is_sealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (%IsObserved(array))
return ObservedArrayShift.call(array, len);
@@ -724,7 +743,7 @@ function ArrayUnshift(arg1) { // length == 1
var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
- !ObjectIsSealed(array)) {
+ !%object_is_sealed(array)) {
SparseMove(array, 0, 0, len, num_arguments);
} else {
SimpleMove(array, 0, 0, len, num_arguments);
@@ -764,7 +783,7 @@ function ArraySlice(start, end) {
if (end_i > len) end_i = len;
}
- var result = [];
+ var result = ArraySpeciesCreate(array, MaxSimple(end_i - start_i, 0));
if (end_i < start_i) return result;
@@ -866,13 +885,13 @@ function ArraySplice(start, delete_count) {
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
- var deleted_elements = [];
+ var deleted_elements = ArraySpeciesCreate(array, del_count);
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
- if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
+ if (del_count != num_elements_to_add && %object_is_sealed(array)) {
throw MakeTypeError(kArrayFunctionsOnSealed);
- } else if (del_count > 0 && ObjectIsFrozen(array)) {
+ } else if (del_count > 0 && %object_is_frozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
@@ -1206,29 +1225,23 @@ function ArraySort(comparefn) {
// The following functions cannot be made efficient on sparse arrays while
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
-function InnerArrayFilter(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
-
- var accumulator = new InternalArray();
- var accumulator_length = 0;
+function InnerArrayFilter(f, receiver, array, length, result) {
+ var result_length = 0;
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
if (%_Call(f, receiver, element, i, array)) {
- accumulator[accumulator_length++] = element;
+ DefineIndexedProperty(result, result_length, element);
+ result_length++;
}
}
}
- var result = new GlobalArray();
- %MoveArrayContents(accumulator, result);
return result;
}
+
function ArrayFilter(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
@@ -1236,7 +1249,9 @@ function ArrayFilter(f, receiver) {
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
var length = TO_LENGTH_OR_UINT32(array.length);
- return InnerArrayFilter(f, receiver, array, length);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ var result = ArraySpeciesCreate(array, 0);
+ return InnerArrayFilter(f, receiver, array, length, result);
}
@@ -1244,12 +1259,9 @@ function InnerArrayForEach(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
%_Call(f, receiver, element, i, array);
}
}
@@ -1271,12 +1283,9 @@ function InnerArraySome(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
if (%_Call(f, receiver, element, i, array)) return true;
}
}
@@ -1301,12 +1310,9 @@ function InnerArrayEvery(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
if (!%_Call(f, receiver, element, i, array)) return false;
}
}
@@ -1324,37 +1330,26 @@ function ArrayEvery(f, receiver) {
}
-function InnerArrayMap(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+function ArrayMap(f, receiver) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
- var accumulator = new InternalArray(length);
+ // Pull out the length so that modifications to the length in the
+ // loop will not affect the looping and side effects are visible.
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH_OR_UINT32(array.length);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ var result = ArraySpeciesCreate(array, length);
var is_array = IS_ARRAY(array);
- var stepping = DEBUG_IS_STEPPING(f);
for (var i = 0; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(f);
- accumulator[i] = %_Call(f, receiver, element, i, array);
+ DefineIndexedProperty(result, i, %_Call(f, receiver, element, i, array));
}
}
- var result = new GlobalArray();
- %MoveArrayContents(accumulator, result);
return result;
}
-function ArrayMap(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
- return InnerArrayMap(f, receiver, array, length);
-}
-
-
// For .indexOf, we don't need to pass in the number of arguments
// at the callsite since ToInteger(undefined) == 0; however, for
// .lastIndexOf, we need to pass it, since the behavior for passing
@@ -1497,12 +1492,9 @@ function InnerArrayReduce(callback, current, array, length, argumentsLength) {
throw MakeTypeError(kReduceNoInitial);
}
- var stepping = DEBUG_IS_STEPPING(callback);
for (; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(callback);
current = callback(current, element, i, array);
}
}
@@ -1540,12 +1532,9 @@ function InnerArrayReduceRight(callback, current, array, length,
throw MakeTypeError(kReduceNoInitial);
}
- var stepping = DEBUG_IS_STEPPING(callback);
for (; i >= 0; i--) {
if (HAS_INDEX(array, i, is_array)) {
var element = array[i];
- // Prepare break slots for debugger step in.
- if (stepping) %DebugPrepareStepInIfStepping(callback);
current = callback(current, element, i, array);
}
}
@@ -1697,7 +1686,7 @@ function InnerArrayFill(value, start, end, array, length) {
if (end > length) end = length;
}
- if ((end - i) > 0 && ObjectIsFrozen(array)) {
+ if ((end - i) > 0 && %object_is_frozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
@@ -1718,9 +1707,44 @@ function ArrayFill(value, start, end) {
}
-// ES5, 15.4.3.2
-function ArrayIsArray(obj) {
- return IS_ARRAY(obj);
+function InnerArrayIncludes(searchElement, fromIndex, array, length) {
+ if (length === 0) {
+ return false;
+ }
+
+ var n = TO_INTEGER(fromIndex);
+
+ var k;
+ if (n >= 0) {
+ k = n;
+ } else {
+ k = length + n;
+ if (k < 0) {
+ k = 0;
+ }
+ }
+
+ while (k < length) {
+ var elementK = array[k];
+ if (SameValueZero(searchElement, elementK)) {
+ return true;
+ }
+
+ ++k;
+ }
+
+ return false;
+}
+
+
+// ES2016 draft, section 22.1.3.11
+function ArrayIncludes(searchElement, fromIndex) {
+ CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
+
+ var array = TO_OBJECT(this);
+ var length = TO_LENGTH(array.length);
+
+ return InnerArrayIncludes(searchElement, fromIndex, array, length);
}
@@ -1761,7 +1785,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
while (true) {
var next = iterator.next();
- if (!IS_OBJECT(next)) {
+ if (!IS_RECEIVER(next)) {
throw MakeTypeError(kIteratorResultNotAnObject, next);
}
@@ -1837,7 +1861,6 @@ var unscopables = {
// Set up non-enumerable functions on the Array object.
utils.InstallFunctions(GlobalArray, DONT_ENUM, [
- "isArray", ArrayIsArray,
"from", ArrayFrom,
"of", ArrayOf
]);
@@ -1883,7 +1906,8 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"copyWithin", getFunction("copyWithin", ArrayCopyWithin, 2),
"find", getFunction("find", ArrayFind, 1),
"findIndex", getFunction("findIndex", ArrayFindIndex, 1),
- "fill", getFunction("fill", ArrayFill, 1)
+ "fill", getFunction("fill", ArrayFill, 1),
+ "includes", getFunction("includes", ArrayIncludes, 1),
]);
%FinishArrayPrototypeSetup(GlobalArray.prototype);
@@ -1935,10 +1959,10 @@ utils.Export(function(to) {
to.InnerArrayFind = InnerArrayFind;
to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayForEach = InnerArrayForEach;
+ to.InnerArrayIncludes = InnerArrayIncludes;
to.InnerArrayIndexOf = InnerArrayIndexOf;
to.InnerArrayJoin = InnerArrayJoin;
to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
- to.InnerArrayMap = InnerArrayMap;
to.InnerArrayReduce = InnerArrayReduce;
to.InnerArrayReduceRight = InnerArrayReduceRight;
to.InnerArraySome = InnerArraySome;
diff --git a/deps/v8/src/js/arraybuffer.js b/deps/v8/src/js/arraybuffer.js
index 1159488160..f0273c71ed 100644
--- a/deps/v8/src/js/arraybuffer.js
+++ b/deps/v8/src/js/arraybuffer.js
@@ -12,31 +12,20 @@
// Imports
var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalObject = global.Object;
var MakeTypeError;
var MaxSimple;
var MinSimple;
-var ToPositiveInteger;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var SpeciesConstructor;
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
- ToPositiveInteger = from.ToPositiveInteger;
+ SpeciesConstructor = from.SpeciesConstructor;
});
// -------------------------------------------------------------------
-function ArrayBufferConstructor(length) { // length = 1
- if (%_IsConstructCall()) {
- var byteLength = ToPositiveInteger(length, kInvalidArrayBufferLength);
- %ArrayBufferInitialize(this, byteLength, kNotShared);
- } else {
- throw MakeTypeError(kConstructorNotFunction, "ArrayBuffer");
- }
-}
-
function ArrayBufferGetByteLen() {
if (!IS_ARRAYBUFFER(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -75,36 +64,27 @@ function ArrayBufferSlice(start, end) {
fin = first;
}
var newLen = fin - first;
- // TODO(dslomov): implement inheritance
- var result = new GlobalArrayBuffer(newLen);
+ var constructor = SpeciesConstructor(this, GlobalArrayBuffer, true);
+ var result = new constructor(newLen);
+ if (!IS_ARRAYBUFFER(result)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ 'ArrayBuffer.prototype.slice', result);
+ }
+ // TODO(littledan): Check for a detached ArrayBuffer
+ if (result === this) {
+ throw MakeTypeError(kArrayBufferSpeciesThis);
+ }
+ if (%_ArrayBufferGetByteLength(result) < newLen) {
+ throw MakeTypeError(kArrayBufferTooShort);
+ }
- %ArrayBufferSliceImpl(this, result, first);
+ %ArrayBufferSliceImpl(this, result, first, newLen);
return result;
}
-function ArrayBufferIsViewJS(obj) {
- return %ArrayBufferIsView(obj);
-}
-
-
-// Set up the ArrayBuffer constructor function.
-%SetCode(GlobalArrayBuffer, ArrayBufferConstructor);
-%FunctionSetPrototype(GlobalArrayBuffer, new GlobalObject());
-
-// Set up the constructor property on the ArrayBuffer prototype object.
-%AddNamedProperty(
- GlobalArrayBuffer.prototype, "constructor", GlobalArrayBuffer, DONT_ENUM);
-
-%AddNamedProperty(GlobalArrayBuffer.prototype,
- toStringTagSymbol, "ArrayBuffer", DONT_ENUM | READ_ONLY);
-
utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength",
ArrayBufferGetByteLen);
-utils.InstallFunctions(GlobalArrayBuffer, DONT_ENUM, [
- "isView", ArrayBufferIsViewJS
-]);
-
utils.InstallFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
"slice", ArrayBufferSlice
]);
diff --git a/deps/v8/src/js/code-stubs.js b/deps/v8/src/js/code-stubs.js
deleted file mode 100644
index 7cb10d726a..0000000000
--- a/deps/v8/src/js/code-stubs.js
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, code_stubs) {
-
-"use strict";
-
-code_stubs.StringLengthTFStub = function StringLengthTFStub(call_conv, minor_key) {
- var stub = function(receiver, name, i, v) {
- // i and v are dummy parameters mandated by the InterfaceDescriptor,
- // (LoadWithVectorDescriptor).
- return %_StringGetLength(%_JSValueGetValue(receiver));
- }
- return stub;
-}
-
-code_stubs.StringAddTFStub = function StringAddTFStub(call_conv, minor_key) {
- var stub = function(left, right) {
- return %StringAdd(left, right);
- }
- return stub;
-}
-
-const kTurboFanICCallModeMask = 1;
-const kTurboFanICCallForUnptimizedCode = 0;
-const kTurboFanICCallForOptimizedCode = 1;
-
-code_stubs.MathFloorStub = function MathFloorStub(call_conv, minor_key) {
- var call_from_optimized_ic = function(f, i, tv, receiver, v) {
- "use strict";
- // |f| is this function's JSFunction
- // |i| is TypeFeedbackVector slot # of callee's CallIC for Math.floor call
- // |receiver| is receiver, should not be used
- // |tv| is the calling function's type vector
- // |v| is the value to floor
- if (f !== %_FixedArrayGet(tv, i|0)) {
- return %_Call(f, receiver, v);
- }
- var r = %_MathFloor(+v);
- if (%_IsMinusZero(r)) {
- // Collect type feedback when the result of the floor is -0. This is
- // accomplished by storing a sentinel in the second, "extra"
- // TypeFeedbackVector slot corresponding to the Math.floor CallIC call in
- // the caller's TypeVector.
- %_FixedArraySet(tv, ((i|0)+1)|0, 1);
- return -0;
- }
- // Return integers in smi range as smis.
- var trunc = r|0;
- if (trunc === r) {
- return trunc;
- }
- return r;
- }
- var call_mode = (minor_key & kTurboFanICCallModeMask);
- if (call_mode == kTurboFanICCallForOptimizedCode) {
- return call_from_optimized_ic;
- } else {
- %SetForceInlineFlag(call_from_optimized_ic);
- var call_from_unoptimized_ic = function(f, i, receiver, v) {
- var tv = %_GetTypeFeedbackVector(%_GetCallerJSFunction());
- return call_from_optimized_ic(f, i, tv, receiver, v);
- }
- return call_from_unoptimized_ic;
- }
-}
-
-})
diff --git a/deps/v8/src/js/collection.js b/deps/v8/src/js/collection.js
index 050c37b5bd..0d7195d53e 100644
--- a/deps/v8/src/js/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -100,7 +100,7 @@ function GetExistingHash(key) {
if ((field & 1 /* Name::kHashNotComputedMask */) === 0) {
return field >>> 2 /* Name::kHashShift */;
}
- } else if (IS_SPEC_OBJECT(key) && !%_IsJSProxy(key) && !IS_GLOBAL(key)) {
+ } else if (IS_RECEIVER(key) && !IS_PROXY(key) && !IS_GLOBAL(key)) {
var hash = GET_PRIVATE(key, hashCodeSymbol);
return hash;
}
@@ -125,7 +125,7 @@ function GetHash(key) {
// Harmony Set
function SetConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "Set");
}
@@ -134,7 +134,7 @@ function SetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'add', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'add', this);
}
for (var value of iterable) {
@@ -248,10 +248,8 @@ function SetForEach(f, receiver) {
var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
var key;
- var stepping = DEBUG_IS_STEPPING(f);
var value_array = [UNDEFINED];
while (%SetIteratorNext(iterator, value_array)) {
- if (stepping) %DebugPrepareStepInIfStepping(f);
key = value_array[0];
%_Call(f, receiver, key, key, this);
}
@@ -283,7 +281,7 @@ utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
// Harmony Map
function MapConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "Map");
}
@@ -292,11 +290,11 @@ function MapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'set', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'set', this);
}
for (var nextItem of iterable) {
- if (!IS_SPEC_OBJECT(nextItem)) {
+ if (!IS_RECEIVER(nextItem)) {
throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
}
%_Call(adder, this, nextItem[0], nextItem[1]);
@@ -431,10 +429,8 @@ function MapForEach(f, receiver) {
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
- var stepping = DEBUG_IS_STEPPING(f);
var value_array = [UNDEFINED, UNDEFINED];
while (%MapIteratorNext(iterator, value_array)) {
- if (stepping) %DebugPrepareStepInIfStepping(f);
%_Call(f, receiver, value_array[1], value_array[0], this);
}
}
@@ -461,26 +457,6 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
"forEach", MapForEach
]);
-function MapFromArray(array) {
- var map = new GlobalMap;
- var length = array.length;
- for (var i = 0; i < length; i += 2) {
- var key = array[i];
- var value = array[i + 1];
- %_Call(MapSet, map, key, value);
- }
- return map;
-};
-
-function SetFromArray(array) {
- var set = new GlobalSet;
- var length = array.length;
- for (var i = 0; i < length; ++i) {
- %_Call(SetAdd, set, array[i]);
- }
- return set;
-};
-
// -----------------------------------------------------------------------
// Exports
@@ -492,8 +468,6 @@ function SetFromArray(array) {
"set_add", SetAdd,
"set_has", SetHas,
"set_delete", SetDelete,
- "map_from_array", MapFromArray,
- "set_from_array",SetFromArray,
]);
utils.Export(function(to) {
diff --git a/deps/v8/src/js/date.js b/deps/v8/src/js/date.js
deleted file mode 100644
index a99d8e4d51..0000000000
--- a/deps/v8/src/js/date.js
+++ /dev/null
@@ -1,884 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// -------------------------------------------------------------------
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalDate = global.Date;
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
-var IsFinite;
-var MakeRangeError;
-var MathAbs;
-var MathFloor;
-var NaN = %GetRootNaN();
-
-utils.Import(function(from) {
- IsFinite = from.IsFinite;
- MakeRangeError = from.MakeRangeError;
- MathAbs = from.MathAbs;
- MathFloor = from.MathFloor;
-});
-
-// -------------------------------------------------------------------
-
-// This file contains date support implemented in JavaScript.
-
-var timezone_cache_time = NaN;
-var timezone_cache_timezone;
-
-function LocalTimezone(t) {
- if (NUMBER_IS_NAN(t)) return "";
- CheckDateCacheCurrent();
- if (t == timezone_cache_time) {
- return timezone_cache_timezone;
- }
- var timezone = %DateLocalTimezone(t);
- timezone_cache_time = t;
- timezone_cache_timezone = timezone;
- return timezone;
-}
-
-
-function UTC(time) {
- if (NUMBER_IS_NAN(time)) return time;
- // local_time_offset is needed before the call to DaylightSavingsOffset,
- // so it may be uninitialized.
- return %DateToUTC(time);
-}
-
-
-// ECMA 262 - 15.9.1.11
-function MakeTime(hour, min, sec, ms) {
- if (!IsFinite(hour)) return NaN;
- if (!IsFinite(min)) return NaN;
- if (!IsFinite(sec)) return NaN;
- if (!IsFinite(ms)) return NaN;
- return TO_INTEGER(hour) * msPerHour
- + TO_INTEGER(min) * msPerMinute
- + TO_INTEGER(sec) * msPerSecond
- + TO_INTEGER(ms);
-}
-
-
-// ECMA 262 - 15.9.1.12
-function TimeInYear(year) {
- return DaysInYear(year) * msPerDay;
-}
-
-
-// Compute number of days given a year, month, date.
-// Note that month and date can lie outside the normal range.
-// For example:
-// MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20)
-// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1)
-// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11)
-function MakeDay(year, month, date) {
- if (!IsFinite(year) || !IsFinite(month) || !IsFinite(date)) return NaN;
-
- // Convert to integer and map -0 to 0.
- year = TO_INTEGER_MAP_MINUS_ZERO(year);
- month = TO_INTEGER_MAP_MINUS_ZERO(month);
- date = TO_INTEGER_MAP_MINUS_ZERO(date);
-
- if (year < kMinYear || year > kMaxYear ||
- month < kMinMonth || month > kMaxMonth) {
- return NaN;
- }
-
- // Now we rely on year and month being SMIs.
- return %DateMakeDay(year | 0, month | 0) + date - 1;
-}
-
-
-// ECMA 262 - 15.9.1.13
-function MakeDate(day, time) {
- var time = day * msPerDay + time;
- // Some of our runtime funtions for computing UTC(time) rely on
- // times not being significantly larger than MAX_TIME_MS. If there
- // is no way that the time can be within range even after UTC
- // conversion we return NaN immediately instead of relying on
- // TimeClip to do it.
- if (MathAbs(time) > MAX_TIME_BEFORE_UTC) return NaN;
- return time;
-}
-
-
-// ECMA 262 - 15.9.1.14
-function TimeClip(time) {
- if (!IsFinite(time)) return NaN;
- if (MathAbs(time) > MAX_TIME_MS) return NaN;
- return TO_INTEGER(time) + 0;
-}
-
-
-// The Date cache is used to limit the cost of parsing the same Date
-// strings over and over again.
-var Date_cache = {
- // Cached time value.
- time: 0,
- // String input for which the cached time is valid.
- string: null
-};
-
-
-function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
- if (!%_IsConstructCall()) {
- // ECMA 262 - 15.9.2
- return %_Call(DateToString, new GlobalDate());
- }
-
- // ECMA 262 - 15.9.3
- var argc = %_ArgumentsLength();
- var value;
- if (argc == 0) {
- value = %DateCurrentTime();
- SET_UTC_DATE_VALUE(this, value);
- } else if (argc == 1) {
- if (IS_NUMBER(year)) {
- value = TimeClip(year);
-
- } else if (IS_STRING(year)) {
- // Probe the Date cache. If we already have a time value for the
- // given time, we re-use that instead of parsing the string again.
- CheckDateCacheCurrent();
- var cache = Date_cache;
- if (cache.string === year) {
- value = cache.time;
- } else {
- value = DateParse(year);
- if (!NUMBER_IS_NAN(value)) {
- cache.time = value;
- cache.string = year;
- }
- }
-
- } else if (IS_DATE(year)) {
- value = UTC_DATE_VALUE(year);
-
- } else {
- var time = TO_PRIMITIVE(year);
- value = IS_STRING(time) ? DateParse(time) : TO_NUMBER(time);
- }
- SET_UTC_DATE_VALUE(this, value);
- } else {
- year = TO_NUMBER(year);
- month = TO_NUMBER(month);
- date = argc > 2 ? TO_NUMBER(date) : 1;
- hours = argc > 3 ? TO_NUMBER(hours) : 0;
- minutes = argc > 4 ? TO_NUMBER(minutes) : 0;
- seconds = argc > 5 ? TO_NUMBER(seconds) : 0;
- ms = argc > 6 ? TO_NUMBER(ms) : 0;
- year = (!NUMBER_IS_NAN(year) &&
- 0 <= TO_INTEGER(year) &&
- TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- value = MakeDate(day, time);
- SET_LOCAL_DATE_VALUE(this, value);
- }
-}
-
-
-var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
-
-
-function TwoDigitString(value) {
- return value < 10 ? "0" + value : "" + value;
-}
-
-
-function DateString(date) {
- CHECK_DATE(date);
- return WeekDays[LOCAL_WEEKDAY(date)] + ' '
- + Months[LOCAL_MONTH(date)] + ' '
- + TwoDigitString(LOCAL_DAY(date)) + ' '
- + LOCAL_YEAR(date);
-}
-
-
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
- 'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
- 'July', 'August', 'September', 'October', 'November', 'December'];
-
-
-function LongDateString(date) {
- CHECK_DATE(date);
- return LongWeekDays[LOCAL_WEEKDAY(date)] + ', '
- + LongMonths[LOCAL_MONTH(date)] + ' '
- + TwoDigitString(LOCAL_DAY(date)) + ', '
- + LOCAL_YEAR(date);
-}
-
-
-function TimeString(date) {
- CHECK_DATE(date);
- return TwoDigitString(LOCAL_HOUR(date)) + ':'
- + TwoDigitString(LOCAL_MIN(date)) + ':'
- + TwoDigitString(LOCAL_SEC(date));
-}
-
-
-function TimeStringUTC(date) {
- CHECK_DATE(date);
- return TwoDigitString(UTC_HOUR(date)) + ':'
- + TwoDigitString(UTC_MIN(date)) + ':'
- + TwoDigitString(UTC_SEC(date));
-}
-
-
-function LocalTimezoneString(date) {
- CHECK_DATE(date);
- var timezone = LocalTimezone(UTC_DATE_VALUE(date));
-
- var timezoneOffset = -TIMEZONE_OFFSET(date);
- var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = MathFloor((sign * timezoneOffset)/60);
- var min = MathFloor((sign * timezoneOffset)%60);
- var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
- TwoDigitString(hours) + TwoDigitString(min);
- return gmt + ' (' + timezone + ')';
-}
-
-
-function DatePrintString(date) {
- CHECK_DATE(date);
- return DateString(date) + ' ' + TimeString(date);
-}
-
-// -------------------------------------------------------------------
-
-// Reused output buffer. Used when parsing date strings.
-var parse_buffer = new InternalArray(8);
-
-// ECMA 262 - 15.9.4.2
-function DateParse(string) {
- var arr = %DateParseString(string, parse_buffer);
- if (IS_NULL(arr)) return NaN;
-
- var day = MakeDay(arr[0], arr[1], arr[2]);
- var time = MakeTime(arr[3], arr[4], arr[5], arr[6]);
- var date = MakeDate(day, time);
-
- if (IS_NULL(arr[7])) {
- return TimeClip(UTC(date));
- } else {
- return TimeClip(date - arr[7] * 1000);
- }
-}
-
-
-// ECMA 262 - 15.9.4.3
-function DateUTC(year, month, date, hours, minutes, seconds, ms) {
- year = TO_NUMBER(year);
- month = TO_NUMBER(month);
- var argc = %_ArgumentsLength();
- date = argc > 2 ? TO_NUMBER(date) : 1;
- hours = argc > 3 ? TO_NUMBER(hours) : 0;
- minutes = argc > 4 ? TO_NUMBER(minutes) : 0;
- seconds = argc > 5 ? TO_NUMBER(seconds) : 0;
- ms = argc > 6 ? TO_NUMBER(ms) : 0;
- year = (!NUMBER_IS_NAN(year) &&
- 0 <= TO_INTEGER(year) &&
- TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, month, date);
- var time = MakeTime(hours, minutes, seconds, ms);
- return TimeClip(MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.4.4
-function DateNow() {
- return %DateCurrentTime();
-}
-
-
-// ECMA 262 - 15.9.5.2
-function DateToString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this)
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(this)
- return DatePrintString(this) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.3
-function DateToDateString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return DateString(this);
-}
-
-
-// ECMA 262 - 15.9.5.4
-function DateToTimeString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- var time_zone_string = LocalTimezoneString(this);
- return TimeString(this) + time_zone_string;
-}
-
-
-// ECMA 262 - 15.9.5.5
-function DateToLocaleString() {
- CHECK_DATE(this);
- return %_Call(DateToString, this);
-}
-
-
-// ECMA 262 - 15.9.5.6
-function DateToLocaleDateString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return LongDateString(this);
-}
-
-
-// ECMA 262 - 15.9.5.7
-function DateToLocaleTimeString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- return TimeString(this);
-}
-
-
-// ECMA 262 - 15.9.5.8
-function DateValueOf() {
- CHECK_DATE(this);
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.9
-function DateGetTime() {
- CHECK_DATE(this);
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.10
-function DateGetFullYear() {
- CHECK_DATE(this);
- return LOCAL_YEAR(this);
-}
-
-
-// ECMA 262 - 15.9.5.11
-function DateGetUTCFullYear() {
- CHECK_DATE(this);
- return UTC_YEAR(this);
-}
-
-
-// ECMA 262 - 15.9.5.12
-function DateGetMonth() {
- CHECK_DATE(this);
- return LOCAL_MONTH(this);
-}
-
-
-// ECMA 262 - 15.9.5.13
-function DateGetUTCMonth() {
- CHECK_DATE(this);
- return UTC_MONTH(this);
-}
-
-
-// ECMA 262 - 15.9.5.14
-function DateGetDate() {
- CHECK_DATE(this);
- return LOCAL_DAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.15
-function DateGetUTCDate() {
- CHECK_DATE(this);
- return UTC_DAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.16
-function DateGetDay() {
- CHECK_DATE(this);
- return LOCAL_WEEKDAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.17
-function DateGetUTCDay() {
- CHECK_DATE(this);
- return UTC_WEEKDAY(this);
-}
-
-
-// ECMA 262 - 15.9.5.18
-function DateGetHours() {
- CHECK_DATE(this);
- return LOCAL_HOUR(this);
-}
-
-
-// ECMA 262 - 15.9.5.19
-function DateGetUTCHours() {
- CHECK_DATE(this);
- return UTC_HOUR(this);
-}
-
-
-// ECMA 262 - 15.9.5.20
-function DateGetMinutes() {
- CHECK_DATE(this);
- return LOCAL_MIN(this);
-}
-
-
-// ECMA 262 - 15.9.5.21
-function DateGetUTCMinutes() {
- CHECK_DATE(this);
- return UTC_MIN(this);
-}
-
-
-// ECMA 262 - 15.9.5.22
-function DateGetSeconds() {
- CHECK_DATE(this);
- return LOCAL_SEC(this);
-}
-
-
-// ECMA 262 - 15.9.5.23
-function DateGetUTCSeconds() {
- CHECK_DATE(this);
- return UTC_SEC(this)
-}
-
-
-// ECMA 262 - 15.9.5.24
-function DateGetMilliseconds() {
- CHECK_DATE(this);
- return LOCAL_MS(this);
-}
-
-
-// ECMA 262 - 15.9.5.25
-function DateGetUTCMilliseconds() {
- CHECK_DATE(this);
- return UTC_MS(this);
-}
-
-
-// ECMA 262 - 15.9.5.26
-function DateGetTimezoneOffset() {
- CHECK_DATE(this);
- return TIMEZONE_OFFSET(this);
-}
-
-
-// ECMA 262 - 15.9.5.27
-function DateSetTime(ms) {
- CHECK_DATE(this);
- SET_UTC_DATE_VALUE(this, TO_NUMBER(ms));
- return UTC_DATE_VALUE(this);
-}
-
-
-// ECMA 262 - 15.9.5.28
-function DateSetMilliseconds(ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- ms = TO_NUMBER(ms);
- var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.29
-function DateSetUTCMilliseconds(ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- ms = TO_NUMBER(ms);
- var time = MakeTime(UTC_HOUR(this),
- UTC_MIN(this),
- UTC_SEC(this),
- ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.30
-function DateSetSeconds(sec, ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- sec = TO_NUMBER(sec);
- ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : TO_NUMBER(ms);
- var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.31
-function DateSetUTCSeconds(sec, ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- sec = TO_NUMBER(sec);
- ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : TO_NUMBER(ms);
- var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.33
-function DateSetMinutes(min, sec, ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- min = TO_NUMBER(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? LOCAL_SEC(this) : TO_NUMBER(sec);
- ms = argc < 3 ? LOCAL_MS(this) : TO_NUMBER(ms);
- var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCMinutes(min, sec, ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- min = TO_NUMBER(min);
- var argc = %_ArgumentsLength();
- sec = argc < 2 ? UTC_SEC(this) : TO_NUMBER(sec);
- ms = argc < 3 ? UTC_MS(this) : TO_NUMBER(ms);
- var time = MakeTime(UTC_HOUR(this), min, sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.35
-function DateSetHours(hour, min, sec, ms) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- hour = TO_NUMBER(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? LOCAL_MIN(this) : TO_NUMBER(min);
- sec = argc < 3 ? LOCAL_SEC(this) : TO_NUMBER(sec);
- ms = argc < 4 ? LOCAL_MS(this) : TO_NUMBER(ms);
- var time = MakeTime(hour, min, sec, ms);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.34
-function DateSetUTCHours(hour, min, sec, ms) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- hour = TO_NUMBER(hour);
- var argc = %_ArgumentsLength();
- min = argc < 2 ? UTC_MIN(this) : TO_NUMBER(min);
- sec = argc < 3 ? UTC_SEC(this) : TO_NUMBER(sec);
- ms = argc < 4 ? UTC_MS(this) : TO_NUMBER(ms);
- var time = MakeTime(hour, min, sec, ms);
- return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
-}
-
-
-// ECMA 262 - 15.9.5.36
-function DateSetDate(date) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- date = TO_NUMBER(date);
- var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.37
-function DateSetUTCDate(date) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- date = TO_NUMBER(date);
- var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.38
-function DateSetMonth(month, date) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- month = TO_NUMBER(month);
- date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : TO_NUMBER(date);
- var day = MakeDay(LOCAL_YEAR(this), month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.39
-function DateSetUTCMonth(month, date) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- month = TO_NUMBER(month);
- date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : TO_NUMBER(date);
- var day = MakeDay(UTC_YEAR(this), month, date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
-}
-
-
-// ECMA 262 - 15.9.5.40
-function DateSetFullYear(year, month, date) {
- CHECK_DATE(this);
- var t = LOCAL_DATE_VALUE(this);
- year = TO_NUMBER(year);
- var argc = %_ArgumentsLength();
- var time ;
- if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : TO_NUMBER(month);
- date = argc < 3 ? 1 : TO_NUMBER(date);
- time = 0;
- } else {
- month = argc < 2 ? LOCAL_MONTH(this) : TO_NUMBER(month);
- date = argc < 3 ? LOCAL_DAY(this) : TO_NUMBER(date);
- time = LOCAL_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.5.41
-function DateSetUTCFullYear(year, month, date) {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- year = TO_NUMBER(year);
- var argc = %_ArgumentsLength();
- var time ;
- if (NUMBER_IS_NAN(t)) {
- month = argc < 2 ? 0 : TO_NUMBER(month);
- date = argc < 3 ? 1 : TO_NUMBER(date);
- time = 0;
- } else {
- month = argc < 2 ? UTC_MONTH(this) : TO_NUMBER(month);
- date = argc < 3 ? UTC_DAY(this) : TO_NUMBER(date);
- time = UTC_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_UTC_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - 15.9.5.42
-function DateToUTCString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) return kInvalidDate;
- // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
- return WeekDays[UTC_WEEKDAY(this)] + ', '
- + TwoDigitString(UTC_DAY(this)) + ' '
- + Months[UTC_MONTH(this)] + ' '
- + UTC_YEAR(this) + ' '
- + TimeStringUTC(this) + ' GMT';
-}
-
-
-// ECMA 262 - B.2.4
-function DateGetYear() {
- CHECK_DATE(this);
- return LOCAL_YEAR(this) - 1900;
-}
-
-
-// ECMA 262 - B.2.5
-function DateSetYear(year) {
- CHECK_DATE(this);
- year = TO_NUMBER(year);
- if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NaN);
- year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
- ? 1900 + TO_INTEGER(year) : year;
- var t = LOCAL_DATE_VALUE(this);
- var month, date, time;
- if (NUMBER_IS_NAN(t)) {
- month = 0;
- date = 1;
- time = 0;
- } else {
- month = LOCAL_MONTH(this);
- date = LOCAL_DAY(this);
- time = LOCAL_TIME_IN_DAY(this);
- }
- var day = MakeDay(year, month, date);
- return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
-}
-
-
-// ECMA 262 - B.2.6
-//
-// Notice that this does not follow ECMA 262 completely. ECMA 262
-// says that toGMTString should be the same Function object as
-// toUTCString. JSC does not do this, so for compatibility we do not
-// do that either. Instead, we create a new function whose name
-// property will return toGMTString.
-function DateToGMTString() {
- return %_Call(DateToUTCString, this);
-}
-
-
-function PadInt(n, digits) {
- if (digits == 1) return n;
- return n < %_MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
-}
-
-
-// ECMA 262 - 20.3.4.36
-function DateToISOString() {
- CHECK_DATE(this);
- var t = UTC_DATE_VALUE(this);
- if (NUMBER_IS_NAN(t)) throw MakeRangeError(kInvalidTimeValue);
- var year = UTC_YEAR(this);
- var year_string;
- if (year >= 0 && year <= 9999) {
- year_string = PadInt(year, 4);
- } else {
- if (year < 0) {
- year_string = "-" + PadInt(-year, 6);
- } else {
- year_string = "+" + PadInt(year, 6);
- }
- }
- return year_string +
- '-' + PadInt(UTC_MONTH(this) + 1, 2) +
- '-' + PadInt(UTC_DAY(this), 2) +
- 'T' + PadInt(UTC_HOUR(this), 2) +
- ':' + PadInt(UTC_MIN(this), 2) +
- ':' + PadInt(UTC_SEC(this), 2) +
- '.' + PadInt(UTC_MS(this), 3) +
- 'Z';
-}
-
-
-// 20.3.4.37 Date.prototype.toJSON ( key )
-function DateToJSON(key) {
- var o = TO_OBJECT(this);
- var tv = TO_PRIMITIVE_NUMBER(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
- }
- return o.toISOString();
-}
-
-
-var date_cache_version_holder;
-var date_cache_version = NaN;
-
-
-function CheckDateCacheCurrent() {
- if (!date_cache_version_holder) {
- date_cache_version_holder = %DateCacheVersion();
- if (!date_cache_version_holder) return;
- }
- if (date_cache_version_holder[0] == date_cache_version) {
- return;
- }
- date_cache_version = date_cache_version_holder[0];
-
- // Reset the timezone cache:
- timezone_cache_time = NaN;
- timezone_cache_timezone = UNDEFINED;
-
- // Reset the date cache:
- Date_cache.time = NaN;
- Date_cache.string = null;
-}
-
-
-function CreateDate(time) {
- var date = new GlobalDate();
- date.setTime(time);
- return date;
-}
-
-// -------------------------------------------------------------------
-
-%SetCode(GlobalDate, DateConstructor);
-%FunctionSetPrototype(GlobalDate, new GlobalObject());
-
-// Set up non-enumerable properties of the Date object itself.
-utils.InstallFunctions(GlobalDate, DONT_ENUM, [
- "UTC", DateUTC,
- "parse", DateParse,
- "now", DateNow
-]);
-
-// Set up non-enumerable constructor property of the Date prototype object.
-%AddNamedProperty(GlobalDate.prototype, "constructor", GlobalDate, DONT_ENUM);
-
-// Set up non-enumerable functions of the Date prototype object and
-// set their names.
-utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
- "toString", DateToString,
- "toDateString", DateToDateString,
- "toTimeString", DateToTimeString,
- "toLocaleString", DateToLocaleString,
- "toLocaleDateString", DateToLocaleDateString,
- "toLocaleTimeString", DateToLocaleTimeString,
- "valueOf", DateValueOf,
- "getTime", DateGetTime,
- "getFullYear", DateGetFullYear,
- "getUTCFullYear", DateGetUTCFullYear,
- "getMonth", DateGetMonth,
- "getUTCMonth", DateGetUTCMonth,
- "getDate", DateGetDate,
- "getUTCDate", DateGetUTCDate,
- "getDay", DateGetDay,
- "getUTCDay", DateGetUTCDay,
- "getHours", DateGetHours,
- "getUTCHours", DateGetUTCHours,
- "getMinutes", DateGetMinutes,
- "getUTCMinutes", DateGetUTCMinutes,
- "getSeconds", DateGetSeconds,
- "getUTCSeconds", DateGetUTCSeconds,
- "getMilliseconds", DateGetMilliseconds,
- "getUTCMilliseconds", DateGetUTCMilliseconds,
- "getTimezoneOffset", DateGetTimezoneOffset,
- "setTime", DateSetTime,
- "setMilliseconds", DateSetMilliseconds,
- "setUTCMilliseconds", DateSetUTCMilliseconds,
- "setSeconds", DateSetSeconds,
- "setUTCSeconds", DateSetUTCSeconds,
- "setMinutes", DateSetMinutes,
- "setUTCMinutes", DateSetUTCMinutes,
- "setHours", DateSetHours,
- "setUTCHours", DateSetUTCHours,
- "setDate", DateSetDate,
- "setUTCDate", DateSetUTCDate,
- "setMonth", DateSetMonth,
- "setUTCMonth", DateSetUTCMonth,
- "setFullYear", DateSetFullYear,
- "setUTCFullYear", DateSetUTCFullYear,
- "toGMTString", DateToGMTString,
- "toUTCString", DateToUTCString,
- "getYear", DateGetYear,
- "setYear", DateSetYear,
- "toISOString", DateToISOString,
- "toJSON", DateToJSON
-]);
-
-%InstallToContext(["create_date_fun", CreateDate]);
-
-})
diff --git a/deps/v8/src/js/generator.js b/deps/v8/src/js/generator.js
index 2f61b3f22c..7f43656ebc 100644
--- a/deps/v8/src/js/generator.js
+++ b/deps/v8/src/js/generator.js
@@ -15,12 +15,10 @@ var GeneratorFunctionPrototype = utils.ImportNow("GeneratorFunctionPrototype");
var GeneratorFunction = utils.ImportNow("GeneratorFunction");
var GlobalFunction = global.Function;
var MakeTypeError;
-var NewFunctionString;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
- NewFunctionString = from.NewFunctionString;
});
// ----------------------------------------------------------------------------
@@ -78,19 +76,6 @@ function GeneratorObjectThrow(exn) {
}
}
-
-function GeneratorFunctionConstructor(arg1) { // length == 1
- var source = NewFunctionString(arguments, 'function*');
- var global_proxy = %GlobalProxy(GeneratorFunctionConstructor);
- // Compile the string in the constructor and not a helper so that errors
- // appear to come from here.
- var func = %_Call(%CompileString(source, true), global_proxy);
- // Set name-should-print-as-anonymous flag on the ShareFunctionInfo and
- // ensure that |func| uses correct initial map from |new.target| if
- // it's available.
- return %CompleteFunctionConstruction(func, GeneratorFunction, new.target);
-}
-
// ----------------------------------------------------------------------------
// Both Runtime_GeneratorNext and Runtime_GeneratorThrow are supported by
@@ -115,6 +100,5 @@ utils.InstallFunctions(GeneratorObjectPrototype,
%AddNamedProperty(GeneratorFunctionPrototype, "constructor",
GeneratorFunction, DONT_ENUM | READ_ONLY);
%InternalSetPrototype(GeneratorFunction, GlobalFunction);
-%SetCode(GeneratorFunction, GeneratorFunctionConstructor);
})
diff --git a/deps/v8/src/js/harmony-array-includes.js b/deps/v8/src/js/harmony-array-includes.js
deleted file mode 100644
index bb1f01cd57..0000000000
--- a/deps/v8/src/js/harmony-array-includes.js
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalArray = global.Array;
-var MakeTypeError;
-var SameValueZero;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
- SameValueZero = from.SameValueZero;
-});
-
-// -------------------------------------------------------------------
-
-// Proposed for ES7
-// https://github.com/tc39/Array.prototype.includes
-// 46c7532ec8499dea3e51aeb940d09e07547ed3f5
-function InnerArrayIncludes(searchElement, fromIndex, array, length) {
- if (length === 0) {
- return false;
- }
-
- var n = TO_INTEGER(fromIndex);
-
- var k;
- if (n >= 0) {
- k = n;
- } else {
- k = length + n;
- if (k < 0) {
- k = 0;
- }
- }
-
- while (k < length) {
- var elementK = array[k];
- if (SameValueZero(searchElement, elementK)) {
- return true;
- }
-
- ++k;
- }
-
- return false;
-}
-
-
-function ArrayIncludes(searchElement, fromIndex) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayIncludes(searchElement, fromIndex, array, length);
-}
-
-
-function TypedArrayIncludes(searchElement, fromIndex) {
- if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerArrayIncludes(searchElement, fromIndex, this, length);
-}
-
-// -------------------------------------------------------------------
-
-%FunctionSetLength(ArrayIncludes, 1);
-%FunctionSetLength(TypedArrayIncludes, 1);
-
-// Set up the non-enumerable function on the Array prototype object.
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
- "includes", ArrayIncludes
-]);
-
-// Set up the non-enumerable function on the typed array prototypes.
-// This duplicates some of the machinery in harmony-typedarray.js in order to
-// keep includes behind the separate --harmony-array-includes flag.
-// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
-
-macro TYPED_ARRAYS(FUNCTION)
-// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-FUNCTION(Uint8Array)
-FUNCTION(Int8Array)
-FUNCTION(Uint16Array)
-FUNCTION(Int16Array)
-FUNCTION(Uint32Array)
-FUNCTION(Int32Array)
-FUNCTION(Float32Array)
-FUNCTION(Float64Array)
-FUNCTION(Uint8ClampedArray)
-endmacro
-
-macro DECLARE_GLOBALS(NAME)
-var GlobalNAME = global.NAME;
-endmacro
-
-macro EXTEND_TYPED_ARRAY(NAME)
-// Set up non-enumerable functions on the prototype object.
-utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- "includes", TypedArrayIncludes
-]);
-endmacro
-
-TYPED_ARRAYS(DECLARE_GLOBALS)
-TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
-
-})
diff --git a/deps/v8/src/js/harmony-atomics.js b/deps/v8/src/js/harmony-atomics.js
index 71125a9680..b861a2a471 100644
--- a/deps/v8/src/js/harmony-atomics.js
+++ b/deps/v8/src/js/harmony-atomics.js
@@ -32,7 +32,7 @@ function CheckSharedIntegerTypedArray(ia) {
function CheckSharedInteger32TypedArray(ia) {
CheckSharedIntegerTypedArray(ia);
- if (%_ClassOf(ia) !== 'Int32Array') {
+ if (!%IsSharedInteger32TypedArray(ia)) {
throw MakeTypeError(kNotInt32SharedTypedArray, ia);
}
}
diff --git a/deps/v8/src/js/harmony-reflect.js b/deps/v8/src/js/harmony-reflect.js
index bbca0fef61..dcadad522f 100644
--- a/deps/v8/src/js/harmony-reflect.js
+++ b/deps/v8/src/js/harmony-reflect.js
@@ -23,7 +23,7 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
function ReflectEnumerate(obj) {
- if (!IS_SPEC_OBJECT(obj))
+ if (!IS_RECEIVER(obj))
throw MakeTypeError(kCalledOnNonObject, "Reflect.enumerate")
return (function* () { for (var x in obj) yield x })();
}
diff --git a/deps/v8/src/js/harmony-regexp.js b/deps/v8/src/js/harmony-regexp.js
index eadf1d237c..f76ef86ec7 100644
--- a/deps/v8/src/js/harmony-regexp.js
+++ b/deps/v8/src/js/harmony-regexp.js
@@ -12,6 +12,7 @@
// Imports
var GlobalRegExp = global.RegExp;
+var GlobalRegExpPrototype = GlobalRegExp.prototype;
var MakeTypeError;
var regExpFlagsSymbol = utils.ImportNow("regexp_flags_symbol");
@@ -24,7 +25,7 @@ utils.Import(function(from) {
// ES6 draft 12-06-13, section 21.2.5.3
// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
function RegExpGetFlags() {
- if (!IS_SPEC_OBJECT(this)) {
+ if (!IS_RECEIVER(this)) {
throw MakeTypeError(
kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
}
@@ -37,10 +38,15 @@ function RegExpGetFlags() {
return result;
}
-
// ES6 21.2.5.12.
function RegExpGetSticky() {
if (!IS_REGEXP(this)) {
+ // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
+ // TODO(littledan): Remove this workaround or standardize it
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeStickyGetter);
+ return UNDEFINED;
+ }
throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
}
return !!REGEXP_STICKY(this);
@@ -48,19 +54,7 @@ function RegExpGetSticky() {
%FunctionSetName(RegExpGetSticky, "RegExp.prototype.sticky");
%SetNativeFlag(RegExpGetSticky);
-
-// ES6 21.2.5.15.
-function RegExpGetUnicode() {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
- }
- return !!REGEXP_UNICODE(this);
-}
-%FunctionSetName(RegExpGetUnicode, "RegExp.prototype.unicode");
-%SetNativeFlag(RegExpGetUnicode);
-
utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
-utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
})
diff --git a/deps/v8/src/js/harmony-sharedarraybuffer.js b/deps/v8/src/js/harmony-sharedarraybuffer.js
index b4c34151a3..10ceb70d27 100644
--- a/deps/v8/src/js/harmony-sharedarraybuffer.js
+++ b/deps/v8/src/js/harmony-sharedarraybuffer.js
@@ -9,27 +9,14 @@
%CheckIsBootstrapping();
var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
-var GlobalObject = global.Object;
var MakeTypeError;
-var ToPositiveInteger;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
- ToPositiveInteger = from.ToPositiveInteger;
})
// -------------------------------------------------------------------
-function SharedArrayBufferConstructor(length) { // length = 1
- if (%_IsConstructCall()) {
- var byteLength = ToPositiveInteger(length, kInvalidArrayBufferLength);
- %ArrayBufferInitialize(this, byteLength, kShared);
- } else {
- throw MakeTypeError(kConstructorNotFunction, "SharedArrayBuffer");
- }
-}
-
function SharedArrayBufferGetByteLen() {
if (!IS_SHAREDARRAYBUFFER(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
@@ -38,27 +25,7 @@ function SharedArrayBufferGetByteLen() {
return %_ArrayBufferGetByteLength(this);
}
-function SharedArrayBufferIsViewJS(obj) {
- return %ArrayBufferIsView(obj);
-}
-
-
-// Set up the SharedArrayBuffer constructor function.
-%SetCode(GlobalSharedArrayBuffer, SharedArrayBufferConstructor);
-%FunctionSetPrototype(GlobalSharedArrayBuffer, new GlobalObject());
-
-// Set up the constructor property on the SharedArrayBuffer prototype object.
-%AddNamedProperty(GlobalSharedArrayBuffer.prototype, "constructor",
- GlobalSharedArrayBuffer, DONT_ENUM);
-
-%AddNamedProperty(GlobalSharedArrayBuffer.prototype,
- toStringTagSymbol, "SharedArrayBuffer", DONT_ENUM | READ_ONLY);
-
utils.InstallGetter(GlobalSharedArrayBuffer.prototype, "byteLength",
SharedArrayBufferGetByteLen);
-utils.InstallFunctions(GlobalSharedArrayBuffer, DONT_ENUM, [
- "isView", SharedArrayBufferIsViewJS
-]);
-
})
diff --git a/deps/v8/src/js/harmony-simd.js b/deps/v8/src/js/harmony-simd.js
index 6847f2279a..4df2f437ec 100644
--- a/deps/v8/src/js/harmony-simd.js
+++ b/deps/v8/src/js/harmony-simd.js
@@ -62,11 +62,11 @@ function NAMECheckJS(a) {
}
function NAMEToString() {
- if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ var value = %_ValueOf(this);
+ if (typeof(value) !== 'TYPE') {
throw MakeTypeError(kIncompatibleMethodReceiver,
"NAME.prototype.toString", this);
}
- var value = %_ValueOf(this);
var str = "SIMD.NAME(";
str += %NAMEExtractLane(value, 0);
for (var i = 1; i < LANES; i++) {
@@ -76,11 +76,11 @@ function NAMEToString() {
}
function NAMEToLocaleString() {
- if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ var value = %_ValueOf(this);
+ if (typeof(value) !== 'TYPE') {
throw MakeTypeError(kIncompatibleMethodReceiver,
"NAME.prototype.toLocaleString", this);
}
- var value = %_ValueOf(this);
var str = "SIMD.NAME(";
str += %NAMEExtractLane(value, 0).toLocaleString();
for (var i = 1; i < LANES; i++) {
@@ -90,11 +90,12 @@ function NAMEToLocaleString() {
}
function NAMEValueOf() {
- if (typeof(this) !== 'TYPE' && %_ClassOf(this) !== 'NAME') {
+ var value = %_ValueOf(this);
+ if (typeof(value) !== 'TYPE') {
throw MakeTypeError(kIncompatibleMethodReceiver,
"NAME.prototype.valueOf", this);
}
- return %_ValueOf(this);
+ return value;
}
function NAMEExtractLaneJS(instance, lane) {
@@ -432,34 +433,44 @@ SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
//-------------------------------------------------------------------
function Float32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Float32x4");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Float32x4");
+ }
return %CreateFloat32x4(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3));
}
function Int32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int32x4");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Int32x4");
+ }
return %CreateInt32x4(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3));
}
function Uint32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint32x4");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Uint32x4");
+ }
return %CreateUint32x4(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3));
}
function Bool32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool32x4");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Bool32x4");
+ }
return %CreateBool32x4(c0, c1, c2, c3);
}
function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int16x8");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Int16x8");
+ }
return %CreateInt16x8(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
TO_NUMBER(c4), TO_NUMBER(c5),
@@ -468,7 +479,9 @@ function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint16x8");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Uint16x8");
+ }
return %CreateUint16x8(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
TO_NUMBER(c4), TO_NUMBER(c5),
@@ -477,14 +490,18 @@ function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool16x8");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Bool16x8");
+ }
return %CreateBool16x8(c0, c1, c2, c3, c4, c5, c6, c7);
}
function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int8x16");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Int8x16");
+ }
return %CreateInt8x16(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
TO_NUMBER(c4), TO_NUMBER(c5),
@@ -498,7 +515,9 @@ function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint8x16");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Uint8x16");
+ }
return %CreateUint8x16(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
TO_NUMBER(c4), TO_NUMBER(c5),
@@ -512,7 +531,9 @@ function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool8x16");
+ if (!IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kNotConstructor, "Bool8x16");
+ }
return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
c13, c14, c15);
}
diff --git a/deps/v8/src/js/harmony-species.js b/deps/v8/src/js/harmony-species.js
new file mode 100644
index 0000000000..426ac466e7
--- /dev/null
+++ b/deps/v8/src/js/harmony-species.js
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils, extrasUtils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalArray = global.Array;
+// It is important that this file is run after src/js/typedarray.js,
+// otherwise GlobalTypedArray would be Object, and we would break
+// old versions of Zepto.
+var GlobalTypedArray = global.Uint8Array.__proto__;
+var GlobalMap = global.Map;
+var GlobalSet = global.Set;
+var GlobalArrayBuffer = global.ArrayBuffer;
+var GlobalPromise = global.Promise;
+var GlobalRegExp = global.RegExp;
+var speciesSymbol = utils.ImportNow("species_symbol");
+
+function ArraySpecies() {
+ return this;
+}
+
+function TypedArraySpecies() {
+ return this;
+}
+
+function MapSpecies() {
+ return this;
+}
+
+function SetSpecies() {
+ return this;
+}
+
+function ArrayBufferSpecies() {
+ return this;
+}
+
+function PromiseSpecies() {
+ return this;
+}
+
+function RegExpSpecies() {
+ return this;
+}
+
+utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies, DONT_ENUM);
+utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies, DONT_ENUM);
+utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies, DONT_ENUM);
+utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies, DONT_ENUM);
+utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies,
+ DONT_ENUM);
+utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies, DONT_ENUM);
+utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies, DONT_ENUM);
+
+});
diff --git a/deps/v8/src/js/harmony-unicode-regexps.js b/deps/v8/src/js/harmony-unicode-regexps.js
new file mode 100644
index 0000000000..aa8fc76bd5
--- /dev/null
+++ b/deps/v8/src/js/harmony-unicode-regexps.js
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+'use strict';
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var GlobalRegExp = global.RegExp;
+var GlobalRegExpPrototype = GlobalRegExp.prototype;
+var MakeTypeError;
+
+utils.Import(function(from) {
+ MakeTypeError = from.MakeTypeError;
+});
+
+// -------------------------------------------------------------------
+
+// ES6 21.2.5.15.
+function RegExpGetUnicode() {
+ if (!IS_REGEXP(this)) {
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
+ }
+ throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
+ }
+ return !!REGEXP_UNICODE(this);
+}
+%FunctionSetName(RegExpGetUnicode, "RegExp.prototype.unicode");
+%SetNativeFlag(RegExpGetUnicode);
+
+utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
+
+})
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index f2b9dd4445..7e00fcdac4 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -33,7 +33,9 @@ var MakeTypeError;
var MathFloor;
var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
+var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var RegExpTest;
+var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var StringIndexOf;
var StringLastIndexOf;
var StringMatch;
@@ -176,13 +178,26 @@ var TIMEZONE_NAME_CHECK_RE = UNDEFINED;
function GetTimezoneNameCheckRE() {
if (IS_UNDEFINED(TIMEZONE_NAME_CHECK_RE)) {
- TIMEZONE_NAME_CHECK_RE =
- new GlobalRegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+ TIMEZONE_NAME_CHECK_RE = new GlobalRegExp(
+ '^([A-Za-z]+)/([A-Za-z_-]+)((?:\/[A-Za-z_-]+)+)*$');
}
return TIMEZONE_NAME_CHECK_RE;
}
/**
+ * Matches valid location parts of IANA time zone names.
+ */
+var TIMEZONE_NAME_LOCATION_PART_RE = UNDEFINED;
+
+function GetTimezoneNameLocationPartRE() {
+ if (IS_UNDEFINED(TIMEZONE_NAME_LOCATION_PART_RE)) {
+ TIMEZONE_NAME_LOCATION_PART_RE =
+ new GlobalRegExp('^([A-Za-z]+)((?:[_-][A-Za-z]+)+)*$');
+ }
+ return TIMEZONE_NAME_LOCATION_PART_RE;
+}
+
+/**
* Adds bound method to the prototype of the given object.
*/
function addBoundMethod(obj, methodName, implementation, length) {
@@ -197,21 +212,21 @@ function addBoundMethod(obj, methodName, implementation, length) {
var boundMethod;
if (IS_UNDEFINED(length) || length === 2) {
boundMethod = function(x, y) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return implementation(that, x, y);
}
} else if (length === 1) {
boundMethod = function(x) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
return implementation(that, x);
}
} else {
boundMethod = function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
// DateTimeFormat.format needs to be 0 arg method, but can stil
@@ -679,6 +694,34 @@ function toTitleCaseWord(word) {
}
/**
+ * Returns titlecased location, bueNos_airES -> Buenos_Aires
+ * or ho_cHi_minH -> Ho_Chi_Minh. It is locale-agnostic and only
+ * deals with ASCII only characters.
+ * 'of', 'au' and 'es' are special-cased and lowercased.
+ */
+function toTitleCaseTimezoneLocation(location) {
+ var match = %_Call(StringMatch, location, GetTimezoneNameLocationPartRE());
+ if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, location);
+
+ var result = toTitleCaseWord(match[1]);
+ if (!IS_UNDEFINED(match[2]) && 2 < match.length) {
+ // The first character is a separator, '_' or '-'.
+ // None of IANA zone names has both '_' and '-'.
+ var separator = %_Call(StringSubstring, match[2], 0, 1);
+ var parts = %_Call(StringSplit, match[2], separator);
+ for (var i = 1; i < parts.length; i++) {
+ var part = parts[i]
+ var lowercasedPart = %StringToLowerCase(part);
+ result = result + separator +
+ ((lowercasedPart !== 'es' &&
+ lowercasedPart !== 'of' && lowercasedPart !== 'au') ?
+ toTitleCaseWord(part) : lowercasedPart);
+ }
+ }
+ return result;
+}
+
+/**
* Canonicalizes the language tag, or throws in case the tag is invalid.
*/
function canonicalizeLanguageTag(localeID) {
@@ -837,6 +880,16 @@ function BuildLanguageTagREs() {
LANGUAGE_TAG_RE = new GlobalRegExp(languageTag, 'i');
}
+var resolvedAccessor = {
+ get() {
+ %IncrementUseCounter(kIntlResolved);
+ return this[resolvedSymbol];
+ },
+ set(value) {
+ this[resolvedSymbol] = value;
+ }
+};
+
/**
* Initializes the given object so it's a valid Collator instance.
* Useful for subclassing.
@@ -935,7 +988,8 @@ function initializeCollator(collator, locales, options) {
// Writable, configurable and enumerable are set to false by default.
%MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
- ObjectDefineProperty(collator, 'resolved', {value: resolved});
+ collator[resolvedSymbol] = resolved;
+ ObjectDefineProperty(collator, 'resolved', resolvedAccessor);
return collator;
}
@@ -966,7 +1020,7 @@ function initializeCollator(collator, locales, options) {
* Collator resolvedOptions method.
*/
%AddNamedProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -975,17 +1029,17 @@ function initializeCollator(collator, locales, options) {
}
var coll = this;
- var locale = getOptimalLanguageTag(coll.resolved.requestedLocale,
- coll.resolved.locale);
+ var locale = getOptimalLanguageTag(coll[resolvedSymbol].requestedLocale,
+ coll[resolvedSymbol].locale);
return {
locale: locale,
- usage: coll.resolved.usage,
- sensitivity: coll.resolved.sensitivity,
- ignorePunctuation: coll.resolved.ignorePunctuation,
- numeric: coll.resolved.numeric,
- caseFirst: coll.resolved.caseFirst,
- collation: coll.resolved.collation
+ usage: coll[resolvedSymbol].usage,
+ sensitivity: coll[resolvedSymbol].sensitivity,
+ ignorePunctuation: coll[resolvedSymbol].ignorePunctuation,
+ numeric: coll[resolvedSymbol].numeric,
+ caseFirst: coll[resolvedSymbol].caseFirst,
+ collation: coll[resolvedSymbol].collation
};
},
DONT_ENUM
@@ -1002,7 +1056,7 @@ function initializeCollator(collator, locales, options) {
* Options are optional parameter.
*/
%AddNamedProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1062,6 +1116,15 @@ function getNumberOption(options, property, min, max, fallback) {
return fallback;
}
+var patternAccessor = {
+ get() {
+ %IncrementUseCounter(kIntlPattern);
+ return this[patternSymbol];
+ },
+ set(value) {
+ this[patternSymbol] = value;
+ }
+};
/**
* Initializes the given object so it's a valid NumberFormat instance.
@@ -1157,6 +1220,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
minimumFractionDigits: {writable: true},
minimumIntegerDigits: {writable: true},
numberingSystem: {writable: true},
+ pattern: patternAccessor,
requestedLocale: {value: requestedLocale, writable: true},
style: {value: internalOptions.style, writable: true},
useGrouping: {writable: true}
@@ -1177,7 +1241,8 @@ function initializeNumberFormat(numberFormat, locales, options) {
}
%MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
- ObjectDefineProperty(numberFormat, 'resolved', {value: resolved});
+ numberFormat[resolvedSymbol] = resolved;
+ ObjectDefineProperty(numberFormat, 'resolved', resolvedAccessor);
return numberFormat;
}
@@ -1208,7 +1273,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
* NumberFormat resolvedOptions method.
*/
%AddNamedProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1217,33 +1282,33 @@ function initializeNumberFormat(numberFormat, locales, options) {
}
var format = this;
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
+ var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
+ format[resolvedSymbol].locale);
var result = {
locale: locale,
- numberingSystem: format.resolved.numberingSystem,
- style: format.resolved.style,
- useGrouping: format.resolved.useGrouping,
- minimumIntegerDigits: format.resolved.minimumIntegerDigits,
- minimumFractionDigits: format.resolved.minimumFractionDigits,
- maximumFractionDigits: format.resolved.maximumFractionDigits,
+ numberingSystem: format[resolvedSymbol].numberingSystem,
+ style: format[resolvedSymbol].style,
+ useGrouping: format[resolvedSymbol].useGrouping,
+ minimumIntegerDigits: format[resolvedSymbol].minimumIntegerDigits,
+ minimumFractionDigits: format[resolvedSymbol].minimumFractionDigits,
+ maximumFractionDigits: format[resolvedSymbol].maximumFractionDigits,
};
if (result.style === 'currency') {
- defineWECProperty(result, 'currency', format.resolved.currency);
+ defineWECProperty(result, 'currency', format[resolvedSymbol].currency);
defineWECProperty(result, 'currencyDisplay',
- format.resolved.currencyDisplay);
+ format[resolvedSymbol].currencyDisplay);
}
- if (%HasOwnProperty(format.resolved, 'minimumSignificantDigits')) {
+ if (%HasOwnProperty(format[resolvedSymbol], 'minimumSignificantDigits')) {
defineWECProperty(result, 'minimumSignificantDigits',
- format.resolved.minimumSignificantDigits);
+ format[resolvedSymbol].minimumSignificantDigits);
}
- if (%HasOwnProperty(format.resolved, 'maximumSignificantDigits')) {
+ if (%HasOwnProperty(format[resolvedSymbol], 'maximumSignificantDigits')) {
defineWECProperty(result, 'maximumSignificantDigits',
- format.resolved.maximumSignificantDigits);
+ format[resolvedSymbol].maximumSignificantDigits);
}
return result;
@@ -1263,7 +1328,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
* Options are optional parameter.
*/
%AddNamedProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1561,7 +1626,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
minute: {writable: true},
month: {writable: true},
numberingSystem: {writable: true},
- pattern: {writable: true},
+ [patternSymbol]: {writable: true},
+ pattern: patternAccessor,
requestedLocale: {value: requestedLocale, writable: true},
second: {writable: true},
timeZone: {writable: true},
@@ -1574,12 +1640,13 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
var formatter = %CreateDateTimeFormat(
requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
- if (!IS_UNDEFINED(tz) && tz !== resolved.timeZone) {
+ if (resolved.timeZone === "Etc/Unknown") {
throw MakeRangeError(kUnsupportedTimeZone, tz);
}
%MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
- ObjectDefineProperty(dateFormat, 'resolved', {value: resolved});
+ dateFormat[resolvedSymbol] = resolved;
+ ObjectDefineProperty(dateFormat, 'resolved', resolvedAccessor);
return dateFormat;
}
@@ -1610,7 +1677,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* DateTimeFormat resolvedOptions method.
*/
%AddNamedProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1638,22 +1705,22 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
};
var format = this;
- var fromPattern = fromLDMLString(format.resolved.pattern);
- var userCalendar = ICU_CALENDAR_MAP[format.resolved.calendar];
+ var fromPattern = fromLDMLString(format[resolvedSymbol][patternSymbol]);
+ var userCalendar = ICU_CALENDAR_MAP[format[resolvedSymbol].calendar];
if (IS_UNDEFINED(userCalendar)) {
// Use ICU name if we don't have a match. It shouldn't happen, but
// it would be too strict to throw for this.
- userCalendar = format.resolved.calendar;
+ userCalendar = format[resolvedSymbol].calendar;
}
- var locale = getOptimalLanguageTag(format.resolved.requestedLocale,
- format.resolved.locale);
+ var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
+ format[resolvedSymbol].locale);
var result = {
locale: locale,
- numberingSystem: format.resolved.numberingSystem,
+ numberingSystem: format[resolvedSymbol].numberingSystem,
calendar: userCalendar,
- timeZone: format.resolved.timeZone
+ timeZone: format[resolvedSymbol].timeZone
};
addWECPropertyIfDefined(result, 'timeZoneName', fromPattern.timeZoneName);
@@ -1684,7 +1751,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* Options are optional parameter.
*/
%AddNamedProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1735,8 +1802,8 @@ addBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
/**
- * Returns canonical Area/Location name, or throws an exception if the zone
- * name is invalid IANA name.
+ * Returns canonical Area/Location(/Location) name, or throws an exception
+ * if the zone name is invalid IANA name.
*/
function canonicalizeTimeZoneID(tzID) {
// Skip undefined zones.
@@ -1751,16 +1818,22 @@ function canonicalizeTimeZoneID(tzID) {
return 'UTC';
}
- // We expect only _ and / beside ASCII letters.
- // All inputs should conform to Area/Location from now on.
+ // TODO(jshin): Add support for Etc/GMT[+-]([1-9]|1[0-2])
+
+ // We expect only _, '-' and / beside ASCII letters.
+ // All inputs should conform to Area/Location(/Location)* from now on.
var match = %_Call(StringMatch, tzID, GetTimezoneNameCheckRE());
- if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, tzID);
+ if (IS_NULL(match)) throw MakeRangeError(kExpectedTimezoneID, tzID);
- var result = toTitleCaseWord(match[1]) + '/' + toTitleCaseWord(match[2]);
- var i = 3;
- while (!IS_UNDEFINED(match[i]) && i < match.length) {
- result = result + '_' + toTitleCaseWord(match[i]);
- i++;
+ var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
+ toTitleCaseTimezoneLocation(match[2]);
+
+ if (!IS_UNDEFINED(match[3]) && 3 < match.length) {
+ var locations = %_Call(StringSplit, match[3], '/');
+ // The 1st element is empty. Starts with i=1.
+ for (var i = 1; i < locations.length; i++) {
+ result = result + '/' + toTitleCaseTimezoneLocation(locations[i]);
+ }
}
return result;
@@ -1799,7 +1872,8 @@ function initializeBreakIterator(iterator, locales, options) {
%MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
internalIterator);
- ObjectDefineProperty(iterator, 'resolved', {value: resolved});
+ iterator[resolvedSymbol] = resolved;
+ ObjectDefineProperty(iterator, 'resolved', resolvedAccessor);
return iterator;
}
@@ -1831,7 +1905,7 @@ function initializeBreakIterator(iterator, locales, options) {
*/
%AddNamedProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions',
function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1840,12 +1914,13 @@ function initializeBreakIterator(iterator, locales, options) {
}
var segmenter = this;
- var locale = getOptimalLanguageTag(segmenter.resolved.requestedLocale,
- segmenter.resolved.locale);
+ var locale =
+ getOptimalLanguageTag(segmenter[resolvedSymbol].requestedLocale,
+ segmenter[resolvedSymbol].locale);
return {
locale: locale,
- type: segmenter.resolved.type
+ type: segmenter[resolvedSymbol].type
};
},
DONT_ENUM
@@ -1864,7 +1939,7 @@ function initializeBreakIterator(iterator, locales, options) {
*/
%AddNamedProperty(Intl.v8BreakIterator, 'supportedLocalesOf',
function(locales) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -1978,7 +2053,7 @@ function OverrideFunction(object, name, f) {
* Overrides the built-in method.
*/
OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2003,7 +2078,7 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
*/
OverrideFunction(GlobalString.prototype, 'normalize', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2031,7 +2106,7 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
* If locale or options are omitted, defaults are used.
*/
OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2072,7 +2147,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* present in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2090,7 +2165,7 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
* in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
@@ -2108,7 +2183,7 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
* in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
diff --git a/deps/v8/src/js/json.js b/deps/v8/src/js/json.js
index 38c46af6d6..b8836eaddd 100644
--- a/deps/v8/src/js/json.js
+++ b/deps/v8/src/js/json.js
@@ -11,7 +11,9 @@
// -------------------------------------------------------------------
// Imports
+var GlobalDate = global.Date;
var GlobalJSON = global.JSON;
+var GlobalSet = global.Set;
var InternalArray = utils.InternalArray;
var MakeTypeError;
var MaxSimple;
@@ -28,24 +30,33 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
-function Revive(holder, name, reviver) {
+function CreateDataProperty(o, p, v) {
+ var desc = {value: v, enumerable: true, writable: true, configurable: true};
+ return %reflect_define_property(o, p, desc);
+}
+
+
+function InternalizeJSONProperty(holder, name, reviver) {
var val = holder[name];
- if (IS_OBJECT(val)) {
- if (IS_ARRAY(val)) {
- var length = val.length;
+ if (IS_RECEIVER(val)) {
+ if (%is_arraylike(val)) {
+ var length = TO_LENGTH(val.length);
for (var i = 0; i < length; i++) {
- var newElement = Revive(val, %_NumberToString(i), reviver);
- val[i] = newElement;
+ var newElement =
+ InternalizeJSONProperty(val, %_NumberToString(i), reviver);
+ if (IS_UNDEFINED(newElement)) {
+ %reflect_delete_property(val, i);
+ } else {
+ CreateDataProperty(val, i, newElement);
+ }
}
} else {
- for (var p in val) {
- if (HAS_OWN_PROPERTY(val, p)) {
- var newElement = Revive(val, p, reviver);
- if (IS_UNDEFINED(newElement)) {
- delete val[p];
- } else {
- val[p] = newElement;
- }
+ for (var p of %object_keys(val)) {
+ var newElement = InternalizeJSONProperty(val, p, reviver);
+ if (IS_UNDEFINED(newElement)) {
+ %reflect_delete_property(val, p);
+ } else {
+ CreateDataProperty(val, p, newElement);
}
}
}
@@ -57,7 +68,7 @@ function Revive(holder, name, reviver) {
function JSONParse(text, reviver) {
var unfiltered = %ParseJson(text);
if (IS_CALLABLE(reviver)) {
- return Revive({'': unfiltered}, '', reviver);
+ return InternalizeJSONProperty({'': unfiltered}, '', reviver);
} else {
return unfiltered;
}
@@ -69,7 +80,7 @@ function SerializeArray(value, replacer, stack, indent, gap) {
var stepback = indent;
indent += gap;
var partial = new InternalArray();
- var len = value.length;
+ var len = TO_LENGTH(value.length);
for (var i = 0; i < len; i++) {
var strP = JSONSerialize(%_NumberToString(i), value, replacer, stack,
indent, gap);
@@ -101,27 +112,23 @@ function SerializeObject(value, replacer, stack, indent, gap) {
if (IS_ARRAY(replacer)) {
var length = replacer.length;
for (var i = 0; i < length; i++) {
- if (HAS_OWN_PROPERTY(replacer, i)) {
- var p = replacer[i];
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
+ var p = replacer[i];
+ var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+ if (!IS_UNDEFINED(strP)) {
+ var member = %QuoteJSONString(p) + ":";
+ if (gap != "") member += " ";
+ member += strP;
+ partial.push(member);
}
}
} else {
- for (var p in value) {
- if (HAS_OWN_PROPERTY(value, p)) {
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
+ for (var p of %object_keys(value)) {
+ var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
+ if (!IS_UNDEFINED(strP)) {
+ var member = %QuoteJSONString(p) + ":";
+ if (gap != "") member += " ";
+ member += strP;
+ partial.push(member);
}
}
}
@@ -142,7 +149,7 @@ function SerializeObject(value, replacer, stack, indent, gap) {
function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
- if (IS_SPEC_OBJECT(value)) {
+ if (IS_RECEIVER(value)) {
var toJSON = value.toJSON;
if (IS_CALLABLE(toJSON)) {
value = %_Call(toJSON, value, key);
@@ -159,9 +166,9 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
return "null";
- } else if (IS_SPEC_OBJECT(value) && !IS_CALLABLE(value)) {
+ } else if (IS_RECEIVER(value) && !IS_CALLABLE(value)) {
// Non-callable object. If it's a primitive wrapper, it must be unwrapped.
- if (IS_ARRAY(value)) {
+ if (%is_arraylike(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
value = TO_NUMBER(value);
@@ -180,14 +187,13 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
function JSONStringify(value, replacer, space) {
- if (%_ArgumentsLength() == 1) {
+ if (%_ArgumentsLength() == 1 && !IS_PROXY(value)) {
return %BasicJSONStringify(value);
}
- if (IS_ARRAY(replacer)) {
- // Deduplicate replacer array items.
+ if (!IS_CALLABLE(replacer) && %is_arraylike(replacer)) {
var property_list = new InternalArray();
- var seen_properties = { __proto__: null };
- var length = replacer.length;
+ var seen_properties = new GlobalSet();
+ var length = TO_LENGTH(replacer.length);
for (var i = 0; i < length; i++) {
var v = replacer[i];
var item;
@@ -200,9 +206,9 @@ function JSONStringify(value, replacer, space) {
} else {
continue;
}
- if (!seen_properties[item]) {
+ if (!seen_properties.has(item)) {
property_list.push(item);
- seen_properties[item] = true;
+ seen_properties.add(item);
}
}
replacer = property_list;
@@ -242,6 +248,24 @@ utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
]);
// -------------------------------------------------------------------
+// Date.toJSON
+
+// 20.3.4.37 Date.prototype.toJSON ( key )
+function DateToJSON(key) {
+ var o = TO_OBJECT(this);
+ var tv = TO_PRIMITIVE_NUMBER(o);
+ if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
+ return null;
+ }
+ return o.toISOString();
+}
+
+// Set up non-enumerable functions of the Date prototype object.
+utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
+ "toJSON", DateToJSON
+]);
+
+// -------------------------------------------------------------------
// JSON Builtins
function JsonSerializeAdapter(key, object) {
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index a4370d2181..3bcc8c114e 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -39,32 +39,6 @@ define NEW_TWO_BYTE_STRING = false;
define GETTER = 0;
define SETTER = 1;
-# For date.js.
-define HoursPerDay = 24;
-define MinutesPerHour = 60;
-define SecondsPerMinute = 60;
-define msPerSecond = 1000;
-define msPerMinute = 60000;
-define msPerHour = 3600000;
-define msPerDay = 86400000;
-define msPerMonth = 2592000000;
-
-# Note: kDayZeroInJulianDay = ToJulianDay(1970, 0, 1).
-define kInvalidDate = 'Invalid Date';
-define kDayZeroInJulianDay = 2440588;
-define kMonthMask = 0x1e0;
-define kDayMask = 0x01f;
-define kYearShift = 9;
-define kMonthShift = 5;
-
-# Limits for parts of the date, so that we support all the dates that
-# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
-# the date (days since 1970) is in SMI range.
-define kMinYear = -1000000;
-define kMaxYear = 1000000;
-define kMinMonth = -10000000;
-define kMaxMonth = 10000000;
-
# Safe maximum number of arguments to push to stack, when multiplied by
# pointer size. Used by Function.prototype.apply(), Reflect.apply() and
# Reflect.construct().
@@ -88,59 +62,49 @@ define STRING_TO_REGEXP_CACHE_ID = 0;
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
# It will *not* generate a runtime typeof call for the most important
# values of 'bar'.
-macro IS_NULL(arg) = (arg === null);
-macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
-macro IS_UNDEFINED(arg) = (arg === (void 0));
-macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
-macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_ARRAY(arg) = (%_IsArray(arg));
+macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
+macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
+macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_DATE(arg) = (%_IsDate(arg));
+macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
-macro IS_REGEXP(arg) = (%_IsRegExp(arg));
-macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
-macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
+macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
+macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
-macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
-macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
+macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
+macro IS_NULL(arg) = (arg === null);
+macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
+macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
-macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
-macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
-macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
-macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
+macro IS_OBJECT(arg) = (typeof(arg) === 'object');
+macro IS_PROXY(arg) = (%_IsJSProxy(arg));
+macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
-macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
-macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
-macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
-macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
-macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
+macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
-macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
+macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
+macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
+macro IS_STRING(arg) = (typeof(arg) === 'string');
+macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_STRONG(arg) = (%IsStrong(arg));
+macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
+macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
+macro IS_UNDEFINED(arg) = (arg === (void 0));
+macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
+macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
-# Macro for ECMAScript 5 queries of the type:
-# "Type(O) is object."
-# This is the same as being either a function or an object in V8 terminology
-# (including proxies).
-# In addition, an undetectable object is also included by this.
-macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
+# Macro for ES queries of the type: "Type(O) is Object."
+macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
-# Macro for ECMAScript 5 queries of the type:
-# "IsCallable(O)"
+# Macro for ES queries of the type: "IsCallable(O)"
macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName);
-# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
-define kBoundFunctionIndex = 0;
-define kBoundThisIndex = 1;
-define kBoundArgumentsStartIndex = 2;
-
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
@@ -193,44 +157,6 @@ macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
-# Limit according to ECMA 262 15.9.1.1
-define MAX_TIME_MS = 8640000000000000;
-# Limit which is MAX_TIME_MS + msPerMonth.
-define MAX_TIME_BEFORE_UTC = 8640002592000000;
-
-# Gets the value of a Date object. If arg is not a Date object
-# a type error is thrown.
-macro CHECK_DATE(arg) = if (!%_IsDate(arg)) %_ThrowNotDateError();
-macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
-macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
-
-macro LOCAL_YEAR(arg) = (%_DateField(arg, 1));
-macro LOCAL_MONTH(arg) = (%_DateField(arg, 2));
-macro LOCAL_DAY(arg) = (%_DateField(arg, 3));
-macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4));
-macro LOCAL_HOUR(arg) = (%_DateField(arg, 5));
-macro LOCAL_MIN(arg) = (%_DateField(arg, 6));
-macro LOCAL_SEC(arg) = (%_DateField(arg, 7));
-macro LOCAL_MS(arg) = (%_DateField(arg, 8));
-macro LOCAL_DAYS(arg) = (%_DateField(arg, 9));
-macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
-
-macro UTC_YEAR(arg) = (%_DateField(arg, 11));
-macro UTC_MONTH(arg) = (%_DateField(arg, 12));
-macro UTC_DAY(arg) = (%_DateField(arg, 13));
-macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14));
-macro UTC_HOUR(arg) = (%_DateField(arg, 15));
-macro UTC_MIN(arg) = (%_DateField(arg, 16));
-macro UTC_SEC(arg) = (%_DateField(arg, 17));
-macro UTC_MS(arg) = (%_DateField(arg, 18));
-macro UTC_DAYS(arg) = (%_DateField(arg, 19));
-macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
-
-macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21));
-
-macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
-macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
-
# Last input and last subject of regexp matches.
define LAST_SUBJECT_INDEX = 1;
macro LAST_SUBJECT(array) = ((array)[1]);
@@ -274,11 +200,11 @@ define COMPILATION_TYPE_JSON = 2;
# Matches Messages::kNoLineNumberInfo from v8.h
define kNoLineNumberInfo = 0;
-# Matches PropertyAttributes from property-details.h
-define PROPERTY_ATTRIBUTES_NONE = 0;
-define PROPERTY_ATTRIBUTES_STRING = 8;
-define PROPERTY_ATTRIBUTES_SYMBOLIC = 16;
-define PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL = 32;
+# Must match PropertyFilter in property-details.h
+define PROPERTY_FILTER_NONE = 0;
+define PROPERTY_FILTER_ONLY_ENUMERABLE = 2;
+define PROPERTY_FILTER_SKIP_STRINGS = 8;
+define PROPERTY_FILTER_SKIP_SYMBOLS = 16;
# Use for keys, values and entries iterators.
define ITERATOR_KIND_KEYS = 1;
@@ -314,9 +240,30 @@ define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
-macro DEBUG_IS_STEPPING(function) = (%_DebugIsActive() != 0 && %DebugCallbackSupportsStepping(function));
macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (%_DebugIsActive() != 0) %DebugPrepareStepInIfStepping(function);
# SharedFlag equivalents
define kNotShared = false;
define kShared = true;
+
+# UseCounters from include/v8.h
+define kUseAsm = 0;
+define kBreakIterator = 1;
+define kLegacyConst = 2;
+define kMarkDequeOverflow = 3;
+define kStoreBufferOverflow = 4;
+define kSlotsBufferOverflow = 5;
+define kObjectObserve = 6;
+define kForcedGC = 7;
+define kSloppyMode = 8;
+define kStrictMode = 9;
+define kStrongMode = 10;
+define kRegExpPrototypeStickyGetter = 11;
+define kRegExpPrototypeToString = 12;
+define kRegExpPrototypeUnicodeGetter = 13;
+define kIntlV8Parse = 14;
+define kIntlPattern = 15;
+define kIntlResolved = 16;
+define kPromiseChain = 17;
+define kPromiseAccept = 18;
+define kPromiseDefer = 19;
diff --git a/deps/v8/src/js/math.js b/deps/v8/src/js/math.js
index ba2b2186f4..990a7e993c 100644
--- a/deps/v8/src/js/math.js
+++ b/deps/v8/src/js/math.js
@@ -10,24 +10,19 @@
// -------------------------------------------------------------------
// Imports
+define kRandomBatchSize = 64;
+// The first two slots are reserved to persist PRNG state.
+define kRandomNumberStart = 2;
+
+var GlobalFloat64Array = global.Float64Array;
var GlobalMath = global.Math;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var NaN = %GetRootNaN();
-var rngstate_0;
-var rngstate_1;
-var rngstate_2;
-var rngstate_3;
+var nextRandomIndex = kRandomBatchSize;
+var randomNumbers = UNDEFINED;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-utils.InitializeRNG = function() {
- var rngstate = %InitializeRNG();
- rngstate_0 = rngstate[0];
- rngstate_1 = rngstate[1];
- rngstate_2 = rngstate[2];
- rngstate_3 = rngstate[3];
-};
-
//-------------------------------------------------------------------
// ECMA 262 - 15.8.2.1
@@ -141,25 +136,19 @@ function MathPowJS(x, y) {
// ECMA 262 - 15.8.2.14
function MathRandom() {
- var r0 = (MathImul(18030, rngstate_0) + rngstate_1) | 0;
- var r1 = (MathImul(36969, rngstate_2) + rngstate_3) | 0;
- rngstate_0 = r0 & 0xFFFF;
- rngstate_1 = r0 >>> 16;
- rngstate_2 = r1 & 0xFFFF;
- rngstate_3 = r1 >>> 16;
- // Construct a double number 1.<32-bits of randomness> and subtract 1.
- return %_ConstructDouble(0x3FF00000 | (r0 & 0x000FFFFF), r1 & 0xFFF00000) - 1;
+ if (nextRandomIndex >= kRandomBatchSize) {
+ randomNumbers = %GenerateRandomNumbers(randomNumbers);
+ nextRandomIndex = kRandomNumberStart;
+ }
+ return randomNumbers[nextRandomIndex++];
}
function MathRandomRaw() {
- var r0 = (MathImul(18030, rngstate_0) + rngstate_1) | 0;
- var r1 = (MathImul(36969, rngstate_2) + rngstate_3) | 0;
- rngstate_0 = r0 & 0xFFFF;
- rngstate_1 = r0 >>> 16;
- rngstate_2 = r1 & 0xFFFF;
- rngstate_3 = r1 >>> 16;
- var x = ((r0 << 16) + (r1 & 0xFFFF)) | 0;
- return x & 0x3FFFFFFF;
+ if (nextRandomIndex >= kRandomBatchSize) {
+ randomNumbers = %GenerateRandomNumbers(randomNumbers);
+ nextRandomIndex = kRandomNumberStart;
+ }
+ return %_DoubleLo(randomNumbers[nextRandomIndex++]) & 0x3FFFFFFF;
}
// ECMA 262 - 15.8.2.15
@@ -282,7 +271,7 @@ endmacro
function CubeRoot(x) {
var approx_hi = MathFloorJS(%_DoubleHi(x) / 3) + 0x2A9F7893;
- var approx = %_ConstructDouble(approx_hi, 0);
+ var approx = %_ConstructDouble(approx_hi | 0, 0);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
diff --git a/deps/v8/src/js/messages.js b/deps/v8/src/js/messages.js
index 581f457a31..6b7306a7d3 100644
--- a/deps/v8/src/js/messages.js
+++ b/deps/v8/src/js/messages.js
@@ -23,10 +23,10 @@ var callSitePositionSymbol =
utils.ImportNow("call_site_position_symbol");
var callSiteStrictSymbol =
utils.ImportNow("call_site_strict_symbol");
+var FLAG_harmony_tostring;
var Float32x4ToString;
var formattedStackTraceSymbol =
utils.ImportNow("formatted_stack_trace_symbol");
-var FunctionSourceString
var GlobalObject = global.Object;
var Int16x8ToString;
var Int32x4ToString;
@@ -34,13 +34,14 @@ var Int8x16ToString;
var InternalArray = utils.InternalArray;
var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
var ObjectDefineProperty;
-var ObjectToString;
+var ObjectToString = utils.ImportNow("object_to_string");
var Script = utils.ImportNow("Script");
var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
var StringCharAt;
var StringIndexOf;
var StringSubstring;
var SymbolToString;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var Uint16x8ToString;
var Uint32x4ToString;
var Uint8x16ToString;
@@ -51,12 +52,10 @@ utils.Import(function(from) {
Bool32x4ToString = from.Bool32x4ToString;
Bool8x16ToString = from.Bool8x16ToString;
Float32x4ToString = from.Float32x4ToString;
- FunctionSourceString = from.FunctionSourceString;
Int16x8ToString = from.Int16x8ToString;
Int32x4ToString = from.Int32x4ToString;
Int8x16ToString = from.Int8x16ToString;
ObjectDefineProperty = from.ObjectDefineProperty;
- ObjectToString = from.ObjectToString;
StringCharAt = from.StringCharAt;
StringIndexOf = from.StringIndexOf;
StringSubstring = from.StringSubstring;
@@ -66,6 +65,10 @@ utils.Import(function(from) {
Uint8x16ToString = from.Uint8x16ToString;
});
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tostring = from.FLAG_harmony_tostring;
+});
+
// -------------------------------------------------------------------
var GlobalError;
@@ -80,18 +83,42 @@ var GlobalEvalError;
function NoSideEffectsObjectToString() {
if (IS_UNDEFINED(this)) return "[object Undefined]";
if (IS_NULL(this)) return "[object Null]";
- return "[object " + %_ClassOf(TO_OBJECT(this)) + "]";
+ var O = TO_OBJECT(this);
+ var builtinTag = %_ClassOf(O);
+ var tag;
+ if (FLAG_harmony_tostring) {
+ tag = %GetDataProperty(O, toStringTagSymbol);
+ if (!IS_STRING(tag)) {
+ tag = builtinTag;
+ }
+ } else {
+ tag = builtinTag;
+ }
+ return `[object ${tag}]`;
}
+function IsErrorObject(obj) {
+ return HAS_PRIVATE(obj, stackTraceSymbol);
+}
-function NoSideEffectToString(obj) {
+function NoSideEffectsErrorToString() {
+ var name = %GetDataProperty(this, "name");
+ var message = %GetDataProperty(this, "message");
+ name = IS_UNDEFINED(name) ? "Error" : NoSideEffectsToString(name);
+ message = IS_UNDEFINED(message) ? "" : NoSideEffectsToString(message);
+ if (name == "") return message;
+ if (message == "") return name;
+ return `${name}: ${message}`;
+}
+
+function NoSideEffectsToString(obj) {
if (IS_STRING(obj)) return obj;
if (IS_NUMBER(obj)) return %_NumberToString(obj);
if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
if (IS_UNDEFINED(obj)) return 'undefined';
if (IS_NULL(obj)) return 'null';
if (IS_FUNCTION(obj)) {
- var str = %_Call(FunctionSourceString, obj, obj);
+ var str = %FunctionToString(obj);
if (str.length > 128) {
str = %_SubString(str, 0, 111) + "...<omitted>..." +
%_SubString(str, str.length - 2, str.length);
@@ -113,69 +140,26 @@ function NoSideEffectToString(obj) {
case 'bool8x16': return %_Call(Bool8x16ToString, obj);
}
}
- if (IS_OBJECT(obj)
- && %GetDataProperty(obj, "toString") === ObjectToString) {
- var constructor = %GetDataProperty(obj, "constructor");
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
- }
- }
- }
- if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
- return %_Call(ErrorToString, obj);
- }
-
- return %_Call(NoSideEffectsObjectToString, obj);
-}
-
-// To determine whether we can safely stringify an object using ErrorToString
-// without the risk of side-effects, we need to check whether the object is
-// either an instance of a native error type (via '%_ClassOf'), or has Error
-// in its prototype chain and hasn't overwritten 'toString' with something
-// strange and unusual.
-function CanBeSafelyTreatedAsAnErrorObject(obj) {
- switch (%_ClassOf(obj)) {
- case 'Error':
- case 'EvalError':
- case 'RangeError':
- case 'ReferenceError':
- case 'SyntaxError':
- case 'TypeError':
- case 'URIError':
- return true;
- }
-
- var objToString = %GetDataProperty(obj, "toString");
- return obj instanceof GlobalError && objToString === ErrorToString;
-}
-
-
-// When formatting internally created error messages, do not
-// invoke overwritten error toString methods but explicitly use
-// the error to string method. This is to avoid leaking error
-// objects between script tags in a browser setting.
-function ToStringCheckErrorObject(obj) {
- if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
- return %_Call(ErrorToString, obj);
- } else {
- return TO_STRING(obj);
- }
-}
+ if (IS_RECEIVER(obj)) {
+ // When internally formatting error objects, use a side-effects-free version
+ // of Error.prototype.toString independent of the actually installed
+ // toString method.
+ if (IsErrorObject(obj) ||
+ %GetDataProperty(obj, "toString") === ErrorToString) {
+ return %_Call(NoSideEffectsErrorToString, obj);
+ }
-function ToDetailString(obj) {
- if (obj != null && IS_OBJECT(obj) && obj.toString === ObjectToString) {
- var constructor = obj.constructor;
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
+ if (%GetDataProperty(obj, "toString") === ObjectToString) {
+ var constructor = %GetDataProperty(obj, "constructor");
+ if (IS_FUNCTION(constructor)) {
+ var constructor_name = %FunctionGetName(constructor);
+ if (constructor_name != "") return `#<${constructor_name}>`;
}
}
}
- return ToStringCheckErrorObject(obj);
+
+ return %_Call(NoSideEffectsObjectToString, obj);
}
@@ -200,9 +184,9 @@ function MakeGenericError(constructor, type, arg0, arg1, arg2) {
// Helper functions; called from the runtime system.
function FormatMessage(type, arg0, arg1, arg2) {
- var arg0 = NoSideEffectToString(arg0);
- var arg1 = NoSideEffectToString(arg1);
- var arg2 = NoSideEffectToString(arg2);
+ var arg0 = NoSideEffectsToString(arg0);
+ var arg1 = NoSideEffectsToString(arg1);
+ var arg2 = NoSideEffectsToString(arg2);
try {
return %FormatMessageString(type, arg0, arg1, arg2);
} catch (e) {
@@ -448,11 +432,10 @@ function ScriptLineEnd(n) {
* If sourceURL comment is available returns sourceURL comment contents.
* Otherwise, script name is returned. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * and Source Map Revision 3 proposal for details on using //# sourceURL and
- * deprecated //@ sourceURL comment to identify scripts that don't have name.
+ * and Source Map Revision 3 proposal for details on using //# sourceURL
+ * comment to identify scripts that don't have name.
*
- * @return {?string} script name if present, value for //# sourceURL or
- * deprecated //@ sourceURL comment otherwise.
+ * @return {?string} script name if present, value for //# sourceURL comment.
*/
function ScriptNameOrSourceURL() {
if (this.source_url) return this.source_url;
@@ -580,10 +563,18 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// Error implementation
function CallSite(receiver, fun, pos, strict_mode) {
+ if (!IS_FUNCTION(fun)) {
+ throw MakeTypeError(kCallSiteExpectsFunction, typeof fun);
+ }
+
+ if (IS_UNDEFINED(new.target)) {
+ return new CallSite(receiver, fun, pos, strict_mode);
+ }
+
SET_PRIVATE(this, callSiteReceiverSymbol, receiver);
SET_PRIVATE(this, callSiteFunctionSymbol, fun);
- SET_PRIVATE(this, callSitePositionSymbol, pos);
- SET_PRIVATE(this, callSiteStrictSymbol, strict_mode);
+ SET_PRIVATE(this, callSitePositionSymbol, TO_INT32(pos));
+ SET_PRIVATE(this, callSiteStrictSymbol, TO_BOOLEAN(strict_mode));
}
function CallSiteGetThis() {
@@ -850,17 +841,13 @@ function FormatStackTrace(obj, raw_stack) {
function GetTypeName(receiver, requireConstructor) {
if (IS_NULL_OR_UNDEFINED(receiver)) return null;
- var constructor = receiver.constructor;
- if (!constructor) {
- return requireConstructor ? null :
- %_Call(NoSideEffectsObjectToString, receiver);
- }
- var constructorName = constructor.name;
- if (!constructorName) {
- return requireConstructor ? null :
- %_Call(NoSideEffectsObjectToString, receiver);
+ if (IS_PROXY(receiver)) return "Proxy";
+
+ var constructor = %GetDataProperty(TO_OBJECT(receiver), "constructor");
+ if (!IS_FUNCTION(constructor)) {
+ return requireConstructor ? null : %_Call(NoSideEffectsToString, receiver);
}
- return constructorName;
+ return %FunctionGetName(constructor);
}
@@ -894,7 +881,7 @@ var StackTraceGetter = function() {
// If the receiver equals the holder, set the formatted stack trace that the
// getter returns.
var StackTraceSetter = function(v) {
- if (HAS_PRIVATE(this, stackTraceSymbol)) {
+ if (IsErrorObject(this)) {
SET_PRIVATE(this, stackTraceSymbol, UNDEFINED);
SET_PRIVATE(this, formattedStackTraceSymbol, v);
}
@@ -906,69 +893,63 @@ var StackTraceSetter = function(v) {
var captureStackTrace = function() {};
-// Define special error type constructors.
-function DefineError(global, f) {
- // Store the error function in both the global object
- // and the runtime object. The function is fetched
- // from the runtime object when throwing errors from
- // within the runtime system to avoid strange side
- // effects when overwriting the error functions from
- // user code.
- var name = f.name;
- %AddNamedProperty(global, name, f, DONT_ENUM);
- // Configure the error function.
- if (name == 'Error') {
- // The prototype of the Error object must itself be an error.
- // However, it can't be an instance of the Error object because
- // it hasn't been properly configured yet. Instead we create a
- // special not-a-true-error-but-close-enough object.
- var ErrorPrototype = function() {};
- %FunctionSetPrototype(ErrorPrototype, GlobalObject.prototype);
- %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
- %FunctionSetPrototype(f, new ErrorPrototype());
- } else {
- %FunctionSetPrototype(f, new GlobalError());
- %InternalSetPrototype(f, GlobalError);
+// Set up special error type constructors.
+function SetUpError(error_function) {
+ %FunctionSetInstanceClassName(error_function, 'Error');
+ var name = error_function.name;
+ var prototype = new GlobalObject();
+ if (name !== 'Error') {
+ %InternalSetPrototype(error_function, GlobalError);
+ %InternalSetPrototype(prototype, GlobalError.prototype);
}
- %FunctionSetInstanceClassName(f, 'Error');
- %AddNamedProperty(f.prototype, 'constructor', f, DONT_ENUM);
- %AddNamedProperty(f.prototype, 'name', name, DONT_ENUM);
- %SetCode(f, function(m) {
- if (%_IsConstructCall()) {
- try { captureStackTrace(this, f); } catch (e) { }
- // Define all the expected properties directly on the error
- // object. This avoids going through getters and setters defined
- // on prototype objects.
- if (!IS_UNDEFINED(m)) {
- %AddNamedProperty(this, 'message', TO_STRING(m), DONT_ENUM);
- }
- } else {
- return new f(m);
+ %FunctionSetPrototype(error_function, prototype);
+
+ %AddNamedProperty(error_function.prototype, 'name', name, DONT_ENUM);
+ %AddNamedProperty(error_function.prototype, 'message', '', DONT_ENUM);
+ %AddNamedProperty(
+ error_function.prototype, 'constructor', error_function, DONT_ENUM);
+
+ %SetCode(error_function, function(m) {
+ if (IS_UNDEFINED(new.target)) return new error_function(m);
+
+ try { captureStackTrace(this, error_function); } catch (e) { }
+ // Define all the expected properties directly on the error
+ // object. This avoids going through getters and setters defined
+ // on prototype objects.
+ if (!IS_UNDEFINED(m)) {
+ %AddNamedProperty(this, 'message', TO_STRING(m), DONT_ENUM);
}
});
- %SetNativeFlag(f);
- return f;
-};
-GlobalError = DefineError(global, function Error() { });
-GlobalEvalError = DefineError(global, function EvalError() { });
-GlobalRangeError = DefineError(global, function RangeError() { });
-GlobalReferenceError = DefineError(global, function ReferenceError() { });
-GlobalSyntaxError = DefineError(global, function SyntaxError() { });
-GlobalTypeError = DefineError(global, function TypeError() { });
-GlobalURIError = DefineError(global, function URIError() { });
+ %SetNativeFlag(error_function);
+ return error_function;
+};
-%AddNamedProperty(GlobalError.prototype, 'message', '', DONT_ENUM);
+GlobalError = SetUpError(global.Error);
+GlobalEvalError = SetUpError(global.EvalError);
+GlobalRangeError = SetUpError(global.RangeError);
+GlobalReferenceError = SetUpError(global.ReferenceError);
+GlobalSyntaxError = SetUpError(global.SyntaxError);
+GlobalTypeError = SetUpError(global.TypeError);
+GlobalURIError = SetUpError(global.URIError);
utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
['toString', ErrorToString]);
function ErrorToString() {
- if (!IS_SPEC_OBJECT(this)) {
+ if (!IS_RECEIVER(this)) {
throw MakeTypeError(kCalledOnNonObject, "Error.prototype.toString");
}
- return %ErrorToStringRT(this);
+ var name = this.name;
+ name = IS_UNDEFINED(name) ? "Error" : TO_STRING(name);
+
+ var message = this.message;
+ message = IS_UNDEFINED(message) ? "" : TO_STRING(message);
+
+ if (name == "") return message;
+ if (message == "") return name;
+ return `${name}: ${message}`
}
function MakeError(type, arg0, arg1, arg2) {
@@ -1009,8 +990,6 @@ captureStackTrace = function captureStackTrace(obj, cons_opt) {
GlobalError.captureStackTrace = captureStackTrace;
%InstallToContext([
- "error_function", GlobalError,
- "eval_error_function", GlobalEvalError,
"get_stack_trace_line_fun", GetStackTraceLine,
"make_error_function", MakeGenericError,
"make_range_error", MakeRangeError,
@@ -1018,14 +997,8 @@ GlobalError.captureStackTrace = captureStackTrace;
"message_get_column_number", GetColumnNumber,
"message_get_line_number", GetLineNumber,
"message_get_source_line", GetSourceLine,
- "no_side_effect_to_string_fun", NoSideEffectToString,
- "range_error_function", GlobalRangeError,
- "reference_error_function", GlobalReferenceError,
+ "no_side_effects_to_string_fun", NoSideEffectsToString,
"stack_overflow_boilerplate", StackOverflowBoilerplate,
- "syntax_error_function", GlobalSyntaxError,
- "to_detail_string_fun", ToDetailString,
- "type_error_function", GlobalTypeError,
- "uri_error_function", GlobalURIError,
]);
utils.Export(function(to) {
diff --git a/deps/v8/src/js/object-observe.js b/deps/v8/src/js/object-observe.js
index 2c297a59c2..5e256bf0bb 100644
--- a/deps/v8/src/js/object-observe.js
+++ b/deps/v8/src/js/object-observe.js
@@ -16,14 +16,10 @@ var GlobalArray = global.Array;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var MakeTypeError;
-var ObjectFreeze;
-var ObjectIsFrozen;
utils.Import(function(from) {
GetHash = from.GetHash;
MakeTypeError = from.MakeTypeError;
- ObjectFreeze = from.ObjectFreeze;
- ObjectIsFrozen = from.ObjectIsFrozen;
});
// -------------------------------------------------------------------
@@ -192,7 +188,7 @@ function ObserverIsActive(observer, objectInfo) {
function ObjectInfoGetOrCreate(object) {
var objectInfo = ObjectInfoGet(object);
if (IS_UNDEFINED(objectInfo)) {
- if (!%_IsJSProxy(object)) {
+ if (!IS_PROXY(object)) {
%SetIsObserved(object);
}
objectInfo = {
@@ -324,7 +320,7 @@ function ConvertAcceptListToTypeMap(arg) {
if (IS_UNDEFINED(arg))
return arg;
- if (!IS_SPEC_OBJECT(arg)) throw MakeTypeError(kObserveInvalidAccept);
+ if (!IS_RECEIVER(arg)) throw MakeTypeError(kObserveInvalidAccept);
var len = TO_INTEGER(arg.length);
if (len < 0) len = 0;
@@ -380,7 +376,7 @@ function CallbackInfoNormalize(callback) {
function ObjectObserve(object, callback, acceptList) {
- if (!IS_SPEC_OBJECT(object))
+ if (!IS_RECEIVER(object))
throw MakeTypeError(kObserveNonObject, "observe", "observe");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "observe");
@@ -388,7 +384,7 @@ function ObjectObserve(object, callback, acceptList) {
throw MakeTypeError(kObserveAccessChecked, "observe");
if (!IS_CALLABLE(callback))
throw MakeTypeError(kObserveNonFunction, "observe");
- if (ObjectIsFrozen(callback))
+ if (%object_is_frozen(callback))
throw MakeTypeError(kObserveCallbackFrozen);
var objectObserveFn = %GetObjectContextObjectObserve(object);
@@ -405,7 +401,7 @@ function NativeObjectObserve(object, callback, acceptList) {
function ObjectUnobserve(object, callback) {
- if (!IS_SPEC_OBJECT(object))
+ if (!IS_RECEIVER(object))
throw MakeTypeError(kObserveNonObject, "unobserve", "unobserve");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "unobserve");
@@ -481,7 +477,7 @@ function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
%DefineDataPropertyUnchecked(
newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE);
}
- ObjectFreeze(newRecord);
+ %object_freeze(newRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord);
}
@@ -533,8 +529,8 @@ function EnqueueSpliceRecord(array, index, removed, addedCount) {
addedCount: addedCount
};
- ObjectFreeze(changeRecord);
- ObjectFreeze(changeRecord.removed);
+ %object_freeze(changeRecord);
+ %object_freeze(changeRecord.removed);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
@@ -558,13 +554,13 @@ function NotifyChange(type, object, name, oldValue) {
};
}
- ObjectFreeze(changeRecord);
+ %object_freeze(changeRecord);
ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
}
function ObjectNotifierNotify(changeRecord) {
- if (!IS_SPEC_OBJECT(this))
+ if (!IS_RECEIVER(this))
throw MakeTypeError(kCalledOnNonObject, "notify");
var objectInfo = ObjectInfoGetFromNotifier(this);
@@ -578,7 +574,7 @@ function ObjectNotifierNotify(changeRecord) {
function ObjectNotifierPerformChange(changeType, changeFn) {
- if (!IS_SPEC_OBJECT(this))
+ if (!IS_RECEIVER(this))
throw MakeTypeError(kCalledOnNonObject, "performChange");
var objectInfo = ObjectInfoGetFromNotifier(this);
@@ -604,20 +600,20 @@ function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) {
ObjectInfoRemovePerformingType(objectInfo, changeType);
}
- if (IS_SPEC_OBJECT(changeRecord))
+ if (IS_RECEIVER(changeRecord))
ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, changeType);
}
function ObjectGetNotifier(object) {
- if (!IS_SPEC_OBJECT(object))
+ if (!IS_RECEIVER(object))
throw MakeTypeError(kObserveNonObject, "getNotifier", "getNotifier");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "getNotifier");
if (%IsAccessCheckNeeded(object))
throw MakeTypeError(kObserveAccessChecked, "getNotifier");
- if (ObjectIsFrozen(object)) return null;
+ if (%object_is_frozen(object)) return null;
if (!%ObjectWasCreatedInCurrentOrigin(object)) return null;
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 0f605be649..2779393bd1 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -171,22 +171,8 @@ function PostNatives(utils) {
var expose_list = [
"ArrayToString",
"ErrorToString",
- "FunctionSourceString",
"GetIterator",
"GetMethod",
- "InnerArrayEvery",
- "InnerArrayFilter",
- "InnerArrayForEach",
- "InnerArrayIndexOf",
- "InnerArrayJoin",
- "InnerArrayLastIndexOf",
- "InnerArrayMap",
- "InnerArrayReduce",
- "InnerArrayReduceRight",
- "InnerArrayReverse",
- "InnerArraySome",
- "InnerArraySort",
- "InnerArrayToLocaleString",
"IsNaN",
"MakeError",
"MakeTypeError",
@@ -197,27 +183,32 @@ function PostNatives(utils) {
"MathMin",
"MaxSimple",
"MinSimple",
- "ObjectIsFrozen",
"ObjectDefineProperty",
"ObserveArrayMethods",
"ObserveObjectMethods",
- "OwnPropertyKeys",
+ "PromiseChain",
+ "PromiseDeferred",
+ "PromiseResolved",
"SameValueZero",
"SetIterator",
"SetIteratorNext",
"SetValues",
"SymbolToString",
- "ToNameArray",
"ToPositiveInteger",
// From runtime:
"is_concat_spreadable_symbol",
"iterator_symbol",
"promise_status_symbol",
"promise_value_symbol",
+ "object_freeze",
+ "object_is_frozen",
+ "object_is_sealed",
"reflect_apply",
"reflect_construct",
"regexp_flags_symbol",
"to_string_tag_symbol",
+ "object_to_string",
+ "species_symbol",
];
var filtered_exports = {};
@@ -245,8 +236,6 @@ function PostExperimentals(utils) {
imports_from_experimental(exports_container);
}
- utils.InitializeRNG();
- utils.InitializeRNG = UNDEFINED;
utils.CreateDoubleResultArray();
utils.CreateDoubleResultArray = UNDEFINED;
@@ -262,8 +251,6 @@ function PostDebug(utils) {
imports(exports_container);
}
- utils.InitializeRNG();
- utils.InitializeRNG = UNDEFINED;
utils.CreateDoubleResultArray();
utils.CreateDoubleResultArray = UNDEFINED;
@@ -289,7 +276,7 @@ function InitializeBuiltinTypedArrays(utils, rng_state, rempio2result) {
// -----------------------------------------------------------------------
-%OptimizeObjectForAddingMultipleProperties(utils, 15);
+%OptimizeObjectForAddingMultipleProperties(utils, 14);
utils.Import = Import;
utils.ImportNow = ImportNow;
diff --git a/deps/v8/src/js/promise-extra.js b/deps/v8/src/js/promise-extra.js
new file mode 100644
index 0000000000..f6f79592bc
--- /dev/null
+++ b/deps/v8/src/js/promise-extra.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalPromise = global.Promise;
+
+var PromiseChain = utils.ImportNow("PromiseChain");
+var PromiseDeferred = utils.ImportNow("PromiseDeferred");
+var PromiseResolved = utils.ImportNow("PromiseResolved");
+
+utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
+ "chain", PromiseChain,
+]);
+
+utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
+ "defer", PromiseDeferred,
+ "accept", PromiseResolved,
+]);
+
+})
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index d7e9a5c67f..8cf6a36cef 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -23,10 +23,12 @@ var promiseOnResolveSymbol =
var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var SpeciesConstructor;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
+ SpeciesConstructor = from.SpeciesConstructor;
});
// -------------------------------------------------------------------
@@ -34,21 +36,50 @@ utils.Import(function(from) {
// Status values: 0 = pending, +1 = resolved, -1 = rejected
var lastMicrotaskId = 0;
+function CreateResolvingFunctions(promise) {
+ var alreadyResolved = false;
+
+ var resolve = value => {
+ if (alreadyResolved === true) return;
+ alreadyResolved = true;
+ PromiseResolve(promise, value);
+ };
+
+ var reject = reason => {
+ if (alreadyResolved === true) return;
+ alreadyResolved = true;
+ PromiseReject(promise, reason);
+ };
+
+ return {
+ __proto__: null,
+ resolve: resolve,
+ reject: reject
+ };
+}
+
+
var GlobalPromise = function Promise(resolver) {
- if (resolver === promiseRawSymbol) return;
- if (!%_IsConstructCall()) throw MakeTypeError(kNotAPromise, this);
+ if (resolver === promiseRawSymbol) {
+ return %NewObject(GlobalPromise, new.target);
+ }
+ if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
if (!IS_CALLABLE(resolver))
throw MakeTypeError(kResolverNotAFunction, resolver);
- var promise = PromiseInit(this);
+
+ var promise = PromiseInit(%NewObject(GlobalPromise, new.target));
+ var callbacks = CreateResolvingFunctions(promise);
+
try {
- %DebugPushPromise(promise, Promise, resolver);
- resolver(function(x) { PromiseResolve(promise, x) },
- function(r) { PromiseReject(promise, r) });
+ %DebugPushPromise(promise, Promise);
+ resolver(callbacks.resolve, callbacks.reject);
} catch (e) {
- PromiseReject(promise, e);
+ %_Call(callbacks.reject, UNDEFINED, e);
} finally {
%DebugPopPromise();
}
+
+ return promise;
}
// Core functionality.
@@ -84,37 +115,11 @@ function PromiseDone(promise, status, value, promiseQueue) {
}
}
-function PromiseCoerce(constructor, x) {
- if (!IsPromise(x) && IS_SPEC_OBJECT(x)) {
- var then;
- try {
- then = x.then;
- } catch(r) {
- return %_Call(PromiseRejected, constructor, r);
- }
- if (IS_CALLABLE(then)) {
- var deferred = %_Call(PromiseDeferred, constructor);
- try {
- %_Call(then, x, deferred.resolve, deferred.reject);
- } catch(r) {
- deferred.reject(r);
- }
- return deferred.promise;
- }
- }
- return x;
-}
-
function PromiseHandle(value, handler, deferred) {
try {
- %DebugPushPromise(deferred.promise, PromiseHandle, handler);
+ %DebugPushPromise(deferred.promise, PromiseHandle);
var result = handler(value);
- if (result === deferred.promise)
- throw MakeTypeError(kPromiseCyclic, result);
- else if (IsPromise(result))
- %_Call(PromiseChain, result, deferred.resolve, deferred.reject);
- else
- deferred.resolve(result);
+ deferred.resolve(result);
} catch (exception) {
try { deferred.reject(exception); } catch (e) { }
} finally {
@@ -153,7 +158,7 @@ function PromiseNopResolver() {}
// For bootstrapper.
function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
+ return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
}
function PromiseCreate() {
@@ -161,7 +166,42 @@ function PromiseCreate() {
}
function PromiseResolve(promise, x) {
- PromiseDone(promise, +1, x, promiseOnResolveSymbol)
+ if (x === promise) {
+ return PromiseReject(promise, MakeTypeError(kPromiseCyclic, x));
+ }
+ if (IS_RECEIVER(x)) {
+ // 25.4.1.3.2 steps 8-12
+ try {
+ var then = x.then;
+ } catch (e) {
+ return PromiseReject(promise, e);
+ }
+ if (IS_CALLABLE(then)) {
+ // PromiseResolveThenableJob
+ var id, name, instrumenting = DEBUG_IS_ACTIVE;
+ %EnqueueMicrotask(function() {
+ if (instrumenting) {
+ %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
+ }
+ var callbacks = CreateResolvingFunctions(promise);
+ try {
+ %_Call(then, x, callbacks.resolve, callbacks.reject);
+ } catch (e) {
+ %_Call(callbacks.reject, UNDEFINED, e);
+ }
+ if (instrumenting) {
+ %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
+ }
+ });
+ if (instrumenting) {
+ id = ++lastMicrotaskId;
+ name = "PromiseResolveThenableJob";
+ %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+ }
+ return;
+ }
+ }
+ PromiseDone(promise, +1, x, promiseOnResolveSymbol);
}
function PromiseReject(promise, r) {
@@ -179,57 +219,70 @@ function PromiseReject(promise, r) {
// Convenience.
-function PromiseDeferred() {
- if (this === GlobalPromise) {
+function NewPromiseCapability(C) {
+ if (C === GlobalPromise) {
// Optimized case, avoid extra closure.
var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+ var callbacks = CreateResolvingFunctions(promise);
return {
promise: promise,
- resolve: function(x) { PromiseResolve(promise, x) },
- reject: function(r) { PromiseReject(promise, r) }
+ resolve: callbacks.resolve,
+ reject: callbacks.reject
};
- } else {
- var result = {promise: UNDEFINED, reject: UNDEFINED, resolve: UNDEFINED};
- result.promise = new this(function(resolve, reject) {
- result.resolve = resolve;
- result.reject = reject;
- });
- return result;
}
+
+ var result = {promise: UNDEFINED, resolve: UNDEFINED, reject: UNDEFINED };
+ result.promise = new C((resolve, reject) => {
+ if (!IS_UNDEFINED(result.resolve) || !IS_UNDEFINED(result.reject))
+ throw MakeTypeError(kPromiseExecutorAlreadyInvoked);
+ result.resolve = resolve;
+ result.reject = reject;
+ });
+
+ return result;
+}
+
+function PromiseDeferred() {
+ %IncrementUseCounter(kPromiseDefer);
+ return NewPromiseCapability(this);
}
function PromiseResolved(x) {
- if (this === GlobalPromise) {
- // Optimized case, avoid extra closure.
- return PromiseCreateAndSet(+1, x);
- } else {
- return new this(function(resolve, reject) { resolve(x) });
- }
+ %IncrementUseCounter(kPromiseAccept);
+ return %_Call(PromiseCast, this, x);
}
function PromiseRejected(r) {
- var promise;
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, PromiseRejected);
+ }
if (this === GlobalPromise) {
// Optimized case, avoid extra closure.
- promise = PromiseCreateAndSet(-1, r);
+ var promise = PromiseCreateAndSet(-1, r);
// The debug event for this would always be an uncaught promise reject,
// which is usually simply noise. Do not trigger that debug event.
%PromiseRejectEvent(promise, r, false);
+ return promise;
} else {
- promise = new this(function(resolve, reject) { reject(r) });
+ var promiseCapability = NewPromiseCapability(this);
+ %_Call(promiseCapability.reject, UNDEFINED, r);
+ return promiseCapability.promise;
}
- return promise;
}
-// Simple chaining.
+// Multi-unwrapped chaining with thenable coercion.
-function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
- onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
- onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
- var deferred = %_Call(PromiseDeferred, this.constructor);
- switch (GET_PRIVATE(this, promiseStatusSymbol)) {
- case UNDEFINED:
- throw MakeTypeError(kNotAPromise, this);
+function PromiseThen(onResolve, onReject) {
+ var status = GET_PRIVATE(this, promiseStatusSymbol);
+ if (IS_UNDEFINED(status)) {
+ throw MakeTypeError(kNotAPromise, this);
+ }
+
+ var constructor = SpeciesConstructor(this, GlobalPromise);
+ onResolve = IS_CALLABLE(onResolve) ? onResolve : PromiseIdResolveHandler;
+ onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
+ var deferred = NewPromiseCapability(constructor);
+ switch (status) {
case 0: // Pending
GET_PRIVATE(this, promiseOnResolveSymbol).push(onResolve, deferred);
GET_PRIVATE(this, promiseOnRejectSymbol).push(onReject, deferred);
@@ -258,85 +311,88 @@ function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
return deferred.promise;
}
-function PromiseCatch(onReject) {
- return this.then(UNDEFINED, onReject);
+// Chain is left around for now as an alias for then
+function PromiseChain(onResolve, onReject) {
+ %IncrementUseCounter(kPromiseChain);
+ return %_Call(PromiseThen, this, onResolve, onReject);
}
-// Multi-unwrapped chaining with thenable coercion.
-
-function PromiseThen(onResolve, onReject) {
- onResolve = IS_CALLABLE(onResolve) ? onResolve : PromiseIdResolveHandler;
- onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
- var that = this;
- var constructor = this.constructor;
- return %_Call(
- PromiseChain,
- this,
- function(x) {
- x = PromiseCoerce(constructor, x);
- if (x === that) {
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(onReject);
- return onReject(MakeTypeError(kPromiseCyclic, x));
- } else if (IsPromise(x)) {
- return x.then(onResolve, onReject);
- } else {
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(onResolve);
- return onResolve(x);
- }
- },
- onReject
- );
+function PromiseCatch(onReject) {
+ return this.then(UNDEFINED, onReject);
}
// Combinators.
function PromiseCast(x) {
- if (IsPromise(x) && x.constructor === this) {
- return x;
- } else {
- return new this(function(resolve) { resolve(x) });
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, PromiseCast);
}
+ if (IsPromise(x) && x.constructor === this) return x;
+
+ var promiseCapability = NewPromiseCapability(this);
+ var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
+ return promiseCapability.promise;
}
function PromiseAll(iterable) {
- var deferred = %_Call(PromiseDeferred, this);
- var resolutions = [];
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, "Promise.all");
+ }
+
+ var deferred = NewPromiseCapability(this);
+ var resolutions = new InternalArray();
+ var count;
+
+ function CreateResolveElementFunction(index, values, promiseCapability) {
+ var alreadyCalled = false;
+ return (x) => {
+ if (alreadyCalled === true) return;
+ alreadyCalled = true;
+ values[index] = x;
+ if (--count === 0) {
+ var valuesArray = [];
+ %MoveArrayContents(values, valuesArray);
+ %_Call(promiseCapability.resolve, UNDEFINED, valuesArray);
+ }
+ };
+ }
+
try {
- var count = 0;
var i = 0;
+ count = 1;
for (var value of iterable) {
- var reject = function(r) { deferred.reject(r) };
- this.resolve(value).then(
- // Nested scope to get closure over current i.
- // TODO(arv): Use an inner let binding once available.
- (function(i) {
- return function(x) {
- resolutions[i] = x;
- if (--count === 0) deferred.resolve(resolutions);
- }
- })(i), reject);
- SET_PRIVATE(reject, promiseCombinedDeferredSymbol, deferred);
- ++i;
+ var nextPromise = this.resolve(value);
++count;
+ nextPromise.then(
+ CreateResolveElementFunction(i, resolutions, deferred),
+ deferred.reject);
+ SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
+ ++i;
}
- if (count === 0) {
- deferred.resolve(resolutions);
+ // 6.d
+ if (--count === 0) {
+ var valuesArray = [];
+ %MoveArrayContents(resolutions, valuesArray);
+ %_Call(deferred.resolve, UNDEFINED, valuesArray);
}
} catch (e) {
- deferred.reject(e)
+ %_Call(deferred.reject, UNDEFINED, e);
}
return deferred.promise;
}
function PromiseRace(iterable) {
- var deferred = %_Call(PromiseDeferred, this);
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(kCalledOnNonObject, PromiseRace);
+ }
+
+ var deferred = NewPromiseCapability(this);
try {
for (var value of iterable) {
- var reject = function(r) { deferred.reject(r) };
- this.resolve(value).then(function(x) { deferred.resolve(x) }, reject);
- SET_PRIVATE(reject, promiseCombinedDeferredSymbol, deferred);
+ this.resolve(value).then(deferred.resolve, deferred.reject);
+ SET_PRIVATE(deferred.reject, promiseCombinedDeferredSymbol, deferred);
}
} catch (e) {
deferred.reject(e)
@@ -381,8 +437,6 @@ function PromiseHasUserDefinedRejectHandler() {
DONT_ENUM | READ_ONLY);
utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
- "defer", PromiseDeferred,
- "accept", PromiseResolved,
"reject", PromiseRejected,
"all", PromiseAll,
"race", PromiseRace,
@@ -390,7 +444,6 @@ utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
]);
utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
- "chain", PromiseChain,
"then", PromiseThen,
"catch", PromiseCatch
]);
@@ -414,4 +467,14 @@ utils.InstallFunctions(extrasUtils, 0, [
"rejectPromise", PromiseReject
]);
+// TODO(v8:4567): Allow experimental natives to remove function prototype
+[PromiseChain, PromiseDeferred, PromiseResolved].forEach(
+ fn => %FunctionRemovePrototype(fn));
+
+utils.Export(function(to) {
+ to.PromiseChain = PromiseChain;
+ to.PromiseDeferred = PromiseDeferred;
+ to.PromiseResolved = PromiseResolved;
+});
+
})
diff --git a/deps/v8/src/js/proxy.js b/deps/v8/src/js/proxy.js
index fc38680a13..842bac0252 100644
--- a/deps/v8/src/js/proxy.js
+++ b/deps/v8/src/js/proxy.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,201 +10,59 @@
// ----------------------------------------------------------------------------
// Imports
-
-var GlobalFunction = global.Function;
-var GlobalObject = global.Object;
+//
+var GlobalProxy = global.Proxy;
var MakeTypeError;
-var ToNameArray;
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
- ToNameArray = from.ToNameArray;
});
//----------------------------------------------------------------------------
-function ProxyCreate(handler, proto) {
- if (!IS_SPEC_OBJECT(handler))
- throw MakeTypeError(kProxyHandlerNonObject, "create")
- if (IS_UNDEFINED(proto))
- proto = null
- else if (!(IS_SPEC_OBJECT(proto) || IS_NULL(proto)))
- throw MakeTypeError(kProxyProtoNonObject)
- return %CreateJSProxy(handler, proto)
-}
-
-function ProxyCreateFunction(handler, callTrap, constructTrap) {
- if (!IS_SPEC_OBJECT(handler))
- throw MakeTypeError(kProxyHandlerNonObject, "createFunction")
- if (!IS_CALLABLE(callTrap))
- throw MakeTypeError(kProxyTrapFunctionExpected, "call")
- if (IS_UNDEFINED(constructTrap)) {
- constructTrap = DerivedConstructTrap(callTrap)
- } else if (IS_CALLABLE(constructTrap)) {
- // Make sure the trap receives 'undefined' as this.
- var construct = constructTrap
- constructTrap = function() {
- return %Apply(construct, UNDEFINED, arguments, 0, %_ArgumentsLength());
- }
- } else {
- throw MakeTypeError(kProxyTrapFunctionExpected, "construct")
- }
- return %CreateJSFunctionProxy(
- handler, callTrap, constructTrap, GlobalFunction.prototype)
+function ProxyCreateRevocable(target, handler) {
+ var p = new GlobalProxy(target, handler);
+ return {proxy: p, revoke: () => %JSProxyRevoke(p)};
}
// -------------------------------------------------------------------
// Proxy Builtins
-function DerivedConstructTrap(callTrap) {
- return function() {
- var proto = this.prototype
- if (!IS_SPEC_OBJECT(proto)) proto = GlobalObject.prototype
- var obj = { __proto__: proto };
- var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
- return IS_SPEC_OBJECT(result) ? result : obj
- }
-}
-
-function DelegateCallAndConstruct(callTrap, constructTrap) {
- return function() {
- return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
- this, arguments, 0, %_ArgumentsLength())
- }
-}
-
-function DerivedGetTrap(receiver, name) {
- var desc = this.getPropertyDescriptor(name)
- if (IS_UNDEFINED(desc)) { return desc }
- if ('value' in desc) {
- return desc.value
- } else {
- if (IS_UNDEFINED(desc.get)) { return desc.get }
- // The proposal says: desc.get.call(receiver)
- return %_Call(desc.get, receiver)
- }
-}
-
-function DerivedSetTrap(receiver, name, val) {
- var desc = this.getOwnPropertyDescriptor(name)
- if (desc) {
- if ('writable' in desc) {
- if (desc.writable) {
- desc.value = val
- this.defineProperty(name, desc)
- return true
- } else {
- return false
- }
- } else { // accessor
- if (desc.set) {
- // The proposal says: desc.set.call(receiver, val)
- %_Call(desc.set, receiver, val)
- return true
- } else {
- return false
- }
- }
- }
- desc = this.getPropertyDescriptor(name)
- if (desc) {
- if ('writable' in desc) {
- if (desc.writable) {
- // fall through
- } else {
- return false
- }
- } else { // accessor
- if (desc.set) {
- // The proposal says: desc.set.call(receiver, val)
- %_Call(desc.set, receiver, val)
- return true
- } else {
- return false
- }
- }
- }
- this.defineProperty(name, {
- value: val,
- writable: true,
- enumerable: true,
- configurable: true});
- return true;
-}
-
-function DerivedHasTrap(name) {
- return !!this.getPropertyDescriptor(name)
-}
-
-function DerivedHasOwnTrap(name) {
- return !!this.getOwnPropertyDescriptor(name)
-}
-
-function DerivedKeysTrap() {
- var names = this.getOwnPropertyNames()
- var enumerableNames = []
- for (var i = 0, count = 0; i < names.length; ++i) {
- var name = names[i]
- if (IS_SYMBOL(name)) continue
- var desc = this.getOwnPropertyDescriptor(TO_STRING(name))
- if (!IS_UNDEFINED(desc) && desc.enumerable) {
- enumerableNames[count++] = names[i]
- }
+// Implements part of ES6 9.5.11 Proxy.[[Enumerate]]:
+// Call the trap, which should return an iterator, exhaust the iterator,
+// and return an array containing the values.
+function ProxyEnumerate(trap, handler, target) {
+ // 7. Let trapResult be ? Call(trap, handler, «target»).
+ var trap_result = %_Call(trap, handler, target);
+ // 8. If Type(trapResult) is not Object, throw a TypeError exception.
+ if (!IS_RECEIVER(trap_result)) {
+ throw MakeTypeError(kProxyEnumerateNonObject);
}
- return enumerableNames
-}
-
-function DerivedEnumerateTrap() {
- var names = this.getPropertyNames()
- var enumerableNames = []
- for (var i = 0, count = 0; i < names.length; ++i) {
- var name = names[i]
- if (IS_SYMBOL(name)) continue
- var desc = this.getPropertyDescriptor(TO_STRING(name))
- if (!IS_UNDEFINED(desc)) {
- if (!desc.configurable) {
- throw MakeTypeError(kProxyPropNotConfigurable,
- this, name, "getPropertyDescriptor")
- }
- if (desc.enumerable) enumerableNames[count++] = names[i]
+ // 9. Return trapResult.
+ var result = [];
+ for (var it = trap_result.next(); !it.done; it = trap_result.next()) {
+ var key = it.value;
+ // Not yet spec'ed as of 2015-11-25, but will be spec'ed soon:
+ // If the iterator returns a non-string value, throw a TypeError.
+ if (!IS_STRING(key)) {
+ throw MakeTypeError(kProxyEnumerateNonString);
}
+ result.push(key);
}
- return enumerableNames
-}
-
-function ProxyEnumerate(proxy) {
- var handler = %GetHandler(proxy)
- if (IS_UNDEFINED(handler.enumerate)) {
- return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
- } else {
- return ToNameArray(handler.enumerate(), "enumerate", false)
- }
+ return result;
}
//-------------------------------------------------------------------
-var Proxy = new GlobalObject();
-%AddNamedProperty(global, "Proxy", Proxy, DONT_ENUM);
-
//Set up non-enumerable properties of the Proxy object.
-utils.InstallFunctions(Proxy, DONT_ENUM, [
- "create", ProxyCreate,
- "createFunction", ProxyCreateFunction
-])
+utils.InstallFunctions(GlobalProxy, DONT_ENUM, [
+ "revocable", ProxyCreateRevocable
+]);
// -------------------------------------------------------------------
// Exports
-utils.Export(function(to) {
- to.ProxyDelegateCallAndConstruct = DelegateCallAndConstruct;
- to.ProxyDerivedHasOwnTrap = DerivedHasOwnTrap;
- to.ProxyDerivedKeysTrap = DerivedKeysTrap;
-});
-
%InstallToContext([
- "derived_get_trap", DerivedGetTrap,
- "derived_has_trap", DerivedHasTrap,
- "derived_set_trap", DerivedSetTrap,
"proxy_enumerate", ProxyEnumerate,
]);
diff --git a/deps/v8/src/js/regexp.js b/deps/v8/src/js/regexp.js
index 55466dc03d..a163952451 100644
--- a/deps/v8/src/js/regexp.js
+++ b/deps/v8/src/js/regexp.js
@@ -12,9 +12,12 @@
var FLAG_harmony_tolength;
var GlobalObject = global.Object;
var GlobalRegExp = global.RegExp;
+var GlobalRegExpPrototype;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MakeTypeError;
+var matchSymbol = utils.ImportNow("match_symbol");
+var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
utils.ImportFromExperimental(function(from) {
@@ -43,67 +46,80 @@ var RegExpLastMatchInfo = new InternalPackedArray(
// -------------------------------------------------------------------
-// A recursive descent parser for Patterns according to the grammar of
-// ECMA-262 15.10.1, with deviations noted below.
-function DoConstructRegExp(object, pattern, flags) {
- // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
- if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
- flags = (REGEXP_GLOBAL(pattern) ? 'g' : '')
- + (REGEXP_IGNORE_CASE(pattern) ? 'i' : '')
- + (REGEXP_MULTILINE(pattern) ? 'm' : '')
- + (REGEXP_UNICODE(pattern) ? 'u' : '')
- + (REGEXP_STICKY(pattern) ? 'y' : '');
- pattern = REGEXP_SOURCE(pattern);
- }
+function IsRegExp(o) {
+ if (!IS_RECEIVER(o)) return false;
+ var is_regexp = o[matchSymbol];
+ if (!IS_UNDEFINED(is_regexp)) return TO_BOOLEAN(is_regexp);
+ return IS_REGEXP(o);
+}
+
+// ES6 section 21.2.3.2.2
+function RegExpInitialize(object, pattern, flags) {
pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
-
%RegExpInitializeAndCompile(object, pattern, flags);
+ return object;
+}
+
+
+function PatternFlags(pattern) {
+ return (REGEXP_GLOBAL(pattern) ? 'g' : '') +
+ (REGEXP_IGNORE_CASE(pattern) ? 'i' : '') +
+ (REGEXP_MULTILINE(pattern) ? 'm' : '') +
+ (REGEXP_UNICODE(pattern) ? 'u' : '') +
+ (REGEXP_STICKY(pattern) ? 'y' : '');
}
function RegExpConstructor(pattern, flags) {
- if (%_IsConstructCall()) {
- DoConstructRegExp(this, pattern, flags);
- } else {
- // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
- if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
+ var newtarget = new.target;
+ var pattern_is_regexp = IsRegExp(pattern);
+
+ if (IS_UNDEFINED(newtarget)) {
+ newtarget = GlobalRegExp;
+
+ // ES6 section 21.2.3.1 step 3.b
+ if (pattern_is_regexp && IS_UNDEFINED(flags) &&
+ pattern.constructor === newtarget) {
return pattern;
}
- return new GlobalRegExp(pattern, flags);
}
+
+ if (IS_REGEXP(pattern)) {
+ if (IS_UNDEFINED(flags)) flags = PatternFlags(pattern);
+ pattern = REGEXP_SOURCE(pattern);
+
+ } else if (pattern_is_regexp) {
+ var input_pattern = pattern;
+ pattern = pattern.source;
+ if (IS_UNDEFINED(flags)) flags = input_pattern.flags;
+ }
+
+ var object = %NewObject(GlobalRegExp, newtarget);
+ return RegExpInitialize(object, pattern, flags);
}
-// Deprecated RegExp.prototype.compile method. We behave like the constructor
-// were called again. In SpiderMonkey, this method returns the regexp object.
-// In JSC, it returns undefined. For compatibility with JSC, we match their
-// behavior.
+
function RegExpCompileJS(pattern, flags) {
- // Both JSC and SpiderMonkey treat a missing pattern argument as the
- // empty subject string, and an actual undefined value passed as the
- // pattern as the string 'undefined'. Note that JSC is inconsistent
- // here, treating undefined values differently in
- // RegExp.prototype.compile and in the constructor, where they are
- // the empty string. For compatibility with JSC, we match their
- // behavior.
- if (this == GlobalRegExp.prototype) {
- // We don't allow recompiling RegExp.prototype.
+ if (!IS_REGEXP(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
- 'RegExp.prototype.compile', this);
+ "RegExp.prototype.compile", this);
}
- if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
- DoConstructRegExp(this, 'undefined', flags);
- } else {
- DoConstructRegExp(this, pattern, flags);
+
+ if (IS_REGEXP(pattern)) {
+ if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
+
+ flags = PatternFlags(pattern);
+ pattern = REGEXP_SOURCE(pattern);
}
+
+ return RegExpInitialize(this, pattern, flags);
}
function DoRegExpExec(regexp, string, index) {
- var result = %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
- return result;
+ return %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
}
@@ -257,6 +273,13 @@ function TrimRegExp(regexp) {
function RegExpToString() {
if (!IS_REGEXP(this)) {
+ // RegExp.prototype.toString() returns '/(?:)/' as a compatibility fix;
+ // a UseCounter is incremented to track it.
+ // TODO(littledan): Remove this workaround or standardize it
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeToString);
+ return '/(?:)/';
+ }
throw MakeTypeError(kIncompatibleMethodReceiver,
'RegExp.prototype.toString', this);
}
@@ -340,6 +363,35 @@ function RegExpSplit(string, limit) {
}
+// ES6 21.2.5.6.
+function RegExpMatch(string) {
+ // TODO(yangguo): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@match", this);
+ }
+ var subject = TO_STRING(string);
+
+ if (!REGEXP_GLOBAL(this)) return RegExpExecNoTests(this, subject, 0);
+ this.lastIndex = 0;
+ var result = %StringMatch(subject, this, RegExpLastMatchInfo);
+ return result;
+}
+
+
+// ES6 21.2.5.9.
+function RegExpSearch(string) {
+ // TODO(yangguo): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search", this);
+ }
+ var match = DoRegExpExec(this, TO_STRING(string), 0);
+ if (match) return match[CAPTURE0];
+ return -1;
+}
+
+
// Getters for the static properties lastMatch, lastParen, leftContext, and
// rightContext of the RegExp constructor. The properties are computed based
// on the captures array of the last successful match and the subject string
@@ -447,7 +499,8 @@ function RegExpGetSource() {
// -------------------------------------------------------------------
%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
-%FunctionSetPrototype(GlobalRegExp, new GlobalObject());
+GlobalRegExpPrototype = new GlobalObject();
+%FunctionSetPrototype(GlobalRegExp, GlobalRegExpPrototype);
%AddNamedProperty(
GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
%SetCode(GlobalRegExp, RegExpConstructor);
@@ -457,6 +510,8 @@ utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
"test", RegExpTest,
"toString", RegExpToString,
"compile", RegExpCompileJS,
+ matchSymbol, RegExpMatch,
+ searchSymbol, RegExpSearch,
splitSymbol, RegExpSplit,
]);
diff --git a/deps/v8/src/js/runtime.js b/deps/v8/src/js/runtime.js
index b5e23671f4..301d75a391 100644
--- a/deps/v8/src/js/runtime.js
+++ b/deps/v8/src/js/runtime.js
@@ -16,15 +16,22 @@
%CheckIsBootstrapping();
+var FLAG_harmony_species;
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
var GlobalString = global.String;
-var isConcatSpreadableSymbol =
- utils.ImportNow("is_concat_spreadable_symbol");
var MakeRangeError;
+var MakeTypeError;
+var speciesSymbol;
utils.Import(function(from) {
MakeRangeError = from.MakeRangeError;
+ MakeTypeError = from.MakeTypeError;
+ speciesSymbol = from.species_symbol;
+});
+
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_species = from.FLAG_harmony_species;
});
// ----------------------------------------------------------------------------
@@ -34,127 +41,6 @@ utils.Import(function(from) {
-----------------------------
*/
-function APPLY_PREPARE(args) {
- var length;
-
- // First check that the receiver is callable.
- if (!IS_CALLABLE(this)) {
- throw %make_type_error(kApplyNonFunction, TO_STRING(this), typeof this);
- }
-
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength) {
- return length;
- }
- }
-
- length = (args == null) ? 0 : TO_UINT32(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
-
- // Make sure the arguments list has the right type.
- if (args != null && !IS_SPEC_OBJECT(args)) {
- throw %make_type_error(kWrongArgs, "Function.prototype.apply");
- }
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function REFLECT_APPLY_PREPARE(args) {
- var length;
-
- // First check that the receiver is callable.
- if (!IS_CALLABLE(this)) {
- throw %make_type_error(kApplyNonFunction, TO_STRING(this), typeof this);
- }
-
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength) {
- return length;
- }
- }
-
- if (!IS_SPEC_OBJECT(args)) {
- throw %make_type_error(kWrongArgs, "Reflect.apply");
- }
-
- length = TO_LENGTH(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
-function REFLECT_CONSTRUCT_PREPARE(
- args, newTarget) {
- var length;
- var ctorOk = IS_CALLABLE(this) && %IsConstructor(this);
- var newTargetOk = IS_CALLABLE(newTarget) && %IsConstructor(newTarget);
-
- // First check whether length is a positive Smi and args is an
- // array. This is the fast case. If this fails, we do the slow case
- // that takes care of more eventualities.
- if (IS_ARRAY(args)) {
- length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
- ctorOk && newTargetOk) {
- return length;
- }
- }
-
- if (!ctorOk) {
- if (!IS_CALLABLE(this)) {
- throw %make_type_error(kCalledNonCallable, TO_STRING(this));
- } else {
- throw %make_type_error(kNotConstructor, TO_STRING(this));
- }
- }
-
- if (!newTargetOk) {
- if (!IS_CALLABLE(newTarget)) {
- throw %make_type_error(kCalledNonCallable, TO_STRING(newTarget));
- } else {
- throw %make_type_error(kNotConstructor, TO_STRING(newTarget));
- }
- }
-
- if (!IS_SPEC_OBJECT(args)) {
- throw %make_type_error(kWrongArgs, "Reflect.construct");
- }
-
- length = TO_LENGTH(args.length);
-
- // We can handle any number of apply arguments if the stack is
- // big enough, but sanity check the value to avoid overflow when
- // multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
-
- // Return the length which is the number of arguments to copy to the
- // stack. It is guaranteed to be a small integer at this point.
- return length;
-}
-
-
function CONCAT_ITERABLE_TO_ARRAY(iterable) {
return %concat_iterable_to_array(this, iterable);
};
@@ -219,15 +105,6 @@ function AddIndexedProperty(obj, index, value) {
%SetForceInlineFlag(AddIndexedProperty);
-// ES6, draft 10-14-14, section 22.1.3.1.1
-function IsConcatSpreadable(O) {
- if (!IS_SPEC_OBJECT(O)) return false;
- var spreadable = O[isConcatSpreadableSymbol];
- if (IS_UNDEFINED(spreadable)) return IS_ARRAY(O);
- return TO_BOOLEAN(spreadable);
-}
-
-
function ToPositiveInteger(x, rangeErrorIndex) {
var i = TO_INTEGER_MAP_MINUS_ZERO(x);
if (i < 0) throw MakeRangeError(rangeErrorIndex);
@@ -248,6 +125,39 @@ function MinSimple(a, b) {
%SetForceInlineFlag(MaxSimple);
%SetForceInlineFlag(MinSimple);
+
+// ES2015 7.3.20
+// For the fallback with --harmony-species off, there are two possible choices:
+// - "conservative": return defaultConstructor
+// - "not conservative": return object.constructor
+// This fallback path is only needed in the transition to ES2015, and the
+// choice is made simply to preserve the previous behavior so that we don't
+// have a three-step upgrade: old behavior, unspecified intermediate behavior,
+// and ES2015.
+// In some cases, we were "conservative" (e.g., ArrayBuffer, RegExp), and in
+// other cases we were "not conservative" (e.g., TypedArray, Promise).
+function SpeciesConstructor(object, defaultConstructor, conservative) {
+ if (FLAG_harmony_species) {
+ var constructor = object.constructor;
+ if (IS_UNDEFINED(constructor)) {
+ return defaultConstructor;
+ }
+ if (!IS_RECEIVER(constructor)) {
+ throw MakeTypeError(kConstructorNotReceiver);
+ }
+ var species = constructor[speciesSymbol];
+ if (IS_NULL_OR_UNDEFINED(species)) {
+ return defaultConstructor;
+ }
+ if (%IsConstructor(species)) {
+ return species;
+ }
+ throw MakeTypeError(kSpeciesNotConstructor);
+ } else {
+ return conservative ? defaultConstructor : object.constructor;
+ }
+}
+
//----------------------------------------------------------------------------
// NOTE: Setting the prototype for Array must take place as early as
@@ -267,13 +177,11 @@ utils.Export(function(to) {
to.SameValue = SameValue;
to.SameValueZero = SameValueZero;
to.ToPositiveInteger = ToPositiveInteger;
+ to.SpeciesConstructor = SpeciesConstructor;
});
%InstallToContext([
- "apply_prepare_builtin", APPLY_PREPARE,
"concat_iterable_to_array_builtin", CONCAT_ITERABLE_TO_ARRAY,
- "reflect_apply_prepare_builtin", REFLECT_APPLY_PREPARE,
- "reflect_construct_prepare_builtin", REFLECT_CONSTRUCT_PREPARE,
]);
%InstallToContext([
diff --git a/deps/v8/src/js/string-iterator.js b/deps/v8/src/js/string-iterator.js
index ece207cd74..3c331dd1a2 100644
--- a/deps/v8/src/js/string-iterator.js
+++ b/deps/v8/src/js/string-iterator.js
@@ -46,7 +46,7 @@ function StringIteratorNext() {
var value = UNDEFINED;
var done = true;
- if (!IS_SPEC_OBJECT(iterator) ||
+ if (!IS_RECEIVER(iterator) ||
!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'String Iterator.prototype.next');
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index 8ff5b3c008..b220038b74 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -17,9 +17,13 @@ var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MakeRangeError;
var MakeTypeError;
+var MathMax;
+var MathMin;
+var matchSymbol = utils.ImportNow("match_symbol");
var RegExpExec;
var RegExpExecNoTests;
var RegExpLastMatchInfo;
+var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
@@ -27,6 +31,8 @@ utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
+ MathMax = from.MathMax;
+ MathMin = from.MathMin;
RegExpExec = from.RegExpExec;
RegExpExecNoTests = from.RegExpExecNoTests;
RegExpLastMatchInfo = from.RegExpLastMatchInfo;
@@ -150,19 +156,21 @@ function StringLocaleCompareJS(other) {
}
-// ECMA-262 section 15.5.4.10
-function StringMatchJS(regexp) {
+// ES6 21.1.3.11.
+function StringMatchJS(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
- var subject = TO_STRING(this);
- if (IS_REGEXP(regexp)) {
- if (!REGEXP_GLOBAL(regexp)) return RegExpExecNoTests(regexp, subject, 0);
- var result = %StringMatch(subject, regexp, RegExpLastMatchInfo);
- regexp.lastIndex = 0;
- return result;
+ if (!IS_NULL_OR_UNDEFINED(pattern)) {
+ var matcher = pattern[matchSymbol];
+ if (!IS_UNDEFINED(matcher)) {
+ return %_Call(matcher, pattern, this);
+ }
}
+
+ var subject = TO_STRING(this);
+
// Non-regexp argument.
- regexp = new GlobalRegExp(regexp);
+ var regexp = new GlobalRegExp(pattern);
return RegExpExecNoTests(regexp, subject, 0);
}
@@ -495,21 +503,20 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
}
-// ECMA-262 section 15.5.4.12
-function StringSearch(re) {
+// ES6 21.1.3.15.
+function StringSearch(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
- var regexp;
- if (IS_REGEXP(re)) {
- regexp = re;
- } else {
- regexp = new GlobalRegExp(re);
- }
- var match = RegExpExec(regexp, TO_STRING(this), 0);
- if (match) {
- return match[CAPTURE0];
+ if (!IS_NULL_OR_UNDEFINED(pattern)) {
+ var searcher = pattern[searchSymbol];
+ if (!IS_UNDEFINED(searcher)) {
+ return %_Call(searcher, pattern, this);
+ }
}
- return -1;
+
+ var subject = TO_STRING(this);
+ var regexp = new GlobalRegExp(pattern);
+ return %_Call(regexp[searchSymbol], regexp, subject);
}
@@ -562,9 +569,6 @@ function StringSplitJS(separator, limit) {
if (!IS_NULL_OR_UNDEFINED(separator)) {
var splitter = separator[splitSymbol];
if (!IS_UNDEFINED(splitter)) {
- if (!IS_CALLABLE(splitter)) {
- throw MakeTypeError(kCalledNonCallable, splitter);
- }
return %_Call(splitter, separator, this, limit);
}
}
@@ -839,15 +843,21 @@ function StringSup() {
return "<sup>" + TO_STRING(this) + "</sup>";
}
-// ES6 draft 01-20-14, section 21.1.3.13
+// ES6, section 21.1.3.13
function StringRepeat(count) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
var s = TO_STRING(this);
var n = TO_INTEGER(count);
+
+ if (n < 0 || n === INFINITY) throw MakeRangeError(kInvalidCountValue);
+
+ // Early return to allow an arbitrarily-large repeat of the empty string.
+ if (s.length === 0) return "";
+
// The maximum string length is stored in a smi, so a longer repeat
// must result in a range error.
- if (n < 0 || n > %_MaxSmi()) throw MakeRangeError(kInvalidCountValue);
+ if (n > %_MaxSmi()) throw MakeRangeError(kInvalidCountValue);
var r = "";
while (true) {
@@ -879,21 +889,13 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
var s_len = s.length;
- if (pos < 0) pos = 0;
- if (pos > s_len) pos = s_len;
+ var start = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
-
- if (ss_len + pos > s_len) {
+ if (ss_len + start > s_len) {
return false;
}
- for (var i = 0; i < ss_len; i++) {
- if (%_StringCharCodeAt(s, pos + i) !== %_StringCharCodeAt(ss, i)) {
- return false;
- }
- }
-
- return true;
+ return %_SubString(s, start, start + ss_len) === ss;
}
@@ -917,22 +919,14 @@ function StringEndsWith(searchString /* position */) { // length == 1
}
}
- if (pos < 0) pos = 0;
- if (pos > s_len) pos = s_len;
+ var end = MathMin(MathMax(pos, 0), s_len);
var ss_len = ss.length;
- pos = pos - ss_len;
-
- if (pos < 0) {
+ var start = end - ss_len;
+ if (start < 0) {
return false;
}
- for (var i = 0; i < ss_len; i++) {
- if (%_StringCharCodeAt(s, pos + i) !== %_StringCharCodeAt(ss, i)) {
- return false;
- }
- }
-
- return true;
+ return %_SubString(s, start, start + ss_len) === ss;
}
diff --git a/deps/v8/src/js/symbol.js b/deps/v8/src/js/symbol.js
index 62ef0dd216..5be6e0168d 100644
--- a/deps/v8/src/js/symbol.js
+++ b/deps/v8/src/js/symbol.js
@@ -18,14 +18,12 @@ var isConcatSpreadableSymbol =
utils.ImportNow("is_concat_spreadable_symbol");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var MakeTypeError;
-var ObjectGetOwnPropertyKeys;
var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
MakeTypeError = from.MakeTypeError;
- ObjectGetOwnPropertyKeys = from.ObjectGetOwnPropertyKeys;
});
// -------------------------------------------------------------------
@@ -80,9 +78,7 @@ function SymbolKeyFor(symbol) {
function ObjectGetOwnPropertySymbols(obj) {
obj = TO_OBJECT(obj);
- // TODO(arv): Proxies use a shared trap for String and Symbol keys.
-
- return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_STRING);
+ return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_STRINGS);
}
// -------------------------------------------------------------------
diff --git a/deps/v8/src/js/templates.js b/deps/v8/src/js/templates.js
index eee6b7d7e7..7236d5c130 100644
--- a/deps/v8/src/js/templates.js
+++ b/deps/v8/src/js/templates.js
@@ -70,10 +70,10 @@ function GetTemplateCallSite(siteObj, rawStrings, hash) {
if (!IS_UNDEFINED(cached)) return cached;
- %AddNamedProperty(siteObj, "raw", %ObjectFreeze(rawStrings),
+ %AddNamedProperty(siteObj, "raw", %object_freeze(rawStrings),
READ_ONLY | DONT_ENUM | DONT_DELETE);
- return SetCachedCallSite(%ObjectFreeze(siteObj), hash);
+ return SetCachedCallSite(%object_freeze(siteObj), hash);
}
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index db8dabe867..fd668a57fd 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -18,6 +18,7 @@ var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalDataView = global.DataView;
var GlobalObject = global.Object;
+var InternalArray = utils.InternalArray;
var InnerArrayCopyWithin;
var InnerArrayEvery;
var InnerArrayFill;
@@ -25,10 +26,10 @@ var InnerArrayFilter;
var InnerArrayFind;
var InnerArrayFindIndex;
var InnerArrayForEach;
+var InnerArrayIncludes;
var InnerArrayIndexOf;
var InnerArrayJoin;
var InnerArrayLastIndexOf;
-var InnerArrayMap;
var InnerArrayReduce;
var InnerArrayReduceRight;
var InnerArraySome;
@@ -41,6 +42,7 @@ var MakeTypeError;
var MaxSimple;
var MinSimple;
var PackedArrayReverse;
+var SpeciesConstructor;
var ToPositiveInteger;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
@@ -75,10 +77,10 @@ utils.Import(function(from) {
InnerArrayFind = from.InnerArrayFind;
InnerArrayFindIndex = from.InnerArrayFindIndex;
InnerArrayForEach = from.InnerArrayForEach;
+ InnerArrayIncludes = from.InnerArrayIncludes;
InnerArrayIndexOf = from.InnerArrayIndexOf;
InnerArrayJoin = from.InnerArrayJoin;
InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
- InnerArrayMap = from.InnerArrayMap;
InnerArrayReduce = from.InnerArrayReduce;
InnerArrayReduceRight = from.InnerArrayReduceRight;
InnerArraySome = from.InnerArraySome;
@@ -87,17 +89,52 @@ utils.Import(function(from) {
IsNaN = from.IsNaN;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
- MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
- MaxSimple = from.MaxSimple;
- MinSimple = from.MinSimple;
MinSimple = from.MinSimple;
PackedArrayReverse = from.PackedArrayReverse;
+ SpeciesConstructor = from.SpeciesConstructor;
ToPositiveInteger = from.ToPositiveInteger;
});
// --------------- Typed Arrays ---------------------
+function TypedArrayDefaultConstructor(typedArray) {
+ switch (%_ClassOf(typedArray)) {
+macro TYPED_ARRAY_CONSTRUCTOR_CASE(ARRAY_ID, NAME, ELEMENT_SIZE)
+ case "NAME":
+ return GlobalNAME;
+endmacro
+TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR_CASE)
+ }
+ // The TypeError should not be generated since all callers should
+ // have already called ValidateTypedArray.
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "TypedArrayDefaultConstructor", this);
+}
+
+function TypedArrayCreate(constructor, arg0, arg1, arg2) {
+ if (IS_UNDEFINED(arg1)) {
+ var newTypedArray = new constructor(arg0);
+ } else {
+ var newTypedArray = new constructor(arg0, arg1, arg2);
+ }
+ if (!%_IsTypedArray(newTypedArray)) throw MakeTypeError(kNotTypedArray);
+ // TODO(littledan): Check for being detached, here and elsewhere
+ // All callers where the first argument is a Number have no additional
+ // arguments.
+ if (IS_NUMBER(arg0) && %_TypedArrayGetLength(newTypedArray) < arg0) {
+ throw MakeTypeError(kTypedArrayTooShort);
+ }
+ return newTypedArray;
+}
+
+function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2, conservative) {
+ var defaultConstructor = TypedArrayDefaultConstructor(exemplar);
+ var constructor = SpeciesConstructor(exemplar, defaultConstructor,
+ conservative);
+ return TypedArrayCreate(constructor, arg0, arg1, arg2);
+}
+
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
if (!IS_UNDEFINED(byteOffset)) {
@@ -203,13 +240,17 @@ function NAMEConstructByIterable(obj, iterable, iteratorFn) {
}
function NAMEConstructor(arg1, arg2, arg3) {
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
} else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
NAMEConstructByLength(this, arg1);
} else {
+ // TODO(littledan): If arg1 is a TypedArray, follow the constructor
+ // path in ES2015 22.2.4.3, and call SpeciesConstructor, in a
+ // path that seems to be an optimized version of what's below, but
+ // in an observably different way.
var iteratorFn = arg1[iteratorSymbol];
if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
NAMEConstructByArrayLike(this, arg1);
@@ -222,27 +263,7 @@ function NAMEConstructor(arg1, arg2, arg3) {
}
}
-function NAME_GetBuffer() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.buffer", this);
- }
- return %TypedArrayGetBuffer(this);
-}
-
-function NAME_GetByteLength() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.byteLength", this);
- }
- return %_ArrayBufferViewGetByteLength(this);
-}
-
-function NAME_GetByteOffset() {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.byteOffset", this);
- }
- return %_ArrayBufferViewGetByteOffset(this);
-}
-
+// TODO(littledan): Remove this performance workaround BUG(chromium:579905)
function NAME_GetLength() {
if (!(%_ClassOf(this) === 'NAME')) {
throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.length", this);
@@ -251,9 +272,6 @@ function NAME_GetLength() {
}
function NAMESubArray(begin, end) {
- if (!(%_ClassOf(this) === 'NAME')) {
- throw MakeTypeError(kIncompatibleMethodReceiver, "NAME.subarray", this);
- }
var beginInt = TO_INTEGER(begin);
if (!IS_UNDEFINED(end)) {
var endInt = TO_INTEGER(end);
@@ -282,13 +300,63 @@ function NAMESubArray(begin, end) {
var newLength = endInt - beginInt;
var beginByteOffset =
%_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
- return new GlobalNAME(%TypedArrayGetBuffer(this),
- beginByteOffset, newLength);
+ return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
+ beginByteOffset, newLength, true);
}
endmacro
TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR)
+function TypedArraySubArray(begin, end) {
+ switch (%_ClassOf(this)) {
+macro TYPED_ARRAY_SUBARRAY_CASE(ARRAY_ID, NAME, ELEMENT_SIZE)
+ case "NAME":
+ return %_Call(NAMESubArray, this, begin, end);
+endmacro
+TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
+ }
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.subarray", this);
+}
+%SetForceInlineFlag(TypedArraySubArray);
+
+function TypedArrayGetBuffer() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.buffer", this);
+ }
+ return %TypedArrayGetBuffer(this);
+}
+%SetForceInlineFlag(TypedArrayGetBuffer);
+
+function TypedArrayGetByteLength() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.byteLength", this);
+ }
+ return %_ArrayBufferViewGetByteLength(this);
+}
+%SetForceInlineFlag(TypedArrayGetByteLength);
+
+function TypedArrayGetByteOffset() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.byteOffset", this);
+ }
+ return %_ArrayBufferViewGetByteOffset(this);
+}
+%SetForceInlineFlag(TypedArrayGetByteOffset);
+
+function TypedArrayGetLength() {
+ if (!%_IsTypedArray(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "get TypedArray.prototype.length", this);
+ }
+ return %_TypedArrayGetLength(this);
+}
+%SetForceInlineFlag(TypedArrayGetLength);
+
+
function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
if (offset > 0) {
@@ -401,32 +469,6 @@ function TypedArrayGetToStringTag() {
}
-function ConstructTypedArray(constructor, arg) {
- // TODO(littledan): This is an approximation of the spec, which requires
- // that only real TypedArray classes should be accepted (22.2.2.1.1)
- if (!%IsConstructor(constructor) || IS_UNDEFINED(constructor.prototype) ||
- !%HasOwnProperty(constructor.prototype, "BYTES_PER_ELEMENT")) {
- throw MakeTypeError(kNotTypedArray);
- }
-
- // TODO(littledan): The spec requires that, rather than directly calling
- // the constructor, a TypedArray is created with the proper proto and
- // underlying size and element size, and elements are put in one by one.
- // By contrast, this would allow subclasses to make a radically different
- // constructor with different semantics.
- return new constructor(arg);
-}
-
-
-function ConstructTypedArrayLike(typedArray, arg) {
- // TODO(littledan): The spec requires that we actuallly use
- // typedArray.constructor[Symbol.species] (bug v8:4093)
- // Also, it should default to the default constructor from
- // table 49 if typedArray.constructor doesn't exist.
- return ConstructTypedArray(typedArray.constructor, arg);
-}
-
-
function TypedArrayCopyWithin(target, start, end) {
if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
@@ -472,12 +514,19 @@ function TypedArrayFill(value, start, end) {
// ES6 draft 07-15-13, section 22.2.3.9
-function TypedArrayFilter(predicate, thisArg) {
+function TypedArrayFilter(f, thisArg) {
if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- var array = InnerArrayFilter(predicate, thisArg, this, length);
- return ConstructTypedArrayLike(this, array);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ var result = new InternalArray();
+ InnerArrayFilter(f, thisArg, this, length, result);
+ var captured = result.length;
+ var output = TypedArraySpeciesCreate(this, captured);
+ for (var i = 0; i < captured; i++) {
+ output[i] = result[i];
+ }
+ return output;
}
%FunctionSetLength(TypedArrayFilter, 1);
@@ -571,14 +620,17 @@ function TypedArrayLastIndexOf(element, index) {
// ES6 draft 07-15-13, section 22.2.3.18
-function TypedArrayMap(predicate, thisArg) {
+function TypedArrayMap(f, thisArg) {
if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
- // TODO(littledan): Preallocate rather than making an intermediate
- // InternalArray, for better performance.
var length = %_TypedArrayGetLength(this);
- var array = InnerArrayMap(predicate, thisArg, this, length);
- return ConstructTypedArrayLike(this, array);
+ var result = TypedArraySpeciesCreate(this, length);
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ for (var i = 0; i < length; i++) {
+ var element = this[i];
+ result[i] = %_Call(f, thisArg, element, i, this);
+ }
+ return result;
}
%FunctionSetLength(TypedArrayMap, 1);
@@ -670,15 +722,13 @@ function TypedArraySlice(start, end) {
}
var count = MaxSimple(final - k, 0);
- var array = ConstructTypedArrayLike(this, count);
+ var array = TypedArraySpeciesCreate(this, count);
// The code below is the 'then' branch; the 'else' branch species
// a memcpy. Because V8 doesn't canonicalize NaN, the difference is
// unobservable.
var n = 0;
while (k < final) {
var kValue = this[k];
- // TODO(littledan): The spec says to throw on an error in setting;
- // does this throw?
array[n] = kValue;
k++;
n++;
@@ -687,10 +737,21 @@ function TypedArraySlice(start, end) {
}
+// ES2016 draft, section 22.2.3.14
+function TypedArrayIncludes(searchElement, fromIndex) {
+ if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
+
+ var length = %_TypedArrayGetLength(this);
+
+ return InnerArrayIncludes(searchElement, fromIndex, this, length);
+}
+%FunctionSetLength(TypedArrayIncludes, 1);
+
+
// ES6 draft 08-24-14, section 22.2.2.2
function TypedArrayOf() {
var length = %_ArgumentsLength();
- var array = new this(length);
+ var array = TypedArrayCreate(this, length);
for (var i = 0; i < length; i++) {
array[i] = %_Arguments(i);
}
@@ -701,63 +762,84 @@ function TypedArrayOf() {
function TypedArrayFrom(source, mapfn, thisArg) {
// TODO(littledan): Investigate if there is a receiver which could be
// faster to accumulate on than Array, e.g., a TypedVector.
+ // TODO(littledan): Rewrite this code to ensure that things happen
+ // in the right order, e.g., the constructor needs to be called before
+ // the mapping function on array-likes.
var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
- return ConstructTypedArray(this, array);
+ return TypedArrayCreate(this, array);
}
%FunctionSetLength(TypedArrayFrom, 1);
+function TypedArray() {
+ if (IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kConstructorNonCallable, "TypedArray");
+ }
+ if (new.target === TypedArray) {
+ throw MakeTypeError(kConstructAbstractClass, "TypedArray");
+ }
+}
+
// -------------------------------------------------------------------
-// TODO(littledan): Fix the TypedArray proto chain (bug v8:4085).
+%FunctionSetPrototype(TypedArray, new GlobalObject());
+%AddNamedProperty(TypedArray.prototype,
+ "constructor", TypedArray, DONT_ENUM);
+utils.InstallFunctions(TypedArray, DONT_ENUM | DONT_DELETE | READ_ONLY, [
+ "from", TypedArrayFrom,
+ "of", TypedArrayOf
+]);
+utils.InstallGetter(TypedArray.prototype, "buffer", TypedArrayGetBuffer);
+utils.InstallGetter(TypedArray.prototype, "byteOffset", TypedArrayGetByteOffset,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetter(TypedArray.prototype, "byteLength",
+ TypedArrayGetByteLength, DONT_ENUM | DONT_DELETE);
+utils.InstallGetter(TypedArray.prototype, "length", TypedArrayGetLength,
+ DONT_ENUM | DONT_DELETE);
+utils.InstallGetter(TypedArray.prototype, toStringTagSymbol,
+ TypedArrayGetToStringTag);
+utils.InstallFunctions(TypedArray.prototype, DONT_ENUM, [
+ "subarray", TypedArraySubArray,
+ "set", TypedArraySet,
+ "copyWithin", TypedArrayCopyWithin,
+ "every", TypedArrayEvery,
+ "fill", TypedArrayFill,
+ "filter", TypedArrayFilter,
+ "find", TypedArrayFind,
+ "findIndex", TypedArrayFindIndex,
+ "includes", TypedArrayIncludes,
+ "indexOf", TypedArrayIndexOf,
+ "join", TypedArrayJoin,
+ "lastIndexOf", TypedArrayLastIndexOf,
+ "forEach", TypedArrayForEach,
+ "map", TypedArrayMap,
+ "reduce", TypedArrayReduce,
+ "reduceRight", TypedArrayReduceRight,
+ "reverse", TypedArrayReverse,
+ "slice", TypedArraySlice,
+ "some", TypedArraySome,
+ "sort", TypedArraySort,
+ "toString", TypedArrayToString,
+ "toLocaleString", TypedArrayToLocaleString
+]);
+
+
macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%SetCode(GlobalNAME, NAMEConstructor);
%FunctionSetPrototype(GlobalNAME, new GlobalObject());
+ %InternalSetPrototype(GlobalNAME, TypedArray);
+ %InternalSetPrototype(GlobalNAME.prototype, TypedArray.prototype);
%AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
- utils.InstallFunctions(GlobalNAME, DONT_ENUM | DONT_DELETE | READ_ONLY, [
- "from", TypedArrayFrom,
- "of", TypedArrayOf
- ]);
-
%AddNamedProperty(GlobalNAME.prototype,
"constructor", global.NAME, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype,
"BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, "buffer", NAME_GetBuffer);
- utils.InstallGetter(GlobalNAME.prototype, "byteOffset", NAME_GetByteOffset,
- DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, "byteLength", NAME_GetByteLength,
- DONT_ENUM | DONT_DELETE);
+ // TODO(littledan): Remove this performance workaround BUG(chromium:579905)
utils.InstallGetter(GlobalNAME.prototype, "length", NAME_GetLength,
DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, toStringTagSymbol,
- TypedArrayGetToStringTag);
- utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
- "subarray", NAMESubArray,
- "set", TypedArraySet,
- "copyWithin", TypedArrayCopyWithin,
- "every", TypedArrayEvery,
- "fill", TypedArrayFill,
- "filter", TypedArrayFilter,
- "find", TypedArrayFind,
- "findIndex", TypedArrayFindIndex,
- "indexOf", TypedArrayIndexOf,
- "join", TypedArrayJoin,
- "lastIndexOf", TypedArrayLastIndexOf,
- "forEach", TypedArrayForEach,
- "map", TypedArrayMap,
- "reduce", TypedArrayReduce,
- "reduceRight", TypedArrayReduceRight,
- "reverse", TypedArrayReverse,
- "slice", TypedArraySlice,
- "some", TypedArraySome,
- "sort", TypedArraySort,
- "toString", TypedArrayToString,
- "toLocaleString", TypedArrayToLocaleString
- ]);
endmacro
TYPED_ARRAYS(SETUP_TYPED_ARRAY)
@@ -765,31 +847,33 @@ TYPED_ARRAYS(SETUP_TYPED_ARRAY)
// --------------------------- DataView -----------------------------
function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
- if (%_IsConstructCall()) {
- // TODO(binji): support SharedArrayBuffers?
- if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
- if (!IS_UNDEFINED(byteOffset)) {
- byteOffset = ToPositiveInteger(byteOffset, kInvalidDataViewOffset);
- }
- if (!IS_UNDEFINED(byteLength)) {
- byteLength = TO_INTEGER(byteLength);
- }
+ if (IS_UNDEFINED(new.target)) {
+ throw MakeTypeError(kConstructorNotFunction, "DataView");
+ }
+
+ // TODO(binji): support SharedArrayBuffers?
+ if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
+ if (!IS_UNDEFINED(byteOffset)) {
+ byteOffset = ToPositiveInteger(byteOffset, kInvalidDataViewOffset);
+ }
+ if (!IS_UNDEFINED(byteLength)) {
+ byteLength = TO_INTEGER(byteLength);
+ }
- var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
+ var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
- var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
- if (offset > bufferByteLength) throw MakeRangeError(kInvalidDataViewOffset);
+ var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
+ if (offset > bufferByteLength) throw MakeRangeError(kInvalidDataViewOffset);
- var length = IS_UNDEFINED(byteLength)
- ? bufferByteLength - offset
- : byteLength;
- if (length < 0 || offset + length > bufferByteLength) {
- throw new MakeRangeError(kInvalidDataViewLength);
- }
- %_DataViewInitialize(this, buffer, offset, length);
- } else {
- throw MakeTypeError(kConstructorNotFunction, "DataView");
+ var length = IS_UNDEFINED(byteLength)
+ ? bufferByteLength - offset
+ : byteLength;
+ if (length < 0 || offset + length > bufferByteLength) {
+ throw new MakeRangeError(kInvalidDataViewLength);
}
+ var result = %NewObject(GlobalDataView, new.target);
+ %_DataViewInitialize(result, buffer, offset, length);
+ return result;
}
function DataViewGetBufferJS() {
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index f6b394c2e7..26447dac5d 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -9,10 +9,8 @@
// ----------------------------------------------------------------------------
// Imports
-var FLAG_harmony_tostring;
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
-var GlobalFunction = global.Function;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
@@ -22,14 +20,11 @@ var MakeSyntaxError;
var MakeTypeError;
var MathAbs;
var NaN = %GetRootNaN();
+var ObjectToString = utils.ImportNow("object_to_string");
var ObserveBeginPerformSplice;
var ObserveEndPerformSplice;
var ObserveEnqueueSpliceRecord;
-var ProxyDelegateCallAndConstruct;
-var ProxyDerivedHasOwnTrap;
-var ProxyDerivedKeysTrap;
var SameValue = utils.ImportNow("SameValue");
-var StringIndexOf;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
@@ -40,34 +35,26 @@ utils.Import(function(from) {
ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
ObserveEndPerformSplice = from.ObserveEndPerformSplice;
ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
- StringIndexOf = from.StringIndexOf;
-});
-
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_tostring = from.FLAG_harmony_tostring;
- ProxyDelegateCallAndConstruct = from.ProxyDelegateCallAndConstruct;
- ProxyDerivedHasOwnTrap = from.ProxyDerivedHasOwnTrap;
- ProxyDerivedKeysTrap = from.ProxyDerivedKeysTrap;
});
// ----------------------------------------------------------------------------
-// ECMA 262 - 15.1.4
+// ES6 18.2.3 isNaN(number)
function GlobalIsNaN(number) {
number = TO_NUMBER(number);
return NUMBER_IS_NAN(number);
}
-// ECMA 262 - 15.1.5
+// ES6 18.2.2 isFinite(number)
function GlobalIsFinite(number) {
number = TO_NUMBER(number);
return NUMBER_IS_FINITE(number);
}
-// ECMA-262 - 15.1.2.2
+// ES6 18.2.5 parseInt(string, radix)
function GlobalParseInt(string, radix) {
if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
// Some people use parseInt instead of Math.floor. This
@@ -101,37 +88,26 @@ function GlobalParseInt(string, radix) {
}
-// ECMA-262 - 15.1.2.3
+// ES6 18.2.4 parseFloat(string)
function GlobalParseFloat(string) {
+ // 1. Let inputString be ? ToString(string).
string = TO_STRING(string);
if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
return %StringParseFloat(string);
}
-function GlobalEval(x) {
- if (!IS_STRING(x)) return x;
-
- var global_proxy = %GlobalProxy(GlobalEval);
-
- var f = %CompileString(x, false);
- if (!IS_FUNCTION(f)) return f;
-
- return %_Call(f, global_proxy);
-}
-
-
// ----------------------------------------------------------------------------
// Set up global object.
var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
utils.InstallConstants(global, [
- // ECMA 262 - 15.1.1.1.
- "NaN", NaN,
- // ECMA-262 - 15.1.1.2.
+ // ES6 18.1.1
"Infinity", INFINITY,
- // ECMA-262 - 15.1.1.2.
+ // ES6 18.1.2
+ "NaN", NaN,
+ // ES6 18.1.3
"undefined", UNDEFINED,
]);
@@ -141,83 +117,45 @@ utils.InstallFunctions(global, DONT_ENUM, [
"isFinite", GlobalIsFinite,
"parseInt", GlobalParseInt,
"parseFloat", GlobalParseFloat,
- "eval", GlobalEval
]);
// ----------------------------------------------------------------------------
// Object
-// ECMA-262 - 15.2.4.2
-function ObjectToString() {
- if (IS_UNDEFINED(this)) return "[object Undefined]";
- if (IS_NULL(this)) return "[object Null]";
- var O = TO_OBJECT(this);
- var builtinTag = %_ClassOf(O);
- var tag;
-
- // TODO(caitp): cannot wait to get rid of this flag :>
- if (FLAG_harmony_tostring) {
- tag = O[toStringTagSymbol];
- if (!IS_STRING(tag)) {
- tag = builtinTag;
- }
- } else {
- tag = builtinTag;
- }
-
- return `[object ${tag}]`;
-}
-
-
-// ECMA-262 - 15.2.4.3
+// ES6 19.1.3.5 Object.prototype.toLocaleString([reserved1 [,reserved2]])
function ObjectToLocaleString() {
CHECK_OBJECT_COERCIBLE(this, "Object.prototype.toLocaleString");
return this.toString();
}
-// ECMA-262 - 15.2.4.4
+// ES6 19.1.3.7 Object.prototype.valueOf()
function ObjectValueOf() {
return TO_OBJECT(this);
}
-// ECMA-262 - 15.2.4.5
+// ES6 7.3.11
function ObjectHasOwnProperty(value) {
var name = TO_NAME(value);
var object = TO_OBJECT(this);
-
- if (%_IsJSProxy(object)) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(value)) return false;
-
- var handler = %GetHandler(object);
- return CallTrap1(handler, "hasOwn", ProxyDerivedHasOwnTrap, name);
- }
return %HasOwnProperty(object, name);
}
-// ECMA-262 - 15.2.4.6
+// ES6 19.1.3.3 Object.prototype.isPrototypeOf(V)
function ObjectIsPrototypeOf(V) {
- if (!IS_SPEC_OBJECT(V)) return false;
+ if (!IS_RECEIVER(V)) return false;
var O = TO_OBJECT(this);
- return %_HasInPrototypeChain(V, O);
+ return %HasInPrototypeChain(V, O);
}
-// ECMA-262 - 15.2.4.6
+// ES6 19.1.3.4
function ObjectPropertyIsEnumerable(V) {
var P = TO_NAME(V);
- if (%_IsJSProxy(this)) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(V)) return false;
-
- var desc = GetOwnPropertyJS(this, P);
- return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
- }
- return %IsPropertyEnumerable(TO_OBJECT(this), P);
+ return %PropertyIsEnumerable(TO_OBJECT(this), P);
}
@@ -272,32 +210,21 @@ function ObjectLookupSetter(name) {
}
-function ObjectKeys(obj) {
- obj = TO_OBJECT(obj);
- if (%_IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "keys", ProxyDerivedKeysTrap);
- return ToNameArray(names, "keys", false);
- }
- return %OwnKeys(obj);
-}
-
-
-// ES5 8.10.1.
+// ES6 6.2.4.1
function IsAccessorDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return desc.hasGetter() || desc.hasSetter();
}
-// ES5 8.10.2.
+// ES6 6.2.4.2
function IsDataDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return desc.hasValue() || desc.hasWritable();
}
-// ES5 8.10.3.
+// ES6 6.2.4.3
function IsGenericDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
@@ -309,24 +236,6 @@ function IsInconsistentDescriptor(desc) {
}
-// ES5 8.10.4
-function FromPropertyDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return desc;
-
- if (IsDataDescriptor(desc)) {
- return { value: desc.getValue(),
- writable: desc.isWritable(),
- enumerable: desc.isEnumerable(),
- configurable: desc.isConfigurable() };
- }
- // Must be an AccessorDescriptor then. We never return a generic descriptor.
- return { get: desc.getGet(),
- set: desc.getSet(),
- enumerable: desc.isEnumerable(),
- configurable: desc.isConfigurable() };
-}
-
-
// Harmony Proxies
function FromGenericPropertyDescriptor(desc) {
if (IS_UNDEFINED(desc)) return desc;
@@ -354,9 +263,9 @@ function FromGenericPropertyDescriptor(desc) {
}
-// ES5 8.10.5.
+// ES6 6.2.4.5
function ToPropertyDescriptor(obj) {
- if (!IS_SPEC_OBJECT(obj)) throw MakeTypeError(kPropertyDescObject, obj);
+ if (!IS_RECEIVER(obj)) throw MakeTypeError(kPropertyDescObject, obj);
var desc = new PropertyDescriptor();
@@ -398,8 +307,7 @@ function ToPropertyDescriptor(obj) {
return desc;
}
-
-// For Harmony proxies.
+// TODO(cbruni): remove once callers have been removed
function ToCompletePropertyDescriptor(obj) {
var desc = ToPropertyDescriptor(obj);
if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
@@ -543,21 +451,16 @@ function GetTrap(handler, name, defaultTrap) {
var trap = handler[name];
if (IS_UNDEFINED(trap)) {
if (IS_UNDEFINED(defaultTrap)) {
- throw MakeTypeError(kProxyHandlerTrapMissing, handler, name);
+ throw MakeTypeError(kIllegalInvocation);
}
trap = defaultTrap;
} else if (!IS_CALLABLE(trap)) {
- throw MakeTypeError(kProxyHandlerTrapMustBeCallable, handler, name);
+ throw MakeTypeError(kIllegalInvocation);
}
return trap;
}
-function CallTrap0(handler, name, defaultTrap) {
- return %_Call(GetTrap(handler, name, defaultTrap), handler);
-}
-
-
function CallTrap1(handler, name, defaultTrap, x) {
return %_Call(GetTrap(handler, name, defaultTrap), handler, x);
}
@@ -569,20 +472,21 @@ function CallTrap2(handler, name, defaultTrap, x, y) {
// ES5 section 8.12.1.
+// TODO(jkummerow): Deprecated. Migrate all callers to
+// ObjectGetOwnPropertyDescriptor and delete this.
function GetOwnPropertyJS(obj, v) {
var p = TO_NAME(v);
- if (%_IsJSProxy(obj)) {
+ if (IS_PROXY(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(v)) return UNDEFINED;
- var handler = %GetHandler(obj);
+ var handler = %JSProxyGetHandler(obj);
var descriptor = CallTrap1(
handler, "getOwnPropertyDescriptor", UNDEFINED, p);
if (IS_UNDEFINED(descriptor)) return descriptor;
var desc = ToCompletePropertyDescriptor(descriptor);
if (!desc.isConfigurable()) {
- throw MakeTypeError(kProxyPropNotConfigurable,
- handler, p, "getOwnPropertyDescriptor");
+ throw MakeTypeError(kIllegalInvocation);
}
return desc;
}
@@ -590,28 +494,13 @@ function GetOwnPropertyJS(obj, v) {
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(TO_OBJECT(obj), p);
+ var props = %GetOwnProperty_Legacy(TO_OBJECT(obj), p);
return ConvertDescriptorArrayToDescriptor(props);
}
-// ES5 section 8.12.7.
-function Delete(obj, p, should_throw) {
- var desc = GetOwnPropertyJS(obj, p);
- if (IS_UNDEFINED(desc)) return true;
- if (desc.isConfigurable()) {
- %DeleteProperty_Sloppy(obj, p);
- return true;
- } else if (should_throw) {
- throw MakeTypeError(kDefineDisallowed, p);
- } else {
- return;
- }
-}
-
-
-// ES6, draft 12-24-14, section 7.3.8
+// ES6 7.3.9
function GetMethod(obj, p) {
var func = obj[p];
if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
@@ -625,12 +514,11 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(p)) return false;
- var handler = %GetHandler(obj);
+ var handler = %JSProxyGetHandler(obj);
var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
if (!result) {
if (should_throw) {
- throw MakeTypeError(kProxyHandlerReturned,
- handler, "false", "defineProperty");
+ throw MakeTypeError(kIllegalInvocation);
} else {
return false;
}
@@ -639,14 +527,12 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
}
-// ES5 8.12.9.
+// ES6 9.1.6 [[DefineOwnProperty]](P, Desc)
function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_array = %GetOwnProperty(obj, TO_NAME(p));
+ var current_array = %GetOwnProperty_Legacy(obj, TO_NAME(p));
var current = ConvertDescriptorArrayToDescriptor(current_array);
- var extensible = %IsExtensible(obj);
+ var extensible = %object_is_extensible(obj);
- // Error handling according to spec.
- // Step 3
if (IS_UNDEFINED(current) && !extensible) {
if (should_throw) {
throw MakeTypeError(kDefineDisallowed, p);
@@ -656,7 +542,6 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
if (!IS_UNDEFINED(current)) {
- // Step 5 and 6
if ((IsGenericDescriptor(desc) ||
IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
(!desc.hasEnumerable() ||
@@ -852,7 +737,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
function DefineOwnProperty(obj, p, desc, should_throw) {
- if (%_IsJSProxy(obj)) {
+ if (IS_PROXY(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(p)) return false;
@@ -871,15 +756,15 @@ function ObjectGetPrototypeOf(obj) {
return %_GetPrototype(TO_OBJECT(obj));
}
-// ES6 section 19.1.2.19.
+// ES6 section 19.1.2.18.
function ObjectSetPrototypeOf(obj, proto) {
CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
- if (proto !== null && !IS_SPEC_OBJECT(proto)) {
+ if (proto !== null && !IS_RECEIVER(proto)) {
throw MakeTypeError(kProtoObjectOrNull, proto);
}
- if (IS_SPEC_OBJECT(obj)) {
+ if (IS_RECEIVER(obj)) {
%SetPrototype(obj, proto);
}
@@ -889,182 +774,28 @@ function ObjectSetPrototypeOf(obj, proto) {
// ES6 section 19.1.2.6
function ObjectGetOwnPropertyDescriptor(obj, p) {
- var desc = GetOwnPropertyJS(TO_OBJECT(obj), p);
- return FromPropertyDescriptor(desc);
-}
-
-
-// For Harmony proxies
-function ToNameArray(obj, trap, includeSymbols) {
- if (!IS_SPEC_OBJECT(obj)) {
- throw MakeTypeError(kProxyNonObjectPropNames, trap, obj);
- }
- var n = TO_UINT32(obj.length);
- var array = new GlobalArray(n);
- var realLength = 0;
- var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
- for (var index = 0; index < n; index++) {
- var s = TO_NAME(obj[index]);
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(s) && !includeSymbols) continue;
- if (%HasOwnProperty(names, s)) {
- throw MakeTypeError(kProxyRepeatedPropName, trap, s);
- }
- array[realLength] = s;
- ++realLength;
- names[s] = 0;
- }
- array.length = realLength;
- return array;
-}
-
-
-function ObjectGetOwnPropertyKeys(obj, filter) {
- var nameArrays = new InternalArray();
- filter |= PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL;
- var interceptorInfo = %GetInterceptorInfo(obj);
-
- // Find all the indexed properties.
-
- // Only get own element names if we want to include string keys.
- if ((filter & PROPERTY_ATTRIBUTES_STRING) === 0) {
- var ownElementNames = %GetOwnElementNames(obj);
- for (var i = 0; i < ownElementNames.length; ++i) {
- ownElementNames[i] = %_NumberToString(ownElementNames[i]);
- }
- nameArrays.push(ownElementNames);
- // Get names for indexed interceptor properties.
- if ((interceptorInfo & 1) != 0) {
- var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
- if (!IS_UNDEFINED(indexedInterceptorNames)) {
- nameArrays.push(indexedInterceptorNames);
- }
- }
- }
-
- // Find all the named properties.
-
- // Get own property names.
- nameArrays.push(%GetOwnPropertyNames(obj, filter));
-
- // Get names for named interceptor properties if any.
- if ((interceptorInfo & 2) != 0) {
- var namedInterceptorNames =
- %GetNamedInterceptorPropertyNames(obj);
- if (!IS_UNDEFINED(namedInterceptorNames)) {
- nameArrays.push(namedInterceptorNames);
- }
- }
-
- var propertyNames =
- %Apply(InternalArray.prototype.concat,
- nameArrays[0], nameArrays, 1, nameArrays.length - 1);
-
- // Property names are expected to be unique strings,
- // but interceptors can interfere with that assumption.
- if (interceptorInfo != 0) {
- var seenKeys = { __proto__: null };
- var j = 0;
- for (var i = 0; i < propertyNames.length; ++i) {
- var name = propertyNames[i];
- if (IS_SYMBOL(name)) {
- if ((filter & PROPERTY_ATTRIBUTES_SYMBOLIC) || IS_PRIVATE(name)) {
- continue;
- }
- } else {
- if (filter & PROPERTY_ATTRIBUTES_STRING) continue;
- name = TO_STRING(name);
- }
- if (seenKeys[name]) continue;
- seenKeys[name] = true;
- propertyNames[j++] = name;
- }
- propertyNames.length = j;
- }
-
- return propertyNames;
-}
-
-
-// ES6 section 9.1.12 / 9.5.12
-function OwnPropertyKeys(obj) {
- if (%_IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- // TODO(caitp): Proxy.[[OwnPropertyKeys]] can not be implemented to spec
- // without an implementation of Direct Proxies.
- var names = CallTrap0(handler, "ownKeys", UNDEFINED);
- return ToNameArray(names, "getOwnPropertyNames", false);
- }
- return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
+ return %GetOwnProperty(obj, p);
}
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
obj = TO_OBJECT(obj);
- // Special handling for proxies.
- if (%_IsJSProxy(obj)) {
- var handler = %GetHandler(obj);
- var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED);
- return ToNameArray(names, "getOwnPropertyNames", false);
- }
-
- return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_SYMBOLIC);
-}
-
-
-// ES5 section 15.2.3.5.
-function ObjectCreate(proto, properties) {
- if (!IS_SPEC_OBJECT(proto) && proto !== null) {
- throw MakeTypeError(kProtoObjectOrNull, proto);
- }
- var obj = {};
- %InternalSetPrototype(obj, proto);
- if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
- return obj;
+ return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_SYMBOLS);
}
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
- // The new pure-C++ implementation doesn't support Proxies yet, nor O.o.
+ // The new pure-C++ implementation doesn't support O.o.
// TODO(jkummerow): Implement missing features and remove fallback path.
- if (%_IsJSProxy(obj) || %IsObserved(obj)) {
- if (!IS_SPEC_OBJECT(obj)) {
+ if (%IsObserved(obj)) {
+ if (!IS_RECEIVER(obj)) {
throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
}
var name = TO_NAME(p);
- if (%_IsJSProxy(obj)) {
- // Clone the attributes object for protection.
- // TODO(rossberg): not spec'ed yet, so not sure if this should involve
- // non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = { __proto__: null };
- for (var a in attributes) {
- attributesClone[a] = attributes[a];
- }
- DefineProxyProperty(obj, name, attributesClone, true);
- // The following would implement the spec as in the current proposal,
- // but after recent comments on es-discuss, is most likely obsolete.
- /*
- var defineObj = FromGenericPropertyDescriptor(desc);
- var names = ObjectGetOwnPropertyNames(attributes);
- var standardNames =
- {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0};
- for (var i = 0; i < names.length; i++) {
- var N = names[i];
- if (!(%HasOwnProperty(standardNames, N))) {
- var attr = GetOwnPropertyJS(attributes, N);
- DefineOwnProperty(descObj, N, attr, true);
- }
- }
- // This is really confusing the types, but it is what the proxies spec
- // currently requires:
- desc = descObj;
- */
- } else {
- var desc = ToPropertyDescriptor(attributes);
- DefineOwnProperty(obj, name, desc, true);
- }
+ var desc = ToPropertyDescriptor(attributes);
+ DefineOwnProperty(obj, name, desc, true);
return obj;
}
return %ObjectDefineProperty(obj, p, attributes);
@@ -1072,33 +803,16 @@ function ObjectDefineProperty(obj, p, attributes) {
function GetOwnEnumerablePropertyNames(object) {
- var names = new InternalArray();
- for (var key in object) {
- if (%HasOwnProperty(object, key)) {
- names.push(key);
- }
- }
-
- var filter = PROPERTY_ATTRIBUTES_STRING | PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL;
- var symbols = %GetOwnPropertyNames(object, filter);
- for (var i = 0; i < symbols.length; ++i) {
- var symbol = symbols[i];
- if (IS_SYMBOL(symbol)) {
- var desc = ObjectGetOwnPropertyDescriptor(object, symbol);
- if (desc.enumerable) names.push(symbol);
- }
- }
-
- return names;
+ return %GetOwnPropertyKeys(object, PROPERTY_FILTER_ONLY_ENUMERABLE);
}
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
- // The new pure-C++ implementation doesn't support Proxies yet, nor O.o.
+ // The new pure-C++ implementation doesn't support O.o.
// TODO(jkummerow): Implement missing features and remove fallback path.
- if (%_IsJSProxy(obj) || %_IsJSProxy(properties) || %IsObserved(obj)) {
- if (!IS_SPEC_OBJECT(obj)) {
+ if (%IsObserved(obj)) {
+ if (!IS_RECEIVER(obj)) {
throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
}
var props = TO_OBJECT(properties);
@@ -1116,202 +830,23 @@ function ObjectDefineProperties(obj, properties) {
}
-// Harmony proxies.
-function ProxyFix(obj) {
- var handler = %GetHandler(obj);
- var props = CallTrap0(handler, "fix", UNDEFINED);
- if (IS_UNDEFINED(props)) {
- throw MakeTypeError(kProxyHandlerReturned, handler, "undefined", "fix");
- }
-
- if (%IsJSFunctionProxy(obj)) {
- var callTrap = %GetCallTrap(obj);
- var constructTrap = %GetConstructTrap(obj);
- var code = ProxyDelegateCallAndConstruct(callTrap, constructTrap);
- %Fix(obj); // becomes a regular function
- %SetCode(obj, code);
- // TODO(rossberg): What about length and other properties? Not specified.
- // We just put in some half-reasonable defaults for now.
- var prototype = new GlobalObject();
- ObjectDefineProperty(prototype, "constructor",
- {value: obj, writable: true, enumerable: false, configurable: true});
- // TODO(v8:1530): defineProperty does not handle prototype and length.
- %FunctionSetPrototype(obj, prototype);
- obj.length = 0;
- } else {
- %Fix(obj);
- }
- ObjectDefineProperties(obj, props);
-}
-
-
-// ES5 section 15.2.3.8.
-function ObjectSealJS(obj) {
- if (!IS_SPEC_OBJECT(obj)) return obj;
- var isProxy = %_IsJSProxy(obj);
- if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
- if (isProxy) {
- ProxyFix(obj);
- }
- var names = OwnPropertyKeys(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (desc.isConfigurable()) {
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- }
- %PreventExtensions(obj);
- } else {
- // TODO(adamk): Is it worth going to this fast path if the
- // object's properties are already in dictionary mode?
- %ObjectSeal(obj);
- }
- return obj;
-}
-
-
-// ES5 section 15.2.3.9.
-function ObjectFreezeJS(obj) {
- if (!IS_SPEC_OBJECT(obj)) return obj;
- var isProxy = %_IsJSProxy(obj);
- // TODO(conradw): Investigate modifying the fast path to accommodate strong
- // objects.
- if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj) ||
- IS_STRONG(obj)) {
- if (isProxy) {
- ProxyFix(obj);
- }
- var names = OwnPropertyKeys(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (desc.isWritable() || desc.isConfigurable()) {
- if (IsDataDescriptor(desc)) desc.setWritable(false);
- desc.setConfigurable(false);
- DefineOwnProperty(obj, name, desc, true);
- }
- }
- %PreventExtensions(obj);
- } else {
- // TODO(adamk): Is it worth going to this fast path if the
- // object's properties are already in dictionary mode?
- %ObjectFreeze(obj);
- }
- return obj;
-}
-
-
-// ES5 section 15.2.3.10
-function ObjectPreventExtension(obj) {
- if (!IS_SPEC_OBJECT(obj)) return obj;
- if (%_IsJSProxy(obj)) {
- ProxyFix(obj);
- }
- %PreventExtensions(obj);
- return obj;
-}
-
-
-// ES5 section 15.2.3.11
-function ObjectIsSealed(obj) {
- if (!IS_SPEC_OBJECT(obj)) return true;
- if (%_IsJSProxy(obj)) {
- return false;
- }
- if (%IsExtensible(obj)) {
- return false;
- }
- var names = OwnPropertyKeys(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (desc.isConfigurable()) {
- return false;
- }
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.12
-function ObjectIsFrozen(obj) {
- if (!IS_SPEC_OBJECT(obj)) return true;
- if (%_IsJSProxy(obj)) {
- return false;
- }
- if (%IsExtensible(obj)) {
- return false;
- }
- var names = OwnPropertyKeys(obj);
- for (var i = 0; i < names.length; i++) {
- var name = names[i];
- var desc = GetOwnPropertyJS(obj, name);
- if (IsDataDescriptor(desc) && desc.isWritable()) return false;
- if (desc.isConfigurable()) return false;
- }
- return true;
-}
-
-
-// ES5 section 15.2.3.13
-function ObjectIsExtensible(obj) {
- if (!IS_SPEC_OBJECT(obj)) return false;
- if (%_IsJSProxy(obj)) {
- return true;
- }
- return %IsExtensible(obj);
-}
-
-
-// ECMA-262, Edition 6, section 19.1.2.1
-function ObjectAssign(target, sources) {
- // TODO(bmeurer): Move this to toplevel.
- "use strict";
- var to = TO_OBJECT(target);
- var argsLen = %_ArgumentsLength();
- if (argsLen < 2) return to;
-
- for (var i = 1; i < argsLen; ++i) {
- var nextSource = %_Arguments(i);
- if (IS_NULL_OR_UNDEFINED(nextSource)) {
- continue;
- }
-
- var from = TO_OBJECT(nextSource);
- var keys = OwnPropertyKeys(from);
- var len = keys.length;
-
- for (var j = 0; j < len; ++j) {
- var key = keys[j];
- if (%IsPropertyEnumerable(from, key)) {
- var propValue = from[key];
- to[key] = propValue;
- }
- }
- }
- return to;
-}
-
-
-// ECMA-262, Edition 6, section B.2.2.1.1
+// ES6 B.2.2.1.1
function ObjectGetProto() {
return %_GetPrototype(TO_OBJECT(this));
}
-// ECMA-262, Edition 6, section B.2.2.1.2
+// ES6 B.2.2.1.2
function ObjectSetProto(proto) {
CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
- if ((IS_SPEC_OBJECT(proto) || IS_NULL(proto)) && IS_SPEC_OBJECT(this)) {
+ if ((IS_RECEIVER(proto) || IS_NULL(proto)) && IS_RECEIVER(this)) {
%SetPrototype(this, proto);
}
}
-// ECMA-262, Edition 6, section 19.1.1.1
+// ES6 19.1.1.1
function ObjectConstructor(x) {
if (GlobalObject != new.target && !IS_UNDEFINED(new.target)) {
return this;
@@ -1348,23 +883,16 @@ utils.InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
// Set up non-enumerable functions in the Object object.
utils.InstallFunctions(GlobalObject, DONT_ENUM, [
- "assign", ObjectAssign,
- "keys", ObjectKeys,
- "create", ObjectCreate,
+ // assign is added in bootstrapper.cc.
+ // keys is added in bootstrapper.cc.
"defineProperty", ObjectDefineProperty,
"defineProperties", ObjectDefineProperties,
- "freeze", ObjectFreezeJS,
"getPrototypeOf", ObjectGetPrototypeOf,
"setPrototypeOf", ObjectSetPrototypeOf,
"getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
"getOwnPropertyNames", ObjectGetOwnPropertyNames,
// getOwnPropertySymbols is added in symbol.js.
"is", SameValue, // ECMA-262, Edition 6, section 19.1.2.10
- "isExtensible", ObjectIsExtensible,
- "isFrozen", ObjectIsFrozen,
- "isSealed", ObjectIsSealed,
- "preventExtensions", ObjectPreventExtension,
- "seal", ObjectSealJS
// deliverChangeRecords, getNotifier, observe and unobserve are added
// in object-observe.js.
]);
@@ -1376,7 +904,7 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
function BooleanConstructor(x) {
// TODO(bmeurer): Move this to toplevel.
"use strict";
- if (%_IsConstructCall()) {
+ if (!IS_UNDEFINED(new.target)) {
%_SetValueOf(this, TO_BOOLEAN(x));
} else {
return TO_BOOLEAN(x);
@@ -1424,19 +952,7 @@ utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
// ----------------------------------------------------------------------------
// Number
-function NumberConstructor(x) {
- // TODO(bmeurer): Move this to toplevel.
- "use strict";
- var value = %_ArgumentsLength() == 0 ? 0 : TO_NUMBER(x);
- if (%_IsConstructCall()) {
- %_SetValueOf(this, value);
- } else {
- return value;
- }
-}
-
-
-// ECMA-262 section 15.7.4.2.
+// ES6 Number.prototype.toString([ radix ])
function NumberToStringJS(radix) {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
@@ -1461,13 +977,13 @@ function NumberToStringJS(radix) {
}
-// ECMA-262 section 15.7.4.3
+// ES6 20.1.3.4 Number.prototype.toLocaleString([reserved1 [, reserved2]])
function NumberToLocaleString() {
return %_Call(NumberToStringJS, this);
}
-// ECMA-262 section 15.7.4.4
+// ES6 20.1.3.7 Number.prototype.valueOf()
function NumberValueOf() {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
@@ -1478,7 +994,7 @@ function NumberValueOf() {
}
-// ECMA-262 section 15.7.4.5
+// ES6 20.1.3.3 Number.prototype.toFixed(fractionDigits)
function NumberToFixedJS(fractionDigits) {
var x = this;
if (!IS_NUMBER(this)) {
@@ -1503,7 +1019,7 @@ function NumberToFixedJS(fractionDigits) {
}
-// ECMA-262 section 15.7.4.6
+// ES6 20.1.3.2 Number.prototype.toExponential(fractionDigits)
function NumberToExponentialJS(fractionDigits) {
var x = this;
if (!IS_NUMBER(this)) {
@@ -1529,7 +1045,7 @@ function NumberToExponentialJS(fractionDigits) {
}
-// ECMA-262 section 15.7.4.7
+// ES6 20.1.3.5 Number.prototype.toPrecision(precision)
function NumberToPrecisionJS(precision) {
var x = this;
if (!IS_NUMBER(this)) {
@@ -1586,7 +1102,6 @@ function NumberIsSafeInteger(number) {
// ----------------------------------------------------------------------------
-%SetCode(GlobalNumber, NumberConstructor);
%FunctionSetPrototype(GlobalNumber, new GlobalNumber(0));
%OptimizeObjectForAddingMultipleProperties(GlobalNumber.prototype, 8);
@@ -1637,178 +1152,9 @@ utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
// ----------------------------------------------------------------------------
-// Function
-
-function NativeCodeFunctionSourceString(func) {
- var name = %FunctionGetName(func);
- if (name) {
- // Mimic what KJS does.
- return 'function ' + name + '() { [native code] }';
- }
-
- return 'function () { [native code] }';
-}
-
-function FunctionSourceString(func) {
- while (%IsJSFunctionProxy(func)) {
- func = %GetCallTrap(func);
- }
-
- if (!IS_FUNCTION(func)) {
- throw MakeTypeError(kNotGeneric, 'Function.prototype.toString');
- }
-
- if (%FunctionHidesSource(func)) {
- return NativeCodeFunctionSourceString(func);
- }
-
- var classSource = %ClassGetSourceCode(func);
- if (IS_STRING(classSource)) {
- return classSource;
- }
-
- var source = %FunctionGetSourceCode(func);
- if (!IS_STRING(source)) {
- return NativeCodeFunctionSourceString(func);
- }
-
- if (%FunctionIsArrow(func)) {
- return source;
- }
-
- var name = %FunctionNameShouldPrintAsAnonymous(func)
- ? 'anonymous'
- : %FunctionGetName(func);
-
- var isGenerator = %FunctionIsGenerator(func);
- var head = %FunctionIsConciseMethod(func)
- ? (isGenerator ? '*' : '')
- : (isGenerator ? 'function* ' : 'function ');
- return head + name + source;
-}
-
-
-function FunctionToString() {
- return FunctionSourceString(this);
-}
-
-
-// ES5 15.3.4.5
-function FunctionBind(this_arg) { // Length is 1.
- if (!IS_CALLABLE(this)) throw MakeTypeError(kFunctionBind);
-
- var boundFunction = function () {
- // Poison .arguments and .caller, but is otherwise not detectable.
- "use strict";
- // This function must not use any object literals (Object, Array, RegExp),
- // since the literals-array is being used to store the bound data.
- if (%_IsConstructCall()) {
- return %NewObjectFromBound(boundFunction);
- }
- var bindings = %BoundFunctionGetBindings(boundFunction);
-
- var argc = %_ArgumentsLength();
- if (argc == 0) {
- return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
- }
- if (bindings.length === 2) {
- return %Apply(bindings[0], bindings[1], arguments, 0, argc);
- }
- var bound_argc = bindings.length - 2;
- var argv = new InternalArray(bound_argc + argc);
- for (var i = 0; i < bound_argc; i++) {
- argv[i] = bindings[i + 2];
- }
- for (var j = 0; j < argc; j++) {
- argv[i++] = %_Arguments(j);
- }
- return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
- };
-
- var new_length = 0;
- var old_length = this.length;
- // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
- if ((typeof old_length === "number") &&
- ((old_length >>> 0) === old_length)) {
- var argc = %_ArgumentsLength();
- if (argc > 0) argc--; // Don't count the thisArg as parameter.
- new_length = old_length - argc;
- if (new_length < 0) new_length = 0;
- }
- // This runtime function finds any remaining arguments on the stack,
- // so we don't pass the arguments object.
- var result = %FunctionBindArguments(boundFunction, this,
- this_arg, new_length);
-
- var name = this.name;
- var bound_name = IS_STRING(name) ? name : "";
- %DefineDataPropertyUnchecked(result, "name", "bound " + bound_name,
- DONT_ENUM | READ_ONLY);
-
- // We already have caller and arguments properties on functions,
- // which are non-configurable. It therefore makes no sence to
- // try to redefine these as defined by the spec. The spec says
- // that bind should make these throw a TypeError if get or set
- // is called and make them non-enumerable and non-configurable.
- // To be consistent with our normal functions we leave this as it is.
- // TODO(lrn): Do set these to be thrower.
- return result;
-}
-
-
-function NewFunctionString(args, function_token) {
- var n = args.length;
- var p = '';
- if (n > 1) {
- p = TO_STRING(args[0]);
- for (var i = 1; i < n - 1; i++) {
- p += ',' + TO_STRING(args[i]);
- }
- // If the formal parameters string include ) - an illegal
- // character - it may make the combined function expression
- // compile. We avoid this problem by checking for this early on.
- if (%_Call(StringIndexOf, p, ')') != -1) {
- throw MakeSyntaxError(kParenthesisInArgString);
- }
- // If the formal parameters include an unbalanced block comment, the
- // function must be rejected. Since JavaScript does not allow nested
- // comments we can include a trailing block comment to catch this.
- p += '\n/' + '**/';
- }
- var body = (n > 0) ? TO_STRING(args[n - 1]) : '';
- return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
-}
-
-
-function FunctionConstructor(arg1) { // length == 1
- var source = NewFunctionString(arguments, 'function');
- var global_proxy = %GlobalProxy(FunctionConstructor);
- // Compile the string in the constructor and not a helper so that errors
- // appear to come from here.
- var func = %_Call(%CompileString(source, true), global_proxy);
- // Set name-should-print-as-anonymous flag on the ShareFunctionInfo and
- // ensure that |func| uses correct initial map from |new.target| if
- // it's available.
- return %CompleteFunctionConstruction(func, GlobalFunction, new.target);
-}
-
-
-// ----------------------------------------------------------------------------
-
-%SetCode(GlobalFunction, FunctionConstructor);
-%AddNamedProperty(GlobalFunction.prototype, "constructor", GlobalFunction,
- DONT_ENUM);
-
-utils.InstallFunctions(GlobalFunction.prototype, DONT_ENUM, [
- "bind", FunctionBind,
- "toString", FunctionToString
-]);
-
-// ----------------------------------------------------------------------------
// Iterator related spec functions.
-// ES6 rev 33, 2015-02-12
-// 7.4.1 GetIterator ( obj, method )
+// ES6 7.4.1 GetIterator(obj, method)
function GetIterator(obj, method) {
if (IS_UNDEFINED(method)) {
method = obj[iteratorSymbol];
@@ -1817,7 +1163,7 @@ function GetIterator(obj, method) {
throw MakeTypeError(kNotIterable, obj);
}
var iterator = %_Call(method, obj);
- if (!IS_SPEC_OBJECT(iterator)) {
+ if (!IS_RECEIVER(iterator)) {
throw MakeTypeError(kNotAnIterator, iterator);
}
return iterator;
@@ -1827,31 +1173,18 @@ function GetIterator(obj, method) {
// Exports
utils.Export(function(to) {
- to.Delete = Delete;
- to.FunctionSourceString = FunctionSourceString;
to.GetIterator = GetIterator;
to.GetMethod = GetMethod;
to.IsFinite = GlobalIsFinite;
to.IsNaN = GlobalIsNaN;
- to.NewFunctionString = NewFunctionString;
to.NumberIsNaN = NumberIsNaN;
to.ObjectDefineProperties = ObjectDefineProperties;
to.ObjectDefineProperty = ObjectDefineProperty;
- to.ObjectFreeze = ObjectFreezeJS;
- to.ObjectGetOwnPropertyKeys = ObjectGetOwnPropertyKeys;
to.ObjectHasOwnProperty = ObjectHasOwnProperty;
- to.ObjectIsFrozen = ObjectIsFrozen;
- to.ObjectIsSealed = ObjectIsSealed;
- to.ObjectToString = ObjectToString;
- to.ToNameArray = ToNameArray;
});
%InstallToContext([
- "global_eval_fun", GlobalEval,
"object_value_of", ObjectValueOf,
- "object_to_string", ObjectToString,
- "object_get_own_property_descriptor", ObjectGetOwnPropertyDescriptor,
- "to_complete_property_descriptor", ToCompletePropertyDescriptor,
]);
})
diff --git a/deps/v8/src/js/weak-collection.js b/deps/v8/src/js/weak-collection.js
index c4568f97f4..308b9edef7 100644
--- a/deps/v8/src/js/weak-collection.js
+++ b/deps/v8/src/js/weak-collection.js
@@ -29,7 +29,7 @@ utils.Import(function(from) {
// Harmony WeakMap
function WeakMapConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "WeakMap");
}
@@ -38,10 +38,10 @@ function WeakMapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'set', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'set', this);
}
for (var nextItem of iterable) {
- if (!IS_SPEC_OBJECT(nextItem)) {
+ if (!IS_RECEIVER(nextItem)) {
throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
}
%_Call(adder, this, nextItem[0], nextItem[1]);
@@ -55,7 +55,7 @@ function WeakMapGet(key) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.get', this);
}
- if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
+ if (!IS_RECEIVER(key)) return UNDEFINED;
var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return UNDEFINED;
return %WeakCollectionGet(this, key, hash);
@@ -67,7 +67,7 @@ function WeakMapSet(key, value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.set', this);
}
- if (!IS_SPEC_OBJECT(key)) throw MakeTypeError(kInvalidWeakMapKey);
+ if (!IS_RECEIVER(key)) throw MakeTypeError(kInvalidWeakMapKey);
return %WeakCollectionSet(this, key, value, GetHash(key));
}
@@ -77,7 +77,7 @@ function WeakMapHas(key) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.has', this);
}
- if (!IS_SPEC_OBJECT(key)) return false;
+ if (!IS_RECEIVER(key)) return false;
var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionHas(this, key, hash);
@@ -89,7 +89,7 @@ function WeakMapDelete(key) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakMap.prototype.delete', this);
}
- if (!IS_SPEC_OBJECT(key)) return false;
+ if (!IS_RECEIVER(key)) return false;
var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionDelete(this, key, hash);
@@ -118,7 +118,7 @@ utils.InstallFunctions(GlobalWeakMap.prototype, DONT_ENUM, [
// Harmony WeakSet
function WeakSetConstructor(iterable) {
- if (!%_IsConstructCall()) {
+ if (IS_UNDEFINED(new.target)) {
throw MakeTypeError(kConstructorNotFunction, "WeakSet");
}
@@ -127,7 +127,7 @@ function WeakSetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, 'add', this);
+ throw MakeTypeError(kPropertyNotFunction, adder, 'add', this);
}
for (var value of iterable) {
%_Call(adder, this, value);
@@ -141,7 +141,7 @@ function WeakSetAdd(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.add', this);
}
- if (!IS_SPEC_OBJECT(value)) throw MakeTypeError(kInvalidWeakSetValue);
+ if (!IS_RECEIVER(value)) throw MakeTypeError(kInvalidWeakSetValue);
return %WeakCollectionSet(this, value, true, GetHash(value));
}
@@ -151,7 +151,7 @@ function WeakSetHas(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.has', this);
}
- if (!IS_SPEC_OBJECT(value)) return false;
+ if (!IS_RECEIVER(value)) return false;
var hash = GetExistingHash(value);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionHas(this, value, hash);
@@ -163,7 +163,7 @@ function WeakSetDelete(value) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'WeakSet.prototype.delete', this);
}
- if (!IS_SPEC_OBJECT(value)) return false;
+ if (!IS_RECEIVER(value)) return false;
var hash = GetExistingHash(value);
if (IS_UNDEFINED(hash)) return false;
return %WeakCollectionDelete(this, value, hash);
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 8bcef34c79..5c0459eb1b 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -337,14 +337,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
case JS_VALUE_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeJSValue(Handle<JSValue>::cast(object));
- case JS_FUNCTION_TYPE:
- return UNCHANGED;
default:
if (object->IsString()) {
if (deferred_string_key) SerializeDeferredKey(comma, key);
SerializeString(Handle<String>::cast(object));
return SUCCESS;
} else if (object->IsJSObject()) {
+ if (object->IsCallable()) return UNCHANGED;
// Go to slow path for global proxy and objects requiring access checks.
if (object->IsAccessCheckNeeded() || object->IsJSGlobalProxy()) break;
if (deferred_string_key) SerializeDeferredKey(comma, key);
@@ -397,9 +396,10 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
DCHECK(value->IsBoolean());
builder_.AppendCString(value->IsTrue() ? "true" : "false");
} else {
- // Fail gracefully for special value wrappers.
- isolate_->ThrowIllegalOperation();
- return EXCEPTION;
+ // ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
+ CHECK(!object->IsAccessCheckNeeded());
+ CHECK(!object->IsJSGlobalProxy());
+ return SerializeJSObject(object);
}
return SUCCESS;
}
@@ -567,7 +567,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
Handle<FixedArray> contents;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, contents,
- JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY),
+ JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS),
EXCEPTION);
for (int i = 0; i < contents->length(); i++) {
diff --git a/deps/v8/src/key-accumulator.cc b/deps/v8/src/key-accumulator.cc
index 91b014aacd..e7a9c3cceb 100644
--- a/deps/v8/src/key-accumulator.cc
+++ b/deps/v8/src/key-accumulator.cc
@@ -6,7 +6,9 @@
#include "src/elements.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
+#include "src/property-descriptor.h"
namespace v8 {
@@ -97,9 +99,11 @@ bool KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
if (key->IsSymbol()) {
- if (filter_ == SKIP_SYMBOLS) return false;
+ if (filter_ & SKIP_SYMBOLS) return false;
+ if (Handle<Symbol>::cast(key)->is_private()) return false;
return AddSymbolKey(key);
}
+ if (filter_ & SKIP_STRINGS) return false;
// Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
DCHECK_LE(0, level_string_length_);
// In some cases (e.g. proxies) we might get in String-converted ints which
@@ -217,6 +221,54 @@ void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
}
+MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
+ Handle<FixedArray> keys,
+ PropertyFilter filter) {
+ if (filter == ALL_PROPERTIES) {
+ // Nothing to do.
+ return keys;
+ }
+ int store_position = 0;
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key(Name::cast(keys->get(i)), isolate);
+ if (key->FilterKey(filter)) continue; // Skip this key.
+ if (filter & ONLY_ENUMERABLE) {
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSProxy::GetOwnPropertyDescriptor(isolate, owner, key, &desc);
+ MAYBE_RETURN(found, MaybeHandle<FixedArray>());
+ if (!found.FromJust() || !desc.enumerable()) continue; // Skip this key.
+ }
+ // Keep this key.
+ if (store_position != i) {
+ keys->set(store_position, *key);
+ }
+ store_position++;
+ }
+ if (store_position == 0) return isolate->factory()->empty_fixed_array();
+ keys->Shrink(store_position);
+ return keys;
+}
+
+
+// Returns "nothing" in case of exception, "true" on success.
+Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
+ Handle<FixedArray> keys) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
+ Nothing<bool>());
+ // Proxies define a complete list of keys with no distinction of
+ // elements and properties, which breaks the normal assumption for the
+ // KeyAccumulator.
+ AddKeys(keys, PROXY_MAGIC);
+ // Invert the current length to indicate a present proxy, so we can ignore
+ // element keys for this level. Otherwise we would not fully respect the order
+ // given by the proxy.
+ level_string_length_ = -level_string_length_;
+ return Just(true);
+}
+
+
void KeyAccumulator::AddElementKeysFromInterceptor(
Handle<JSObject> array_like) {
AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
diff --git a/deps/v8/src/key-accumulator.h b/deps/v8/src/key-accumulator.h
index 21b68433ec..8a4d886f51 100644
--- a/deps/v8/src/key-accumulator.h
+++ b/deps/v8/src/key-accumulator.h
@@ -31,8 +31,7 @@ enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
// are more compact and allow for reasonably fast includes check.
class KeyAccumulator final BASE_EMBEDDED {
public:
- explicit KeyAccumulator(Isolate* isolate,
- KeyFilter filter = KeyFilter::SKIP_SYMBOLS)
+ KeyAccumulator(Isolate* isolate, PropertyFilter filter)
: isolate_(isolate), filter_(filter) {}
~KeyAccumulator();
@@ -44,6 +43,7 @@ class KeyAccumulator final BASE_EMBEDDED {
void AddKeys(Handle<JSObject> array,
AddKeyConversion convert = DO_NOT_CONVERT);
void AddKeysFromProxy(Handle<JSObject> array);
+ Maybe<bool> AddKeysFromProxy(Handle<JSProxy> proxy, Handle<FixedArray> keys);
void AddElementKeysFromInterceptor(Handle<JSObject> array);
// Jump to the next level, pushing the current |levelLength_| to
// |levelLengths_| and adding a new list to |elements_|.
@@ -52,6 +52,7 @@ class KeyAccumulator final BASE_EMBEDDED {
void SortCurrentElementsList();
Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
int length() { return length_; }
+ Isolate* isolate() { return isolate_; }
private:
bool AddIntegerKey(uint32_t key);
@@ -60,7 +61,7 @@ class KeyAccumulator final BASE_EMBEDDED {
void SortCurrentElementsListRemoveDuplicates();
Isolate* isolate_;
- KeyFilter filter_;
+ PropertyFilter filter_;
// |elements_| contains the sorted element keys (indices) per level.
std::vector<std::vector<uint32_t>*> elements_;
// |protoLengths_| contains the total number of keys (elements + properties)
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 383d8ce1e0..e8c15572ad 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -168,5 +168,30 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
return base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(base::Time::kMicrosecondsPerSecond);
}
+
+
+uint64_t DefaultPlatform::AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ uint64_t id, uint64_t bind_id, int num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values, unsigned int flags) {
+ return 0;
+}
+
+
+void DefaultPlatform::UpdateTraceEventDuration(
+ const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {}
+
+
+const uint8_t* DefaultPlatform::GetCategoryGroupEnabled(const char* name) {
+ static uint8_t no = 0;
+ return &no;
+}
+
+
+const char* DefaultPlatform::GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) {
+ static const char dummy[] = "dummy";
+ return dummy;
+}
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index b452fdd345..8bdda95be6 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -42,6 +42,17 @@ class DefaultPlatform : public Platform {
void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override;
bool IdleTasksEnabled(Isolate* isolate) override;
double MonotonicallyIncreasingTime() override;
+ const uint8_t* GetCategoryGroupEnabled(const char* name) override;
+ const char* GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) override;
+ uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
+ const char* name, uint64_t id, uint64_t bind_id,
+ int32_t num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values,
+ unsigned int flags) override;
+ void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) override;
+
private:
static const int kMaxThreadPoolSize;
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/locked-queue-inl.h
new file mode 100644
index 0000000000..8b3e9d02bb
--- /dev/null
+++ b/deps/v8/src/locked-queue-inl.h
@@ -0,0 +1,91 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOCKED_QUEUE_INL_
+#define V8_LOCKED_QUEUE_INL_
+
+#include "src/atomic-utils.h"
+#include "src/locked-queue.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename Record>
+struct LockedQueue<Record>::Node : Malloced {
+ Node() : next(nullptr) {}
+ Record value;
+ AtomicValue<Node*> next;
+};
+
+
+template <typename Record>
+inline LockedQueue<Record>::LockedQueue() {
+ head_ = new Node();
+ CHECK(head_ != nullptr);
+ tail_ = head_;
+}
+
+
+template <typename Record>
+inline LockedQueue<Record>::~LockedQueue() {
+ // Destroy all remaining nodes. Note that we do not destroy the actual values.
+ Node* old_node = nullptr;
+ Node* cur_node = head_;
+ while (cur_node != nullptr) {
+ old_node = cur_node;
+ cur_node = cur_node->next.Value();
+ delete old_node;
+ }
+}
+
+
+template <typename Record>
+inline void LockedQueue<Record>::Enqueue(const Record& record) {
+ Node* n = new Node();
+ CHECK(n != nullptr);
+ n->value = record;
+ {
+ base::LockGuard<base::Mutex> guard(&tail_mutex_);
+ tail_->next.SetValue(n);
+ tail_ = n;
+ }
+}
+
+
+template <typename Record>
+inline bool LockedQueue<Record>::Dequeue(Record* record) {
+ Node* old_head = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&head_mutex_);
+ old_head = head_;
+ Node* const next_node = head_->next.Value();
+ if (next_node == nullptr) return false;
+ *record = next_node->value;
+ head_ = next_node;
+ }
+ delete old_head;
+ return true;
+}
+
+
+template <typename Record>
+inline bool LockedQueue<Record>::IsEmpty() const {
+ base::LockGuard<base::Mutex> guard(&head_mutex_);
+ return head_->next.Value() == nullptr;
+}
+
+
+template <typename Record>
+inline bool LockedQueue<Record>::Peek(Record* record) const {
+ base::LockGuard<base::Mutex> guard(&head_mutex_);
+ Node* const next_node = head_->next.Value();
+ if (next_node == nullptr) return false;
+ *record = next_node->value;
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOCKED_QUEUE_INL_
diff --git a/deps/v8/src/locked-queue.h b/deps/v8/src/locked-queue.h
new file mode 100644
index 0000000000..5bb97c8a12
--- /dev/null
+++ b/deps/v8/src/locked-queue.h
@@ -0,0 +1,43 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOCKED_QUEUE_
+#define V8_LOCKED_QUEUE_
+
+#include "src/allocation.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+// Simple lock-based unbounded size queue (multi producer; multi consumer) based
+// on "Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
+// Algorithms" by M. Scott and M. Michael.
+// See:
+// https://www.cs.rochester.edu/research/synchronization/pseudocode/queues.html
+template <typename Record>
+class LockedQueue final BASE_EMBEDDED {
+ public:
+ inline LockedQueue();
+ inline ~LockedQueue();
+ inline void Enqueue(const Record& record);
+ inline bool Dequeue(Record* record);
+ inline bool IsEmpty() const;
+ inline bool Peek(Record* record) const;
+
+ private:
+ struct Node;
+
+ mutable base::Mutex head_mutex_;
+ base::Mutex tail_mutex_;
+ Node* head_;
+ Node* tail_;
+
+ DISALLOW_COPY_AND_ASSIGN(LockedQueue);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOCKED_QUEUE_
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index 70c74bc33f..d47a24b96a 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -8,6 +8,7 @@
#include "src/log.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -37,6 +38,19 @@ void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
isolate->event_logger()(name, se);
}
}
+ if (expose_to_api) {
+ if (se == START) {
+ TRACE_EVENT_BEGIN0("v8", name);
+ } else {
+ TRACE_EVENT_END0("v8", name);
+ }
+ } else {
+ if (se == START) {
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
+ } else {
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
+ }
+ }
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index f60563b19e..48da4fabee 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -92,10 +92,8 @@ void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
// static
-Handle<JSReceiver> LookupIterator::GetRoot(Isolate* isolate,
- Handle<Object> receiver,
- uint32_t index) {
- if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
+ Isolate* isolate, Handle<Object> receiver, uint32_t index) {
// Strings are the only objects with properties (only elements) directly on
// the wrapper. Hence we can skip generating the wrapper for all other cases.
if (index != kMaxUInt32 && receiver->IsString() &&
@@ -272,23 +270,27 @@ void LookupIterator::ApplyTransitionToDataProperty() {
void LookupIterator::Delete() {
- Handle<JSObject> holder = Handle<JSObject>::cast(holder_);
+ Handle<JSReceiver> holder = Handle<JSReceiver>::cast(holder_);
if (IsElement()) {
- ElementsAccessor* accessor = holder->GetElementsAccessor();
- accessor->Delete(holder, number_);
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Delete(object, number_);
} else {
PropertyNormalizationMode mode = holder->map()->is_prototype_map()
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
if (holder->HasFastProperties()) {
- JSObject::NormalizeProperties(holder, mode, 0, "DeletingProperty");
+ JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
+ "DeletingProperty");
holder_map_ = handle(holder->map(), isolate_);
ReloadPropertyInformation();
}
// TODO(verwaest): Get rid of the name_ argument.
- JSObject::DeleteNormalizedProperty(holder, name_, number_);
- JSObject::ReoptimizeIfPrototype(holder);
+ JSReceiver::DeleteNormalizedProperty(holder, name_, number_);
+ if (holder->IsJSObject()) {
+ JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
+ }
}
}
@@ -415,8 +417,8 @@ bool LookupIterator::InternalHolderIsReceiverOrHiddenPrototype() const {
Handle<Object> LookupIterator::FetchValue() const {
Object* result = NULL;
- Handle<JSObject> holder = GetHolder<JSObject>();
if (IsElement()) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
// TODO(verwaest): Optimize.
if (holder->IsStringObjectWithCharacterAt(index_)) {
Handle<JSValue> js_value = Handle<JSValue>::cast(holder);
@@ -428,12 +430,14 @@ Handle<Object> LookupIterator::FetchValue() const {
ElementsAccessor* accessor = holder->GetElementsAccessor();
return accessor->Get(handle(holder->elements()), number_);
} else if (holder_map_->IsJSGlobalObjectMap()) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
result = holder->global_dictionary()->ValueAt(number_);
DCHECK(result->IsPropertyCell());
result = PropertyCell::cast(result)->value();
} else if (holder_map_->is_dictionary_map()) {
- result = holder->property_dictionary()->ValueAt(number_);
+ result = holder_->property_dictionary()->ValueAt(number_);
} else if (property_details_.type() == v8::internal::DATA) {
+ Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
@@ -508,20 +512,21 @@ Handle<Object> LookupIterator::GetDataValue() const {
void LookupIterator::WriteDataValue(Handle<Object> value) {
DCHECK_EQ(DATA, state_);
- Handle<JSObject> holder = GetHolder<JSObject>();
+ Handle<JSReceiver> holder = GetHolder<JSReceiver>();
if (IsElement()) {
- ElementsAccessor* accessor = holder->GetElementsAccessor();
- accessor->Set(holder->elements(), number_, *value);
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->Set(object->elements(), number_, *value);
} else if (holder->IsJSGlobalObject()) {
Handle<GlobalDictionary> property_dictionary =
- handle(holder->global_dictionary());
+ handle(JSObject::cast(*holder)->global_dictionary());
PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
property_details_);
} else if (holder_map_->is_dictionary_map()) {
NameDictionary* property_dictionary = holder->property_dictionary();
property_dictionary->ValueAtPut(dictionary_entry(), *value);
} else if (property_details_.type() == v8::internal::DATA) {
- holder->WriteToField(descriptor_number(), *value);
+ JSObject::cast(*holder)->WriteToField(descriptor_number(), *value);
} else {
DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
}
@@ -530,8 +535,6 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
bool LookupIterator::IsIntegerIndexedExotic(JSReceiver* holder) {
DCHECK(exotic_index_state_ != ExoticIndexState::kNotExotic);
- // Currently typed arrays are the only such objects.
- if (!holder->IsJSTypedArray()) return false;
if (exotic_index_state_ == ExoticIndexState::kExotic) return true;
if (!InternalHolderIsReceiverOrHiddenPrototype()) {
exotic_index_state_ = ExoticIndexState::kNotExotic;
@@ -566,18 +569,6 @@ bool LookupIterator::HasInterceptor(Map* map) const {
}
-Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
- DCHECK_EQ(INTERCEPTOR, state_);
- return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
-}
-
-
-InterceptorInfo* LookupIterator::GetInterceptor(JSObject* holder) const {
- if (IsElement()) return holder->GetIndexedInterceptor();
- return holder->GetNamedInterceptor();
-}
-
-
bool LookupIterator::SkipInterceptor(JSObject* holder) {
auto info = GetInterceptor(holder);
// TODO(dcarney): check for symbol/can_intercept_symbols here as well.
@@ -625,7 +616,10 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
}
switch (state_) {
case NOT_FOUND:
- if (map->IsJSProxyMap()) return JSPROXY;
+ if (map->IsJSProxyMap()) {
+ // Do not leak private property names.
+ if (IsElement() || !name_->IsPrivate()) return JSPROXY;
+ }
if (map->is_access_check_needed() &&
(IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
return ACCESS_CHECK;
@@ -633,11 +627,13 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
// Fall through.
case ACCESS_CHECK:
if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
- IsIntegerIndexedExotic(holder)) {
+ holder->IsJSTypedArray() && IsIntegerIndexedExotic(holder)) {
return INTEGER_INDEXED_EXOTIC;
}
if (check_interceptor() && HasInterceptor(map) &&
!SkipInterceptor(JSObject::cast(holder))) {
+ // Do not leak private property names.
+ if (!name_.is_null() && name_->IsPrivate()) return NOT_FOUND;
return INTERCEPTOR;
}
// Fall through.
@@ -678,7 +674,7 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
if (cell->value()->IsTheHole()) return NOT_FOUND;
property_details_ = cell->property_details();
} else {
- NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
+ NameDictionary* dict = holder->property_dictionary();
int number = dict->FindEntry(name_);
if (number == NameDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 5396619852..7d689560b8 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -203,10 +203,13 @@ class LookupIterator final BASE_EMBEDDED {
DCHECK(IsFound());
return Handle<T>::cast(holder_);
}
- static Handle<JSReceiver> GetRoot(Isolate* isolate, Handle<Object> receiver,
- uint32_t index = kMaxUInt32);
+
bool HolderIsReceiverOrHiddenPrototype() const;
+ bool check_prototype_chain() const {
+ return (configuration_ & kPrototypeChain) != 0;
+ }
+
/* ACCESS_CHECK */
bool HasAccess() const;
@@ -245,7 +248,10 @@ class LookupIterator final BASE_EMBEDDED {
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
Handle<Object> GetAccessors() const;
- Handle<InterceptorInfo> GetInterceptor() const;
+ inline Handle<InterceptorInfo> GetInterceptor() const {
+ DCHECK_EQ(INTERCEPTOR, state_);
+ return handle(GetInterceptor(JSObject::cast(*holder_)), isolate_);
+ }
Handle<Object> GetDataValue() const;
void WriteDataValue(Handle<Object> value);
void InternalizeName();
@@ -269,18 +275,18 @@ class LookupIterator final BASE_EMBEDDED {
State LookupNonMaskingInterceptorInHolder(Map* map, JSReceiver* holder);
Handle<Object> FetchValue() const;
void ReloadPropertyInformation();
- bool SkipInterceptor(JSObject* holder);
+ inline bool SkipInterceptor(JSObject* holder);
bool HasInterceptor(Map* map) const;
bool InternalHolderIsReceiverOrHiddenPrototype() const;
- InterceptorInfo* GetInterceptor(JSObject* holder) const;
+ inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
+ if (IsElement()) return holder->GetIndexedInterceptor();
+ return holder->GetNamedInterceptor();
+ }
bool check_hidden() const { return (configuration_ & kHidden) != 0; }
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
}
- bool check_prototype_chain() const {
- return (configuration_ & kPrototypeChain) != 0;
- }
int descriptor_number() const {
DCHECK(has_property_);
DCHECK(!holder_map_->is_dictionary_map());
@@ -302,8 +308,17 @@ class LookupIterator final BASE_EMBEDDED {
}
}
+ static Handle<JSReceiver> GetRootForNonJSReceiver(
+ Isolate* isolate, Handle<Object> receiver, uint32_t index = kMaxUInt32);
+ inline static Handle<JSReceiver> GetRoot(Isolate* isolate,
+ Handle<Object> receiver,
+ uint32_t index = kMaxUInt32) {
+ if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ return GetRootForNonJSReceiver(isolate, receiver, index);
+ }
+
enum class ExoticIndexState { kUninitialized, kNotExotic, kExotic };
- bool IsIntegerIndexedExotic(JSReceiver* holder);
+ inline bool IsIntegerIndexedExotic(JSReceiver* holder);
// If configuration_ becomes mutable, update
// HolderIsReceiverOrHiddenPrototype.
diff --git a/deps/v8/src/machine-type.cc b/deps/v8/src/machine-type.cc
new file mode 100644
index 0000000000..1fb886ca52
--- /dev/null
+++ b/deps/v8/src/machine-type.cc
@@ -0,0 +1,75 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/machine-type.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& os, MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kNone:
+ return os << "kMachNone";
+ case MachineRepresentation::kBit:
+ return os << "kRepBit";
+ case MachineRepresentation::kWord8:
+ return os << "kRepWord8";
+ case MachineRepresentation::kWord16:
+ return os << "kRepWord16";
+ case MachineRepresentation::kWord32:
+ return os << "kRepWord32";
+ case MachineRepresentation::kWord64:
+ return os << "kRepWord64";
+ case MachineRepresentation::kFloat32:
+ return os << "kRepFloat32";
+ case MachineRepresentation::kFloat64:
+ return os << "kRepFloat64";
+ case MachineRepresentation::kTagged:
+ return os << "kRepTagged";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, MachineSemantic type) {
+ switch (type) {
+ case MachineSemantic::kNone:
+ return os << "kMachNone";
+ case MachineSemantic::kBool:
+ return os << "kTypeBool";
+ case MachineSemantic::kInt32:
+ return os << "kTypeInt32";
+ case MachineSemantic::kUint32:
+ return os << "kTypeUint32";
+ case MachineSemantic::kInt64:
+ return os << "kTypeInt64";
+ case MachineSemantic::kUint64:
+ return os << "kTypeUint64";
+ case MachineSemantic::kNumber:
+ return os << "kTypeNumber";
+ case MachineSemantic::kAny:
+ return os << "kTypeAny";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, MachineType type) {
+ if (type == MachineType::None()) {
+ return os;
+ } else if (type.representation() == MachineRepresentation::kNone) {
+ return os << type.semantic();
+ } else if (type.semantic() == MachineSemantic::kNone) {
+ return os << type.representation();
+ } else {
+ return os << type.representation() << "|" << type.semantic();
+ }
+ return os;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
new file mode 100644
index 0000000000..97f6ae3bbd
--- /dev/null
+++ b/deps/v8/src/machine-type.h
@@ -0,0 +1,204 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MACHINE_TYPE_H_
+#define V8_MACHINE_TYPE_H_
+
+#include <iosfwd>
+
+#include "src/base/bits.h"
+#include "src/globals.h"
+#include "src/signature.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+enum class MachineRepresentation : uint8_t {
+ kNone,
+ kBit,
+ kWord8,
+ kWord16,
+ kWord32,
+ kWord64,
+ kFloat32,
+ kFloat64,
+ kTagged
+};
+
+enum class MachineSemantic : uint8_t {
+ kNone,
+ kBool,
+ kInt32,
+ kUint32,
+ kInt64,
+ kUint64,
+ kNumber,
+ kAny
+};
+
+class MachineType {
+ public:
+ MachineType()
+ : representation_(MachineRepresentation::kNone),
+ semantic_(MachineSemantic::kNone) {}
+ MachineType(MachineRepresentation representation, MachineSemantic semantic)
+ : representation_(representation), semantic_(semantic) {}
+
+ bool operator==(MachineType other) const {
+ return representation() == other.representation() &&
+ semantic() == other.semantic();
+ }
+
+ bool operator!=(MachineType other) const { return !(*this == other); }
+
+
+ MachineRepresentation representation() const { return representation_; }
+ MachineSemantic semantic() const { return semantic_; }
+
+ bool IsSigned() {
+ return semantic() == MachineSemantic::kInt32 ||
+ semantic() == MachineSemantic::kInt64;
+ }
+ bool IsUnsigned() {
+ return semantic() == MachineSemantic::kUint32 ||
+ semantic() == MachineSemantic::kUint64;
+ }
+
+ static MachineRepresentation PointerRepresentation() {
+ return (kPointerSize == 4) ? MachineRepresentation::kWord32
+ : MachineRepresentation::kWord64;
+ }
+ static MachineType Pointer() {
+ return MachineType(PointerRepresentation(), MachineSemantic::kNone);
+ }
+ static MachineType IntPtr() {
+ return (kPointerSize == 4) ? Int32() : Int64();
+ }
+ static MachineType Float32() {
+ return MachineType(MachineRepresentation::kFloat32,
+ MachineSemantic::kNumber);
+ }
+ static MachineType Float64() {
+ return MachineType(MachineRepresentation::kFloat64,
+ MachineSemantic::kNumber);
+ }
+ static MachineType Int8() {
+ return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
+ }
+ static MachineType Uint8() {
+ return MachineType(MachineRepresentation::kWord8, MachineSemantic::kUint32);
+ }
+ static MachineType Int16() {
+ return MachineType(MachineRepresentation::kWord16, MachineSemantic::kInt32);
+ }
+ static MachineType Uint16() {
+ return MachineType(MachineRepresentation::kWord16,
+ MachineSemantic::kUint32);
+ }
+ static MachineType Int32() {
+ return MachineType(MachineRepresentation::kWord32, MachineSemantic::kInt32);
+ }
+ static MachineType Uint32() {
+ return MachineType(MachineRepresentation::kWord32,
+ MachineSemantic::kUint32);
+ }
+ static MachineType Int64() {
+ return MachineType(MachineRepresentation::kWord64, MachineSemantic::kInt64);
+ }
+ static MachineType Uint64() {
+ return MachineType(MachineRepresentation::kWord64,
+ MachineSemantic::kUint64);
+ }
+ static MachineType AnyTagged() {
+ return MachineType(MachineRepresentation::kTagged, MachineSemantic::kAny);
+ }
+ static MachineType Bool() {
+ return MachineType(MachineRepresentation::kBit, MachineSemantic::kBool);
+ }
+ static MachineType TaggedBool() {
+ return MachineType(MachineRepresentation::kTagged, MachineSemantic::kBool);
+ }
+ static MachineType None() {
+ return MachineType(MachineRepresentation::kNone, MachineSemantic::kNone);
+ }
+
+ // These naked representations should eventually go away.
+ static MachineType RepWord8() {
+ return MachineType(MachineRepresentation::kWord8, MachineSemantic::kNone);
+ }
+ static MachineType RepWord16() {
+ return MachineType(MachineRepresentation::kWord16, MachineSemantic::kNone);
+ }
+ static MachineType RepWord32() {
+ return MachineType(MachineRepresentation::kWord32, MachineSemantic::kNone);
+ }
+ static MachineType RepWord64() {
+ return MachineType(MachineRepresentation::kWord64, MachineSemantic::kNone);
+ }
+ static MachineType RepFloat32() {
+ return MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone);
+ }
+ static MachineType RepFloat64() {
+ return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
+ }
+ static MachineType RepTagged() {
+ return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
+ }
+ static MachineType RepBit() {
+ return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
+ }
+
+ private:
+ MachineRepresentation representation_;
+ MachineSemantic semantic_;
+};
+
+V8_INLINE size_t hash_value(MachineRepresentation rep) {
+ return static_cast<size_t>(rep);
+}
+
+V8_INLINE size_t hash_value(MachineType type) {
+ return static_cast<size_t>(type.representation()) +
+ static_cast<size_t>(type.semantic()) * 16;
+}
+
+std::ostream& operator<<(std::ostream& os, MachineRepresentation rep);
+std::ostream& operator<<(std::ostream& os, MachineSemantic type);
+std::ostream& operator<<(std::ostream& os, MachineType type);
+
+inline bool IsFloatingPoint(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kFloat64;
+}
+
+// Gets the log2 of the element size in bytes of the machine type.
+inline int ElementSizeLog2Of(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ return 0;
+ case MachineRepresentation::kWord16:
+ return 1;
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kFloat32:
+ return 2;
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat64:
+ return 3;
+ case MachineRepresentation::kTagged:
+ return kPointerSizeLog2;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+typedef Signature<MachineType> MachineSignature;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MACHINE_TYPE_H_
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 27ee8e4334..23deb1afeb 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -82,11 +82,23 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
if (message->argument()->IsJSObject()) {
HandleScope scope(isolate);
Handle<Object> argument(message->argument(), isolate);
- Handle<Object> args[] = {argument};
- MaybeHandle<Object> maybe_stringified = Execution::TryCall(
- isolate, isolate->to_detail_string_fun(),
- isolate->factory()->undefined_value(), arraysize(args), args);
+
+ MaybeHandle<Object> maybe_stringified;
Handle<Object> stringified;
+ // Make sure we don't leak uncaught internally generated Error objects.
+ if (Object::IsErrorObject(isolate, argument)) {
+ Handle<Object> args[] = {argument};
+ maybe_stringified = Execution::TryCall(
+ isolate, isolate->no_side_effects_to_string_fun(),
+ isolate->factory()->undefined_value(), arraysize(args), args);
+ } else {
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
+
+ maybe_stringified = Object::ToString(isolate, argument);
+ }
+
if (!maybe_stringified.ToHandle(&stringified)) {
stringified = isolate->factory()->NewStringFromAsciiChecked("exception");
}
@@ -151,10 +163,9 @@ CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
fun_ = Handle<JSFunction>::cast(maybe_function);
receiver_ = JSObject::GetDataProperty(
call_site_obj, isolate->factory()->call_site_receiver_symbol());
- pos_ = Handle<Smi>::cast(JSObject::GetDataProperty(
- call_site_obj,
- isolate->factory()->call_site_position_symbol()))
- ->value();
+ CHECK(JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_position_symbol())
+ ->ToInt32(&pos_));
}
@@ -168,8 +179,9 @@ Handle<Object> CallSite::GetFileName() {
Handle<Object> CallSite::GetFunctionName() {
- Handle<String> result = JSFunction::GetDebugName(fun_);
+ Handle<String> result = JSFunction::GetName(fun_);
if (result->length() != 0) return result;
+
Handle<Object> script(fun_->shared()->script(), isolate_);
if (script->IsScript() &&
Handle<Script>::cast(script)->compilation_type() ==
@@ -316,7 +328,7 @@ Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
if (arg->IsString()) {
result_string = Handle<String>::cast(arg);
} else {
- Handle<JSFunction> fun = isolate->no_side_effect_to_string_fun();
+ Handle<JSFunction> fun = isolate->no_side_effects_to_string_fun();
MaybeHandle<Object> maybe_result =
Execution::TryCall(isolate, fun, factory->undefined_value(), 1, &arg);
@@ -390,94 +402,5 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
}
-MaybeHandle<String> ErrorToStringHelper::Stringify(Isolate* isolate,
- Handle<JSObject> error) {
- VisitedScope scope(this, error);
- if (scope.has_visited()) return isolate->factory()->empty_string();
-
- Handle<String> name;
- Handle<String> message;
- Handle<Name> internal_key = isolate->factory()->internal_error_symbol();
- Handle<String> message_string =
- isolate->factory()->NewStringFromStaticChars("message");
- Handle<String> name_string = isolate->factory()->name_string();
- LookupIterator internal_error_lookup(
- error, internal_key, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-
- // Find out whether an internally created error object is on the prototype
- // chain. If the name property is found on a holder prior to the internally
- // created error object, use that name property. Otherwise just use the
- // constructor name to avoid triggering possible side effects.
- // Similar for the message property. If the message property shadows the
- // internally created error object, use that message property. Otherwise
- // use empty string as message.
- LookupIterator name_lookup(error, name_string,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- if (internal_error_lookup.IsFound() &&
- !ShadowsInternalError(isolate, &name_lookup, &internal_error_lookup)) {
- Handle<JSObject> holder = internal_error_lookup.GetHolder<JSObject>();
- name = Handle<String>(holder->constructor_name());
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, name,
- GetStringifiedProperty(isolate, &name_lookup,
- isolate->factory()->Error_string()),
- String);
- }
-
- LookupIterator message_lookup(
- error, message_string, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- if (internal_error_lookup.IsFound() &&
- !ShadowsInternalError(isolate, &message_lookup, &internal_error_lookup)) {
- message = isolate->factory()->empty_string();
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, message,
- GetStringifiedProperty(isolate, &message_lookup,
- isolate->factory()->empty_string()),
- String);
- }
-
- if (name->length() == 0) return message;
- if (message->length() == 0) return name;
- IncrementalStringBuilder builder(isolate);
- builder.AppendString(name);
- builder.AppendCString(": ");
- builder.AppendString(message);
- return builder.Finish();
-}
-
-
-bool ErrorToStringHelper::ShadowsInternalError(
- Isolate* isolate, LookupIterator* property_lookup,
- LookupIterator* internal_error_lookup) {
- if (!property_lookup->IsFound()) return false;
- Handle<JSObject> holder = property_lookup->GetHolder<JSObject>();
- // It's fine if the property is defined on the error itself.
- if (holder.is_identical_to(property_lookup->GetReceiver())) return true;
- PrototypeIterator it(isolate, holder, PrototypeIterator::START_AT_RECEIVER);
- while (true) {
- if (it.IsAtEnd()) return false;
- if (it.IsAtEnd(internal_error_lookup->GetHolder<JSObject>())) return true;
- it.AdvanceIgnoringProxies();
- }
-}
-
-
-MaybeHandle<String> ErrorToStringHelper::GetStringifiedProperty(
- Isolate* isolate, LookupIterator* property_lookup,
- Handle<String> default_value) {
- if (!property_lookup->IsFound()) return default_value;
- Handle<Object> obj;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Object::GetProperty(property_lookup),
- String);
- if (obj->IsUndefined()) return default_value;
- if (!obj->IsString()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Object::ToString(isolate, obj),
- String);
- }
- return Handle<String>::cast(obj);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 6d3f797822..8cd60b1c5c 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -68,7 +68,7 @@ class CallSite {
Isolate* isolate_;
Handle<Object> receiver_;
Handle<JSFunction> fun_;
- int pos_;
+ int32_t pos_;
};
@@ -87,21 +87,29 @@ class CallSite {
T(ApplyNonFunction, \
"Function.prototype.apply was called on %, which is a % and not a " \
"function") \
+ T(ArrayBufferTooShort, \
+ "Derived ArrayBuffer constructor created a buffer which was too small") \
+ T(ArrayBufferSpeciesThis, \
+ "ArrayBuffer subclass returned this from species constructor") \
T(ArrayFunctionsOnFrozen, "Cannot modify frozen array elements") \
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.") \
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
+ T(CallSiteExpectsFunction, \
+ "CallSite expects function as second argument, got %") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
T(CannotPreventExt, "Cannot prevent extensions") \
T(CannotFreezeArrayBufferView, \
"Cannot freeze array buffer views with elements") \
T(CircularStructure, "Converting circular structure to JSON") \
+ T(ConstructAbstractClass, "Abstract class % not directly constructable") \
T(ConstAssign, "Assignment to constant variable.") \
T(ConstructorNonCallable, \
- "Class constructors cannot be invoked without 'new'") \
+ "Class constructor % cannot be invoked without 'new'") \
T(ConstructorNotFunction, "Constructor % requires 'new'") \
+ T(ConstructorNotReceiver, "The .constructor property is not an object") \
T(CurrencyCode, "Currency code is required with currency style.") \
T(DataViewNotArrayBuffer, \
"First argument to DataView constructor must be an ArrayBuffer") \
@@ -127,6 +135,7 @@ class CallSite {
"Function has non-object prototype '%' in instanceof check") \
T(InvalidArgument, "invalid_argument") \
T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %") \
+ T(InvalidSimdOperation, "% is not a valid type for this SIMD operation.") \
T(IteratorResultNotAnObject, "Iterator result % is not an object") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
T(LanguageID, "Language ID should be string or object.") \
@@ -148,6 +157,7 @@ class CallSite {
T(NotIntlObject, "% is not an i18n object.") \
T(NotGeneric, "% is not generic") \
T(NotIterable, "% is not iterable") \
+ T(NotPropertyName, "% is not a valid property name") \
T(NotTypedArray, "this is not a typed array.") \
T(NotSharedTypedArray, "% is not a shared typed array.") \
T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
@@ -175,26 +185,100 @@ class CallSite {
T(OrdinaryFunctionCalledAsConstructor, \
"Function object that's not a constructor was created with new") \
T(PromiseCyclic, "Chaining cycle detected for promise %") \
+ T(PromiseExecutorAlreadyInvoked, \
+ "Promise executor has already been invoked with non-undefined arguments") \
T(PropertyDescObject, "Property description must be an object: %") \
- T(PropertyNotFunction, "Property '%' of object % is not a function") \
+ T(PropertyNotFunction, \
+ "'%' returned for property '%' of object '%' is not a function") \
T(ProtoObjectOrNull, "Object prototype may only be an Object or null: %") \
T(PrototypeParentNotAnObject, \
"Class extends value does not have valid prototype property %") \
- T(ProxyHandlerDeleteFailed, \
- "Proxy handler % did not return a boolean value from 'delete' trap") \
- T(ProxyHandlerNonObject, "Proxy.% called with non-object as handler") \
- T(ProxyHandlerReturned, "Proxy handler % returned % from '%' trap") \
- T(ProxyHandlerTrapMissing, "Proxy handler % has no '%' trap") \
- T(ProxyHandlerTrapMustBeCallable, \
- "Proxy handler %0 has non-callable '%' trap") \
- T(ProxyNonObjectPropNames, "Trap '%' returned non-object %") \
- T(ProxyProtoNonObject, "Proxy.create called with no-object as prototype") \
- T(ProxyPropNotConfigurable, \
- "Proxy handler % returned non-configurable descriptor for property '%' " \
- "from '%' trap") \
- T(ProxyRepeatedPropName, "Trap '%' returned repeated property name '%'") \
- T(ProxyTrapFunctionExpected, \
- "Proxy.createFunction called with non-function for '%' trap") \
+ T(ProxyConstructNonObject, \
+ "'construct' on proxy: trap returned non-object ('%')") \
+ T(ProxyDefinePropertyNonConfigurable, \
+ "'defineProperty' on proxy: trap returned truish for defining " \
+ "non-configurable property '%' which is either non-existant or " \
+ "configurable in the proxy target") \
+ T(ProxyDefinePropertyNonExtensible, \
+ "'defineProperty' on proxy: trap returned truish for adding property '%' " \
+ " to the non-extensible proxy target") \
+ T(ProxyDefinePropertyIncompatible, \
+ "'defineProperty' on proxy: trap returned truish for adding property '%' " \
+ " that is incompatible with the existing property in the proxy target") \
+ T(ProxyDeletePropertyNonConfigurable, \
+ "'deleteProperty' on proxy: trap returned truish for property '%' which " \
+ "is non-configurable in the proxy target") \
+ T(ProxyEnumerateNonObject, "'enumerate' on proxy: trap returned non-object") \
+ T(ProxyEnumerateNonString, \
+ "'enumerate' on proxy: trap result includes non-string") \
+ T(ProxyGetNonConfigurableData, \
+ "'get' on proxy: property '%' is a read-only and " \
+ "non-configurable data property on the proxy target but the proxy " \
+ "did not return its actual value (expected '%' but got '%')") \
+ T(ProxyGetNonConfigurableAccessor, \
+ "'get' on proxy: property '%' is a non-configurable accessor " \
+ "property on the proxy target and does not have a getter function, but " \
+ "the trap did not return 'undefined' (got '%')") \
+ T(ProxyGetOwnPropertyDescriptorIncompatible, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned descriptor for " \
+ "property '%' that is incompatible with the existing property in the " \
+ "proxy target") \
+ T(ProxyGetOwnPropertyDescriptorInvalid, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned neither object nor " \
+ "undefined for property '%'") \
+ T(ProxyGetOwnPropertyDescriptorNonConfigurable, \
+ "'getOwnPropertyDescriptor' on proxy: trap reported non-configurability " \
+ "for property '%' which is either non-existant or configurable in the " \
+ "proxy target") \
+ T(ProxyGetOwnPropertyDescriptorNonExtensible, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
+ "property '%' which exists in the non-extensible proxy target") \
+ T(ProxyGetOwnPropertyDescriptorUndefined, \
+ "'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
+ "property '%' which is non-configurable in the proxy target") \
+ T(ProxyGetPrototypeOfInvalid, \
+ "'getPrototypeOf' on proxy: trap returned neither object nor null") \
+ T(ProxyGetPrototypeOfNonExtensible, \
+ "'getPrototypeOf' on proxy: proxy target is non-extensible but the " \
+ "trap did not return its actual prototype") \
+ T(ProxyHandlerOrTargetRevoked, \
+ "Cannot create proxy with a revoked proxy as target or handler") \
+ T(ProxyHasNonConfigurable, \
+ "'has' on proxy: trap returned falsish for property '%' which exists in " \
+ "the proxy target as non-configurable") \
+ T(ProxyHasNonExtensible, \
+ "'has' on proxy: trap returned falsish for property '%' but the proxy " \
+ "target is not extensible") \
+ T(ProxyIsExtensibleInconsistent, \
+ "'isExtensible' on proxy: trap result does not reflect extensibility of " \
+ "proxy target (which is '%')") \
+ T(ProxyNonObject, \
+ "Cannot create proxy with a non-object as target or handler") \
+ T(ProxyOwnKeysMissing, \
+ "'ownKeys' on proxy: trap result did not include '%'") \
+ T(ProxyOwnKeysNonExtensible, \
+ "'ownKeys' on proxy: trap returned extra keys but proxy target is " \
+ "non-extensible") \
+ T(ProxyPreventExtensionsExtensible, \
+ "'preventExtensions' on proxy: trap returned truish but the proxy target " \
+ "is extensible") \
+ T(ProxyPrivate, "Cannot pass private property name to proxy trap") \
+ T(ProxyRevoked, "Cannot perform '%' on a proxy that has been revoked") \
+ T(ProxySetFrozenData, \
+ "'set' on proxy: trap returned truish for property '%' which exists in " \
+ "the proxy target as a non-configurable and non-writable data property " \
+ "with a different value") \
+ T(ProxySetFrozenAccessor, \
+ "'set' on proxy: trap returned truish for property '%' which exists in " \
+ "the proxy target as a non-configurable and non-writable accessor " \
+ "property without a setter") \
+ T(ProxySetPrototypeOfNonExtensible, \
+ "'setPrototypeOf' on proxy: trap returned truish for setting a new " \
+ "prototype on the non-extensible proxy target") \
+ T(ProxyTrapReturnedFalsish, "'%' on proxy: trap returned falsish") \
+ T(ProxyTrapReturnedFalsishFor, \
+ "'%' on proxy: trap returned falsish for property '%'") \
+ T(ReadGlobalReferenceThroughProxy, "Trying to access '%' through proxy") \
T(RedefineDisallowed, "Cannot redefine property: %") \
T(RedefineExternalArray, \
"Cannot redefine a property of an object with external array elements") \
@@ -212,7 +296,7 @@ class CallSite {
"'caller' and 'arguments' are restricted function properties and cannot " \
"be accessed in this context.") \
T(StaticPrototype, "Classes may not have static property named prototype") \
- T(StrictCannotAssign, "Cannot assign to read only '% in strict mode") \
+ T(StrictCannotAssign, "Cannot assign to read only '%' in strict mode") \
T(StrictDeleteProperty, "Cannot delete property '%' of %") \
T(StrictPoisonPill, \
"'caller', 'callee', and 'arguments' properties may not be accessed on " \
@@ -241,7 +325,6 @@ class CallSite {
"Invalid property descriptor. Cannot both specify accessors and a value " \
"or writable attribute, %") \
T(VarRedeclaration, "Identifier '%' has already been declared") \
- T(WithExpression, "% has no properties") \
T(WrongArgs, "%: Arguments list has wrong type") \
/* ReferenceError */ \
T(NonMethod, "'super' is referenced from non-method") \
@@ -254,7 +337,11 @@ class CallSite {
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(DateRange, "Provided date is not in valid range.") \
- T(ExpectedLocation, "Expected Area/Location for time zone, got %") \
+ T(ExpectedTimezoneID, \
+ "Expected Area/Location(/Location)* for time zone, got %") \
+ T(ExpectedLocation, \
+ "Expected letters optionally connected with underscores or hyphens for " \
+ "a location, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
T(InvalidArrayLength, "Invalid array length") \
@@ -309,7 +396,10 @@ class CallSite {
T(IllegalLanguageModeDirective, \
"Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
+ T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
+ T(InvalidCoverInitializedName, "Invalid shorthand property initializer") \
+ T(InvalidDestructuringTarget, "Invalid destructuring assignment target") \
T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
T(InvalidLhsInPostfixOp, \
"Invalid left-hand side expression in postfix operation") \
@@ -330,6 +420,7 @@ class CallSite {
T(PushPastSafeLength, \
"Pushing % elements on an array-like of length % " \
"is disallowed, as the total surpasses 2**53-1") \
+ T(ElementAfterRest, "Rest element must be last element in array") \
T(BadSetterRestParameter, \
"Setter function argument must not be a rest parameter") \
T(ParamDupe, "Duplicate parameter name not allowed in this context") \
@@ -338,6 +429,8 @@ class CallSite {
T(SloppyLexical, \
"Block-scoped declarations (let, const, function, class) not yet " \
"supported outside strict mode") \
+ T(SpeciesNotConstructor, \
+ "object.constructor[Symbol.species] is not a constructor") \
T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
T(StrictEvalArguments, "Unexpected eval or arguments in strict mode") \
T(StrictFunction, \
@@ -399,6 +492,8 @@ class CallSite {
T(TooManyParameters, \
"Too many parameters in function definition (only 65535 allowed)") \
T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
+ T(TypedArrayTooShort, \
+ "Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
@@ -463,44 +558,6 @@ class MessageHandler {
};
-class ErrorToStringHelper {
- public:
- ErrorToStringHelper() : visited_(0) {}
-
- MUST_USE_RESULT MaybeHandle<String> Stringify(Isolate* isolate,
- Handle<JSObject> error);
-
- private:
- class VisitedScope {
- public:
- VisitedScope(ErrorToStringHelper* helper, Handle<JSObject> error)
- : helper_(helper), has_visited_(false) {
- for (const Handle<JSObject>& visited : helper->visited_) {
- if (visited.is_identical_to(error)) {
- has_visited_ = true;
- break;
- }
- }
- helper->visited_.Add(error);
- }
- ~VisitedScope() { helper_->visited_.RemoveLast(); }
- bool has_visited() { return has_visited_; }
-
- private:
- ErrorToStringHelper* helper_;
- bool has_visited_;
- };
-
- static bool ShadowsInternalError(Isolate* isolate,
- LookupIterator* property_lookup,
- LookupIterator* internal_error_lookup);
-
- static MUST_USE_RESULT MaybeHandle<String> GetStringifiedProperty(
- Isolate* isolate, LookupIterator* property_lookup,
- Handle<String> default_value);
-
- List<Handle<JSObject> > visited_;
-};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 0719055eff..27ec8e5bda 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -92,7 +92,7 @@ void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
int count = Assembler::RelocateInternalReference(rmode_, p, delta);
- CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+ Assembler::FlushICache(isolate_, p, count * sizeof(uint32_t));
}
}
@@ -142,7 +142,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -179,7 +180,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -207,7 +208,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -310,8 +311,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
- host_,
+ Assembler::set_target_address_at(isolate_, pc_ + Assembler::kInstrSize, host_,
stub->instruction_start());
}
@@ -328,7 +328,7 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// The pc_ offset of 0 assumes patched debug break slot or return
// sequence.
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -346,7 +346,7 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -437,11 +437,23 @@ void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
}
-void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
+void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
if (IsPrevInstrCompactBranch()) {
+ // Nop instruction to preceed a CTI in forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+
+ ClearCompactBranchState();
+ }
+}
+
+
+void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
+ if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
// Nop instruction to preceed a CTI in forbidden slot:
Instr nop = SPECIAL | SLL;
@@ -459,6 +471,22 @@ void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
}
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+ CheckTrampolinePoolQuick();
+}
+
+
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(x, is_compact_branch);
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 3860fe4e19..a8b6cc7c32 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -296,6 +296,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -1335,17 +1336,23 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ DCHECK(!rs.is(zero_reg));
+ if (rs.code() >= rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ DCHECK(!rs.is(zero_reg));
+ if (rs.code() >= rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
@@ -1652,7 +1659,7 @@ void Assembler::sll(Register rd,
// nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
// instructions.
DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
@@ -1662,7 +1669,7 @@ void Assembler::sllv(Register rd, Register rt, Register rs) {
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
@@ -1672,7 +1679,7 @@ void Assembler::srlv(Register rd, Register rt, Register rs) {
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
@@ -1693,7 +1700,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
@@ -1701,6 +1708,16 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
}
+void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ DCHECK(sa < 5 && sa > 0);
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+ emit(instr);
+}
+
+
// ------------Memory-instructions-------------
// Helper for base-reg + offset, when offset is larger than int16.
@@ -1818,7 +1835,7 @@ void Assembler::lui(Register rd, int32_t j) {
}
-void Assembler::aui(Register rs, Register rt, int32_t j) {
+void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
DCHECK(!(rs.is(zero_reg)));
@@ -2194,13 +2211,13 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
@@ -2267,19 +2284,19 @@ void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
@@ -2287,7 +2304,7 @@ void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
ft.reg_code = (cc & 0x0007) << 2 | 1;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
@@ -2295,7 +2312,7 @@ void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
@@ -2303,7 +2320,7 @@ void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft;
ft.reg_code = (cc & 0x0007) << 2 | 0;
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
@@ -2489,55 +2506,71 @@ void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
@@ -2632,7 +2665,8 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -2648,7 +2682,8 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode());
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -2796,6 +2831,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -2829,54 +2865,42 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(uint32_t data) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dq(uint64_t data) {
- CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) = data;
- pc_ += sizeof(uint64_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(Label* label) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
uint32_t data;
+ CheckForEmitInForbiddenSlot();
if (label->is_bound()) {
data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
} else {
data = jump_address(label);
internal_reference_positions_.insert(label->pos());
}
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
-}
-
-
-void Assembler::emit_code_stub_address(Code* stub) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) =
- reinterpret_cast<uint32_t>(stub->instruction_start());
- pc_ += sizeof(uint32_t);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
if (rmode >= RelocInfo::COMMENT &&
- rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsComment(rmode)
@@ -2891,10 +2915,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
+ RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -3006,7 +3028,7 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Address pc,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
Instr instr2 = instr_at(pc + kInstrSize);
@@ -3028,7 +3050,7 @@ void Assembler::set_target_address_at(Address pc,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, 2 * sizeof(int32_t));
+ Assembler::FlushICache(isolate, pc, 2 * sizeof(int32_t));
}
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 682c6602da..054695483f 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -457,30 +457,28 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ static void set_target_address_at(
+ Isolate* isolate, Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(pc, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, target, icache_flush_mode);
}
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- INLINE(static void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -493,16 +491,17 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
set_target_address_at(
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
- code,
+ isolate,
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -752,6 +751,8 @@ class Assembler : public AssemblerBase {
void rotr(Register rd, Register rt, uint16_t sa);
void rotrv(Register rd, Register rt, Register rs);
+ // Address computing instructions with shift.
+ void lsa(Register rd, Register rt, Register rs, uint8_t sa);
// ------------Memory-instructions-------------
@@ -1012,7 +1013,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1048,9 +1049,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
- // Emits the address of the code stub's first instruction.
- void emit_code_stub_address(Code* stub);
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1260,6 +1258,11 @@ class Assembler : public AssemblerBase {
void GrowBuffer();
inline void emit(Instr x,
CompactBranchType is_compact_branch = CompactBranchType::NO);
+ inline void emit(uint64_t x);
+ inline void CheckForEmitInForbiddenSlot();
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 08f8e65359..f6c1dfbaaf 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -23,9 +23,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- a1 : called function
+ // -- a1 : target
+ // -- a3 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -37,34 +36,30 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(a1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(a1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(a3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(a1, a3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects a0 to contain the number of arguments
- // including the receiver and the extra arguments. But a0 is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- Label argc, done_argc;
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2,
- FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(a2);
- __ Branch(&argc, eq, a2,
- Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ Addu(a0, a2, num_extra_args + 1);
- __ jmp(&done_argc);
- __ bind(&argc);
+ // including the receiver and the extra arguments.
__ Addu(a0, a0, num_extra_args + 1);
- __ bind(&done_argc);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -73,30 +68,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ lw(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ lw(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ lw(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
- __ lw(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -162,6 +142,108 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ __ Drop(2);
+ }
+
+ // 2a. Convert first argument to number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ Move(v0, Smi::FromInt(0));
+ __ DropAndRet(1);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ __ Drop(2);
+ __ jmp(&done);
+ __ bind(&no_arguments);
+ __ Move(a0, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure a0 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(a0, &done_convert);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(a0, v0);
+ __ Pop(a1, a3);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -215,7 +297,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(a0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -225,13 +307,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
- // -- a3 : original constructor
+ // -- a3 : new target
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into a0 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -248,7 +333,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure a0 is a string.
+ // 3. Make sure a0 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
@@ -267,68 +352,42 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- a0 : the first argument
- // -- a1 : constructor function
- // -- a3 : original constructor
- // -- ra : return address
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
-
- // Fall back to runtime if the original constructor and function differ.
- __ Branch(&rt_call, ne, a1, Operand(a3));
-
- __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(a1, a2, a3);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ Ret(USE_DELAY_SLOT);
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Move(a2, Smi::FromInt(JSValue::kSize));
- __ Push(a0, a1, a2);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(a0, a1);
- }
- __ jmp(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a1, a3); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(a0, a1);
- }
- __ Ret(USE_DELAY_SLOT);
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push call kind information and function as parameter to the runtime call.
- __ Push(a1, a1);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
- // Restore call kind information and receiver.
- __ Pop(a1);
+ // Restore target function and new target.
+ __ Pop(a1, a3);
}
@@ -365,12 +424,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : allocation site or undefined
- // -- a3 : original constructor
+ // -- a3 : new target
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -384,170 +444,162 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(a2, a0, a1, a3);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
-
- // Verify that the original constructor is a JSFunction.
- __ GetObjectType(a3, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it is in fact a map.
- // a3: original constructor
- __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t5, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&rt_call, ne, a1, Operand(t1));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lw(t0, bit_field3);
- __ DecodeField<Map::Counter>(t2, t0);
- __ Branch(&allocate, lt, t2, Operand(Map::kSlackTrackingCounterEnd));
- // Decrease generous allocation count.
- __ Subu(t0, t0, Operand(1 << Map::Counter::kShift));
- __ Branch(USE_DELAY_SLOT, &allocate, ne, t2,
- Operand(Map::kSlackTrackingCounterEnd));
- __ sw(t0, bit_field3); // In delay slot.
-
- __ Push(a1, a2, a2); // a2 = Initial map.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(a1, a2);
- __ li(t2, Operand(Map::kSlackTrackingCounterEnd - 1));
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- Label rt_call_reload_new_target;
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
- __ Allocate(a3, t4, t5, t6, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t4: JSObject (not tagged)
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- __ Addu(t5, t5, Operand(3*kPointerSize));
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t4: JSObject (not tagged)
- // t5: First in-object property of JSObject (not tagged)
- // t2: slack tracking counter (non-API function case)
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-
- // Use t7 to hold undefined, which is used in several places below.
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ Branch(&no_inobject_slack_tracking, lt, t2,
- Operand(Map::kSlackTrackingCounterEnd));
-
- // Allocate object with a slack.
- __ lbu(
- a0,
- FieldMemOperand(
- a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ subu(a0, a0, a2);
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a0, t5, at);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ sll(at, a3, kPointerSizeLog2);
- __ Addu(t6, t4, Operand(at)); // End of object.
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
- a0, Operand(t6));
+ __ Push(a2, a0);
+
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ GetObjectType(a3, t1, t0);
+ __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it is in fact a map.
+ // a3: new target
+ __ lw(a2,
+ FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &rt_call);
+ __ GetObjectType(a2, t5, t4);
+ __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&rt_call, ne, a1, Operand(t1));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ // a3: new target
+ __ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+
+ __ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: new target
+ // t4: JSObject (not HeapObject tagged - the actual address).
+ // t3: start of next object
+ __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t5, t4);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+ __ Addu(t5, t5, Operand(3 * kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with appropriate filler.
+ // t4: JSObject (tagged)
+ // t5: First in-object property of JSObject (not tagged)
+ __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lw(t0, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(t2, t0);
+ // t2: slack tracking counter
+ __ Branch(&no_inobject_slack_tracking, lt, t2,
+ Operand(Map::kSlackTrackingCounterEnd));
+ // Decrease generous allocation count.
+ __ Subu(t0, t0, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(t0, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ subu(a0, t3, a0);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t5,
+ Operand(a0));
+ }
+ __ InitializeFieldsWithFiller(t5, a0, t7);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(t5, t3, t7);
+
+ // t2: slack tracking counter value before decreasing.
+ __ Branch(&allocated, ne, t2, Operand(Map::kSlackTrackingCounterEnd));
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(a1, a3, t4, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(a1, a3, t4);
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t4: JSObject
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(t5, a0, t7);
- // To allow for truncation.
- __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
- }
+ __ InitializeFieldsWithFiller(t5, t3, t7);
- __ sll(at, a3, kPointerSizeLog2);
- __ Addu(a0, t4, Operand(at)); // End of object.
- __ InitializeFieldsWithFiller(t5, a0, t7);
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t4: JSObject
+ __ jmp(&allocated);
+ }
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ // a3: new target
+ __ bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(a1, a3, a1, a3); // constructor function, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(t4, v0);
+ __ Pop(a1, a3);
- // Continue with JSObject being successfully allocated.
+ // Receiver for constructor call allocated.
+ // a1: constructor function
+ // a3: new target
// t4: JSObject
- __ jmp(&allocated);
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ lw(a3, MemOperand(sp, 0 * kPointerSize));
+ // Retrieve smi-tagged arguments count from the stack.
+ __ lw(a0, MemOperand(sp));
}
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: original constructor
- __ bind(&rt_call);
-
- __ Push(a1, a3); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(t4, v0);
-
- // Receiver for constructor call allocated.
- // t4: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(a3); // new.target
- __ Pop(a1);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ lw(a0, MemOperand(sp));
__ SmiUntag(a0);
- __ Push(a3, t4, t4);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(t4, t4);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -556,26 +608,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a0: number of arguments
// a1: constructor function
// a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
+ // a3: new target
+ // t4: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(a3, a0);
+ __ SmiTag(t4, a0);
__ jmp(&entry);
__ bind(&loop);
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, a2, Operand(t0));
__ lw(t1, MemOperand(t0));
__ push(t1);
__ bind(&entry);
- __ Addu(a3, a3, Operand(-2));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ __ Addu(t4, t4, Operand(-2));
+ __ Branch(&loop, greater_equal, t4, Operand(zero_reg));
// Call the function.
// a0: number of arguments
// a1: constructor function
+ // a3: new target
if (is_api_function) {
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
@@ -583,47 +636,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(v0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a1, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ lw(a1, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -631,104 +687,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sll(t0, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, t0);
__ Addu(sp, sp, kPointerSize);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ }
__ Ret();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a2 : allocation site or undefined
- // -- a3 : original constructor
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(a2, t0);
- __ push(a2);
-
- __ mov(t0, a0);
- __ SmiTag(t0);
- __ push(t0); // Smi-tagged arguments count.
-
- // Push new.target.
- __ push(a3);
-
- // receiver is the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ push(at);
-
- // Set up pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // t0: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, entry;
- __ Branch(&entry);
- __ bind(&loop);
- __ sll(at, t0, kPointerSizeLog2 - 1);
- __ Addu(at, a2, Operand(at));
- __ lw(at, MemOperand(at));
- __ push(at);
- __ bind(&entry);
- __ Subu(t0, t0, Operand(2));
- __ Branch(&loop, ge, t0, Operand(zero_reg));
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ li(a2, Operand(debug_step_in_fp));
- __ lw(a2, MemOperand(a2));
- __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
-
- __ Push(a0, a1, a1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(a0, a1);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // v0: result
- // sp[0]: new.target
- // sp[1]: number of arguments (smi-tagged)
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ lw(a1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame.
- }
- __ sll(at, a1, kPointerSizeLog2 - 1);
- __ Addu(sp, sp, Operand(at));
- __ Addu(sp, sp, Operand(kPointerSize));
- __ Jump(ra);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -744,7 +728,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
Label okay;
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
// Make a2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
+ // here which will cause a2 to become negative.
__ Subu(a2, sp, a2);
// Check if the arguments will overflow the stack.
if (argc_is_tagged == kArgcIsSmiTagged) {
@@ -757,7 +741,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Branch(&okay, gt, a2, Operand(t3));
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -861,6 +845,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o a1: the JS function object being called.
+// o a3: the new target
// o cp: our context
// o fp: the caller's frame pointer
// o sp: stack pointer
@@ -878,6 +863,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Push(a3);
+
+ // Push zero for bytecode array offset.
+ __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -906,7 +895,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t0));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t1, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -936,16 +925,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Subu(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Addu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -1021,7 +1009,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (not including receiver)
- // -- a3 : original constructor
+ // -- a3 : new target
// -- a1 : constructor to call
// -- a2 : address of the first argument
// -----------------------------------
@@ -1044,45 +1032,114 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Branch(&loop_header, gt, a2, Operand(t0));
// Call the constructor with a0, a1, and a3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ Addu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ lw(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ lw(a1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ lw(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ Addu(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a1, MemOperand(a1));
+ __ sll(a1, a1, kPointerSizeLog2);
+ __ Addu(a1, kInterpreterDispatchTableRegister, a1);
+ __ lw(a1, MemOperand(a1));
+ __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a1);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(a1, a1);
- // Whether to compile in a background thread.
- __ LoadRoot(
- at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(at);
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(a1);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
-
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -1098,8 +1155,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1137,8 +1195,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1178,7 +1237,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1204,7 +1263,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> t2.
@@ -1246,6 +1305,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers {t2, t3, t4, t5}.
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = t2;
+ Register map = t3;
+ Register constructor = t4;
+ Register scratch = t5;
+
+ // If there is no signature, return the holder.
+ __ lw(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ Label next_prototype;
+ __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ Register type = constructor;
+ __ lw(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ Branch(&receiver_check_passed, eq, signature, Operand(type),
+ USE_DELAY_SLOT);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ GetObjectType(type, scratch, scratch);
+ __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+
+ // Otherwise load the parent function template and iterate.
+ __ lw(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ Branch(&function_template_loop);
+
+ // Load the next prototype and iterate.
+ __ bind(&next_prototype);
+ __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+
+ __ Branch(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : callee
+ // -- ra : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ lw(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Addu(t8, sp, at);
+ __ lw(t0, MemOperand(t8));
+ CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ lw(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset));
+ __ lw(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset));
+ __ Addu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t2);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver);
+ __ Addu(t8, t8, Operand(kPointerSize));
+ __ addu(sp, t8, zero_reg);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1253,7 +1415,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1286,7 +1448,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1297,7 +1459,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into a0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(a0);
+ __ JumpIfSmi(a0, &receiver_not_date);
+ __ GetObjectType(a0, t0, t0);
+ __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot.
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ lw(a1, MemOperand(a1));
+ __ lw(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
+ __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(
+ a0, JSDate::kValueOffset +
+ field_index * kPointerSize)); // In delay slot.
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, t0);
+ __ li(a1, Operand(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into a1, argArray into a0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ mov(a3, a2);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ Addu(a0, sp, Operand(scratch));
+ __ lw(a1, MemOperand(a0)); // receiver
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // thisArg
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // argArray
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ sw(a2, MemOperand(sp));
+ __ mov(a0, a3);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a0 : argArray
+ // -- a1 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(a1, &receiver_not_callable);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsCallable));
+ __ Branch(&receiver_not_callable, eq, t0, Operand(zero_reg));
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
{
@@ -1341,189 +1623,145 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ lw(key, MemOperand(fp, indexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array.
- __ bind(&loop);
- __ lw(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- __ lw(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ lw(key, MemOperand(fp, indexOffset));
- __ Addu(key, key, Operand(1 << kSmiTagSize));
- __ sw(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, limitOffset));
- __ Branch(&loop, ne, key, Operand(a1));
-
- // On exit, the pushed arguments count is in a0, untagged
- __ mov(a0, key);
- __ SmiUntag(a0);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ lw(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array.
- __ Push(a0, a1);
- // Returns (in v0) number of arguments to copy to stack as Smi.
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- // Returns the result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ mov(a1, zero_reg);
- __ lw(a2, MemOperand(fp, kReceiverOffset));
- __ Push(v0, a1, a2); // limit, initial index and receiver.
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-
- // Tear down the internal frame and remove function, receiver and args.
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ mov(a3, a1);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ mov(a0, scratch);
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(zero_reg));
+ __ Addu(a0, sp, Operand(a0));
+ __ lw(a1, MemOperand(a0)); // target
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // thisArgument
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // argumentsList
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ sw(a2, MemOperand(sp));
+ __ mov(a0, a3);
}
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
-}
-
-
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(a1, &target_not_callable);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsCallable));
+ __ Branch(&target_not_callable, eq, t0, Operand(zero_reg));
+
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ lw(a0, MemOperand(fp, kNewTargetOffset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&validate_arguments, ne, a0, Operand(at));
- __ lw(a0, MemOperand(fp, kFunctionOffset));
- __ sw(a0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ lw(a0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(a0);
- __ lw(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(a0);
- // Returns argument count in v0.
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- // Returns result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(v0); // limit
- __ mov(a1, zero_reg); // initial index
- __ push(a1);
- // Push the constructor function as callee.
- __ lw(a0, MemOperand(fp, kFunctionOffset));
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ lw(t0, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ jr(ra);
- __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+ Label no_arg;
+ Register scratch = t0;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ sll(scratch, a0, kPointerSizeLog2);
+ __ Addu(a0, sp, Operand(scratch));
+ __ sw(a2, MemOperand(a0)); // receiver
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a1, MemOperand(a0)); // target
+ __ mov(a3, a1); // new.target defaults to target
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a2, MemOperand(a0)); // argumentsList
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ lw(a3, MemOperand(a0)); // new.target
+ __ bind(&no_arg);
+ __ Addu(sp, sp, Operand(scratch));
+ __ mov(a0, a2);
+ }
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a3 : new.target
+ // -- a1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(a1, &target_not_constructor);
+ __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+ __ Branch(&target_not_constructor, eq, t0, Operand(zero_reg));
+
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ And(t0, t0, Operand(1 << Map::kIsConstructor));
+ __ Branch(&new_target_not_constructor, eq, t0, Operand(zero_reg));
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ sw(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ sw(a3, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1533,6 +1771,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- a0 : actual number of arguments
// -- a1 : function (passed through to callee)
// -- a2 : expected number of arguments
+ // -- a3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1575,6 +1814,130 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(a0, &create_runtime);
+
+ // Load the map of argumentsList into a2.
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+
+ // Load native context into t0.
+ __ lw(t0, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ lw(at, ContextMemOperand(t0, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+ __ lw(at, ContextMemOperand(t0, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+
+ // Check if argumentsList is a fast JSArray.
+ __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3, a0);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ mov(a0, v0);
+ __ Pop(a1, a3);
+ __ lw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
+ }
+ __ Branch(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ lw(a2,
+ FieldMemOperand(a0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
+ __ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ Branch(&create_runtime, ne, a2, Operand(at));
+ __ SmiUntag(a2);
+ __ mov(a0, t0);
+ __ Branch(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(a2);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
+ __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+ __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ SmiUntag(a2);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(t0, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ Subu(t0, sp, t0);
+ // Check if the arguments will overflow the stack.
+ __ sll(at, a2, kPointerSizeLog2);
+ __ Branch(&done, gt, t0, Operand(at)); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : args (a FixedArray built from argumentsList)
+ // -- a2 : len (number of elements to push from args)
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ mov(t0, zero_reg);
+ Label done, loop;
+ __ bind(&loop);
+ __ Branch(&done, eq, t0, Operand(a2));
+ __ sll(at, t0, kPointerSizeLog2);
+ __ Addu(at, a0, at);
+ __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+ __ Push(at);
+ __ Addu(t0, t0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done);
+ __ Move(a0, t0);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ Label construct;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&construct, ne, a3, Operand(at));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ bind(&construct);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1669,21 +2032,117 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize); // Un-tag.
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
- __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
}
}
// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ sll(t0, a0, kPointerSizeLog2);
+ __ addu(t0, t0, sp);
+ __ sw(at, MemOperand(t0));
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into t0.
+ __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- t0 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ Subu(sp, sp, Operand(t1));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Addu(sp, sp, Operand(t1));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(t1, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, gt, t1, Operand(a0));
+ __ sll(t2, t0, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ lw(at, MemOperand(t2));
+ __ sll(t2, t1, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ sw(at, MemOperand(t2));
+ __ Addu(t0, t0, Operand(1));
+ __ Addu(t1, t1, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+ __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Subu(t0, t0, Operand(1));
+ __ Branch(&done_loop, lt, t0, Operand(zero_reg));
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ addu(t1, t1, a2);
+ __ lw(at, MemOperand(t1));
+ __ sll(t1, a0, kPointerSizeLog2);
+ __ addu(t1, t1, sp);
+ __ sw(at, MemOperand(t1));
+ __ Addu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ lw(at, MemOperand(at));
+ __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -1696,13 +2155,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ GetObjectType(a1, t1, t2);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(a1);
- __ Branch(&non_smi);
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(a1);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ Addu(a0, a0, 2);
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1716,7 +2180,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ addu(at, sp, at);
__ sw(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1726,7 +2190,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1736,10 +2200,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (checked to be a JSFunction)
- // -- a3 : the original constructor (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(a1);
- __ AssertFunction(a3);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
@@ -1755,17 +2218,117 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into t0.
+ __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- t0 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ Subu(sp, sp, Operand(t1));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Addu(sp, sp, Operand(t1));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(t1, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, ge, t1, Operand(a0));
+ __ sll(t2, t0, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ lw(at, MemOperand(t2));
+ __ sll(t2, t1, kPointerSizeLog2);
+ __ addu(t2, t2, sp);
+ __ sw(at, MemOperand(t2));
+ __ Addu(t0, t0, Operand(1));
+ __ Addu(t1, t1, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(t0);
+ __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Subu(t0, t0, Operand(1));
+ __ Branch(&done_loop, lt, t0, Operand(zero_reg));
+ __ sll(t1, t0, kPointerSizeLog2);
+ __ addu(t1, t1, a2);
+ __ lw(at, MemOperand(t1));
+ __ sll(t1, a0, kPointerSizeLog2);
+ __ addu(t1, t1, sp);
+ __ sw(at, MemOperand(t1));
+ __ Addu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ lw(at, MemOperand(at));
+ __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a1 : the constructor to call (checked to be a JSProxy)
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(a1, a3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ Addu(a0, a0, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1774,24 +2337,33 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (can be any Object)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(a1, &non_constructor);
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t2, t2, Operand(1 << Map::kIsCallable));
- __ Branch(&non_constructor, eq, t2, Operand(zero_reg));
// Dispatch based on instance type.
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
+ // Check if target has a [[Construct]] internal method.
+ __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+ eq, t2, Operand(JS_PROXY_TYPE));
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
@@ -1800,7 +2372,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ addu(at, sp, at);
__ sw(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1808,11 +2380,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1822,14 +2391,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
+ // -- a3: new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless as the number of argument should always be greater than 0.
@@ -1839,9 +2406,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
__ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
@@ -1856,7 +2424,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// t1: copy end address
Label copy;
@@ -1888,17 +2456,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t3.
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a0, fp, a0);
// Adjust for return address and receiver.
@@ -1910,7 +2479,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// t3: copy end address
Label copy;
__ bind(&copy);
@@ -1923,7 +2492,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
__ Subu(t1, fp, Operand(t2));
@@ -1943,7 +2512,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(a0, a2);
// a0 : expected number of arguments
// a1 : function (passed through to callee)
- __ Call(a3);
+ // a3 : new target (passed through to callee)
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(t0);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1957,13 +2528,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(a3);
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(t0);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ break_(0xCC);
}
}
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 0b536504c2..f88d3bd5b4 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -291,7 +291,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ GetObjectType(a0, t4, t4);
if (cc == less || cc == greater) {
// Call runtime on identical JSObjects.
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -307,7 +307,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -459,12 +459,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
+ // FIRST_JS_RECEIVER_TYPE.
__ GetObjectType(lhs, a2, a2);
- __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));
// Return non-zero.
Label return_not_equal;
@@ -477,7 +477,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
__ GetObjectType(rhs, a3, a3);
- __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
@@ -539,9 +539,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ li(v0, Operand(1)); // Non-zero indicates not equal.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -728,8 +728,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
@@ -743,9 +742,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -979,7 +977,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1472,15 +1470,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ lw(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(scratch,
- FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
- __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
// Get the "prototype" (or initial map) of the {function}.
__ lw(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1505,25 +1494,49 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ Register const result = v0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Branch(&done, eq, object_prototype, Operand(function_prototype));
- __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
- __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+
+ // Check if the object needs to be access checked.
+ __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
+ // Check if the current object is a Proxy.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ __ Branch(&fast_runtime_fallback, eq, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
+ __ lw(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
+ __ lw(object_map,
+ FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot.
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
__ Ret(USE_DELAY_SLOT);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // Slow-case: Call the runtime function.
+ __ StoreRoot(result,
+ Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ DCHECK(Smi::FromInt(0) == 0);
+ __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -1594,7 +1607,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1622,7 +1635,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1688,7 +1701,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, t0, t5, &runtime, TAG_OBJECT);
+ __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1698,8 +1711,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
+ __ lw(t0, NativeContextMemOperand());
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
@@ -1837,7 +1849,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// t1 = argument count (tagged)
__ bind(&runtime);
__ Push(a1, a3, t1);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1856,7 +1868,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1902,10 +1914,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ lw(t0, FieldMemOperand(t0, JSGlobalObject::kNativeContextOffset));
- __ lw(t0, MemOperand(
- t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
@@ -1953,7 +1962,32 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // a1 : rest parameter index (tagged)
+ // Check if the calling frame is an arguments adaptor frame.
+
+ Label runtime;
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, t1,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer.
+ __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, t0, Operand(t1));
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ Push(a2, a3, a1);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1962,7 +1996,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2247,7 +2281,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, eq, v0, Operand(a1));
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2343,7 +2377,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2389,19 +2423,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
- // t0 : original constructor (for IsSuperConstructorCall)
FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- BoolToInt(is_super) << 8; // t0
+ const RegList kSavedRegs = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -2414,7 +2445,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2422,7 +2453,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
- // t0 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2463,7 +2493,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&megamorphic, ne, a1, Operand(t2));
__ jmp(&done);
@@ -2485,19 +2515,19 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&not_array_function, ne, a1, Operand(t2));
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -2507,7 +2537,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // t0 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2516,29 +2545,23 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetObjectType(a1, t1, t1);
__ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, a2, at);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t1, a2, at);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(a2, t1);
- }
+ __ AssertUndefinedOrAllocationSite(a2, t1);
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mov(a3, t0);
- } else {
- __ mov(a3, a1);
- }
+ // Pass function as new target.
+ __ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2558,7 +2581,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a3 - slot id
// a2 - vector
// t0 - loaded from vector[slot]
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
@@ -2581,11 +2604,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2622,9 +2641,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ bind(&call);
- __ li(a0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ bind(&call_function);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2659,14 +2680,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Addu(t0, a2, Operand(t0));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ lw(t0, FieldMemOperand(a2, with_types_offset));
- __ Subu(t0, t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(a2, with_types_offset));
- __ lw(t0, FieldMemOperand(a2, generic_offset));
- __ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &call);
- __ sw(t0, FieldMemOperand(a2, generic_offset)); // In delay slot.
+
+ __ bind(&call);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&uninitialized);
@@ -2679,13 +2698,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
__ Branch(&miss, eq, a1, Operand(t0));
- // Update stats.
- __ lw(t0, FieldMemOperand(a2, with_types_offset));
- __ Addu(t0, t0, Operand(Smi::FromInt(1)));
- __ sw(t0, FieldMemOperand(a2, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
+ __ lw(t1, NativeContextMemOperand());
+ __ Branch(&miss, ne, t0, Operand(t1));
// Initialize the call counter.
__ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -2705,7 +2725,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(a1);
}
- __ Branch(&call);
+ __ Branch(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2723,7 +2743,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -2791,11 +2811,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
@@ -2823,7 +2843,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, v0);
@@ -2870,7 +2890,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, v0);
call_helper.AfterCall(masm);
@@ -3131,7 +3151,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// v0: original string
@@ -3176,7 +3196,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ mov(v0, a0);
__ bind(&slow_string);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3186,7 +3206,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3203,7 +3223,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3235,7 +3255,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3373,7 +3393,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3412,7 +3432,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
@@ -3705,9 +3725,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3715,18 +3735,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
__ GetObjectType(a0, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(a1, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
@@ -3735,7 +3756,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ And(a2, a1, a0);
@@ -3750,7 +3771,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3758,7 +3779,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(LESS)));
}
__ Push(a1, a0, a2);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -4246,11 +4267,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4273,73 +4294,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : element value to store
- // -- a3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers a1, a2, t0
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ lw(t0, MemOperand(sp, 0 * kPointerSize));
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
-
- __ CheckFastElements(a2, t1, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiElements(a2, t1, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(a1, a3, a0);
- __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
- __ Push(t1, t0);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, t1, t2);
- __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sw(a0, MemOperand(t2, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, t1, t2);
- __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5071,6 +5025,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, t0);
}
+ // Enter the context of the Array function.
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -5090,26 +5047,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ Push(a1);
- __ Push(a3);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ li(at, Operand(2));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ sw(a1, MemOperand(at));
+ __ li(at, Operand(3));
__ addu(a0, a0, at);
break;
case NONE:
- __ li(a0, Operand(2));
+ __ sw(a1, MemOperand(sp, 0 * kPointerSize));
+ __ li(a0, Operand(3));
break;
case ONE:
- __ li(a0, Operand(3));
+ __ sw(a1, MemOperand(sp, 1 * kPointerSize));
+ __ li(a0, Operand(4));
break;
}
-
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(a3, a2);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5195,14 +5152,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ lw(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = result_reg;
}
// Load the PropertyCell value at the specified slot.
__ sll(at, slot_reg, kPointerSizeLog2);
__ Addu(at, at, Operand(context_reg));
- __ lw(result_reg, ContextOperand(at, 0));
+ __ lw(result_reg, ContextMemOperand(at, 0));
__ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
// Check that value is not the_hole.
@@ -5214,7 +5171,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ bind(&slow_case);
__ SmiTag(slot_reg);
__ Push(slot_reg);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5234,14 +5191,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ lw(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = cell_reg;
}
// Load the PropertyCell at the specified slot.
__ sll(at, slot_reg, kPointerSizeLog2);
__ Addu(at, at, Operand(context_reg));
- __ lw(cell_reg, ContextOperand(at, 0));
+ __ lw(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ lw(cell_details_reg,
@@ -5328,8 +5285,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot_reg, value_reg);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5453,7 +5409,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 9009ec2692..751095d8d8 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -140,9 +140,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
@@ -160,8 +159,8 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(),
- 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 4a1255e1b4..2a144d990c 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -18,23 +18,22 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
+byte* fast_exp_mips_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = f12;
@@ -59,11 +58,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_mips_machine_code = buffer;
return &fast_exp_simulator;
@@ -72,7 +71,8 @@ UnaryMathFunction CreateExpFunction() {
#if defined(V8_HOST_ARCH_MIPS)
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
defined(_MIPS_ARCH_MIPS32RX)
return stub;
@@ -80,11 +80,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
@@ -597,23 +598,24 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
__ sqrt_d(f0, f12);
@@ -624,9 +626,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
@@ -1187,15 +1189,17 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->Push(ra, fp, cp, a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
@@ -1239,10 +1243,11 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 22784fcf53..ad7abb30c5 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -7,7 +7,7 @@
#define V8_MIPS_CODEGEN_MIPS_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index b0c2ebbdf8..8327501b6f 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -143,8 +143,11 @@ const int kInvalidFPURegister = -1;
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1 << 31);
const uint64_t kFPU64InvalidResult =
static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -152,12 +155,14 @@ const uint32_t kFCSRUnderflowFlagBit = 3;
const uint32_t kFCSROverflowFlagBit = 4;
const uint32_t kFCSRDivideByZeroFlagBit = 5;
const uint32_t kFCSRInvalidOpFlagBit = 6;
+const uint32_t kFCSRNaN2008FlagBit = 18;
const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
@@ -256,6 +261,7 @@ const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
+const int kLsaSaBits = 2;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
@@ -394,6 +400,7 @@ enum SecondaryField : uint32_t {
SRL = ((0U << 3) + 2),
SRA = ((0U << 3) + 3),
SLLV = ((0U << 3) + 4),
+ LSA = ((0U << 3) + 5),
SRLV = ((0U << 3) + 6),
SRAV = ((0U << 3) + 7),
@@ -772,7 +779,12 @@ enum FPURoundingMode {
kRoundToNearest = RN,
kRoundToZero = RZ,
kRoundToPlusInf = RP,
- kRoundToMinusInf = RM
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
};
const uint32_t kFPURoundingModeMask = 3 << 0;
@@ -901,20 +913,21 @@ class Instruction {
FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) |
FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(SRA) |
FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(SRLV) |
- FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(MFHI) |
- FunctionFieldToBitNumber(MFLO) | FunctionFieldToBitNumber(MULT) |
- FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DIV) |
- FunctionFieldToBitNumber(DIVU) | FunctionFieldToBitNumber(ADD) |
- FunctionFieldToBitNumber(ADDU) | FunctionFieldToBitNumber(SUB) |
- FunctionFieldToBitNumber(SUBU) | FunctionFieldToBitNumber(AND) |
- FunctionFieldToBitNumber(OR) | FunctionFieldToBitNumber(XOR) |
- FunctionFieldToBitNumber(NOR) | FunctionFieldToBitNumber(SLT) |
- FunctionFieldToBitNumber(SLTU) | FunctionFieldToBitNumber(TGE) |
- FunctionFieldToBitNumber(TGEU) | FunctionFieldToBitNumber(TLT) |
- FunctionFieldToBitNumber(TLTU) | FunctionFieldToBitNumber(TEQ) |
- FunctionFieldToBitNumber(TNE) | FunctionFieldToBitNumber(MOVZ) |
- FunctionFieldToBitNumber(MOVN) | FunctionFieldToBitNumber(MOVCI) |
- FunctionFieldToBitNumber(SELEQZ_S) | FunctionFieldToBitNumber(SELNEZ_S);
+ FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(LSA) |
+ FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) |
+ FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(MULTU) |
+ FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DIVU) |
+ FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(ADDU) |
+ FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(SUBU) |
+ FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) |
+ FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) |
+ FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) |
+ FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) |
+ FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) |
+ FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
+ FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
+ FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
+ FunctionFieldToBitNumber(SELNEZ_S);
// Get the encoding type of the instruction.
@@ -948,6 +961,11 @@ class Instruction {
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
+ inline int LsaSaValue() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+ }
+
inline int FunctionValue() const {
DCHECK(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index dff1d30402..1199365b7d 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -23,12 +23,12 @@ namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
+#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
-#if !defined (USE_SIMULATOR)
#if defined(ANDROID)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
@@ -42,14 +42,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // ANDROID
-#else // USE_SIMULATOR.
- // Not generating mips instructions for C-code. This means that we are
- // building a mips emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#endif // USE_SIMULATOR.
+#endif // !USE_SIMULATOR.
}
} // namespace internal
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 8ea1b0bb3e..a9e30de44d 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -38,14 +38,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->break_(0xCC);
}
}
@@ -66,7 +67,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 5502f4170c..936514aab2 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -66,6 +66,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -73,6 +74,7 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintLsaSa(Instruction* instr);
void PrintSd(Instruction* instr);
void PrintSs1(Instruction* instr);
void PrintSs2(Instruction* instr);
@@ -182,6 +184,17 @@ void Decoder::PrintFPURegister(int freg) {
}
+void Decoder::PrintFPUStatusRegister(int freg) {
+ switch (freg) {
+ case kFCSRRegister:
+ Print("FCSR");
+ break;
+ default:
+ Print(converter_.NameOfXMMRegister(freg));
+ }
+}
+
+
void Decoder::PrintFs(Instruction* instr) {
int freg = instr->RsValue();
PrintFPURegister(freg);
@@ -207,6 +220,13 @@ void Decoder::PrintSa(Instruction* instr) {
}
+// Print the integer value of the sa field of a lsa instruction.
+void Decoder::PrintLsaSa(Instruction* instr) {
+ int sa = instr->LsaSaValue() + 1;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
@@ -476,22 +496,42 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register.
- int reg = instr->FsValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'ft: ft register.
- int reg = instr->FtValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'fd: fd register.
- int reg = instr->FdValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'r') { // 'fr: fr register.
- int reg = instr->FrValue();
- PrintFPURegister(reg);
- return 2;
+ if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ }
+ } else {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
}
UNREACHABLE();
return -1;
@@ -651,11 +691,17 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': { // 'sa.
switch (format[1]) {
- case 'a': {
- DCHECK(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
- return 2;
- }
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2
+ PrintLsaSa(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1026,6 +1072,9 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
case SRAV:
Format(instr, "srav 'rd, 'rt, 'rs");
break;
+ case LSA:
+ Format(instr, "lsa 'rd, 'rt, 'rs, 'sa2");
+ break;
case MFHI:
if (instr->Bits(25, 16) == 0) {
Format(instr, "mfhi 'rd");
@@ -1498,7 +1547,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "lui 'rt, 'imm16x");
} else {
if (instr->RsValue() != 0) {
- Format(instr, "aui 'rt, 'imm16x");
+ Format(instr, "aui 'rt, 'rs, 'imm16x");
} else {
Format(instr, "lui 'rt, 'imm16x");
}
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 2fe3554b68..3f4fb38028 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::parameter_count() { return a2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return a1; }
+
+
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -125,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3, a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3, a2, a1};
@@ -187,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // t0 : original constructor (for IsSuperConstructorCall)
+ // t0 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, t0, a2};
@@ -204,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ Register registers[] = {a1, a3, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a1, a0};
@@ -342,6 +375,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // JSFunction
+ a3, // the new target
a0, // actual number of arguments
a2, // expected number of arguments
};
@@ -374,27 +408,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
- a2, // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -410,7 +423,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
- a3, // original constructor
+ a3, // new target
a1, // constructor to call
a2 // address of the first argument
};
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 4a5a386fa0..3c866ac453 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -19,12 +19,13 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
has_double_zero_reg_set_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -433,10 +434,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- lw(scratch, FieldMemOperand(scratch, offset));
- lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1054,6 +1052,19 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (IsMipsArchVariant(kMips32r6) && sa <= 4) {
+ lsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ sll(tmp, rs, sa);
+ Addu(rd, rt, tmp);
+ }
+}
+
+
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
@@ -1267,50 +1278,40 @@ void MacroAssembler::Ins(Register rt,
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
FPURegister scratch) {
- // Move the data from fs to t8.
- mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
-}
-
-
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
+ // In FP64Mode we do convertion from long.
+ if (IsFp64Mode()) {
+ mtc1(rs, scratch);
+ Mthc1(zero_reg, scratch);
+ cvt_d_l(fd, scratch);
+ } else {
+ // Convert rs to a FP value in fd.
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
- DCHECK(!fd.is(scratch));
- DCHECK(!rs.is(t9));
- DCHECK(!rs.is(at));
+ Label msb_clear, conversion_done;
+ // For a value which is < 2^31, regard it as a signed positve word.
+ Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT);
+ mtc1(rs, fd);
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
+ li(at, 0x41F00000); // FP value: 2^32.
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
+ // For unsigned inputs > 2^31, we convert to double as a signed int32,
+ // then add 2^32 to move it back to unsigned value in range 2^31..2^31-1.
+ mtc1(zero_reg, scratch);
+ Mthc1(at, scratch);
- Label conversion_done;
+ cvt_d_w(fd, fd);
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_d(fd, fd, scratch);
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(zero_reg, scratch);
- Mthc1(at, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
+ bind(&msb_clear);
+ cvt_d_w(fd, fd);
- bind(&conversion_done);
+ bind(&conversion_done);
+ }
}
@@ -1438,13 +1439,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (!IsMipsArchVariant(kMips32r6)) {
if (long_branch) {
Label skip;
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1f(&skip);
nop();
BranchLong(nan, bd);
bind(&skip);
} else {
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
@@ -1456,13 +1457,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
BranchLong(nan, bd);
bind(&skip);
} else {
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
@@ -3270,12 +3271,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(t9));
- DCHECK(!scratch2.is(t9));
- DCHECK(!result.is(t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3291,54 +3287,52 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ lw(result, MemOperand(top_address));
+ lw(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ lw(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ lw(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
+ And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
}
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Addu(scratch2, result, Operand(object_size));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sw(scratch2, MemOperand(topaddr));
+ Addu(result_end, result, Operand(object_size));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
+ sw(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3347,28 +3341,25 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
+ li(scratch, 0x7191);
+ li(result_end, 0x7291);
}
jmp(gc_required);
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(t9));
- DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, t9));
+ DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3377,45 +3368,42 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- lw(result, MemOperand(topaddr));
- lw(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ lw(result, MemOperand(top_address));
+ lw(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- lw(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ lw(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- lw(t9, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ lw(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
- And(scratch2, result, Operand(kDoubleAlignmentMask));
+ And(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- Branch(&aligned, eq, scratch2, Operand(zero_reg));
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
if ((flags & PRETENURE) != 0) {
- Branch(gc_required, Ugreater_equal, result, Operand(t9));
+ Branch(gc_required, Ugreater_equal, result, Operand(alloc_limit));
}
- li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- sw(scratch2, MemOperand(result));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
Addu(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
}
@@ -3424,19 +3412,19 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- sll(scratch2, object_size, kPointerSizeLog2);
- Addu(scratch2, result, scratch2);
+ sll(result_end, object_size, kPointerSizeLog2);
+ Addu(result_end, result, result_end);
} else {
- Addu(scratch2, result, Operand(object_size));
+ Addu(result_end, result, Operand(object_size));
}
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
}
- sw(scratch2, MemOperand(topaddr));
+ sw(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3612,29 +3600,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.reg_code = i;
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- lw(tmp, FieldMemOperand(src, i * kPointerSize));
- sw(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ sw(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sw(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ sw(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -3706,16 +3690,16 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
Branch(&entry);
bind(&loop);
- sw(filler, MemOperand(start_offset));
- Addu(start_offset, start_offset, kPointerSize);
+ sw(filler, MemOperand(current_address));
+ Addu(current_address, current_address, kPointerSize);
bind(&entry);
- Branch(&loop, ult, start_offset, Operand(end_offset));
+ Branch(&loop, ult, current_address, Operand(end_address));
}
@@ -3766,6 +3750,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch3,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
+ scratch3));
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3820,7 +3806,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
- Register untagged_value = elements_reg;
+ Register untagged_value = scratch2;
SmiUntag(untagged_value, value_reg);
mtc1(untagged_value, f2);
cvt_d_w(f0, f2);
@@ -3985,8 +3971,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4006,7 +3990,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(a0));
DCHECK(expected.is_immediate() || expected.reg().is(a2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4034,11 +4017,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -4056,21 +4034,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ li(t0, Operand(step_in_enabled));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(a1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- Label done;
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ }
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = t0;
+ lw(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4087,6 +4122,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -4096,18 +4132,18 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function.is(a1));
Register expected_reg = a2;
- Register code_reg = a3;
+ Register temp_reg = t0;
- lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
sra(expected_reg, expected_reg, kSmiTagSize);
- lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(function, new_target, expected, actual, flag,
+ call_wrapper);
}
@@ -4125,11 +4161,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4301,108 +4333,161 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::AddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
if (right.is_reg()) {
- AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- Addu(dst, left, right.immediate()); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- // Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(overflow_dst, dst, t9);
- and_(overflow_dst, overflow_dst, scratch);
+ if (IsMipsArchVariant(kMips32r6)) {
+ Register right_reg = t9;
+ DCHECK(!left.is(right_reg));
+ li(right_reg, Operand(right));
+ AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
} else {
- Addu(dst, left, right.immediate());
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, dst, t9);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Addu(dst, left, right.immediate()); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ // Load right since xori takes uint16 as immediate.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Addu(dst, left, right.immediate());
+ xor_(overflow_dst, dst, left);
+ // Load right since xori takes uint16 as immediate.
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, dst, scratch);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
}
-void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- if (left.is(right) && dst.is(left)) {
- DCHECK(!dst.is(t9));
- DCHECK(!scratch.is(t9));
- DCHECK(!left.is(t9));
- DCHECK(!right.is(t9));
- DCHECK(!overflow_dst.is(t9));
- mov(t9, right);
- right = t9;
- }
-
- if (dst.is(left)) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst.is(right)) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
+void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ DCHECK(!dst.is(scratch));
+ Register left_reg = left.is(dst) ? scratch : left;
+ Register right_reg = right.is(dst) ? t9 : right;
+ DCHECK(!dst.is(left_reg));
+ DCHECK(!dst.is(right_reg));
+ Move(left_reg, left);
+ Move(right_reg, right);
+ addu(dst, left, right);
+ bnvc(left_reg, right_reg, no_overflow_label);
+ } else {
+ bovc(left, right, overflow_label);
+ addu(dst, left, right);
+ if (no_overflow_label) bc(no_overflow_label);
+ }
} else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ addu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ addu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ addu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right,
- Register overflow_dst,
- Register scratch) {
+void MacroAssembler::SubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
if (right.is_reg()) {
- SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+ SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
} else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
if (dst.is(left)) {
mov(scratch, left); // Preserve left.
- Subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
+ Subu(dst, left, right.immediate()); // Left is overwritten.
// Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, scratch, t9); // scratch is original left.
+ Addu(overflow_dst, zero_reg, right);
+ xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
+ xor_(scratch, dst, scratch); // scratch is original left.
and_(overflow_dst, scratch, overflow_dst);
} else {
Subu(dst, left, right);
xor_(overflow_dst, dst, left);
// Load right since xori takes uint16 as immediate.
- Addu(t9, zero_reg, right);
- xor_(scratch, left, t9);
+ Addu(scratch, zero_reg, right);
+ xor_(scratch, left, scratch);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
}
-void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
+void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
DCHECK(!overflow_dst.is(left));
DCHECK(!overflow_dst.is(right));
DCHECK(!scratch.is(left));
@@ -4412,8 +4497,9 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
// left == right, let's not make that restriction here.
if (left.is(right)) {
mov(dst, zero_reg);
- mov(overflow_dst, zero_reg);
- return;
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
}
if (dst.is(left)) {
@@ -4434,6 +4520,7 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
xor_(scratch, left, right);
and_(overflow_dst, scratch, overflow_dst);
}
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
@@ -4469,24 +4556,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -4508,34 +4584,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- lw(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, native_context_index);
- // Load the code entry point from the builtins object.
- lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, a1);
+ InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -4672,47 +4724,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- lw(dst, GlobalObjectOperand());
- lw(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- lw(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- lw(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(at, FieldMemOperand(scratch, offset));
+ lw(scratch, NativeContextMemOperand());
+ lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- lw(map_in_out, FieldMemOperand(scratch, offset));
+ lw(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- lw(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- lw(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- lw(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ lw(dst, NativeContextMemOperand());
+ lw(dst, ContextMemOperand(dst, index));
}
@@ -5115,6 +5149,17 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5385,8 +5430,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -5420,28 +5465,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- DCHECK(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5457,112 +5480,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- sll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, FieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- srl(t9, t9, 1);
- bind(&skip);
- }
- Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ Branch(value_is_white, eq, t8, Operand(zero_reg));
}
@@ -5767,17 +5701,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -5788,18 +5718,19 @@ bool AreAliased(Register reg1,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
@@ -5811,7 +5742,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 8890be8131..4f6a3c868b 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -23,6 +23,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
const Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
@@ -98,26 +99,23 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
- Register reg8 = no_reg);
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -140,11 +138,8 @@ inline MemOperand CFunctionArgumentOperand(int index) {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
@@ -386,22 +381,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -538,12 +521,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_new,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -587,6 +566,12 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -648,7 +633,10 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
+ void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
void Pref(int32_t hint, const MemOperand& rs);
@@ -781,7 +769,6 @@ class MacroAssembler: public Assembler {
// FPU macros. These do not handle special cases like NaN or +- inf.
// Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
// Convert double to unsigned word.
@@ -941,8 +928,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -955,7 +949,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -973,15 +967,20 @@ class MacroAssembler: public Assembler {
// JavaScript invokes.
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1021,9 +1020,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
@@ -1032,12 +1028,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// -------------------------------------------------------------------------
// Support functions.
@@ -1187,45 +1182,42 @@ class MacroAssembler: public Assembler {
// Usage: first call the appropriate arithmetic function, then call one of the
// jump functions with the overflow_dst register as the second parameter.
- void AdduAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
+ inline void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
- void AdduAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch = at);
+ inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
- void SubuAndCheckForOverflow(Register dst,
- Register left,
- Register right,
- Register overflow_dst,
- Register scratch = at);
+ void AddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
- void SubuAndCheckForOverflow(Register dst, Register left,
- const Operand& right, Register overflow_dst,
- Register scratch = at);
+ void AddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
- void BranchOnOverflow(Label* label,
- Register overflow_check,
- BranchDelaySlot bd = PROTECT) {
- Branch(label, lt, overflow_check, Operand(zero_reg), bd);
- }
- void BranchOnNoOverflow(Label* label,
- Register overflow_check,
- BranchDelaySlot bd = PROTECT) {
- Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ inline void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
}
- void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
- Ret(lt, overflow_check, Operand(zero_reg), bd);
+ inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
}
- void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
- Ret(ge, overflow_check, Operand(zero_reg), bd);
- }
+ void SubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void SubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1262,6 +1254,14 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
}
// Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles, bd);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT) {
@@ -1273,17 +1273,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
int num_arguments,
BranchDelaySlot bd = PROTECT);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1339,13 +1331,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in a1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1411,14 +1396,23 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Addu(reg, reg, reg);
}
+ void SmiTag(Register dst, Register src) { Addu(dst, src, src); }
+
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
void SmiTagCheckOverflow(Register reg, Register overflow);
void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
- void SmiTag(Register dst, Register src) {
- Addu(dst, src, src);
+ void BranchOnOverflow(Label* label, Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, lt, overflow_check, Operand(zero_reg), bd);
}
+ void BranchOnNoOverflow(Label* label, Register overflow_check,
+ BranchDelaySlot bd = PROTECT) {
+ Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+ }
+
+
// Try to convert int32 to smi. If the value is to large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
@@ -1488,6 +1482,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1674,8 +1672,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1729,8 +1725,7 @@ class CodePatcher {
DONT_FLUSH
};
- CodePatcher(byte* address,
- int instructions,
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index e9dd0d32dc..aa4224a54c 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -589,7 +589,7 @@ void MipsDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -968,7 +968,12 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumFPURegisters; i++) {
FPUregisters_[i] = 0;
}
- FCSR_ = 0;
+ if (IsMipsArchVariant(kMips32r6)) {
+ FCSR_ = kFCSRNaN2008FlagMask;
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
+ FCSR_ = 0;
+ }
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
@@ -995,12 +1000,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -1016,14 +1021,13 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -1068,9 +1072,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -1296,6 +1301,129 @@ unsigned int Simulator::get_fcsr_rounding_mode() {
}
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
@@ -1332,6 +1460,8 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(double original, double rounded) {
bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1349,7 +1479,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1396,6 +1526,8 @@ bool Simulator::set_fcsr_round_error(float original, float rounded) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(float original, float rounded) {
bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1413,7 +1545,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -2372,11 +2504,13 @@ void Simulator::DecodeTypeRegisterDRsType() {
set_fpu_register_double(fd_reg(), -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_D: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- double result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ double result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_double(fd_reg(), result);
break;
}
@@ -2413,7 +2547,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case ROUND_W_D: // Round double to word (round half to even).
@@ -2427,7 +2561,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
@@ -2436,7 +2570,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case FLOOR_W_D: // Round double to word towards negative infinity.
@@ -2445,7 +2579,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CEIL_W_D: // Round double to word towards positive infinity.
@@ -2454,7 +2588,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CVT_S_D: // Convert double to float (single).
@@ -2467,7 +2601,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2482,7 +2616,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2502,7 +2636,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2516,7 +2650,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2530,7 +2664,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2775,11 +2909,13 @@ void Simulator::DecodeTypeRegisterSRsType() {
set_fpu_register_float(fd_reg(), -fs);
break;
case SQRT_S:
- set_fpu_register_float(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_S: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- float result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ float result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_float(fd_reg(), result);
break;
}
@@ -2931,7 +3067,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case TRUNC_L_S: { // Mips32r2 instruction.
@@ -2941,7 +3077,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2954,7 +3090,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case FLOOR_L_S: { // Mips32r2 instruction.
@@ -2964,7 +3100,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -2981,7 +3117,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2998,7 +3134,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -3011,7 +3147,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CEIL_L_S: { // Mips32r2 instruction.
@@ -3021,7 +3157,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
if (IsFp64Mode()) {
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -3103,7 +3239,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
} else {
UNSUPPORTED();
@@ -3116,7 +3252,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -3245,11 +3381,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
case MFHC1:
set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
- case CTC1:
+ case CTC1: {
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
- FCSR_ = registers_[rt_reg()];
+ int32_t reg = registers_[rt_reg()];
+ if (IsMipsArchVariant(kMips32r6)) {
+ FCSR_ = reg | kFCSRNaN2008FlagMask;
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
+ FCSR_ = reg & ~kFCSRNaN2008FlagMask;
+ }
break;
+ }
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg(), 0);
@@ -3371,9 +3514,19 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
SetResult(rd_reg(), static_cast<int32_t>(alu_out));
break;
case SRAV:
- alu_out = rt() >> rs();
- SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ SetResult(rd_reg(), rt() >> rs());
+ break;
+ case LSA: {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ int8_t sa = lsa_sa() + 1;
+ int32_t _rt = rt();
+ int32_t _rs = rs();
+ int32_t res = _rs << sa;
+ res += _rt;
+ DCHECK_EQ(res, (rs() << (lsa_sa() + 1)) + rt());
+ SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt());
break;
+ }
case MFHI: // MFHI == CLZ on R6.
if (!IsMipsArchVariant(kMips32r6)) {
DCHECK(sa() == 0);
@@ -3947,7 +4100,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg != 0) { // BEQZC
BranchCompactHelper(rs == 0, 21);
} else { // JIC
- CheckForbiddenSlot(get_pc());
next_pc = rt + imm16;
}
break;
@@ -3955,9 +4107,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg != 0) { // BNEZC
BranchCompactHelper(rs != 0, 21);
} else { // JIALC
- int32_t current_pc = get_pc();
- CheckForbiddenSlot(current_pc);
- set_register(31, current_pc + Instruction::kInstrSize);
+ set_register(31, get_pc() + Instruction::kInstrSize);
next_pc = rt + imm16;
}
break;
@@ -4040,7 +4190,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
SetResult(rt_reg, rs ^ oe_imm16);
break;
case LUI:
- SetResult(rt_reg, oe_imm16 << 16);
+ if (rs_reg != 0) {
+ // AUI
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ SetResult(rt_reg, rs + (se_imm16 << 16));
+ } else {
+ // LUI
+ SetResult(rt_reg, oe_imm16 << 16);
+ }
break;
// ------------- Memory instructions.
case LB:
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 624d4acf80..8efe0bba9c 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
@@ -34,9 +34,10 @@ typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+ p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -48,11 +49,13 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
} // namespace internal
@@ -168,6 +171,12 @@ class Simulator {
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
@@ -335,6 +344,7 @@ class Simulator {
inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
inline int32_t sa() const { return currentInstr_->SaValue(); }
+ inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
inline void SetResult(int32_t rd_reg, int32_t alu_out) {
set_register(rd_reg, alu_out);
@@ -408,7 +418,8 @@ class Simulator {
void SignalException(Exception e);
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -464,13 +475,14 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current())->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate) \
+ ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -484,13 +496,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index e35bf2facb..09436ed1d4 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -92,7 +92,7 @@ void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
byte* p = reinterpret_cast<byte*>(pc_);
int count = Assembler::RelocateInternalReference(rmode_, p, delta);
- CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+ Assembler::FlushICache(isolate_, p, count * sizeof(uint32_t));
}
}
@@ -144,7 +144,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -178,7 +179,7 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
DCHECK(IsJ(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
@@ -206,7 +207,7 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, host_,
+ Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
@@ -308,8 +309,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
- host_,
+ Assembler::set_target_address_at(isolate_, pc_ + Assembler::kInstrSize, host_,
stub->instruction_start());
}
@@ -326,7 +326,7 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
// The pc_ offset of 0 assumes patched debug break slot or return
// sequence.
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -344,7 +344,7 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -438,23 +438,59 @@ void Assembler::CheckTrampolinePoolQuick(int extra_instructions) {
}
-void Assembler::emit(Instr x) {
+void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
+ if (IsPrevInstrCompactBranch()) {
+ // Nop instruction to preceed a CTI in forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+
+ ClearCompactBranchState();
+ }
+}
+
+
+void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
+ if (IsPrevInstrCompactBranch()) {
+ if (Instruction::IsForbiddenAfterBranchInstr(x)) {
+ // Nop instruction to preceed a CTI in forbidden slot:
+ Instr nop = SPECIAL | SLL;
+ *reinterpret_cast<Instr*>(pc_) = nop;
+ pc_ += kInstrSize;
+ }
+ ClearCompactBranchState();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) {
+ EmittedCompactBranchInstruction();
+ }
CheckTrampolinePoolQuick();
}
-void Assembler::emit(uint64_t x) {
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+ CheckTrampolinePoolQuick();
+}
+
+
+void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
- *reinterpret_cast<uint64_t*>(pc_) = x;
- pc_ += kInstrSize * 2;
- CheckTrampolinePoolQuick();
+ EmitHelper(x, is_compact_branch);
+}
+
+
+void Assembler::emit(uint64_t data) {
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index e0f12ed020..9c313a18d6 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -264,6 +264,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
+ EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -272,11 +273,13 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -433,19 +436,38 @@ bool Assembler::IsBranch(Instr instr) {
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
// Checks if the instruction is a branch.
- return opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL ||
+ bool isBranch =
+ opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ ||
+ opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
(opcode == COP1 && rs_field == BC1NEZ);
+ if (!isBranch && kArchVariant == kMips64r6) {
+ // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
+ // POP30 (BNVC, BNEC, BNEZALC) are branch ops.
+ isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC ||
+ opcode == BALC ||
+ (opcode == POP66 && rs_field != 0) || // BEQZC
+ (opcode == POP76 && rs_field != 0); // BNEZC
+ }
+ return isBranch;
+}
+
+
+bool Assembler::IsBc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a BC or BALC.
+ return opcode == BC || opcode == BALC;
+}
+
+
+bool Assembler::IsBzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is BEQZC or BNEZC.
+ return (opcode == POP66 && GetRsField(instr) != 0) ||
+ (opcode == POP76 && GetRsField(instr) != 0);
}
@@ -465,6 +487,34 @@ bool Assembler::IsBne(Instr instr) {
}
+bool Assembler::IsBeqzc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP66 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBnezc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ return opcode == POP76 && GetRsField(instr) != 0;
+}
+
+
+bool Assembler::IsBeqc(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
+}
+
+
+bool Assembler::IsBnec(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs = GetRsField(instr);
+ uint32_t rt = GetRtField(instr);
+ return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0
+}
+
+
bool Assembler::IsJump(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
@@ -544,7 +594,7 @@ int32_t Assembler::GetBranchOffset(Instr instr) {
bool Assembler::IsLw(Instr instr) {
- return ((instr & kOpcodeMask) == LW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
@@ -566,7 +616,7 @@ Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
bool Assembler::IsSw(Instr instr) {
- return ((instr & kOpcodeMask) == SW);
+ return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
@@ -592,6 +642,36 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (kArchVariant == kMips64r6) {
+ if (Assembler::IsBc(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBzc(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ }
+ }
+ return Assembler::OffsetSize::kOffset16;
+}
+
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ int bits = OffsetSizeInBits(instr);
+ const int32_t mask = (1 << bits) - 1;
+ bits = 32 - bits;
+
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ int32_t imm = ((instr & mask) << bits) >> (bits - 2);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + Assembler::kBranchPCOffset + imm;
+ }
+}
+
+
int Assembler::target_at(int pos, bool is_internal) {
if (is_internal) {
int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
@@ -621,13 +701,7 @@ int Assembler::target_at(int pos, bool is_internal) {
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
- } else {
- return pos + kBranchPCOffset + imm18;
- }
+ return AddBranchOffset(pos, instr);
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -667,6 +741,21 @@ int Assembler::target_at(int pos, bool is_internal) {
}
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset);
+ DCHECK((imm & 3) == 0);
+ imm >>= 2;
+
+ const int32_t mask = (1 << bits) - 1;
+ instr &= ~mask;
+ DCHECK(is_intn(imm, bits));
+
+ return instr | (imm & mask);
+}
+
+
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
if (is_internal) {
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
@@ -683,14 +772,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
}
if (IsBranch(instr)) {
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- DCHECK((imm18 & 3) == 0);
-
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- DCHECK(is_int16(imm16));
-
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
@@ -779,22 +862,25 @@ void Assembler::bind_to(Label* L, int pos) {
Instr instr = instr_at(fixup_pos);
if (is_internal) {
target_at_put(fixup_pos, pos, is_internal);
- } else if (IsBranch(instr)) {
- if (dist > kMaxBranchOffset) {
- if (trampoline_pos == kInvalidSlotPos) {
- trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK(trampoline_pos != kInvalidSlotPos);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
}
- CHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos, false);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
+ IsEmittedConstant(instr));
+ target_at_put(fixup_pos, pos, false);
}
- target_at_put(fixup_pos, pos, false);
- } else {
- DCHECK(IsJ(instr) || IsJal(instr) || IsLui(instr) ||
- IsEmittedConstant(instr));
- target_at_put(fixup_pos, pos, false);
}
}
L->bind_to(pos);
@@ -825,10 +911,48 @@ void Assembler::next(Label* L, bool is_internal) {
bool Assembler::is_near(Label* L) {
- if (L->is_bound()) {
- return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+}
+
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ if (L == nullptr || !L->is_bound()) return true;
+ return ((pc_offset() - L->pos()) <
+ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
+}
+
+
+bool Assembler::is_near_branch(Label* L) {
+ DCHECK(L->is_bound());
+ return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
+}
+
+
+int Assembler::BranchOffset(Instr instr) {
+ // At pre-R6 and for other R6 branches the offset is 16 bits.
+ int bits = OffsetSize::kOffset16;
+
+ if (kArchVariant == kMips64r6) {
+ uint32_t opcode = GetOpcodeField(instr);
+ switch (opcode) {
+ // Checks BC or BALC.
+ case BC:
+ case BALC:
+ bits = OffsetSize::kOffset26;
+ break;
+
+ // Checks BEQZC or BNEZC.
+ case POP66:
+ case POP76:
+ if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21;
+ break;
+ default:
+ break;
+ }
}
- return false;
+
+ return (1 << (bits + 2 - 1)) - 1;
}
@@ -919,49 +1043,56 @@ void Assembler::GenInstrRegister(Opcode opcode,
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode,
- Register rs,
- FPURegister ft,
- int32_t j) {
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
+ int32_t j,
+ CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch) {
+ DCHECK(rs.is_valid() && (is_int21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
+ emit(instr, is_compact_branch);
}
-void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t j) {
- DCHECK(rs.is_valid() && (is_uint21(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (j & kImm21Mask);
+void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
+ uint32_t offset21) {
+ DCHECK(rs.is_valid() && (is_uint21(offset21)));
+ Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask);
emit(instr);
}
-void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26) {
+void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch) {
DCHECK(is_int26(offset26));
Instr instr = opcode | (offset26 & kImm26Mask);
- emit(instr);
+ emit(instr, is_compact_branch);
}
@@ -1013,87 +1144,38 @@ uint64_t Assembler::jump_address(Label* L) {
uint64_t Assembler::jump_offset(Label* L) {
int64_t target_pos;
+ int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos(); // L's link.
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
} else {
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
return kEndOfJumpChain;
}
}
- int64_t imm = target_pos - pc_offset();
+ int64_t imm = target_pos - (pc_offset() + pad);
DCHECK((imm & 3) == 0);
return static_cast<uint64_t>(imm);
}
-int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
+ int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
- int32_t offset = target_pos - pc_offset();
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
if (L->is_bound()) {
target_pos = L->pos();
} else {
if (L->is_linked()) {
target_pos = L->pos();
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
} else {
- L->link_to(pc_offset());
+ L->link_to(pc_offset() + pad);
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
@@ -1102,36 +1184,9 @@ int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
}
}
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
+ DCHECK(is_intn(offset, bits + 2));
DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
return offset;
}
@@ -1178,14 +1233,14 @@ void Assembler::bal(int16_t offset) {
void Assembler::bc(int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
- GenInstrImmediate(BC, offset);
+ GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::balc(int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
positions_recorder()->WriteRecordedPositions();
- GenInstrImmediate(BALC, offset);
+ GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1206,7 +1261,7 @@ void Assembler::bgez(Register rs, int16_t offset) {
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, rt, rt, offset);
+ GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1215,7 +1270,7 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZ, rs, rt, offset);
+ GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1224,7 +1279,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZL, rs, rt, offset);
+ GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1247,7 +1302,8 @@ void Assembler::bgtz(Register rs, int16_t offset) {
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
@@ -1261,14 +1317,15 @@ void Assembler::blez(Register rs, int16_t offset) {
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, rt, rt, offset);
+ DCHECK(!rt.is(zero_reg));
+ GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1277,16 +1334,16 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(!(rs.is(zero_reg)));
DCHECK(!(rt.is(zero_reg)));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZ, rs, rt, offset);
+ GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!rs.is(zero_reg));
+ DCHECK(!rt.is(zero_reg));
DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZL, rs, rt, offset);
+ GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1317,7 +1374,7 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1325,21 +1382,24 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1347,6 +1407,7 @@ void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6);
DCHECK(!(rs.is(zero_reg)));
BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1355,58 +1416,71 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, rt, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(ADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(ADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(DADDI, zero_reg, rt, offset);
+ positions_recorder()->WriteRecordedPositions();
+ GenInstrImmediate(DADDI, zero_reg, rt, offset,
+ CompactBranchType::COMPACT_BRANCH);
}
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP66 | (rs.code() << kRsShift) | (offset & kImm21Mask);
- emit(instr);
+ GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
+ DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
+ if (rs.code() < rt.code()) {
+ GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
+ } else {
+ GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
+ }
}
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rs.is(zero_reg)));
- Instr instr = POP76 | (rs.code() << kRsShift) | offset;
- emit(instr);
+ GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1477,9 +1551,7 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::jic(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- Instr instr = POP66 | (JIC << kRsShift) | (rt.code() << kRtShift) |
- (offset & kImm16Mask);
- emit(instr);
+ GenInstrImmediate(POP66, zero_reg, rt, offset);
}
@@ -1715,7 +1787,7 @@ void Assembler::sll(Register rd,
// nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
// instructions.
DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
@@ -1725,7 +1797,7 @@ void Assembler::sllv(Register rd, Register rt, Register rs) {
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
@@ -1735,7 +1807,7 @@ void Assembler::srlv(Register rd, Register rt, Register rs) {
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
@@ -1756,7 +1828,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
@@ -1765,7 +1837,7 @@ void Assembler::rotrv(Register rd, Register rt, Register rs) {
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}
@@ -1775,7 +1847,7 @@ void Assembler::dsllv(Register rd, Register rt, Register rs) {
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}
@@ -1801,7 +1873,7 @@ void Assembler::drotrv(Register rd, Register rt, Register rs) {
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}
@@ -1811,17 +1883,37 @@ void Assembler::dsrav(Register rd, Register rt, Register rs) {
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
+}
+
+
+void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ DCHECK(sa < 5 && sa > 0);
+ DCHECK(kArchVariant == kMips64r6);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa - 1) << kSaShift | LSA;
+ emit(instr);
+}
+
+
+void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ DCHECK(sa < 5 && sa > 0);
+ DCHECK(kArchVariant == kMips64r6);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa - 1) << kSaShift | DLSA;
+ emit(instr);
}
@@ -1954,17 +2046,17 @@ void Assembler::lui(Register rd, int32_t j) {
}
-void Assembler::aui(Register rs, Register rt, int32_t j) {
+void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
- DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
}
-void Assembler::daui(Register rs, Register rt, int32_t j) {
+void Assembler::daui(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
+ DCHECK(!rs.is(zero_reg));
GenInstrImmediate(DAUI, rs, rt, j);
}
@@ -2026,7 +2118,7 @@ void Assembler::sd(Register rd, const MemOperand& rs) {
void Assembler::addiupc(Register rs, int32_t imm19) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int19(imm19));
- int32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
+ uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2034,7 +2126,7 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
void Assembler::lwpc(Register rs, int32_t offset19) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
- int32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
+ uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2042,7 +2134,7 @@ void Assembler::lwpc(Register rs, int32_t offset19) {
void Assembler::lwupc(Register rs, int32_t offset19) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
- int32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
+ uint32_t imm21 = LWUPC << kImm19Bits | (offset19 & kImm19Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2050,23 +2142,23 @@ void Assembler::lwupc(Register rs, int32_t offset19) {
void Assembler::ldpc(Register rs, int32_t offset18) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(rs.is_valid() && is_int18(offset18));
- int32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
+ uint32_t imm21 = LDPC << kImm18Bits | (offset18 & kImm18Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::auipc(Register rs, int16_t imm16) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
void Assembler::aluipc(Register rs, int16_t imm16) {
DCHECK(kArchVariant == kMips64r6);
- DCHECK(rs.is_valid() && is_int16(imm16));
- int32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
+ DCHECK(rs.is_valid());
+ uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask);
GenInstrImmediate(PCREL, rs, imm21);
}
@@ -2311,6 +2403,14 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
}
+void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dins.
+ // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
+}
+
+
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2320,13 +2420,29 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
- // Should be called via MacroAssembler::Ext.
+ // Should be called via MacroAssembler::Dext.
// Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}
+void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dextm.
+ // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
+}
+
+
+void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dextu.
+ // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
+}
+
+
void Assembler::bitswap(Register rd, Register rt) {
DCHECK(kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
@@ -2938,7 +3054,6 @@ void Assembler::bc1t(int16_t offset, uint16_t cc) {
}
-// Debugging.
int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta) {
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2992,6 +3107,8 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
instr_at_put(pc, instr | (imm26 & kImm26Mask));
return 1; // Number of instructions patched.
} else {
+ DCHECK(((instr & kJumpRawMask) == kJRawMark) ||
+ ((instr & kJumpRawMask) == kJalRawMark));
// Unbox raw offset and emit j/jal.
int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
// Sign extend 28-bit offset to 32-bit.
@@ -3023,6 +3140,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
@@ -3057,54 +3175,42 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
- CheckBuffer();
- *reinterpret_cast<uint8_t*>(pc_) = data;
- pc_ += sizeof(uint8_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(uint32_t data) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) = data;
- pc_ += sizeof(uint32_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dq(uint64_t data) {
- CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) = data;
- pc_ += sizeof(uint64_t);
+ CheckForEmitInForbiddenSlot();
+ EmitHelper(data);
}
void Assembler::dd(Label* label) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
uint64_t data;
+ CheckForEmitInForbiddenSlot();
if (label->is_bound()) {
data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
} else {
data = jump_address(label);
internal_reference_positions_.insert(label->pos());
}
- *reinterpret_cast<uint64_t*>(pc_) = data;
- pc_ += sizeof(uint64_t);
-}
-
-
-void Assembler::emit_code_stub_address(Code* stub) {
- CheckBuffer();
- *reinterpret_cast<uint64_t*>(pc_) =
- reinterpret_cast<uint64_t>(stub->instruction_start());
- pc_ += sizeof(uint64_t);
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
if (rmode >= RelocInfo::COMMENT &&
- rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CONSTRUCT_CALL) {
+ rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
|| RelocInfo::IsComment(rmode)
@@ -3119,10 +3225,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
- RecordedAstId().ToInt(),
- NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), pc_, rmode,
+ RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -3162,9 +3266,14 @@ void Assembler::CheckTrampolinePool() {
// First we emit jump (2 instructions), then we emit trampoline pool.
{ BlockTrampolinePoolScope block_trampoline_pool(this);
Label after_pool;
- b(&after_pool);
- nop();
+ if (kArchVariant == kMips64r6) {
+ bc(&after_pool);
+ } else {
+ b(&after_pool);
+ nop();
+ }
+ EmitForbiddenSlotInstruction();
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{ BlockGrowBufferScope block_buf_growth(this);
@@ -3240,7 +3349,7 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Address pc,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
// There is an optimization where only 4 instructions are used to load address
@@ -3273,7 +3382,7 @@ void Assembler::set_target_address_at(Address pc,
| (itarget & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index f0f54aab1c..f8d315d835 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -411,27 +411,46 @@ class Assembler : public AssemblerBase {
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
// Determines if Label is bound and near enough so that branch instruction
// can be used to reach it, instead of jump instruction.
bool is_near(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_branch(Label* L);
+ inline bool is_near_pre_r6(Label* L) {
+ DCHECK(!(kArchVariant == kMips64r6));
+ return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
+ }
+ inline bool is_near_r6(Label* L) {
+ DCHECK(kArchVariant == kMips64r6);
+ return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
+ }
+
+ int BranchOffset(Instr instr);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
// Manages the jump elimination optimization if the second parameter is true.
- int32_t branch_offset(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
- int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
- int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
- int32_t o = branch_offset(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
- }
- int32_t shifted_branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t o = branch_offset_compact(L, jump_elimination_allowed);
- DCHECK((o & 3) == 0); // Assert the offset is aligned.
- return o >> 2;
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
}
uint64_t jump_address(Label* L);
uint64_t jump_offset(Label* L);
@@ -442,30 +461,28 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
- static void set_target_address_at(Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED);
+ static void set_target_address_at(
+ Isolate* isolate, Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// On MIPS there is no Constant Pool so we skip that parameter.
INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
return target_address_at(pc);
}
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- set_target_address_at(pc, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, target, icache_flush_mode);
}
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- INLINE(static void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED)) {
+ INLINE(static void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -480,16 +497,17 @@ class Assembler : public AssemblerBase {
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
set_target_address_at(
- instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
- code,
+ isolate,
+ instruction_payload - kInstructionsFor64BitConstant * kInstrSize, code,
target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -573,112 +591,111 @@ class Assembler : public AssemblerBase {
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
- void b(Label* L) { b(branch_offset(L, false)>>2); }
+ inline void b(Label* L) { b(shifted_branch_offset(L)); }
void bal(int16_t offset);
- void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+ inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
void bc(int32_t offset);
- void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+ inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
void balc(int32_t offset);
- void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
+ inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
void beq(Register rs, Register rt, int16_t offset);
- void beq(Register rs, Register rt, Label* L) {
- beq(rs, rt, branch_offset(L, false) >> 2);
+ inline void beq(Register rs, Register rt, Label* L) {
+ beq(rs, rt, shifted_branch_offset(L));
}
void bgez(Register rs, int16_t offset);
void bgezc(Register rt, int16_t offset);
- void bgezc(Register rt, Label* L) {
- bgezc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezc(Register rt, Label* L) {
+ bgezc(rt, shifted_branch_offset(L));
}
void bgeuc(Register rs, Register rt, int16_t offset);
- void bgeuc(Register rs, Register rt, Label* L) {
- bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgeuc(Register rs, Register rt, Label* L) {
+ bgeuc(rs, rt, shifted_branch_offset(L));
}
void bgec(Register rs, Register rt, int16_t offset);
- void bgec(Register rs, Register rt, Label* L) {
- bgec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bgec(Register rs, Register rt, Label* L) {
+ bgec(rs, rt, shifted_branch_offset(L));
}
void bgezal(Register rs, int16_t offset);
void bgezalc(Register rt, int16_t offset);
- void bgezalc(Register rt, Label* L) {
- bgezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgezalc(Register rt, Label* L) {
+ bgezalc(rt, shifted_branch_offset(L));
}
void bgezall(Register rs, int16_t offset);
- void bgezall(Register rs, Label* L) {
- bgezall(rs, branch_offset(L, false)>>2);
+ inline void bgezall(Register rs, Label* L) {
+ bgezall(rs, branch_offset(L) >> 2);
}
void bgtz(Register rs, int16_t offset);
void bgtzc(Register rt, int16_t offset);
- void bgtzc(Register rt, Label* L) {
- bgtzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzc(Register rt, Label* L) {
+ bgtzc(rt, shifted_branch_offset(L));
}
void blez(Register rs, int16_t offset);
void blezc(Register rt, int16_t offset);
- void blezc(Register rt, Label* L) {
- blezc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezc(Register rt, Label* L) {
+ blezc(rt, shifted_branch_offset(L));
}
void bltz(Register rs, int16_t offset);
void bltzc(Register rt, int16_t offset);
- void bltzc(Register rt, Label* L) {
- bltzc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzc(Register rt, Label* L) {
+ bltzc(rt, shifted_branch_offset(L));
}
void bltuc(Register rs, Register rt, int16_t offset);
- void bltuc(Register rs, Register rt, Label* L) {
- bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltuc(Register rs, Register rt, Label* L) {
+ bltuc(rs, rt, shifted_branch_offset(L));
}
void bltc(Register rs, Register rt, int16_t offset);
- void bltc(Register rs, Register rt, Label* L) {
- bltc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bltc(Register rs, Register rt, Label* L) {
+ bltc(rs, rt, shifted_branch_offset(L));
}
-
void bltzal(Register rs, int16_t offset);
void blezalc(Register rt, int16_t offset);
- void blezalc(Register rt, Label* L) {
- blezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void blezalc(Register rt, Label* L) {
+ blezalc(rt, shifted_branch_offset(L));
}
void bltzalc(Register rt, int16_t offset);
- void bltzalc(Register rt, Label* L) {
- bltzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bltzalc(Register rt, Label* L) {
+ bltzalc(rt, shifted_branch_offset(L));
}
void bgtzalc(Register rt, int16_t offset);
- void bgtzalc(Register rt, Label* L) {
- bgtzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bgtzalc(Register rt, Label* L) {
+ bgtzalc(rt, shifted_branch_offset(L));
}
void beqzalc(Register rt, int16_t offset);
- void beqzalc(Register rt, Label* L) {
- beqzalc(rt, branch_offset_compact(L, false)>>2);
+ inline void beqzalc(Register rt, Label* L) {
+ beqzalc(rt, shifted_branch_offset(L));
}
void beqc(Register rs, Register rt, int16_t offset);
- void beqc(Register rs, Register rt, Label* L) {
- beqc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void beqc(Register rs, Register rt, Label* L) {
+ beqc(rs, rt, shifted_branch_offset(L));
}
void beqzc(Register rs, int32_t offset);
- void beqzc(Register rs, Label* L) {
- beqzc(rs, branch_offset21_compact(L, false)>>2);
+ inline void beqzc(Register rs, Label* L) {
+ beqzc(rs, shifted_branch_offset21(L));
}
void bnezalc(Register rt, int16_t offset);
- void bnezalc(Register rt, Label* L) {
- bnezalc(rt, branch_offset_compact(L, false)>>2);
+ inline void bnezalc(Register rt, Label* L) {
+ bnezalc(rt, shifted_branch_offset(L));
}
void bnec(Register rs, Register rt, int16_t offset);
- void bnec(Register rs, Register rt, Label* L) {
- bnec(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnec(Register rs, Register rt, Label* L) {
+ bnec(rs, rt, shifted_branch_offset(L));
}
void bnezc(Register rt, int32_t offset);
- void bnezc(Register rt, Label* L) {
- bnezc(rt, branch_offset21_compact(L, false)>>2);
+ inline void bnezc(Register rt, Label* L) {
+ bnezc(rt, shifted_branch_offset21(L));
}
void bne(Register rs, Register rt, int16_t offset);
- void bne(Register rs, Register rt, Label* L) {
- bne(rs, rt, branch_offset(L, false)>>2);
+ inline void bne(Register rs, Register rt, Label* L) {
+ bne(rs, rt, shifted_branch_offset(L));
}
void bovc(Register rs, Register rt, int16_t offset);
- void bovc(Register rs, Register rt, Label* L) {
- bovc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bovc(Register rs, Register rt, Label* L) {
+ bovc(rs, rt, shifted_branch_offset(L));
}
void bnvc(Register rs, Register rt, int16_t offset);
- void bnvc(Register rs, Register rt, Label* L) {
- bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+ inline void bnvc(Register rs, Register rt, Label* L) {
+ bnvc(rs, rt, shifted_branch_offset(L));
}
// Never use the int16_t b(l)cond version with a branch offset
@@ -742,8 +759,8 @@ class Assembler : public AssemblerBase {
void ori(Register rd, Register rs, int32_t j);
void xori(Register rd, Register rs, int32_t j);
void lui(Register rd, int32_t j);
- void aui(Register rs, Register rt, int32_t j);
- void daui(Register rs, Register rt, int32_t j);
+ void aui(Register rt, Register rs, int32_t j);
+ void daui(Register rt, Register rs, int32_t j);
void dahi(Register rs, int32_t j);
void dati(Register rs, int32_t j);
@@ -771,6 +788,9 @@ class Assembler : public AssemblerBase {
void dsrl32(Register rt, Register rd, uint16_t sa);
void dsra32(Register rt, Register rd, uint16_t sa);
+ // Address computing instructions with shift.
+ void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+ void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
// ------------Memory-instructions-------------
@@ -866,6 +886,9 @@ class Assembler : public AssemblerBase {
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void bitswap(Register rd, Register rt);
void dbitswap(Register rd, Register rt);
void align(Register rd, Register rs, Register rt, uint8_t bp);
@@ -972,12 +995,12 @@ class Assembler : public AssemblerBase {
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void bc1eqz(int16_t offset, FPURegister ft);
- void bc1eqz(Label* L, FPURegister ft) {
- bc1eqz(branch_offset(L, false)>>2, ft);
+ inline void bc1eqz(Label* L, FPURegister ft) {
+ bc1eqz(shifted_branch_offset(L), ft);
}
void bc1nez(int16_t offset, FPURegister ft);
- void bc1nez(Label* L, FPURegister ft) {
- bc1nez(branch_offset(L, false)>>2, ft);
+ inline void bc1nez(Label* L, FPURegister ft) {
+ bc1nez(shifted_branch_offset(L), ft);
}
// Conditions and branches for non MIPSr6.
@@ -987,12 +1010,12 @@ class Assembler : public AssemblerBase {
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void bc1f(int16_t offset, uint16_t cc = 0);
- void bc1f(Label* L, uint16_t cc = 0) {
- bc1f(branch_offset(L, false)>>2, cc);
+ inline void bc1f(Label* L, uint16_t cc = 0) {
+ bc1f(shifted_branch_offset(L), cc);
}
void bc1t(int16_t offset, uint16_t cc = 0);
- void bc1t(Label* L, uint16_t cc = 0) {
- bc1t(branch_offset(L, false)>>2, cc);
+ inline void bc1t(Label* L, uint16_t cc = 0) {
+ bc1t(shifted_branch_offset(L), cc);
}
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
@@ -1047,7 +1070,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
@@ -1082,9 +1105,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dd(Label* label);
- // Emits the address of the code stub's first instruction.
- void emit_code_stub_address(Code* stub);
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1113,8 +1133,16 @@ class Assembler : public AssemblerBase {
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
+ static bool IsBc(Instr instr);
+ static bool IsBzc(Instr instr);
+
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
+ static bool IsBeqzc(Instr instr);
+ static bool IsBnezc(Instr instr);
+ static bool IsBeqc(Instr instr);
+ static bool IsBnec(Instr instr);
+
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
@@ -1173,6 +1201,8 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
+ bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1239,6 +1269,15 @@ class Assembler : public AssemblerBase {
return block_buffer_growth_;
}
+ void EmitForbiddenSlotInstruction() {
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ ClearCompactBranchState();
+ }
+ }
+
+ inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+
private:
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
@@ -1278,12 +1317,19 @@ class Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
+ // Readable constants for compact branch handling in emit()
+ enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
+
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
- inline void emit(Instr x);
+ inline void emit(Instr x,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
inline void emit(uint64_t x);
- inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
+ inline void CheckForEmitInForbiddenSlot();
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -1335,21 +1381,22 @@ class Assembler : public AssemblerBase {
SecondaryField func = NULLSF);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- Register rt,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register rs,
- SecondaryField SF,
- int32_t j);
- void GenInstrImmediate(Opcode opcode,
- Register r1,
- FPURegister r2,
- int32_t j);
- void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
- void GenInstrImmediate(Opcode opcode, int32_t offset26);
-
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, Register rt, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, SecondaryField SF, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register r1, FPURegister r2, int32_t j,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(
+ Opcode opcode, Register rs, int32_t offset21,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
+ void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
+ void GenInstrImmediate(
+ Opcode opcode, int32_t offset26,
+ CompactBranchType is_compact_branch = CompactBranchType::NO);
void GenInstrJump(Opcode opcode,
uint32_t address);
@@ -1423,12 +1470,17 @@ class Assembler : public AssemblerBase {
bool trampoline_emitted_;
static const int kTrampolineSlotsSize = 2 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
// Internal reference positions, required for unbounded internal reference
// labels.
std::set<int64_t> internal_reference_positions_;
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
Trampoline trampoline_;
bool internal_trampoline_exception_;
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index fc81e712d2..3a9980beab 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -22,9 +22,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- a1 : called function
+ // -- a1 : target
+ // -- a3 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[8 * (argc - 1)] : first argument
@@ -36,33 +35,30 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(a1);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(a1);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(a3);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(a1, a3);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects a0 to contain the number of arguments
- // including the receiver and the extra arguments. But a0 is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- Label argc, done_argc;
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2,
- FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Branch(&argc, eq, a2,
- Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ Daddu(a0, a2, num_extra_args + 1);
- __ jmp(&done_argc);
- __ bind(&argc);
+ // including the receiver and the extra arguments.
__ Daddu(a0, a0, num_extra_args + 1);
- __ bind(&done_argc);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -71,30 +67,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ ld(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the InternalArray function from the native context.
- __ ld(result,
- MemOperand(result,
- Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ ld(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(result, FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
// Load the Array function from the native context.
- __ ld(result,
- MemOperand(result,
- Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -160,6 +141,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ __ Drop(2);
+ }
+
+ // 2a. Convert first argument to number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ Move(v0, Smi::FromInt(0));
+ __ DropAndRet(1);
+}
+
+
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ __ Drop(2);
+ __ jmp(&done);
+ __ bind(&no_arguments);
+ __ Move(a0, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure a0 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(a0, &done_convert);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&done_convert, eq, t0, Operand(HEAP_NUMBER_TYPE));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(a0, v0);
+ __ Pop(a1, a3);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -213,7 +295,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(a0);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -222,13 +304,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
- // -- a3 : original constructor
+ // -- a3 : new target
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
- // 1. Load the first argument into a0 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into a0 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -245,7 +330,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure a0 is a string.
+ // 3. Make sure a0 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
@@ -264,68 +349,42 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- a0 : the first argument
- // -- a1 : constructor function
- // -- a3 : original constructor
- // -- ra : return address
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
-
- // Fall back to runtime if the original constructor and function differ.
- __ Branch(&rt_call, ne, a1, Operand(a3));
-
- __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ Branch(&new_object, ne, a1, Operand(a3));
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(a1, a2, a3);
- __ sd(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Move(a2, Smi::FromInt(JSValue::kSize));
- __ Push(a0, a1, a2);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(a0, a1);
- }
- __ jmp(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a1, a3); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(a0, a1);
- }
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a0, a1, a3); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- // Push call kind information and function as parameter to the runtime call.
- __ Push(a1, a1);
+ // Push a copy of the target function and the new target.
+ __ Push(a1, a3, a1);
__ CallRuntime(function_id, 1);
- // Restore call kind information and receiver.
- __ Pop(a1);
+ // Restore target function and new target.
+ __ Pop(a1, a3);
}
@@ -362,12 +421,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a2 : allocation site or undefined
- // -- a3 : original constructor
+ // -- a3 : new target
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -381,170 +441,158 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(a2, a0, a1, a3);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ li(a2, Operand(debug_step_in_fp));
- __ ld(a2, MemOperand(a2));
- __ Branch(&rt_call, ne, a2, Operand(zero_reg));
-
- // Verify that the original constructor is a JSFunction.
- __ GetObjectType(a3, a5, a4);
- __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it is in fact a map.
- // a3: original constructor
- __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&rt_call, ne, a1, Operand(a5));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwu(a4, bit_field3);
- __ DecodeField<Map::Counter>(a6, a4);
- __ Branch(&allocate, lt, a6,
- Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
- // Decrease generous allocation count.
- __ Dsubu(a4, a4, Operand(1 << Map::Counter::kShift));
- __ Branch(USE_DELAY_SLOT, &allocate, ne, a6,
- Operand(Map::kSlackTrackingCounterEnd));
- __ sw(a4, bit_field3); // In delay slot.
-
- __ Push(a1, a2, a2); // a2 = Initial map.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(a1, a2);
- __ li(a6, Operand(Map::kSlackTrackingCounterEnd - 1));
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- Label rt_call_reload_new_target;
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
- __ Allocate(a3, t0, t1, t2, &rt_call_reload_new_target, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t0: JSObject (not tagged)
- __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t1, t0);
- __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
- __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
- __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
- __ Daddu(t1, t1, Operand(3*kPointerSize));
- DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
- DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
- DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
- // Fill all the in-object properties with appropriate filler.
- // a1: constructor function
- // a2: initial map
- // a3: object size (in words)
- // t0: JSObject (not tagged)
- // t1: First in-object property of JSObject (not tagged)
- // a6: slack tracking counter (non-API function case)
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-
- // Use t3 to hold undefined, which is used in several places below.
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ Branch(&no_inobject_slack_tracking, lt, a6,
- Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
-
- // Allocate object with a slack.
- __ lbu(
- a0,
- FieldMemOperand(
- a2, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ dsubu(a0, a0, a2);
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a0, t1, at);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ dsll(at, a3, kPointerSizeLog2);
- __ Daddu(t2, t0, Operand(at)); // End of object.
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
- a0, Operand(t2));
+ __ Push(a2, a0);
+
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ GetObjectType(a3, a5, a4);
+ __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it is in fact a map.
+ // a3: new target
+ __ ld(a2,
+ FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &rt_call);
+ __ GetObjectType(a2, t1, t0);
+ __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&rt_call, ne, a1, Operand(a5));
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
+
+ // Now allocate the JSObject on the heap.
+ // a1: constructor function
+ // a2: initial map
+ __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Allocate(a4, t0, a4, t2, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // a1: constructor function
+ // a2: initial map
+ // a3: object size
+ // t0: JSObject (not HeapObject tagged - the actual address).
+ // a4: start of next object
+ __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
+ __ mov(t1, t0);
+ STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
+ __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
+ STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
+ __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
+ STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
+ __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
+ STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
+ __ Daddu(t1, t1, Operand(3 * kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ Daddu(t0, t0, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with appropriate filler.
+ // t0: JSObject (tagged)
+ // t1: First in-object property of JSObject (not tagged)
+ __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwu(t2, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(a6, t2);
+ // a6: slack tracking counter
+ __ Branch(&no_inobject_slack_tracking, lt, a6,
+ Operand(Map::kSlackTrackingCounterEnd));
+ // Decrease generous allocation count.
+ __ Dsubu(t2, t2, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(t2, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ dsubu(a0, a4, a0);
+ // a0: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t1,
+ Operand(a0));
+ }
+ __ InitializeFieldsWithFiller(t1, a0, t3);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(t1, a4, t3);
+
+ // a6: slack tracking counter value before decreasing.
+ __ Branch(&allocated, ne, a6, Operand(Map::kSlackTrackingCounterEnd));
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(a1, a3, t0, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(a1, a3, t0);
+
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t0: JSObject
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(t1, a0, t3);
- // To allow for truncation.
- __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
- }
+ __ InitializeFieldsWithFiller(t1, a4, t3);
- __ dsll(at, a3, kPointerSizeLog2);
- __ Daddu(a0, t0, Operand(at)); // End of object.
- __ InitializeFieldsWithFiller(t1, a0, t3);
+ // Continue with JSObject being successfully allocated.
+ // a1: constructor function
+ // a3: new target
+ // t0: JSObject
+ __ jmp(&allocated);
+ }
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Daddu(t0, t0, Operand(kHeapObjectTag));
+ // Allocate the new receiver object using the runtime call.
+ // a1: constructor function
+ // a3: new target
+ __ bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(a1, a3, a1, a3); // constructor function, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(t0, v0);
+ __ Pop(a1, a3);
- // Continue with JSObject being successfully allocated.
- // a4: JSObject
- __ jmp(&allocated);
+ // Receiver for constructor call allocated.
+ // a1: constructor function
+ // a3: new target
+ // t0: JSObject
+ __ bind(&allocated);
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ ld(a3, MemOperand(sp, 0 * kPointerSize));
+ __ ld(a0, MemOperand(sp));
}
-
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: original constructor
- __ bind(&rt_call);
-
- __ Push(a1, a3); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(t0, v0);
-
- // Receiver for constructor call allocated.
- // t0: JSObject
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(a3); // new.target
- __ Pop(a1);
-
- __ ld(a0, MemOperand(sp));
__ SmiUntag(a0);
- __ Push(a3, t0, t0);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(t0, t0);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -553,26 +601,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a0: number of arguments
// a1: constructor function
// a2: address of last argument (caller sp)
- // a3: number of arguments (smi-tagged)
+ // a3: new target
+ // t0: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ mov(a3, a0);
+ __ mov(t0, a0);
__ jmp(&entry);
__ bind(&loop);
- __ dsll(a4, a3, kPointerSizeLog2);
+ __ dsll(a4, t0, kPointerSizeLog2);
__ Daddu(a4, a2, Operand(a4));
__ ld(a5, MemOperand(a4));
__ push(a5);
__ bind(&entry);
- __ Daddu(a3, a3, Operand(-1));
- __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+ __ Daddu(t0, t0, Operand(-1));
+ __ Branch(&loop, greater_equal, t0, Operand(zero_reg));
// Call the function.
// a0: number of arguments
// a1: constructor function
+ // a3: new target
if (is_api_function) {
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Handle<Code> code =
@@ -580,47 +629,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ld(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(v0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, a1, a3);
+ __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ld(v0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // v0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ ld(a1, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -628,106 +680,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiScale(a4, a1, kPointerSizeLog2);
__ Daddu(sp, sp, a4);
__ Daddu(sp, sp, kPointerSize);
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+ }
__ Ret();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- a1 : constructor function
- // -- a2 : allocation site or undefined
- // -- a3 : original constructor
- // -- ra : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(a2, t0);
- __ push(a2);
-
- __ mov(a4, a0);
- __ SmiTag(a4);
- __ push(a4); // Smi-tagged arguments count.
-
- // Push new.target.
- __ push(a3);
-
- // receiver is the hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ push(at);
-
- // Set up pointer to last argument.
- __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, entry;
- __ SmiUntag(a4);
- __ jmp(&entry);
- __ bind(&loop);
- __ dsll(at, a4, kPointerSizeLog2);
- __ Daddu(at, a2, Operand(at));
- __ ld(at, MemOperand(at));
- __ push(at);
- __ bind(&entry);
- __ Daddu(a4, a4, Operand(-1));
- __ Branch(&loop, ge, a4, Operand(zero_reg));
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ li(a2, Operand(debug_step_in_fp));
- __ ld(a2, MemOperand(a2));
- __ Branch(&skip_step_in, eq, a2, Operand(zero_reg));
-
- __ Push(a0, a1, a1);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(a0, a1);
-
- __ bind(&skip_step_in);
-
-
- // Call the function.
- // a0: number of arguments
- // a1: constructor function
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- // v0: result
- // sp[0]: new.target
- // sp[1]: number of arguments (smi-tagged)
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ ld(a1, MemOperand(sp, kPointerSize));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Leave construct frame.
- }
- __ SmiScale(at, a1, kPointerSizeLog2);
- __ Daddu(sp, sp, Operand(at));
- __ Daddu(sp, sp, Operand(kPointerSize));
- __ Jump(ra);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -755,7 +733,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -857,6 +835,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o a1: the JS function object being called.
+// o a3: the new target
// o cp: our context
// o fp: the caller's frame pointer
// o sp: stack pointer
@@ -874,6 +853,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Push(a3);
+
+ // Push zero for bytecode array offset.
+ __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -902,7 +885,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Dsubu(a5, sp, Operand(a4));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, a5, Operand(a2));
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -932,16 +915,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Dsubu(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ Daddu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -1017,7 +999,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (not including receiver)
- // -- a3 : original constructor
+ // -- a3 : new target
// -- a1 : constructor to call
// -- a2 : address of the first argument
// -----------------------------------
@@ -1040,40 +1022,110 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Branch(&loop_header, gt, a2, Operand(t0));
// Call the constructor with a0, a1, and a3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ Daddu(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ ld(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ ld(a1,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, at);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, at,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a1,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ ld(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ Daddu(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a1, MemOperand(a1));
+ __ dsll(a1, a1, kPointerSizeLog2);
+ __ Daddu(a1, kInterpreterDispatchTableRegister, a1);
+ __ ld(a1, MemOperand(a1));
+ __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(a1);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(a1, a1);
- // Whether to compile in a background thread.
- __ LoadRoot(
- at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(at);
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(a1);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1093,8 +1145,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1132,8 +1185,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// a0 - contains return address (beginning of patch sequence)
// a1 - isolate
+ // a3 - new target
RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ (a0.bit() | a1.bit() | a3.bit() | ra.bit() | fp.bit()) & ~sp.bit();
FrameScope scope(masm, StackFrame::MANUAL);
__ MultiPush(saved_regs);
__ PrepareCallCFunction(2, 0, a2);
@@ -1173,7 +1227,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1199,7 +1253,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> a6.
@@ -1241,6 +1295,109 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers {t2, t3, a4, a5}.
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = t2;
+ Register map = t3;
+ Register constructor = a4;
+ Register scratch = a5;
+
+ // If there is no signature, return the holder.
+ __ ld(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ Label next_prototype;
+ __ Branch(&next_prototype, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ Register type = constructor;
+ __ ld(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(type, FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ Branch(&receiver_check_passed, eq, signature, Operand(type),
+ USE_DELAY_SLOT);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ GetObjectType(type, scratch, scratch);
+ __ Branch(&next_prototype, ne, scratch, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+
+ // Otherwise load the parent function template and iterate.
+ __ ld(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ Branch(&function_template_loop);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lwu(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+ // Iterate.
+ __ Branch(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments excluding receiver
+ // -- a1 : callee
+ // -- ra : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[8 * (argc - 1)] : first argument
+ // -- sp[8 * argc] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ ld(t1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(t1, FieldMemOperand(t1, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check
+ Label receiver_check_failed;
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Daddu(t8, sp, at);
+ __ ld(t0, MemOperand(t8));
+ CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ ld(t2, FieldMemOperand(t1, FunctionTemplateInfo::kCallCodeOffset));
+ __ ld(t2, FieldMemOperand(t2, CallHandlerInfo::kFastHandlerOffset));
+ __ Daddu(t2, t2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t2);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver);
+ __ Daddu(t8, t8, Operand(kPointerSize));
+ __ daddu(sp, t8, zero_reg);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1248,7 +1405,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(a0);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1281,7 +1438,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ Branch(&ok, hs, sp, Operand(at));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1292,7 +1449,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into a0 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(a0);
+ __ JumpIfSmi(a0, &receiver_not_date);
+ __ GetObjectType(a0, t0, t0);
+ __ Branch(&receiver_not_date, ne, t0, Operand(JS_DATE_TYPE));
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(a0, JSDate::kValueOffset)); // In delay slot.
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ li(a1, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ ld(a1, MemOperand(a1));
+ __ ld(t0, FieldMemOperand(a0, JSDate::kCacheStampOffset));
+ __ Branch(&stamp_mismatch, ne, t0, Operand(a1));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(
+ a0, JSDate::kValueOffset +
+ field_index * kPointerSize)); // In delay slot.
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, t0);
+ __ li(a1, Operand(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into a1, argArray into a0 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg;
+ Register scratch = a4;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ mov(a3, a2);
+ __ dsll(scratch, a0, kPointerSizeLog2);
+ __ Daddu(a0, sp, Operand(scratch));
+ __ ld(a1, MemOperand(a0)); // receiver
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a2, MemOperand(a0)); // thisArg
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a3, MemOperand(a0)); // argArray
+ __ bind(&no_arg);
+ __ Daddu(sp, sp, Operand(scratch));
+ __ sd(a2, MemOperand(sp));
+ __ mov(a0, a3);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a0 : argArray
+ // -- a1 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(a1, &receiver_not_callable);
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsCallable));
+ __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ sd(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
{
@@ -1336,190 +1613,145 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ ld(key, MemOperand(fp, indexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array.
- __ bind(&loop);
- __ ld(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ li(slot, Operand(Smi::FromInt(slot_index)));
- __ ld(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ ld(key, MemOperand(fp, indexOffset));
- __ Daddu(key, key, Operand(Smi::FromInt(1)));
- __ sd(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ld(a1, MemOperand(fp, limitOffset));
- __ Branch(&loop, ne, key, Operand(a1));
-
- // On exit, the pushed arguments count is in a0, untagged
- __ mov(a0, key);
- __ SmiUntag(a0);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- __ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ ld(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array.
- __ Push(a0, a1);
-
- // Returns (in v0) number of arguments to copy to stack as Smi.
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- // Returns the result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ mov(a1, zero_reg);
- __ ld(a2, MemOperand(fp, kReceiverOffset));
- __ Push(v0, a1, a2); // limit, initial index and receiver.
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ ld(a1, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-
- // Tear down the internal frame and remove function, receiver and args.
+ Label no_arg;
+ Register scratch = a4;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ mov(a3, a1);
+ __ dsll(scratch, a0, kPointerSizeLog2);
+ __ mov(a0, scratch);
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(zero_reg));
+ __ Daddu(a0, sp, Operand(a0));
+ __ ld(a1, MemOperand(a0)); // target
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a2, MemOperand(a0)); // thisArgument
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a3, MemOperand(a0)); // argumentsList
+ __ bind(&no_arg);
+ __ Daddu(sp, sp, Operand(scratch));
+ __ sd(a2, MemOperand(sp));
+ __ mov(a0, a3);
}
- __ Ret(USE_DELAY_SLOT);
- __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
-}
-
-
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(a1, &target_not_callable);
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsCallable));
+ __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
+
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a1, FieldMemOperand(a1, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(a1);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ ld(a0, MemOperand(fp, kNewTargetOffset));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&validate_arguments, ne, a0, Operand(at));
- __ ld(a0, MemOperand(fp, kFunctionOffset));
- __ sd(a0, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ ld(a0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(a0);
- __ ld(a0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(a0);
- __ ld(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(a0);
- // Returns argument count in v0.
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- // Returns result in v0.
- Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(v0); // limit
- __ mov(a1, zero_reg); // initial index
- __ push(a1);
- // Push the constructor function as callee.
- __ ld(a0, MemOperand(fp, kFunctionOffset));
- __ push(a0);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ ld(a1, MemOperand(fp, kFunctionOffset));
- __ ld(a4, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ __ sd(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ jr(ra);
- __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into a1 (if present), argumentsList into a0 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
+ {
+ Label no_arg;
+ Register scratch = a4;
+ __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+ __ mov(a2, a1);
+ __ dsll(scratch, a0, kPointerSizeLog2);
+ __ Daddu(a0, sp, Operand(scratch));
+ __ sd(a2, MemOperand(a0)); // receiver
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a1, MemOperand(a0)); // target
+ __ mov(a3, a1); // new.target defaults to target
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a2, MemOperand(a0)); // argumentsList
+ __ Dsubu(a0, a0, Operand(kPointerSize));
+ __ Branch(&no_arg, lt, a0, Operand(sp));
+ __ ld(a3, MemOperand(a0)); // new.target
+ __ bind(&no_arg);
+ __ Daddu(sp, sp, Operand(scratch));
+ __ mov(a0, a2);
+ }
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a3 : new.target
+ // -- a1 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(a1, &target_not_constructor);
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsConstructor));
+ __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
+
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsConstructor));
+ __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
+
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ sd(a1, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ sd(a3, MemOperand(sp));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1529,6 +1761,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- a0 : actual number of arguments
// -- a1 : function (passed through to callee)
// -- a2 : expected number of arguments
+ // -- a3 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1572,6 +1805,130 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(a0, &create_runtime);
+
+ // Load the map of argumentsList into a2.
+ __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+
+ // Load native context into a4.
+ __ ld(a4, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ ld(at, ContextMemOperand(a4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+ __ ld(at, ContextMemOperand(a4, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, a2, Operand(at));
+
+ // Check if argumentsList is a fast JSArray.
+ __ ld(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a3, a0);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ mov(a0, v0);
+ __ Pop(a1, a3);
+ __ ld(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
+ }
+ __ Branch(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ ld(a2,
+ FieldMemOperand(a0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
+ __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ Branch(&create_runtime, ne, a2, Operand(at));
+ __ SmiUntag(a2);
+ __ mov(a0, a4);
+ __ Branch(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ ld(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(a2);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
+ __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ ld(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+ __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ SmiUntag(a2);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(a4, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ Dsubu(a4, sp, a4);
+ // Check if the arguments will overflow the stack.
+ __ dsll(at, a2, kPointerSizeLog2);
+ __ Branch(&done, gt, a4, Operand(at)); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : args (a FixedArray built from argumentsList)
+ // -- a2 : len (number of elements to push from args)
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ mov(a4, zero_reg);
+ Label done, loop;
+ __ bind(&loop);
+ __ Branch(&done, eq, a4, Operand(a2));
+ __ dsll(at, a4, kPointerSizeLog2);
+ __ Daddu(at, a0, at);
+ __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+ __ Push(at);
+ __ Daddu(a4, a4, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done);
+ __ Move(a0, a4);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ Label construct;
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&construct, ne, a3, Operand(at));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ bind(&construct);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1665,21 +2022,117 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
- __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(a1, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
}
}
// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ dsll(a4, a0, kPointerSizeLog2);
+ __ daddu(a4, a4, sp);
+ __ sd(at, MemOperand(a4));
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ Dsubu(sp, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Daddu(sp, sp, Operand(a5));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(a5, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, gt, a5, Operand(a0));
+ __ dsll(a6, a4, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ ld(at, MemOperand(a6));
+ __ dsll(a6, a5, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ sd(at, MemOperand(a6));
+ __ Daddu(a4, a4, Operand(1));
+ __ Daddu(a5, a5, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+ __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Dsubu(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ daddu(a5, a5, a2);
+ __ ld(at, MemOperand(a5));
+ __ dsll(a5, a0, kPointerSizeLog2);
+ __ daddu(a5, a5, sp);
+ __ sd(at, MemOperand(a5));
+ __ Daddu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ ld(at, MemOperand(at));
+ __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -1692,13 +2145,18 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ GetObjectType(a1, t1, t2);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ ld(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(a1);
- __ Branch(&non_smi);
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(a1);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ Daddu(a0, a0, 2);
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1712,7 +2170,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ daddu(at, sp, at);
__ sd(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1722,7 +2180,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1731,10 +2189,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (checked to be a JSFunction)
- // -- a3 : the original constructor (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(a1);
- __ AssertFunction(a3);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
@@ -1750,17 +2207,117 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ Dsubu(sp, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadRoot(at, Heap::kRealStackLimitRootIndex);
+ __ Branch(&done, gt, sp, Operand(at)); // Signed comparison.
+ // Restore the stack pointer.
+ __ Daddu(sp, sp, Operand(a5));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ {
+ Label loop, done_loop;
+ __ mov(a5, zero_reg);
+ __ bind(&loop);
+ __ Branch(&done_loop, ge, a5, Operand(a0));
+ __ dsll(a6, a4, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ ld(at, MemOperand(a6));
+ __ dsll(a6, a5, kPointerSizeLog2);
+ __ daddu(a6, a6, sp);
+ __ sd(at, MemOperand(a6));
+ __ Daddu(a4, a4, Operand(1));
+ __ Daddu(a5, a5, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop, done_loop;
+ __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ SmiUntag(a4);
+ __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Dsubu(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ dsll(a5, a4, kPointerSizeLog2);
+ __ daddu(a5, a5, a2);
+ __ ld(at, MemOperand(a5));
+ __ dsll(a5, a0, kPointerSizeLog2);
+ __ daddu(a5, a5, sp);
+ __ sd(at, MemOperand(a5));
+ __ Daddu(a0, a0, Operand(1));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ ld(at, MemOperand(at));
+ __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
- // -- a1 : the constructor to call (checked to be a JSFunctionProxy)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a1 : the constructor to call (checked to be a JSProxy)
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ ld(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(a1, a3);
+ // Include the pushed new_target, constructor and the receiver.
+ __ Daddu(a0, a0, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1769,24 +2326,33 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the constructor to call (can be any Object)
- // -- a3 : the original constructor (either the same as the constructor or
+ // -- a3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(a1, &non_constructor);
- __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t2, t2, Operand(1 << Map::kIsCallable));
- __ Branch(&non_constructor, eq, t2, Operand(zero_reg));
// Dispatch based on instance type.
+ __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
+ // Check if target has a [[Construct]] internal method.
+ __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(1 << Map::kIsConstructor));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
- eq, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+ eq, t2, Operand(JS_PROXY_TYPE));
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
@@ -1795,7 +2361,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ daddu(at, sp, at);
__ sd(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1803,11 +2369,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1817,14 +2380,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- a0: actual arguments count
// -- a1: function (passed through to callee)
// -- a2: expected arguments count
+ // -- a3: new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Branch(&dont_adapt_arguments, eq,
a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless as the number of argument should always be greater than 0.
@@ -1834,9 +2395,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a4.
__ SmiScale(a0, a0, kPointerSizeLog2);
@@ -1851,7 +2413,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// a4: copy end address
Label copy;
@@ -1883,17 +2445,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into a7.
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ SmiScale(a0, a0, kPointerSizeLog2);
__ Daddu(a0, fp, a0);
// Adjust for return address and receiver.
@@ -1905,7 +2468,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0: copy start address
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
// a7: copy end address
Label copy;
__ bind(&copy);
@@ -1918,7 +2481,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// a1: function
// a2: expected number of arguments
- // a3: code entry to call
+ // a3: new target (passed through to callee)
__ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
__ dsll(a6, a2, kPointerSizeLog2);
__ Dsubu(a4, fp, Operand(a6));
@@ -1938,7 +2501,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(a0, a2);
// a0 : expected number of arguments
// a1 : function (passed through to callee)
- __ Call(a3);
+ // a3: new target (passed through to callee)
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Call(a4);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1952,13 +2517,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ Jump(a3);
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(a4);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ break_(0xCC);
}
}
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index a6c4f33806..2531d6b3f1 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -289,7 +289,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ GetObjectType(a0, t0, t0);
if (cc == less || cc == greater) {
// Call runtime on identical JSObjects.
- __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -305,7 +305,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
// Call runtime on identical symbols since we need to throw a TypeError.
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
@@ -455,12 +455,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
+ // FIRST_JS_RECEIVER_TYPE.
__ GetObjectType(lhs, a2, a2);
- __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));
// Return non-zero.
Label return_not_equal;
@@ -473,7 +473,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
__ GetObjectType(rhs, a3, a3);
- __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
@@ -535,9 +535,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ li(v0, Operand(1)); // Non-zero indicates not equal.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -725,8 +725,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
@@ -740,9 +739,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -977,7 +975,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1474,15 +1472,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
__ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ ld(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(scratch,
- FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
- __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
// Get the "prototype" (or initial map) of the {function}.
__ ld(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1507,25 +1496,49 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ Register const result = v0;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Branch(&done, eq, object_prototype, Operand(function_prototype));
- __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
- __ ld(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+
+ // Check if the object needs to be access checked.
+ __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
+ // Check if the current object is a Proxy.
+ __ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ __ Branch(&fast_runtime_fallback, eq, object_instance_type,
+ Operand(JS_PROXY_TYPE));
+
+ __ ld(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
+ __ ld(object_map,
+ FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot.
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
__ Ret(USE_DELAY_SLOT);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // Slow-case: Call the runtime function.
+ __ StoreRoot(result,
+ Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ DCHECK(Smi::FromInt(0) == 0);
+ __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -1596,7 +1609,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1624,7 +1637,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1690,7 +1703,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, a4, t1, &runtime, TAG_OBJECT);
+ __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1700,8 +1713,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
+ __ ld(a4, NativeContextMemOperand());
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kNormalOffset));
@@ -1839,7 +1851,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// a5 = argument count (tagged)
__ bind(&runtime);
__ Push(a1, a3, a5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1858,7 +1870,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1904,10 +1916,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ ld(a4, FieldMemOperand(a4, JSGlobalObject::kNativeContextOffset));
- __ ld(a4, MemOperand(a4, Context::SlotOffset(
- Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
__ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
@@ -1955,7 +1964,33 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // a4 : rest parameter index (tagged)
+ // Check if the calling frame is an arguments adaptor frame.
+
+ Label runtime;
+ __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a5, MemOperand(a0, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a5,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ld(a2, MemOperand(a0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(at, a2, kPointerSizeLog2);
+
+ __ Daddu(a3, a0, Operand(at));
+ __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ Push(a2, a3, a4);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1964,7 +1999,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2281,7 +2316,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, eq, v0, Operand(a1));
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2377,7 +2412,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2423,19 +2458,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
// a1 : the function to call
- // a4 : original constructor (for IsSuperConstructorCall)
FrameScope scope(masm, StackFrame::INTERNAL);
- const RegList kSavedRegs = 1 << 4 | // a0
- 1 << 5 | // a1
- 1 << 6 | // a2
- 1 << 7 | // a3
- BoolToInt(is_super) << 8; // a4
+ const RegList kSavedRegs = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7; // a3
// Number-of-arguments register must be smi-tagged to call out.
@@ -2449,7 +2481,7 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2457,7 +2489,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
- // a4 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2498,7 +2529,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&miss, ne, feedback_map, Operand(at));
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&megamorphic, ne, a1, Operand(a5));
__ jmp(&done);
@@ -2520,20 +2551,20 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&not_array_function, ne, a1, Operand(a5));
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ Branch(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -2543,7 +2574,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // a4 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2552,29 +2582,23 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetObjectType(a1, a5, a5);
__ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ dsrl(at, a3, 32 - kPointerSizeLog2);
- __ Daddu(a5, a2, at);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ dsrl(at, a3, 32 - kPointerSizeLog2);
+ __ Daddu(a5, a2, at);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(a2, a5);
- }
+ __ AssertUndefinedOrAllocationSite(a2, a5);
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mov(a3, a4);
- } else {
- __ mov(a3, a1);
- }
+ // Pass function as new target.
+ __ mov(a3, a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -2634,7 +2658,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a3 - slot id
// a2 - vector
// a4 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
@@ -2657,11 +2681,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2698,9 +2718,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ bind(&call);
- __ li(a0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ bind(&call_function);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2735,14 +2757,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Daddu(a4, a2, Operand(a4));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
- // We have to update statistics for runtime profiling.
- __ ld(a4, FieldMemOperand(a2, with_types_offset));
- __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a2, with_types_offset));
- __ ld(a4, FieldMemOperand(a2, generic_offset));
- __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ Branch(USE_DELAY_SLOT, &call);
- __ sd(a4, FieldMemOperand(a2, generic_offset)); // In delay slot.
+
+ __ bind(&call);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
+ USE_DELAY_SLOT);
+ __ li(a0, Operand(argc)); // In delay slot.
__ bind(&uninitialized);
@@ -2755,13 +2775,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
__ Branch(&miss, eq, a1, Operand(a4));
- // Update stats.
- __ ld(a4, FieldMemOperand(a2, with_types_offset));
- __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a2, with_types_offset));
+ // Make sure the function belongs to the same native context.
+ __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
+ __ ld(t1, NativeContextMemOperand());
+ __ Branch(&miss, ne, t0, Operand(t1));
// Initialize the call counter.
__ dsrl(at, a3, 32 - kPointerSizeLog2);
@@ -2781,7 +2802,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(a1);
}
- __ Branch(&call);
+ __ Branch(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2799,7 +2820,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -2828,11 +2849,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
@@ -2861,7 +2882,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, v0);
@@ -2900,7 +2921,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, v0);
call_helper.AfterCall(masm);
@@ -3162,7 +3183,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// v0: original string
@@ -3207,7 +3228,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ mov(v0, a0);
__ bind(&slow_string);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3217,7 +3238,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3234,7 +3255,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3266,7 +3287,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3404,7 +3425,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(a1, a0);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3443,7 +3464,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
@@ -3736,9 +3757,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3746,18 +3767,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
__ GetObjectType(a0, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
__ GetObjectType(a1, a2, a2);
- __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+ __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- DCHECK(GetCondition() == eq);
+ DCHECK_EQ(eq, GetCondition());
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
@@ -3766,7 +3788,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ And(a2, a1, a0);
@@ -3781,7 +3803,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3789,7 +3811,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ li(a2, Operand(Smi::FromInt(LESS)));
}
__ Push(a1, a0, a2);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -4278,11 +4300,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4305,73 +4327,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : element value to store
- // -- a3 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers a1, a2, a4
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ ld(a4, MemOperand(sp, 0 * kPointerSize));
- __ ld(a1, MemOperand(sp, 1 * kPointerSize));
- __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
-
- __ CheckFastElements(a2, a5, &double_elements);
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(a0, &smi_element);
- __ CheckFastSmiElements(a2, a5, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(a1, a3, a0);
- __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
- __ Push(a5, a4);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ SmiScale(a6, a3, kPointerSizeLog2);
- __ Daddu(a6, a5, a6);
- __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sd(a0, MemOperand(a6, 0));
- // Update the write barrier for the array store.
- __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ SmiScale(a6, a3, kPointerSizeLog2);
- __ Daddu(a6, a5, a6);
- __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, &slow_elements);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -5074,7 +5029,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- a0 : argc (only if argument_count() == ANY)
// -- a1 : constructor
// -- a2 : AllocationSite or undefined
- // -- a3 : original constructor
+ // -- a3 : new target
// -- sp[0] : last argument
// -----------------------------------
@@ -5096,6 +5051,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(a2, a4);
}
+ // Enter the context of the Array function.
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -5115,26 +5073,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ Push(a1);
- __ Push(a3);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ li(at, Operand(2));
- __ addu(a0, a0, at);
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ Daddu(at, sp, at);
+ __ sd(a1, MemOperand(at));
+ __ li(at, Operand(3));
+ __ Daddu(a0, a0, at);
break;
case NONE:
- __ li(a0, Operand(2));
+ __ sd(a1, MemOperand(sp, 0 * kPointerSize));
+ __ li(a0, Operand(3));
break;
case ONE:
- __ li(a0, Operand(3));
+ __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ li(a0, Operand(4));
break;
}
-
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(a3, a2);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5220,14 +5178,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ ld(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = result_reg;
}
// Load the PropertyCell value at the specified slot.
__ dsll(at, slot_reg, kPointerSizeLog2);
__ Daddu(at, at, Operand(context_reg));
- __ ld(result_reg, ContextOperand(at, 0));
+ __ ld(result_reg, ContextMemOperand(at, 0));
__ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
// Check that value is not the_hole.
@@ -5239,7 +5197,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ bind(&slow_case);
__ SmiTag(slot_reg);
__ Push(slot_reg);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5259,14 +5217,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ ld(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
+ __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
context_reg = cell_reg;
}
// Load the PropertyCell at the specified slot.
__ dsll(at, slot_reg, kPointerSizeLog2);
__ Daddu(at, at, Operand(context_reg));
- __ ld(cell_reg, ContextOperand(at, 0));
+ __ ld(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ ld(cell_details_reg,
@@ -5353,8 +5311,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot_reg, value_reg);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5479,7 +5436,7 @@ static void CallApiFunctionAndReturn(
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index 2e18f59915..fdaf4c80df 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -141,9 +141,8 @@ class RecordWriteStub: public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
@@ -161,8 +160,8 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(),
- 4 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 4 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index d30bdbb294..022426e7d7 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -18,23 +18,22 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
+byte* fast_exp_mips_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = f12;
@@ -59,11 +58,11 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_mips_machine_code = buffer;
return &fast_exp_simulator;
@@ -72,7 +71,8 @@ UnaryMathFunction CreateExpFunction() {
#if defined(V8_HOST_ARCH_MIPS)
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
@@ -80,11 +80,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
- if (buffer == NULL) return stub;
+ if (buffer == nullptr) return stub;
// This code assumes that cache lines are 32 bytes and if the cache line is
// larger it will not work correctly.
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
Label lastb, unaligned, aligned, chkw,
@@ -598,23 +599,24 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
__ MovFromFloatParameter(f12);
__ sqrt_d(f0, f12);
@@ -625,9 +627,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
@@ -1182,15 +1184,17 @@ static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->Push(ra, fp, cp, a1);
patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
@@ -1236,10 +1240,11 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
diff --git a/deps/v8/src/mips64/codegen-mips64.h b/deps/v8/src/mips64/codegen-mips64.h
index 22784fcf53..ad7abb30c5 100644
--- a/deps/v8/src/mips64/codegen-mips64.h
+++ b/deps/v8/src/mips64/codegen-mips64.h
@@ -7,7 +7,7 @@
#define V8_MIPS_CODEGEN_MIPS_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/mips64/constants-mips64.cc
index efabfe4f26..c0e98eb623 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/mips64/constants-mips64.cc
@@ -126,24 +126,28 @@ int FPURegisters::Number(const char* name) {
// -----------------------------------------------------------------------------
// Instructions.
-bool Instruction::IsForbiddenInBranchDelay() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
+ Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
+ switch (opcode) {
case J:
case JAL:
case BEQ:
case BNE:
- case BLEZ:
- case BGTZ:
+ case BLEZ: // POP06 bgeuc/bleuc, blezalc, bgezalc
+ case BGTZ: // POP07 bltuc/bgtuc, bgtzalc, bltzalc
case BEQL:
case BNEL:
- case BLEZL:
- case BGTZL:
+ case BLEZL: // POP26 bgezc, blezc, bgec/blec
+ case BGTZL: // POP27 bgtzc, bltzc, bltc/bgtc
case BC:
case BALC:
+ case POP10: // beqzalc, bovc, beqc
+ case POP30: // bnezalc, bnvc, bnec
+ case POP66: // beqzc, jic
+ case POP76: // bnezc, jialc
return true;
case REGIMM:
- switch (RtFieldRaw()) {
+ switch (instr & kRtFieldMask) {
case BLTZ:
case BGEZ:
case BLTZAL:
@@ -154,7 +158,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
}
break;
case SPECIAL:
- switch (FunctionFieldRaw()) {
+ switch (instr & kFunctionFieldMask) {
case JR:
case JALR:
return true;
@@ -162,6 +166,17 @@ bool Instruction::IsForbiddenInBranchDelay() const {
return false;
}
break;
+ case COP1:
+ switch (instr & kRsFieldMask) {
+ case BC1:
+ case BC1EQZ:
+ case BC1NEZ:
+ return true;
+ break;
+ default:
+ return false;
+ }
+ break;
default:
return false;
}
@@ -169,8 +184,7 @@ bool Instruction::IsForbiddenInBranchDelay() const {
bool Instruction::IsLinkingInstruction() const {
- const int op = OpcodeFieldRaw();
- switch (op) {
+ switch (OpcodeFieldRaw()) {
case JAL:
return true;
case POP76:
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index d2e0756e95..226e3ed5ba 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -119,8 +119,11 @@ const int kInvalidFPURegister = -1;
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1 << 31);
const uint64_t kFPU64InvalidResult =
static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -128,12 +131,14 @@ const uint32_t kFCSRUnderflowFlagBit = 3;
const uint32_t kFCSROverflowFlagBit = 4;
const uint32_t kFCSRDivideByZeroFlagBit = 5;
const uint32_t kFCSRInvalidOpFlagBit = 6;
+const uint32_t kFCSRNaN2008FlagBit = 18;
const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
kFCSRInexactFlagMask |
@@ -232,6 +237,7 @@ const int kRdShift = 11;
const int kRdBits = 5;
const int kSaShift = 6;
const int kSaBits = 5;
+const int kLsaSaBits = 2;
const int kFunctionShift = 0;
const int kFunctionBits = 6;
const int kLuiShift = 16;
@@ -298,355 +304,366 @@ const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
const int32_t kJalRawMark = 0x00000000;
const int32_t kJRawMark = 0xf0000000;
+const int32_t kJumpRawMask = 0xf0000000;
// ----- MIPS Opcodes and Function Fields.
// We use this presentation to stay close to the table representation in
// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
-enum Opcode {
- SPECIAL = 0 << kOpcodeShift,
- REGIMM = 1 << kOpcodeShift,
-
- J = ((0 << 3) + 2) << kOpcodeShift,
- JAL = ((0 << 3) + 3) << kOpcodeShift,
- BEQ = ((0 << 3) + 4) << kOpcodeShift,
- BNE = ((0 << 3) + 5) << kOpcodeShift,
- BLEZ = ((0 << 3) + 6) << kOpcodeShift,
- BGTZ = ((0 << 3) + 7) << kOpcodeShift,
-
- ADDI = ((1 << 3) + 0) << kOpcodeShift,
- ADDIU = ((1 << 3) + 1) << kOpcodeShift,
- SLTI = ((1 << 3) + 2) << kOpcodeShift,
- SLTIU = ((1 << 3) + 3) << kOpcodeShift,
- ANDI = ((1 << 3) + 4) << kOpcodeShift,
- ORI = ((1 << 3) + 5) << kOpcodeShift,
- XORI = ((1 << 3) + 6) << kOpcodeShift,
- LUI = ((1 << 3) + 7) << kOpcodeShift, // LUI/AUI family.
- DAUI = ((3 << 3) + 5) << kOpcodeShift,
-
- BEQC = ((2 << 3) + 0) << kOpcodeShift,
- COP1 = ((2 << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
- BEQL = ((2 << 3) + 4) << kOpcodeShift,
- BNEL = ((2 << 3) + 5) << kOpcodeShift,
- BLEZL = ((2 << 3) + 6) << kOpcodeShift,
- BGTZL = ((2 << 3) + 7) << kOpcodeShift,
-
- DADDI = ((3 << 3) + 0) << kOpcodeShift, // This is also BNEC.
- DADDIU = ((3 << 3) + 1) << kOpcodeShift,
- LDL = ((3 << 3) + 2) << kOpcodeShift,
- LDR = ((3 << 3) + 3) << kOpcodeShift,
- SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
- SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
-
- LB = ((4 << 3) + 0) << kOpcodeShift,
- LH = ((4 << 3) + 1) << kOpcodeShift,
- LWL = ((4 << 3) + 2) << kOpcodeShift,
- LW = ((4 << 3) + 3) << kOpcodeShift,
- LBU = ((4 << 3) + 4) << kOpcodeShift,
- LHU = ((4 << 3) + 5) << kOpcodeShift,
- LWR = ((4 << 3) + 6) << kOpcodeShift,
- LWU = ((4 << 3) + 7) << kOpcodeShift,
-
- SB = ((5 << 3) + 0) << kOpcodeShift,
- SH = ((5 << 3) + 1) << kOpcodeShift,
- SWL = ((5 << 3) + 2) << kOpcodeShift,
- SW = ((5 << 3) + 3) << kOpcodeShift,
- SDL = ((5 << 3) + 4) << kOpcodeShift,
- SDR = ((5 << 3) + 5) << kOpcodeShift,
- SWR = ((5 << 3) + 6) << kOpcodeShift,
-
- LWC1 = ((6 << 3) + 1) << kOpcodeShift,
- BC = ((6 << 3) + 2) << kOpcodeShift,
- LLD = ((6 << 3) + 4) << kOpcodeShift,
- LDC1 = ((6 << 3) + 5) << kOpcodeShift,
- POP66 = ((6 << 3) + 6) << kOpcodeShift,
- LD = ((6 << 3) + 7) << kOpcodeShift,
-
- PREF = ((6 << 3) + 3) << kOpcodeShift,
-
- SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- BALC = ((7 << 3) + 2) << kOpcodeShift,
- PCREL = ((7 << 3) + 3) << kOpcodeShift,
- SCD = ((7 << 3) + 4) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift,
- POP76 = ((7 << 3) + 6) << kOpcodeShift,
- SD = ((7 << 3) + 7) << kOpcodeShift,
-
- COP1X = ((1 << 4) + 3) << kOpcodeShift
+enum Opcode : uint32_t {
+ SPECIAL = 0U << kOpcodeShift,
+ REGIMM = 1U << kOpcodeShift,
+
+ J = ((0U << 3) + 2) << kOpcodeShift,
+ JAL = ((0U << 3) + 3) << kOpcodeShift,
+ BEQ = ((0U << 3) + 4) << kOpcodeShift,
+ BNE = ((0U << 3) + 5) << kOpcodeShift,
+ BLEZ = ((0U << 3) + 6) << kOpcodeShift,
+ BGTZ = ((0U << 3) + 7) << kOpcodeShift,
+
+ ADDI = ((1U << 3) + 0) << kOpcodeShift,
+ ADDIU = ((1U << 3) + 1) << kOpcodeShift,
+ SLTI = ((1U << 3) + 2) << kOpcodeShift,
+ SLTIU = ((1U << 3) + 3) << kOpcodeShift,
+ ANDI = ((1U << 3) + 4) << kOpcodeShift,
+ ORI = ((1U << 3) + 5) << kOpcodeShift,
+ XORI = ((1U << 3) + 6) << kOpcodeShift,
+ LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family.
+ DAUI = ((3U << 3) + 5) << kOpcodeShift,
+
+ BEQC = ((2U << 3) + 0) << kOpcodeShift,
+ COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class.
+ BEQL = ((2U << 3) + 4) << kOpcodeShift,
+ BNEL = ((2U << 3) + 5) << kOpcodeShift,
+ BLEZL = ((2U << 3) + 6) << kOpcodeShift,
+ BGTZL = ((2U << 3) + 7) << kOpcodeShift,
+
+ DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC.
+ DADDIU = ((3U << 3) + 1) << kOpcodeShift,
+ LDL = ((3U << 3) + 2) << kOpcodeShift,
+ LDR = ((3U << 3) + 3) << kOpcodeShift,
+ SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift,
+
+ LB = ((4U << 3) + 0) << kOpcodeShift,
+ LH = ((4U << 3) + 1) << kOpcodeShift,
+ LWL = ((4U << 3) + 2) << kOpcodeShift,
+ LW = ((4U << 3) + 3) << kOpcodeShift,
+ LBU = ((4U << 3) + 4) << kOpcodeShift,
+ LHU = ((4U << 3) + 5) << kOpcodeShift,
+ LWR = ((4U << 3) + 6) << kOpcodeShift,
+ LWU = ((4U << 3) + 7) << kOpcodeShift,
+
+ SB = ((5U << 3) + 0) << kOpcodeShift,
+ SH = ((5U << 3) + 1) << kOpcodeShift,
+ SWL = ((5U << 3) + 2) << kOpcodeShift,
+ SW = ((5U << 3) + 3) << kOpcodeShift,
+ SDL = ((5U << 3) + 4) << kOpcodeShift,
+ SDR = ((5U << 3) + 5) << kOpcodeShift,
+ SWR = ((5U << 3) + 6) << kOpcodeShift,
+
+ LWC1 = ((6U << 3) + 1) << kOpcodeShift,
+ BC = ((6U << 3) + 2) << kOpcodeShift,
+ LLD = ((6U << 3) + 4) << kOpcodeShift,
+ LDC1 = ((6U << 3) + 5) << kOpcodeShift,
+ POP66 = ((6U << 3) + 6) << kOpcodeShift,
+ LD = ((6U << 3) + 7) << kOpcodeShift,
+
+ PREF = ((6U << 3) + 3) << kOpcodeShift,
+
+ SWC1 = ((7U << 3) + 1) << kOpcodeShift,
+ BALC = ((7U << 3) + 2) << kOpcodeShift,
+ PCREL = ((7U << 3) + 3) << kOpcodeShift,
+ SCD = ((7U << 3) + 4) << kOpcodeShift,
+ SDC1 = ((7U << 3) + 5) << kOpcodeShift,
+ POP76 = ((7U << 3) + 6) << kOpcodeShift,
+ SD = ((7U << 3) + 7) << kOpcodeShift,
+
+ COP1X = ((1U << 4) + 3) << kOpcodeShift,
+
+ // New r6 instruction.
+ POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc
+ POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc
+ POP10 = ADDI, // beqzalc, bovc, beqc
+ POP26 = BLEZL, // bgezc, blezc, bgec/blec
+ POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc
+ POP30 = DADDI, // bnezalc, bnvc, bnec
};
-enum SecondaryField {
+enum SecondaryField : uint32_t {
// SPECIAL Encoding of Function Field.
- SLL = ((0 << 3) + 0),
- MOVCI = ((0 << 3) + 1),
- SRL = ((0 << 3) + 2),
- SRA = ((0 << 3) + 3),
- SLLV = ((0 << 3) + 4),
- SRLV = ((0 << 3) + 6),
- SRAV = ((0 << 3) + 7),
-
- JR = ((1 << 3) + 0),
- JALR = ((1 << 3) + 1),
- MOVZ = ((1 << 3) + 2),
- MOVN = ((1 << 3) + 3),
- BREAK = ((1 << 3) + 5),
-
- MFHI = ((2 << 3) + 0),
- CLZ_R6 = ((2 << 3) + 0),
- CLO_R6 = ((2 << 3) + 1),
- MFLO = ((2 << 3) + 2),
- DCLZ_R6 = ((2 << 3) + 2),
- DCLO_R6 = ((2 << 3) + 3),
- DSLLV = ((2 << 3) + 4),
- DSRLV = ((2 << 3) + 6),
- DSRAV = ((2 << 3) + 7),
-
- MULT = ((3 << 3) + 0),
- MULTU = ((3 << 3) + 1),
- DIV = ((3 << 3) + 2),
- DIVU = ((3 << 3) + 3),
- DMULT = ((3 << 3) + 4),
- DMULTU = ((3 << 3) + 5),
- DDIV = ((3 << 3) + 6),
- DDIVU = ((3 << 3) + 7),
-
- ADD = ((4 << 3) + 0),
- ADDU = ((4 << 3) + 1),
- SUB = ((4 << 3) + 2),
- SUBU = ((4 << 3) + 3),
- AND = ((4 << 3) + 4),
- OR = ((4 << 3) + 5),
- XOR = ((4 << 3) + 6),
- NOR = ((4 << 3) + 7),
-
- SLT = ((5 << 3) + 2),
- SLTU = ((5 << 3) + 3),
- DADD = ((5 << 3) + 4),
- DADDU = ((5 << 3) + 5),
- DSUB = ((5 << 3) + 6),
- DSUBU = ((5 << 3) + 7),
-
- TGE = ((6 << 3) + 0),
- TGEU = ((6 << 3) + 1),
- TLT = ((6 << 3) + 2),
- TLTU = ((6 << 3) + 3),
- TEQ = ((6 << 3) + 4),
- SELEQZ_S = ((6 << 3) + 5),
- TNE = ((6 << 3) + 6),
- SELNEZ_S = ((6 << 3) + 7),
-
- DSLL = ((7 << 3) + 0),
- DSRL = ((7 << 3) + 2),
- DSRA = ((7 << 3) + 3),
- DSLL32 = ((7 << 3) + 4),
- DSRL32 = ((7 << 3) + 6),
- DSRA32 = ((7 << 3) + 7),
+ SLL = ((0U << 3) + 0),
+ MOVCI = ((0U << 3) + 1),
+ SRL = ((0U << 3) + 2),
+ SRA = ((0U << 3) + 3),
+ SLLV = ((0U << 3) + 4),
+ LSA = ((0U << 3) + 5),
+ SRLV = ((0U << 3) + 6),
+ SRAV = ((0U << 3) + 7),
+
+ JR = ((1U << 3) + 0),
+ JALR = ((1U << 3) + 1),
+ MOVZ = ((1U << 3) + 2),
+ MOVN = ((1U << 3) + 3),
+ BREAK = ((1U << 3) + 5),
+
+ MFHI = ((2U << 3) + 0),
+ CLZ_R6 = ((2U << 3) + 0),
+ CLO_R6 = ((2U << 3) + 1),
+ MFLO = ((2U << 3) + 2),
+ DCLZ_R6 = ((2U << 3) + 2),
+ DCLO_R6 = ((2U << 3) + 3),
+ DSLLV = ((2U << 3) + 4),
+ DLSA = ((2U << 3) + 5),
+ DSRLV = ((2U << 3) + 6),
+ DSRAV = ((2U << 3) + 7),
+
+ MULT = ((3U << 3) + 0),
+ MULTU = ((3U << 3) + 1),
+ DIV = ((3U << 3) + 2),
+ DIVU = ((3U << 3) + 3),
+ DMULT = ((3U << 3) + 4),
+ DMULTU = ((3U << 3) + 5),
+ DDIV = ((3U << 3) + 6),
+ DDIVU = ((3U << 3) + 7),
+
+ ADD = ((4U << 3) + 0),
+ ADDU = ((4U << 3) + 1),
+ SUB = ((4U << 3) + 2),
+ SUBU = ((4U << 3) + 3),
+ AND = ((4U << 3) + 4),
+ OR = ((4U << 3) + 5),
+ XOR = ((4U << 3) + 6),
+ NOR = ((4U << 3) + 7),
+
+ SLT = ((5U << 3) + 2),
+ SLTU = ((5U << 3) + 3),
+ DADD = ((5U << 3) + 4),
+ DADDU = ((5U << 3) + 5),
+ DSUB = ((5U << 3) + 6),
+ DSUBU = ((5U << 3) + 7),
+
+ TGE = ((6U << 3) + 0),
+ TGEU = ((6U << 3) + 1),
+ TLT = ((6U << 3) + 2),
+ TLTU = ((6U << 3) + 3),
+ TEQ = ((6U << 3) + 4),
+ SELEQZ_S = ((6U << 3) + 5),
+ TNE = ((6U << 3) + 6),
+ SELNEZ_S = ((6U << 3) + 7),
+
+ DSLL = ((7U << 3) + 0),
+ DSRL = ((7U << 3) + 2),
+ DSRA = ((7U << 3) + 3),
+ DSLL32 = ((7U << 3) + 4),
+ DSRL32 = ((7U << 3) + 6),
+ DSRA32 = ((7U << 3) + 7),
// Multiply integers in r6.
- MUL_MUH = ((3 << 3) + 0), // MUL, MUH.
- MUL_MUH_U = ((3 << 3) + 1), // MUL_U, MUH_U.
- D_MUL_MUH = ((7 << 2) + 0), // DMUL, DMUH.
- D_MUL_MUH_U = ((7 << 2) + 1), // DMUL_U, DMUH_U.
- RINT = ((3 << 3) + 2),
-
- MUL_OP = ((0 << 3) + 2),
- MUH_OP = ((0 << 3) + 3),
- DIV_OP = ((0 << 3) + 2),
- MOD_OP = ((0 << 3) + 3),
-
- DIV_MOD = ((3 << 3) + 2),
- DIV_MOD_U = ((3 << 3) + 3),
- D_DIV_MOD = ((3 << 3) + 6),
- D_DIV_MOD_U = ((3 << 3) + 7),
+ MUL_MUH = ((3U << 3) + 0), // MUL, MUH.
+ MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U.
+ D_MUL_MUH = ((7U << 2) + 0), // DMUL, DMUH.
+ D_MUL_MUH_U = ((7U << 2) + 1), // DMUL_U, DMUH_U.
+ RINT = ((3U << 3) + 2),
+
+ MUL_OP = ((0U << 3) + 2),
+ MUH_OP = ((0U << 3) + 3),
+ DIV_OP = ((0U << 3) + 2),
+ MOD_OP = ((0U << 3) + 3),
+
+ DIV_MOD = ((3U << 3) + 2),
+ DIV_MOD_U = ((3U << 3) + 3),
+ D_DIV_MOD = ((3U << 3) + 6),
+ D_DIV_MOD_U = ((3U << 3) + 7),
// drotr in special4?
// SPECIAL2 Encoding of Function Field.
- MUL = ((0 << 3) + 2),
- CLZ = ((4 << 3) + 0),
- CLO = ((4 << 3) + 1),
- DCLZ = ((4 << 3) + 4),
- DCLO = ((4 << 3) + 5),
+ MUL = ((0U << 3) + 2),
+ CLZ = ((4U << 3) + 0),
+ CLO = ((4U << 3) + 1),
+ DCLZ = ((4U << 3) + 4),
+ DCLO = ((4U << 3) + 5),
// SPECIAL3 Encoding of Function Field.
- EXT = ((0 << 3) + 0),
- DEXTM = ((0 << 3) + 1),
- DEXTU = ((0 << 3) + 2),
- DEXT = ((0 << 3) + 3),
- INS = ((0 << 3) + 4),
- DINSM = ((0 << 3) + 5),
- DINSU = ((0 << 3) + 6),
- DINS = ((0 << 3) + 7),
-
- BSHFL = ((4 << 3) + 0),
- DBSHFL = ((4 << 3) + 4),
+ EXT = ((0U << 3) + 0),
+ DEXTM = ((0U << 3) + 1),
+ DEXTU = ((0U << 3) + 2),
+ DEXT = ((0U << 3) + 3),
+ INS = ((0U << 3) + 4),
+ DINSM = ((0U << 3) + 5),
+ DINSU = ((0U << 3) + 6),
+ DINS = ((0U << 3) + 7),
+
+ BSHFL = ((4U << 3) + 0),
+ DBSHFL = ((4U << 3) + 4),
// SPECIAL3 Encoding of sa Field.
- BITSWAP = ((0 << 3) + 0),
- ALIGN = ((0 << 3) + 2),
- WSBH = ((0 << 3) + 2),
- SEB = ((2 << 3) + 0),
- SEH = ((3 << 3) + 0),
-
- DBITSWAP = ((0 << 3) + 0),
- DALIGN = ((0 << 3) + 1),
- DBITSWAP_SA = ((0 << 3) + 0) << kSaShift,
- DSBH = ((0 << 3) + 2),
- DSHD = ((0 << 3) + 5),
+ BITSWAP = ((0U << 3) + 0),
+ ALIGN = ((0U << 3) + 2),
+ WSBH = ((0U << 3) + 2),
+ SEB = ((2U << 3) + 0),
+ SEH = ((3U << 3) + 0),
+
+ DBITSWAP = ((0U << 3) + 0),
+ DALIGN = ((0U << 3) + 1),
+ DBITSWAP_SA = ((0U << 3) + 0) << kSaShift,
+ DSBH = ((0U << 3) + 2),
+ DSHD = ((0U << 3) + 5),
// REGIMM encoding of rt Field.
- BLTZ = ((0 << 3) + 0) << 16,
- BGEZ = ((0 << 3) + 1) << 16,
- BLTZAL = ((2 << 3) + 0) << 16,
- BGEZAL = ((2 << 3) + 1) << 16,
- BGEZALL = ((2 << 3) + 3) << 16,
- DAHI = ((0 << 3) + 6) << 16,
- DATI = ((3 << 3) + 6) << 16,
+ BLTZ = ((0U << 3) + 0) << 16,
+ BGEZ = ((0U << 3) + 1) << 16,
+ BLTZAL = ((2U << 3) + 0) << 16,
+ BGEZAL = ((2U << 3) + 1) << 16,
+ BGEZALL = ((2U << 3) + 3) << 16,
+ DAHI = ((0U << 3) + 6) << 16,
+ DATI = ((3U << 3) + 6) << 16,
// COP1 Encoding of rs Field.
- MFC1 = ((0 << 3) + 0) << 21,
- DMFC1 = ((0 << 3) + 1) << 21,
- CFC1 = ((0 << 3) + 2) << 21,
- MFHC1 = ((0 << 3) + 3) << 21,
- MTC1 = ((0 << 3) + 4) << 21,
- DMTC1 = ((0 << 3) + 5) << 21,
- CTC1 = ((0 << 3) + 6) << 21,
- MTHC1 = ((0 << 3) + 7) << 21,
- BC1 = ((1 << 3) + 0) << 21,
- S = ((2 << 3) + 0) << 21,
- D = ((2 << 3) + 1) << 21,
- W = ((2 << 3) + 4) << 21,
- L = ((2 << 3) + 5) << 21,
- PS = ((2 << 3) + 6) << 21,
+ MFC1 = ((0U << 3) + 0) << 21,
+ DMFC1 = ((0U << 3) + 1) << 21,
+ CFC1 = ((0U << 3) + 2) << 21,
+ MFHC1 = ((0U << 3) + 3) << 21,
+ MTC1 = ((0U << 3) + 4) << 21,
+ DMTC1 = ((0U << 3) + 5) << 21,
+ CTC1 = ((0U << 3) + 6) << 21,
+ MTHC1 = ((0U << 3) + 7) << 21,
+ BC1 = ((1U << 3) + 0) << 21,
+ S = ((2U << 3) + 0) << 21,
+ D = ((2U << 3) + 1) << 21,
+ W = ((2U << 3) + 4) << 21,
+ L = ((2U << 3) + 5) << 21,
+ PS = ((2U << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
- ADD_S = ((0 << 3) + 0),
- SUB_S = ((0 << 3) + 1),
- MUL_S = ((0 << 3) + 2),
- DIV_S = ((0 << 3) + 3),
- ABS_S = ((0 << 3) + 5),
- SQRT_S = ((0 << 3) + 4),
- MOV_S = ((0 << 3) + 6),
- NEG_S = ((0 << 3) + 7),
- ROUND_L_S = ((1 << 3) + 0),
- TRUNC_L_S = ((1 << 3) + 1),
- CEIL_L_S = ((1 << 3) + 2),
- FLOOR_L_S = ((1 << 3) + 3),
- ROUND_W_S = ((1 << 3) + 4),
- TRUNC_W_S = ((1 << 3) + 5),
- CEIL_W_S = ((1 << 3) + 6),
- FLOOR_W_S = ((1 << 3) + 7),
- RECIP_S = ((2 << 3) + 5),
- RSQRT_S = ((2 << 3) + 6),
- CLASS_S = ((3 << 3) + 3),
- CVT_D_S = ((4 << 3) + 1),
- CVT_W_S = ((4 << 3) + 4),
- CVT_L_S = ((4 << 3) + 5),
- CVT_PS_S = ((4 << 3) + 6),
+ ADD_S = ((0U << 3) + 0),
+ SUB_S = ((0U << 3) + 1),
+ MUL_S = ((0U << 3) + 2),
+ DIV_S = ((0U << 3) + 3),
+ ABS_S = ((0U << 3) + 5),
+ SQRT_S = ((0U << 3) + 4),
+ MOV_S = ((0U << 3) + 6),
+ NEG_S = ((0U << 3) + 7),
+ ROUND_L_S = ((1U << 3) + 0),
+ TRUNC_L_S = ((1U << 3) + 1),
+ CEIL_L_S = ((1U << 3) + 2),
+ FLOOR_L_S = ((1U << 3) + 3),
+ ROUND_W_S = ((1U << 3) + 4),
+ TRUNC_W_S = ((1U << 3) + 5),
+ CEIL_W_S = ((1U << 3) + 6),
+ FLOOR_W_S = ((1U << 3) + 7),
+ RECIP_S = ((2U << 3) + 5),
+ RSQRT_S = ((2U << 3) + 6),
+ CLASS_S = ((3U << 3) + 3),
+ CVT_D_S = ((4U << 3) + 1),
+ CVT_W_S = ((4U << 3) + 4),
+ CVT_L_S = ((4U << 3) + 5),
+ CVT_PS_S = ((4U << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
- ADD_D = ((0 << 3) + 0),
- SUB_D = ((0 << 3) + 1),
- MUL_D = ((0 << 3) + 2),
- DIV_D = ((0 << 3) + 3),
- SQRT_D = ((0 << 3) + 4),
- ABS_D = ((0 << 3) + 5),
- MOV_D = ((0 << 3) + 6),
- NEG_D = ((0 << 3) + 7),
- ROUND_L_D = ((1 << 3) + 0),
- TRUNC_L_D = ((1 << 3) + 1),
- CEIL_L_D = ((1 << 3) + 2),
- FLOOR_L_D = ((1 << 3) + 3),
- ROUND_W_D = ((1 << 3) + 4),
- TRUNC_W_D = ((1 << 3) + 5),
- CEIL_W_D = ((1 << 3) + 6),
- FLOOR_W_D = ((1 << 3) + 7),
- RECIP_D = ((2 << 3) + 5),
- RSQRT_D = ((2 << 3) + 6),
- CLASS_D = ((3 << 3) + 3),
- MIN = ((3 << 3) + 4),
- MINA = ((3 << 3) + 5),
- MAX = ((3 << 3) + 6),
- MAXA = ((3 << 3) + 7),
- CVT_S_D = ((4 << 3) + 0),
- CVT_W_D = ((4 << 3) + 4),
- CVT_L_D = ((4 << 3) + 5),
- C_F_D = ((6 << 3) + 0),
- C_UN_D = ((6 << 3) + 1),
- C_EQ_D = ((6 << 3) + 2),
- C_UEQ_D = ((6 << 3) + 3),
- C_OLT_D = ((6 << 3) + 4),
- C_ULT_D = ((6 << 3) + 5),
- C_OLE_D = ((6 << 3) + 6),
- C_ULE_D = ((6 << 3) + 7),
+ ADD_D = ((0U << 3) + 0),
+ SUB_D = ((0U << 3) + 1),
+ MUL_D = ((0U << 3) + 2),
+ DIV_D = ((0U << 3) + 3),
+ SQRT_D = ((0U << 3) + 4),
+ ABS_D = ((0U << 3) + 5),
+ MOV_D = ((0U << 3) + 6),
+ NEG_D = ((0U << 3) + 7),
+ ROUND_L_D = ((1U << 3) + 0),
+ TRUNC_L_D = ((1U << 3) + 1),
+ CEIL_L_D = ((1U << 3) + 2),
+ FLOOR_L_D = ((1U << 3) + 3),
+ ROUND_W_D = ((1U << 3) + 4),
+ TRUNC_W_D = ((1U << 3) + 5),
+ CEIL_W_D = ((1U << 3) + 6),
+ FLOOR_W_D = ((1U << 3) + 7),
+ RECIP_D = ((2U << 3) + 5),
+ RSQRT_D = ((2U << 3) + 6),
+ CLASS_D = ((3U << 3) + 3),
+ MIN = ((3U << 3) + 4),
+ MINA = ((3U << 3) + 5),
+ MAX = ((3U << 3) + 6),
+ MAXA = ((3U << 3) + 7),
+ CVT_S_D = ((4U << 3) + 0),
+ CVT_W_D = ((4U << 3) + 4),
+ CVT_L_D = ((4U << 3) + 5),
+ C_F_D = ((6U << 3) + 0),
+ C_UN_D = ((6U << 3) + 1),
+ C_EQ_D = ((6U << 3) + 2),
+ C_UEQ_D = ((6U << 3) + 3),
+ C_OLT_D = ((6U << 3) + 4),
+ C_ULT_D = ((6U << 3) + 5),
+ C_OLE_D = ((6U << 3) + 6),
+ C_ULE_D = ((6U << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
- CVT_S_W = ((4 << 3) + 0),
- CVT_D_W = ((4 << 3) + 1),
- CVT_S_L = ((4 << 3) + 0),
- CVT_D_L = ((4 << 3) + 1),
- BC1EQZ = ((2 << 2) + 1) << 21,
- BC1NEZ = ((3 << 2) + 1) << 21,
+ CVT_S_W = ((4U << 3) + 0),
+ CVT_D_W = ((4U << 3) + 1),
+ CVT_S_L = ((4U << 3) + 0),
+ CVT_D_L = ((4U << 3) + 1),
+ BC1EQZ = ((2U << 2) + 1) << 21,
+ BC1NEZ = ((3U << 2) + 1) << 21,
// COP1 CMP positive predicates Bit 5..4 = 00.
- CMP_AF = ((0 << 3) + 0),
- CMP_UN = ((0 << 3) + 1),
- CMP_EQ = ((0 << 3) + 2),
- CMP_UEQ = ((0 << 3) + 3),
- CMP_LT = ((0 << 3) + 4),
- CMP_ULT = ((0 << 3) + 5),
- CMP_LE = ((0 << 3) + 6),
- CMP_ULE = ((0 << 3) + 7),
- CMP_SAF = ((1 << 3) + 0),
- CMP_SUN = ((1 << 3) + 1),
- CMP_SEQ = ((1 << 3) + 2),
- CMP_SUEQ = ((1 << 3) + 3),
- CMP_SSLT = ((1 << 3) + 4),
- CMP_SSULT = ((1 << 3) + 5),
- CMP_SLE = ((1 << 3) + 6),
- CMP_SULE = ((1 << 3) + 7),
+ CMP_AF = ((0U << 3) + 0),
+ CMP_UN = ((0U << 3) + 1),
+ CMP_EQ = ((0U << 3) + 2),
+ CMP_UEQ = ((0U << 3) + 3),
+ CMP_LT = ((0U << 3) + 4),
+ CMP_ULT = ((0U << 3) + 5),
+ CMP_LE = ((0U << 3) + 6),
+ CMP_ULE = ((0U << 3) + 7),
+ CMP_SAF = ((1U << 3) + 0),
+ CMP_SUN = ((1U << 3) + 1),
+ CMP_SEQ = ((1U << 3) + 2),
+ CMP_SUEQ = ((1U << 3) + 3),
+ CMP_SSLT = ((1U << 3) + 4),
+ CMP_SSULT = ((1U << 3) + 5),
+ CMP_SLE = ((1U << 3) + 6),
+ CMP_SULE = ((1U << 3) + 7),
// COP1 CMP negative predicates Bit 5..4 = 01.
- CMP_AT = ((2 << 3) + 0), // Reserved, not implemented.
- CMP_OR = ((2 << 3) + 1),
- CMP_UNE = ((2 << 3) + 2),
- CMP_NE = ((2 << 3) + 3),
- CMP_UGE = ((2 << 3) + 4), // Reserved, not implemented.
- CMP_OGE = ((2 << 3) + 5), // Reserved, not implemented.
- CMP_UGT = ((2 << 3) + 6), // Reserved, not implemented.
- CMP_OGT = ((2 << 3) + 7), // Reserved, not implemented.
- CMP_SAT = ((3 << 3) + 0), // Reserved, not implemented.
- CMP_SOR = ((3 << 3) + 1),
- CMP_SUNE = ((3 << 3) + 2),
- CMP_SNE = ((3 << 3) + 3),
- CMP_SUGE = ((3 << 3) + 4), // Reserved, not implemented.
- CMP_SOGE = ((3 << 3) + 5), // Reserved, not implemented.
- CMP_SUGT = ((3 << 3) + 6), // Reserved, not implemented.
- CMP_SOGT = ((3 << 3) + 7), // Reserved, not implemented.
-
- SEL = ((2 << 3) + 0),
- MOVF = ((2 << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
- MOVZ_C = ((2 << 3) + 2), // COP1 on FPR registers.
- MOVN_C = ((2 << 3) + 3), // COP1 on FPR registers.
- SELEQZ_C = ((2 << 3) + 4), // COP1 on FPR registers.
- SELNEZ_C = ((2 << 3) + 7), // COP1 on FPR registers.
+ CMP_AT = ((2U << 3) + 0), // Reserved, not implemented.
+ CMP_OR = ((2U << 3) + 1),
+ CMP_UNE = ((2U << 3) + 2),
+ CMP_NE = ((2U << 3) + 3),
+ CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented.
+ CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented.
+ CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented.
+ CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented.
+ CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented.
+ CMP_SOR = ((3U << 3) + 1),
+ CMP_SUNE = ((3U << 3) + 2),
+ CMP_SNE = ((3U << 3) + 3),
+ CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented.
+ CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented.
+ CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented.
+ CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented.
+
+ SEL = ((2U << 3) + 0),
+ MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt
+ MOVZ_C = ((2U << 3) + 2), // COP1 on FPR registers.
+ MOVN_C = ((2U << 3) + 3), // COP1 on FPR registers.
+ SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers.
+ SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers.
// COP1 Encoding of Function Field When rs=PS.
// COP1X Encoding of Function Field.
- MADD_D = ((4 << 3) + 1),
+ MADD_D = ((4U << 3) + 1),
// PCREL Encoding of rt Field.
- ADDIUPC = ((0 << 2) + 0),
- LWPC = ((0 << 2) + 1),
- LWUPC = ((0 << 2) + 2),
- LDPC = ((0 << 3) + 6),
- // reserved ((1 << 3) + 6),
- AUIPC = ((3 << 3) + 6),
- ALUIPC = ((3 << 3) + 7),
+ ADDIUPC = ((0U << 2) + 0),
+ LWPC = ((0U << 2) + 1),
+ LWUPC = ((0U << 2) + 2),
+ LDPC = ((0U << 3) + 6),
+ // reserved ((1U << 3) + 6),
+ AUIPC = ((3U << 3) + 6),
+ ALUIPC = ((3U << 3) + 7),
// POP66 Encoding of rs Field.
- JIC = ((0 << 5) + 0),
+ JIC = ((0U << 5) + 0),
// POP76 Encoding of rs Field.
- JIALC = ((0 << 5) + 0),
+ JIALC = ((0U << 5) + 0),
- NULLSF = 0
+ NULLSF = 0U
};
@@ -808,7 +825,12 @@ enum FPURoundingMode {
kRoundToNearest = RN,
kRoundToZero = RZ,
kRoundToPlusInf = RP,
- kRoundToMinusInf = RM
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
};
const uint32_t kFPURoundingModeMask = 3 << 0;
@@ -864,6 +886,11 @@ const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
+static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) {
+ return 1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift);
+}
+
+
class Instruction {
public:
enum {
@@ -891,7 +918,7 @@ class Instruction {
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
- return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
// Instruction type.
@@ -905,10 +932,7 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
-#define OpcodeToBitNumber(opcode) \
- (1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift))
-
- static const uint64_t kOpcodeImmediateTypeMask =
+ static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) |
@@ -927,7 +951,7 @@ class Instruction {
OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
- OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
+ OpcodeToBitNumber(DAUI) | OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
#define FunctionFieldToBitNumber(function) (1ULL << function)
@@ -942,6 +966,7 @@ class Instruction {
FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(DSLLV) |
FunctionFieldToBitNumber(SRLV) | FunctionFieldToBitNumber(DSRLV) |
FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(DSRAV) |
+ FunctionFieldToBitNumber(LSA) | FunctionFieldToBitNumber(DLSA) |
FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) |
FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(DMULT) |
FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DMULTU) |
@@ -994,6 +1019,11 @@ class Instruction {
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
+ inline int LsaSaValue() const {
+ DCHECK(InstructionType() == kRegisterType);
+ return Bits(kSaShift + kLsaSaBits - 1, kSaShift);
+ }
+
inline int FunctionValue() const {
DCHECK(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@@ -1092,6 +1122,11 @@ class Instruction {
}
}
+ inline int32_t ImmValue(int bits) const {
+ DCHECK(InstructionType() == kImmediateType);
+ return Bits(bits - 1, 0);
+ }
+
inline int32_t Imm16Value() const {
DCHECK(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
@@ -1118,8 +1153,14 @@ class Instruction {
return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
}
- // Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay() const;
+ static bool IsForbiddenAfterBranchInstr(Instr instr);
+
+ // Say if the instruction should not be used in a branch delay slot or
+ // immediately after a compact branch.
+ inline bool IsForbiddenAfterBranch() const {
+ return IsForbiddenAfterBranchInstr(InstructionBits());
+ }
+
// Say if the instruction 'links'. e.g. jal, bal.
bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
@@ -1187,6 +1228,8 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
case INS:
case EXT:
case DEXT:
+ case DEXTM:
+ case DEXTU:
return kRegisterType;
case BSHFL: {
int sa = SaFieldRaw() >> kSaShift;
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/mips64/cpu-mips64.cc
index 6c24fd06a9..ab9cf69620 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/mips64/cpu-mips64.cc
@@ -23,33 +23,26 @@ namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
+#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
-#if !defined (USE_SIMULATOR)
#if defined(ANDROID) && !defined(__LP64__)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
cacheflush(
reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
#else // ANDROID
- int res;
+ long res; // NOLINT(runtime/int)
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
#endif // ANDROID
-#else // USE_SIMULATOR.
- // Not generating mips instructions for C-code. This means that we are
- // building a mips emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#endif // USE_SIMULATOR.
+#endif // !USE_SIMULATOR.
}
} // namespace internal
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 24e690dfb3..8daba04ac7 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -38,14 +38,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->break_(0xCC);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->break_(0xCC);
}
}
@@ -66,7 +67,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index a8fd48e6a2..3d0e10c20a 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -67,6 +67,7 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -74,6 +75,7 @@ class Decoder {
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintLsaSa(Instruction* instr);
void PrintSd(Instruction* instr);
void PrintSs1(Instruction* instr);
void PrintSs2(Instruction* instr);
@@ -90,7 +92,7 @@ class Decoder {
void PrintXImm19(Instruction* instr);
void PrintSImm19(Instruction* instr);
void PrintXImm21(Instruction* instr);
-
+ void PrintSImm21(Instruction* instr);
void PrintPCImm21(Instruction* instr, int delta_pc, int n_bits);
void PrintXImm26(Instruction* instr);
void PrintSImm26(Instruction* instr);
@@ -190,6 +192,17 @@ void Decoder::PrintFPURegister(int freg) {
}
+void Decoder::PrintFPUStatusRegister(int freg) {
+ switch (freg) {
+ case kFCSRRegister:
+ Print("FCSR");
+ break;
+ default:
+ Print(converter_.NameOfXMMRegister(freg));
+ }
+}
+
+
void Decoder::PrintFs(Instruction* instr) {
int freg = instr->RsValue();
PrintFPURegister(freg);
@@ -215,6 +228,13 @@ void Decoder::PrintSa(Instruction* instr) {
}
+// Print the integer value of the sa field of a lsa instruction.
+void Decoder::PrintLsaSa(Instruction* instr) {
+ int sa = instr->LsaSaValue() + 1;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
@@ -325,6 +345,16 @@ void Decoder::PrintXImm21(Instruction* instr) {
}
+// Print 21-bit signed immediate value.
+void Decoder::PrintSImm21(Instruction* instr) {
+ int32_t imm21 = instr->Imm21Value();
+ // set sign
+ imm21 <<= (32 - kImm21Bits);
+ imm21 >>= (32 - kImm21Bits);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21);
+}
+
+
// Print absoulte address for 21-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -481,22 +511,42 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'f');
- if (format[1] == 's') { // 'fs: fs register.
- int reg = instr->FsValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 't') { // 'ft: ft register.
- int reg = instr->FtValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'd') { // 'fd: fd register.
- int reg = instr->FdValue();
- PrintFPURegister(reg);
- return 2;
- } else if (format[1] == 'r') { // 'fr: fr register.
- int reg = instr->FrValue();
- PrintFPURegister(reg);
- return 2;
+ if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPUStatusRegister(reg);
+ return 2;
+ }
+ } else {
+ if (format[1] == 's') { // 'fs: fs register.
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 't') { // 'ft: ft register.
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
}
UNREACHABLE();
return -1;
@@ -582,6 +632,10 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
} else if (format[3] == '2' && format[4] == '1') {
DCHECK(STRING_STARTS_WITH(format, "imm21"));
switch (format[5]) {
+ case 's':
+ DCHECK(STRING_STARTS_WITH(format, "imm21s"));
+ PrintSImm21(instr);
+ break;
case 'x':
DCHECK(STRING_STARTS_WITH(format, "imm21x"));
PrintXImm21(instr);
@@ -652,11 +706,17 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': { // 'sa.
switch (format[1]) {
- case 'a': {
- DCHECK(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
- return 2;
- }
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2
+ PrintLsaSa(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1166,6 +1226,12 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
case DSRAV:
Format(instr, "dsrav 'rd, 'rt, 'rs");
break;
+ case LSA:
+ Format(instr, "lsa 'rd, 'rt, 'rs, 'sa2");
+ break;
+ case DLSA:
+ Format(instr, "dlsa 'rd, 'rt, 'rs, 'sa2");
+ break;
case MFHI:
if (instr->Bits(25, 16) == 0) {
Format(instr, "mfhi 'rd");
@@ -1535,10 +1601,10 @@ void Decoder::DecodeTypeImmediateREGIMM(Instruction* instr) {
Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2");
break;
case DAHI:
- Format(instr, "dahi 'rs, 'imm16u");
+ Format(instr, "dahi 'rs, 'imm16x");
break;
case DATI:
- Format(instr, "dati 'rs, 'imm16u");
+ Format(instr, "dati 'rs, 'imm16x");
break;
default:
UNREACHABLE();
@@ -1573,12 +1639,12 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() == instr->RsValue()) &&
(instr->RtValue() != 0)) {
- Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
- Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2");
} else {
UNREACHABLE();
}
@@ -1615,7 +1681,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RtValue() != instr->RsValue()) &&
(instr->RsValue() != 0) && (instr->RtValue() != 0)) {
- Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
+ Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2");
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) {
Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2");
} else {
@@ -1626,14 +1692,14 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (instr->RsValue() == JIC) {
Format(instr, "jic 'rt, 'imm16s");
} else {
- Format(instr, "beqzc 'rs, 'imm21x -> 'imm21p4s2");
+ Format(instr, "beqzc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
case POP76:
if (instr->RsValue() == JIALC) {
- Format(instr, "jialc 'rt, 'imm16x");
+ Format(instr, "jialc 'rt, 'imm16s");
} else {
- Format(instr, "bnezc 'rs, 'imm21x -> 'imm21p4s2");
+ Format(instr, "bnezc 'rs, 'imm21s -> 'imm21p4s2");
}
break;
// ------------- Arithmetic instructions.
@@ -1641,13 +1707,18 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (kArchVariant != kMips64r6) {
Format(instr, "addi 'rt, 'rs, 'imm16s");
} else {
- // Check if BOVC or BEQC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BOVC, BEQZALC or BEQC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ DCHECK(rt_reg > 0);
+ if (rs_reg == 0) {
+ Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
@@ -1655,13 +1726,18 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (kArchVariant != kMips64r6) {
Format(instr, "daddi 'rt, 'rs, 'imm16s");
} else {
- // Check if BNVC or BNEC instruction.
- if (instr->RsValue() >= instr->RtValue()) {
+ int rs_reg = instr->RsValue();
+ int rt_reg = instr->RtValue();
+ // Check if BNVC, BNEZALC or BNEC instruction.
+ if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
- } else if (instr->RsValue() < instr->RtValue()) {
- Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
- UNREACHABLE();
+ DCHECK(rt_reg > 0);
+ if (rs_reg == 0) {
+ Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
+ } else {
+ Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2");
+ }
}
}
break;
@@ -1691,14 +1767,14 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "lui 'rt, 'imm16x");
} else {
if (instr->RsValue() != 0) {
- Format(instr, "aui 'rt, 'imm16x");
+ Format(instr, "aui 'rt, 'rs, 'imm16x");
} else {
Format(instr, "lui 'rt, 'imm16x");
}
}
break;
case DAUI:
- Format(instr, "daui 'rt, 'imm16x");
+ Format(instr, "daui 'rt, 'rs, 'imm16x");
break;
// ------------- Memory instructions.
case LB:
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index cf8a4456f3..c5c1311d94 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::parameter_count() { return a2; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return a4; }
+
+
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -125,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a3, a2, a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a3, a2, a1};
@@ -187,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// a1 : the function to call
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
- // a4 : original constructor (for IsSuperConstructorCall)
+ // a4 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {a0, a1, a4, a2};
@@ -204,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ Register registers[] = {a1, a3, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ Register registers[] = {a1, a3, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a1, a0};
@@ -342,6 +375,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a1, // JSFunction
+ a3, // the new target
a0, // actual number of arguments
a2, // expected number of arguments
};
@@ -374,27 +408,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // math rounding function
- a3, // vector slot id
- a2, // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -410,7 +423,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
a0, // argument count (not including receiver)
- a3, // original constructor
+ a3, // new target
a1, // constructor to call
a2 // address of the first argument
};
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 3cad6ba82f..7b73ac74e4 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -17,12 +17,13 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
has_double_zero_reg_set_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -436,10 +437,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- ld(scratch, FieldMemOperand(scratch, offset));
- ld(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1190,6 +1188,32 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
}
+void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (kArchVariant == kMips64r6 && sa <= 4) {
+ lsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ sll(tmp, rs, sa);
+ Addu(rd, rt, tmp);
+ }
+}
+
+
+void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
+ Register scratch) {
+ if (kArchVariant == kMips64r6 && sa <= 4) {
+ dlsa(rd, rt, rs, sa);
+ } else {
+ Register tmp = rd.is(rt) ? scratch : rd;
+ DCHECK(!tmp.is(rt));
+ dsll(tmp, rs, sa);
+ Daddu(rd, rt, tmp);
+ }
+}
+
+
// ------------Pseudo-instructions-------------
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
@@ -1483,6 +1507,31 @@ void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
}
+void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(size <= 64);
+ dextm(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos >= 32 && pos < 64);
+ DCHECK(size < 33);
+ dextu(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 32);
+ DCHECK(pos + size <= 32);
+ DCHECK(size != 0);
+ dins_(rt, rs, pos, size);
+}
+
+
void MacroAssembler::Ins(Register rt,
Register rs,
uint16_t pos,
@@ -1494,49 +1543,90 @@ void MacroAssembler::Ins(Register rt,
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- FPURegister fs,
- FPURegister scratch) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
- Cvt_d_uw(fd, t8, scratch);
+ Cvt_d_uw(fd, t8);
}
-void MacroAssembler::Cvt_d_uw(FPURegister fd,
- Register rs,
- FPURegister scratch) {
- // Convert rs to a FP value in fd (and fd + 1).
- // We do this by converting rs minus the MSB to avoid sign conversion,
- // then adding 2^31 to the result (if needed).
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ // Zero extend int32 in rs.
+ Dext(t9, rs, 0, 32);
+ dmtc1(t9, fd);
+ cvt_d_l(fd, fd);
+}
+
+
+void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ dmfc1(t8, fs);
+ Cvt_d_ul(fd, t8);
+}
+
+
+void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
- DCHECK(!fd.is(scratch));
DCHECK(!rs.is(t9));
DCHECK(!rs.is(at));
- // Save rs's MSB to t9.
- Ext(t9, rs, 31, 1);
- // Remove rs's MSB.
- Ext(at, rs, 0, 31);
- // Move the result to fd.
- mtc1(at, fd);
- mthc1(zero_reg, fd);
+ Label msb_clear, conversion_done;
- // Convert fd to a real FP value.
- cvt_d_w(fd, fd);
+ Branch(&msb_clear, ge, rs, Operand(zero_reg));
- Label conversion_done;
+ // Rs >= 2^63
+ andi(t9, rs, 1);
+ dsrl(rs, rs, 1);
+ or_(t9, t9, rs);
+ dmtc1(t9, fd);
+ cvt_d_l(fd, fd);
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_d(fd, fd, fd); // In delay slot.
- // If rs's MSB was 0, it's done.
- // Otherwise we need to add that to the FP register.
- Branch(&conversion_done, eq, t9, Operand(zero_reg));
+ bind(&msb_clear);
+ // Rs < 2^63, we can do simple conversion.
+ dmtc1(rs, fd);
+ cvt_d_l(fd, fd);
+
+ bind(&conversion_done);
+}
- // Load 2^31 into f20 as its float representation.
- li(at, 0x41E00000);
- mtc1(zero_reg, scratch);
- mthc1(at, scratch);
- // Add it to fd.
- add_d(fd, fd, scratch);
+
+void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ dmfc1(t8, fs);
+ Cvt_s_ul(fd, t8);
+}
+
+
+void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ Label positive, conversion_done;
+
+ Branch(&positive, ge, rs, Operand(zero_reg));
+
+ // Rs >= 2^31.
+ andi(t9, rs, 1);
+ dsrl(rs, rs, 1);
+ or_(t9, t9, rs);
+ dmtc1(t9, fd);
+ cvt_s_l(fd, fd);
+ Branch(USE_DELAY_SLOT, &conversion_done);
+ add_s(fd, fd, fd); // In delay slot.
+
+ bind(&positive);
+ // Rs < 2^31, we can do simple conversion.
+ dmtc1(rs, fd);
+ cvt_s_l(fd, fd);
bind(&conversion_done);
}
@@ -1582,6 +1672,19 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
+ FPURegister scratch, Register result) {
+ Trunc_ul_d(fs, t8, scratch, result);
+ dmtc1(t8, fd);
+}
+
+
+void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch, Register result) {
+ Trunc_ul_s(fs, t8, scratch, result);
+ dmtc1(t8, fd);
+}
+
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
trunc_w_d(fd, fs);
@@ -1636,6 +1739,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
}
+void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
+ FPURegister scratch, Register result) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!AreAliased(rs, result, at));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0);
+ // If fd =< -1 or unordered, then the conversion fails.
+ BranchF(&fail, &fail, le, fd, scratch);
+ }
+
+ // Load 2^63 into scratch as its double representation.
+ li(at, 0x43e0000000000000);
+ dmtc1(at, scratch);
+
+ // Test if scratch > fd.
+ // If fd < 2^63 we can convert it normally.
+ BranchF(&simple_convert, nullptr, lt, fd, scratch);
+
+ // First we subtract 2^63 from fd, then trunc it to rs
+ // and add 2^63 to rs.
+ sub_d(scratch, fd, scratch);
+ trunc_l_d(scratch, scratch);
+ dmfc1(rs, scratch);
+ Or(rs, rs, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_l_d(scratch, fd);
+ dmfc1(rs, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+ // Conversion is failed if the result is negative.
+ addiu(at, zero_reg, -1);
+ dsrl(at, at, 1); // Load 2^62.
+ dmfc1(result, scratch);
+ xor_(result, result, at);
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+
+void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
+ FPURegister scratch, Register result) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!AreAliased(rs, result, at));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0f);
+ // If fd =< -1 or unordered, then the conversion fails.
+ BranchF32(&fail, &fail, le, fd, scratch);
+ }
+
+ // Load 2^63 into scratch as its float representation.
+ li(at, 0x5f000000);
+ mtc1(at, scratch);
+
+ // Test if scratch > fd.
+ // If fd < 2^63 we can convert it normally.
+ BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+
+ // First we subtract 2^63 from fd, then trunc it to rs
+ // and add 2^63 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_l_s(scratch, scratch);
+ dmfc1(rs, scratch);
+ Or(rs, rs, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_l_s(scratch, fd);
+ dmfc1(rs, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+ // Conversion is failed if the result is negative or unordered.
+ addiu(at, zero_reg, -1);
+ dsrl(at, at, 1); // Load 2^62.
+ dmfc1(result, scratch);
+ xor_(result, result, at);
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
if (0) { // TODO(plind): find reasonable arch-variant symbol names.
@@ -1669,13 +1868,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
if (kArchVariant != kMips64r6) {
if (long_branch) {
Label skip;
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1f(&skip);
nop();
- J(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- c(UN, D, cmp1, cmp2);
+ c(UN, sizeField, cmp1, cmp2);
bc1t(nan);
if (bd == PROTECT) {
nop();
@@ -1688,13 +1887,13 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
if (long_branch) {
Label skip;
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1eqz(&skip, kDoubleCompareReg);
nop();
- J(nan, bd);
+ BranchLong(nan, bd);
bind(&skip);
} else {
- cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+ cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
bc1nez(nan, kDoubleCompareReg);
if (bd == PROTECT) {
nop();
@@ -1710,7 +1909,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label skip;
Condition neg_cond = NegateFpuCondition(cond);
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- J(target, bd);
+ BranchLong(target, bd);
bind(&skip);
} else {
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
@@ -2129,28 +2328,30 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
+ DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchShort(L, bdslot);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- J(L, bdslot);
+ BranchLong(L, bdslot);
} else {
BranchShort(L, bdslot);
}
@@ -2162,17 +2363,15 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
}
} else {
@@ -2181,10 +2380,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- J(L, bdslot);
+ BranchLong(L, bdslot);
bind(&skip);
} else {
- J(L, bdslot);
+ BranchLong(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
@@ -2203,7 +2402,10 @@ void MacroAssembler::Branch(Label* L,
}
-void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset);
// Emit a nop in the branch delay slot if required.
@@ -2212,549 +2414,544 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
}
-void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- DCHECK(!rs.is(zero_reg));
- Register r2 = no_reg;
- Register scratch = at;
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ bc(offset);
+}
- if (rt.is_reg()) {
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
- // rt.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- r2 = rt.rm_;
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
- // Signed comparison.
- case greater:
- if (r2.is(zero_reg)) {
- bgtz(rs, offset);
- } else {
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (r2.is(zero_reg)) {
- bgez(rs, offset);
- } else {
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (r2.is(zero_reg)) {
- bltz(rs, offset);
- } else {
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (r2.is(zero_reg)) {
- blez(rs, offset);
- } else {
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (r2.is(zero_reg)) {
- bne(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (r2.is(zero_reg)) {
- b(offset);
- } else {
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
- } else {
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (r2.is(zero_reg)) {
- beq(rs, zero_reg, offset);
- } else {
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchShortHelperR6(offset, nullptr);
} else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- if (rt.imm64_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- beq(rs, r2, offset);
- }
- break;
- case ne:
- if (rt.imm64_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- // We don't want any other register but scratch clobbered.
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- bne(rs, r2, offset);
- }
- break;
- // Signed comparison.
- case greater:
- if (rt.imm64_ == 0) {
- bgtz(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case greater_equal:
- if (rt.imm64_ == 0) {
- bgez(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case less:
- if (rt.imm64_ == 0) {
- bltz(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case less_equal:
- if (rt.imm64_ == 0) {
- blez(rs, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- // Unsigned comparison.
- case Ugreater:
- if (rt.imm64_ == 0) {
- bne(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Ugreater_equal:
- if (rt.imm64_ == 0) {
- b(offset);
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- beq(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- }
- break;
- case Uless:
- if (rt.imm64_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- bne(scratch, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- }
- break;
- case Uless_equal:
- if (rt.imm64_ == 0) {
- beq(rs, zero_reg, offset);
- } else {
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- }
- break;
- default:
- UNREACHABLE();
- }
+ DCHECK(is_int16(offset));
+ BranchShortHelper(offset, nullptr, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
- // We use branch_offset as an argument for the branch instructions to be sure
- // it is called just before generating the branch instruction, as needed.
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ BranchShortHelperR6(0, L);
+ } else {
+ BranchShortHelper(0, L, bdslot);
+ }
+}
- b(shifted_branch_offset(L, false));
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm().is(zero_reg);
+ } else {
+ return rt.immediate() == 0;
+ }
}
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
+ if (L) {
+ offset = branch_offset_helper(L, bits) >> 2;
+ } else {
+ DCHECK(is_intn(offset, bits));
+ }
+ return offset;
+}
- int32_t offset = 0;
+
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
+ Register scratch) {
Register r2 = no_reg;
- Register scratch = at;
if (rt.is_reg()) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ return r2;
+}
+
+
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remember for patching the
+ // target.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
break;
case eq:
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre R6 beq is used here to make the code patchable. Otherwise bc
+ // should be used which has no condition field so is not patchable.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beq(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqc(rs, scratch, offset);
+ }
break;
case ne:
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ if (rs.code() == rt.rm_.reg_code) {
+ // Pre R6 bne is used here to make the code patchable. Otherwise we
+ // should not generate any instruction.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bne(rs, scratch, offset);
+ nop();
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
+ } else {
+ // We don't want any other register but scratch clobbered.
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnec(rs, scratch, offset);
+ }
break;
+
// Signed comparison.
case greater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(scratch, rs, offset);
}
break;
case greater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(rs, scratch, offset);
}
break;
case less:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzc(rs, offset);
} else {
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltc(rs, scratch, offset);
}
break;
case less_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezc(rs, offset);
} else {
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgec(scratch, rs, offset);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bnezc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ beqzc(scratch, offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(rs, scratch, offset);
}
break;
case Uless:
- if (r2.is(zero_reg)) {
- // No code needs to be emitted.
- return;
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bnezc(scratch, offset);
+ } else if (IsZero(rt)) {
+ break; // No code needs to be emitted.
} else {
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
- if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (rs.is(zero_reg)) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bc(offset);
+ } else if (IsZero(rt)) {
+ bits = OffsetSize::kOffset21;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ beqzc(rs, offset);
} else {
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ bits = OffsetSize::kOffset16;
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ DCHECK(!rs.is(scratch));
+ offset = GetOffset(offset, L, bits);
+ bgeuc(scratch, rs, offset);
}
break;
default:
UNREACHABLE();
}
- } else {
- // Be careful to always use shifted_branch_offset only just before the
- // branch instruction, as the location will be remember for patching the
- // target.
+ }
+ CheckTrampolinePoolQuick(1);
+ return true;
+}
+
+
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+
+ Register scratch = at;
+ int32_t offset32;
+
+  // Be careful to always use shifted_branch_offset only just before the
+  // branch instruction, as the location will be remembered for patching the
+  // target.
+ {
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
- offset = shifted_branch_offset(L, false);
- b(offset);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
break;
case eq:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- beq(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, scratch, offset32);
}
break;
case ne:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- offset = shifted_branch_offset(L, false);
- bne(rs, r2, offset);
+ // We don't want any other register but scratch clobbered.
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, scratch, offset32);
}
break;
+
// Signed comparison.
case greater:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgtz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case greater_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
case less:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bltz(rs, offset);
- } else if (is_int16(rt.imm64_)) {
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltz(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Slt(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case less_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- blez(rs, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ blez(rs, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- slt(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
+
// Unsigned comparison.
case Ugreater:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- bne(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Ugreater_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ b(offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
- case Uless:
- if (rt.imm64_ == 0) {
- // No code needs to be emitted.
- return;
- } else if (is_int16(rt.imm64_)) {
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_));
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ case Uless:
+ if (IsZero(rt)) {
+ return true; // No code needs to be emitted.
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, rs, r2);
- offset = shifted_branch_offset(L, false);
- bne(scratch, zero_reg, offset);
+ Sltu(scratch, rs, rt);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ bne(scratch, zero_reg, offset32);
}
break;
case Uless_equal:
- if (rt.imm64_ == 0) {
- offset = shifted_branch_offset(L, false);
- beq(rs, zero_reg, offset);
+ if (IsZero(rt)) {
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(rs, zero_reg, offset32);
} else {
- DCHECK(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- sltu(scratch, r2, rs);
- offset = shifted_branch_offset(L, false);
- beq(scratch, zero_reg, offset);
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
+ beq(scratch, zero_reg, offset32);
}
break;
default:
UNREACHABLE();
}
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
+
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ return BranchShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
+}
+
+
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ BranchShortCheck(0, L, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot) {
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
+ DCHECK(is_near);
+ USE(is_near);
}
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
+ if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
} else {
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
@@ -2766,13 +2963,11 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
- if (is_near(L)) {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
- } else {
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
}
} else {
@@ -2780,20 +2975,19 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
- Jal(L, bdslot);
+ BranchAndLinkLong(L, bdslot);
bind(&skip);
} else {
- BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
}
}
}
-// We need to use a bgezal or bltzal, but they can't be used directly with the
-// slt instructions. We could use sub or add instead but we would miss overflow
-// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLinkShort(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
// Emit a nop in the branch delay slot if required.
@@ -2802,230 +2996,306 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset,
}
-void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Register r2 = no_reg;
- Register scratch = at;
-
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
+ DCHECK(L == nullptr || offset == 0);
+ offset = GetOffset(offset, L, OffsetSize::kOffset26);
+ balc(offset);
+}
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
+void MacroAssembler::BranchAndLinkShort(int32_t offset,
+ BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ BranchAndLinkShortHelperR6(offset, nullptr);
+ } else {
+ DCHECK(is_int16(offset));
+ BranchAndLinkShortHelper(offset, nullptr, bdslot);
+ }
+}
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- bal(offset);
- break;
- default:
- UNREACHABLE();
- }
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ BranchAndLinkShortHelper(0, L, bdslot);
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
}
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
- bal(shifted_branch_offset(L, false));
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt) {
+ DCHECK(L == nullptr || offset == 0);
+ Register scratch = rs.is(at) ? t8 : at;
+ OffsetSize bits = OffsetSize::kOffset16;
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
-}
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
+ switch (cond) {
+ case cc_always:
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ break;
+ case eq:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case ne:
+ if (!is_near(L, bits)) return false;
+ Subu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ // Signed comparison.
+ case greater:
+ // rs > rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bltzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case greater_equal:
+ // rs >= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ blezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bgezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
+ case less:
+ // rs < rt
+ if (rs.code() == rt.rm_.reg_code) {
+ break; // No code needs to be emitted.
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgtzalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ bltzalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ }
+ break;
+ case less_equal:
+      // rs <= rt
+ if (rs.code() == rt.rm_.reg_code) {
+ bits = OffsetSize::kOffset26;
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ balc(offset);
+ } else if (rs.is(zero_reg)) {
+ if (!is_near(L, bits)) return false;
+ scratch = GetRtAsRegisterHelper(rt, scratch);
+ offset = GetOffset(offset, L, bits);
+ bgezalc(scratch, offset);
+ } else if (IsZero(rt)) {
+ if (!is_near(L, bits)) return false;
+ offset = GetOffset(offset, L, bits);
+ blezalc(rs, offset);
+ } else {
+ if (!is_near(L, bits)) return false;
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ }
+ break;
-void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- int32_t offset = 0;
- Register r2 = no_reg;
- Register scratch = at;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
+ // Unsigned comparison.
+ case Ugreater:
+      // rs > rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Ugreater_equal:
+      // rs >= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ case Uless:
+      // rs < rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, rs, rt);
+ offset = GetOffset(offset, L, bits);
+ bnezalc(scratch, offset);
+ break;
+ case Uless_equal:
+      // rs <= rt
+ if (!is_near(L, bits)) return false;
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ offset = GetOffset(offset, L, bits);
+ beqzalc(scratch, offset);
+ break;
+ default:
+ UNREACHABLE();
}
+ return true;
+}
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- switch (cond) {
- case cc_always:
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- // Signed comparison.
- case greater:
- // rs > rt
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case greater_equal:
- // rs >= rt
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less:
- // rs < r2
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case less_equal:
- // rs <= r2
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
+// with the slt instructions. We could use sub or add instead but we would miss
+// overflow cases, so we keep slt and add an intermediate third instruction.
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ DCHECK(L == nullptr || offset == 0);
+ if (!is_near(L, OffsetSize::kOffset16)) return false;
+ Register scratch = t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
- // Unsigned comparison.
- case Ugreater:
- // rs > rt
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Ugreater_equal:
- // rs >= rt
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless:
- // rs < r2
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
- case Uless_equal:
- // rs <= r2
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, 2);
- nop();
- offset = shifted_branch_offset(L, false);
- bal(offset);
- break;
+ switch (cond) {
+ case cc_always:
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
+ nop();
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bal(offset);
+ break;
- default:
- UNREACHABLE();
- }
+ // Signed comparison.
+ case greater:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ Slt(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ Sltu(scratch, rs, rt);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
+ addiu(scratch, scratch, -1);
+ offset = GetOffset(offset, L, OffsetSize::kOffset16);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
}
- // Check that offset could actually hold on an int16_t.
- DCHECK(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
+
+ return true;
+}
+
+
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
+ Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ if (!L) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ DCHECK(is_int26(offset));
+ return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
+ } else {
+ DCHECK(is_int16(offset));
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
+ }
+ } else {
+ DCHECK(offset == 0);
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
+ return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
+ } else {
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
+ }
+ }
+ return false;
}
@@ -3115,6 +3385,10 @@ void MacroAssembler::Call(Register target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+#ifdef DEBUG
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
@@ -3129,8 +3403,10 @@ void MacroAssembler::Call(Register target,
if (bd == PROTECT)
nop();
- DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
- SizeOfCodeGeneratedSince(&start));
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
}
@@ -3208,31 +3484,43 @@ void MacroAssembler::Ret(Condition cond,
}
-void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- j(L);
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchShortHelperR6(0, L);
+ } else {
+ EmitForbiddenSlotInstruction();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ j(L);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
-void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- {
- BlockGrowBufferScope block_buf_growth(this);
- // Buffer growth (and relocation) must be blocked for internal references
- // until associated instructions are emitted and available to be patched.
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- jal(L);
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
+ (!L->is_bound() || is_near_r6(L))) {
+ BranchAndLinkShortHelperR6(0, L);
+ } else {
+ EmitForbiddenSlotInstruction();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ jal(L);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT) nop();
}
- // Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT) nop();
}
@@ -3426,12 +3714,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(t9));
- DCHECK(!scratch2.is(t9));
- DCHECK(!result.is(t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3447,34 +3730,35 @@ void MacroAssembler::Allocate(int object_size,
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- ld(result, MemOperand(topaddr));
- ld(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ld(result, MemOperand(top_address));
+ ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- ld(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ ld(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
+ // Load allocation limit. Result already contains allocation top.
+ ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
- DCHECK(kPointerSize == kDoubleSize);
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
@@ -3482,9 +3766,9 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- Daddu(scratch2, result, Operand(object_size));
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
- sd(scratch2, MemOperand(topaddr));
+ Daddu(result_end, result, Operand(object_size));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
+ sd(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3493,28 +3777,23 @@ void MacroAssembler::Allocate(int object_size,
}
-void MacroAssembler::Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, 0x7091);
- li(scratch1, 0x7191);
- li(scratch2, 0x7291);
+ li(scratch, 0x7191);
+ li(result_end, 0x7291);
}
jmp(gc_required);
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(t9));
- DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+ // |object_size| and |result_end| may overlap, other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, t9));
+ DCHECK(!AreAliased(result_end, result, scratch, t9));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3523,34 +3802,34 @@ void MacroAssembler::Allocate(Register object_size,
AllocationUtils::GetAllocationTopReference(isolate(), flags);
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
- intptr_t top =
- reinterpret_cast<intptr_t>(allocation_top.address());
- intptr_t limit =
- reinterpret_cast<intptr_t>(allocation_limit.address());
+ intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+ intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
- Register topaddr = scratch1;
- li(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch;
// This code stores a temporary value in t9.
+ Register alloc_limit = t9;
+ li(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into t9.
- ld(result, MemOperand(topaddr));
- ld(t9, MemOperand(topaddr, kPointerSize));
+ // Load allocation top into result and allocation limit into alloc_limit.
+ ld(result, MemOperand(top_address));
+ ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. t9 is used
- // immediately below so this use of t9 does not cause difference with
- // respect to register content between debug and release mode.
- ld(t9, MemOperand(topaddr));
- Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+ // Assert that result actually contains top on entry.
+ ld(alloc_limit, MemOperand(top_address));
+ Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
- // Load allocation limit into t9. Result already contains allocation top.
- ld(t9, MemOperand(topaddr, static_cast<int32_t>(limit - top)));
+ // Load allocation limit. Result already contains allocation top.
+ ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
- DCHECK(kPointerSize == kDoubleSize);
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
if (emit_debug_code()) {
And(at, result, Operand(kDoubleAlignmentMask));
Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
@@ -3560,19 +3839,19 @@ void MacroAssembler::Allocate(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- dsll(scratch2, object_size, kPointerSizeLog2);
- Daddu(scratch2, result, scratch2);
+ dsll(result_end, object_size, kPointerSizeLog2);
+ Daddu(result_end, result, result_end);
} else {
- Daddu(scratch2, result, Operand(object_size));
+ Daddu(result_end, result, Operand(object_size));
}
- Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- And(t9, scratch2, Operand(kObjectAlignmentMask));
- Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+ And(at, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
- sd(scratch2, MemOperand(topaddr));
+ sd(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -3749,29 +4028,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst,
- Register src,
- RegList temps,
- int field_count) {
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < kNumRegisters; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.reg_code = i;
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- ld(tmp, FieldMemOperand(src, i * kPointerSize));
- sd(tmp, FieldMemOperand(dst, i * kPointerSize));
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ sd(value, FieldMemOperand(result, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -3858,16 +4133,16 @@ void MacroAssembler::CopyBytes(Register src,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
Branch(&entry);
bind(&loop);
- sd(filler, MemOperand(start_offset));
- Daddu(start_offset, start_offset, kPointerSize);
+ sd(filler, MemOperand(current_address));
+ Daddu(current_address, current_address, kPointerSize);
bind(&entry);
- Branch(&loop, ult, start_offset, Operand(end_offset));
+ Branch(&loop, ult, current_address, Operand(end_address));
}
@@ -3917,6 +4192,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch2,
Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
Label smi_value, done;
// Handle smi values specially.
@@ -3938,10 +4214,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
FPUCanonicalizeNaN(double_result, double_result);
bind(&smi_value);
- // scratch1 is now effective address of the double element.
// Untag and transfer.
- dsrl32(at, value_reg, 0);
- mtc1(at, double_scratch);
+ dsrl32(scratch1, value_reg, 0);
+ mtc1(scratch1, double_scratch);
cvt_d_w(double_result, double_scratch);
bind(&done);
@@ -3950,6 +4225,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
elements_offset));
dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
Daddu(scratch1, scratch1, scratch2);
+ // scratch1 is now effective address of the double element.
sdc1(double_result, MemOperand(scratch1, 0));
}
@@ -4115,8 +4391,6 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4136,7 +4410,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg().is(a0));
DCHECK(expected.is_immediate() || expected.reg().is(a2));
- DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -4164,11 +4437,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- li(a3, Operand(code_constant));
- daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
- }
-
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
@@ -4186,21 +4454,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ li(t0, Operand(step_in_enabled));
+ lb(t0, MemOperand(t0));
+ Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(a1));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- Label done;
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ }
+
+ Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag,
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = t0;
+ ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4217,6 +4542,7 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -4226,17 +4552,16 @@ void MacroAssembler::InvokeFunction(Register function,
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function.is(a1));
Register expected_reg = a2;
- Register code_reg = a3;
- ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Register temp_reg = t0;
+ ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
lw(expected_reg,
- FieldMemOperand(code_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
- ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
}
@@ -4254,11 +4579,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- InvokeCode(a3, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4563,6 +4884,89 @@ void MacroAssembler::DadduAndCheckForOverflow(Register dst, Register left,
}
+static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DaddBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ if (right.is_reg()) {
+ DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ li(overflow_dst, right); // Load right.
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Daddu(dst, left, overflow_dst); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, overflow_dst);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Daddu(dst, left, overflow_dst);
+ xor_(scratch, dst, overflow_dst);
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!right.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ DCHECK(!right.is(scratch));
+
+ if (left.is(right) && dst.is(left)) {
+ mov(overflow_dst, right);
+ right = overflow_dst;
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ daddu(dst, left, right); // Left is overwritten.
+ xor_(scratch, dst, scratch); // Original left.
+ xor_(overflow_dst, dst, right);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ daddu(dst, left, right); // Right is overwritten.
+ xor_(scratch, dst, scratch); // Original right.
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, overflow_dst, scratch);
+ } else {
+ daddu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, dst, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
+
+
void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
const Operand& right,
Register overflow_dst,
@@ -4694,6 +5098,83 @@ void MacroAssembler::DsubuAndCheckForOverflow(Register dst, Register left,
}
}
+
+void MacroAssembler::DsubBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+ li(overflow_dst, right); // Load right.
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ Dsubu(dst, left, overflow_dst); // Left is overwritten.
+ xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
+ xor_(scratch, dst, scratch); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ Dsubu(dst, left, overflow_dst);
+ xor_(scratch, left, overflow_dst);
+ xor_(overflow_dst, dst, left);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+
+void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+ // This happens with some crankshaft code. Since Subu works fine if
+ // left == right, let's not make that restriction here.
+ if (left.is(right)) {
+ mov(dst, zero_reg);
+ if (no_overflow_label) {
+ Branch(no_overflow_label);
+ }
+ }
+
+ if (dst.is(left)) {
+ mov(scratch, left); // Preserve left.
+ dsubu(dst, left, right); // Left is overwritten.
+ xor_(overflow_dst, dst, scratch); // scratch is original left.
+ xor_(scratch, scratch, right); // scratch is original left.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else if (dst.is(right)) {
+ mov(scratch, right); // Preserve right.
+ dsubu(dst, left, right); // Right is overwritten.
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, scratch); // Original right.
+ and_(overflow_dst, scratch, overflow_dst);
+ } else {
+ dsubu(dst, left, right);
+ xor_(overflow_dst, dst, left);
+ xor_(scratch, left, right);
+ and_(overflow_dst, scratch, overflow_dst);
+ }
+ BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
+}
+
+
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
@@ -4726,24 +5207,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -4765,34 +5235,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(t9));
- Call(t9);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Jump(t9);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- ld(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, native_context_index);
- // Load the code entry point from the builtins object.
- ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, a1);
+ InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
}
@@ -4929,46 +5375,29 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- ld(dst, GlobalObjectOperand());
- ld(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- ld(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(scratch, FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- ld(scratch,
- MemOperand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- int offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- ld(at, FieldMemOperand(scratch, offset));
+ ld(scratch, NativeContextMemOperand());
+ ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- ld(map_in_out, FieldMemOperand(scratch, offset));
+ ld(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- ld(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- ld(function, FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- ld(function, MemOperand(function, Context::SlotOffset(index)));
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ ld(dst, NativeContextMemOperand());
+ ld(dst, ContextMemOperand(dst, index));
}
@@ -5440,6 +5869,17 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5703,8 +6143,8 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -5733,28 +6173,6 @@ void MacroAssembler::HasColor(Register object,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object) {
- DCHECK(!AreAliased(value, scratch, t8, no_reg));
- Label is_data_object;
- ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- Branch(&is_data_object, eq, t8, Operand(scratch));
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(not_data_object, ne, t8, Operand(zero_reg));
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5774,23 +6192,18 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
// Note that we are using a 4-byte aligned 8-byte load.
@@ -5801,93 +6214,7 @@ void MacroAssembler::EnsureNotWhite(
lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
}
And(t8, mask_scratch, load_scratch);
- Branch(&done, ne, t8, Operand(zero_reg));
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // sll may overflow, making the check conservative.
- dsll(t8, mask_scratch, 1);
- And(t8, load_scratch, t8);
- Branch(&ok, eq, t8, Operand(zero_reg));
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object;
-
- // Check for heap-number
- ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
- LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(map));
- li(length, HeapNumber::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- And(t8, instance_type, Operand(kExternalStringTag));
- {
- Label skip;
- Branch(&skip, eq, t8, Operand(zero_reg));
- li(length, ExternalString::kSize);
- Branch(&is_data_object);
- bind(&skip);
- }
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
- // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
- // getting the length multiplied by 2.
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
- lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
- And(t8, instance_type, Operand(kStringEncodingMask));
- {
- Label skip;
- Branch(&skip, ne, t8, Operand(zero_reg));
- // Adjust length for UC16.
- dsll(t9, t9, 1);
- bind(&skip);
- }
- Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- DCHECK(!length.is(t8));
- And(length, length, Operand(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- Or(t8, t8, Operand(mask_scratch));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- Addu(t8, t8, Operand(length));
- sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ Branch(value_is_white, eq, t8, Operand(zero_reg));
}
@@ -6092,17 +6419,13 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
- Register reg6,
- Register reg7,
- Register reg8) {
- int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
- reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+ reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -6113,18 +6436,19 @@ bool AreAliased(Register reg1,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
-CodePatcher::CodePatcher(byte* address,
- int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
@@ -6136,7 +6460,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -6154,25 +6478,10 @@ void CodePatcher::Emit(Address addr) {
}
-void CodePatcher::ChangeBranchCondition(Condition cond) {
- Instr instr = Assembler::instr_at(masm_.pc_);
- DCHECK(Assembler::IsBranch(instr));
- uint32_t opcode = Assembler::GetOpcodeField(instr);
- // Currently only the 'eq' and 'ne' cond values are supported and the simple
- // branch instructions (with opcode being the branch type).
- // There are some special cases (see Assembler::IsBranch()) so extending this
- // would be tricky.
- DCHECK(opcode == BEQ ||
- opcode == BNE ||
- opcode == BLEZ ||
- opcode == BGTZ ||
- opcode == BEQL ||
- opcode == BNEL ||
- opcode == BLEZL ||
- opcode == BGTZL);
- opcode = (cond == eq) ? BEQ : BNE;
- instr = (instr & ~kOpcodeMask) | opcode;
- masm_.emit(instr);
+void CodePatcher::ChangeBranchCondition(Instr current_instr,
+ uint32_t new_opcode) {
+ current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
+ masm_.emit(current_instr);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index fa3808fa74..31ed8a32e1 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -23,6 +23,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_a0};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_a3};
const Register kRuntimeCallFunctionRegister = {Register::kCode_a1};
const Register kRuntimeCallArgCountRegister = {Register::kCode_a0};
@@ -104,14 +105,11 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg5 = no_reg,
Register reg6 = no_reg);
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
- Register reg8 = no_reg);
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
// -----------------------------------------------------------------------------
@@ -124,13 +122,13 @@ bool AreAliased(Register reg1,
#endif
-inline MemOperand ContextOperand(Register context, int index) {
+inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -168,11 +166,8 @@ inline MemOperand CFunctionArgumentOperand(int index) {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
@@ -193,9 +188,9 @@ class MacroAssembler: public Assembler {
Name(target, COND_ARGS, bd); \
}
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+ DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
@@ -232,6 +227,8 @@ class MacroAssembler: public Assembler {
Ret(cond, rs, rt, bd);
}
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
void Branch(Label* L,
Condition cond,
Register rs,
@@ -412,22 +409,10 @@ class MacroAssembler: public Assembler {
Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -564,12 +549,8 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
- void Allocate(Register object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
@@ -614,6 +595,12 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -676,6 +663,12 @@ class MacroAssembler: public Assembler {
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
+#undef DEFINE_INSTRUCTION3
+
+ void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
+ void Dlsa(Register rd, Register rs, Register rt, uint8_t sa,
+ Register scratch = at);
void Pref(int32_t hint, const MemOperand& rs);
@@ -811,15 +804,26 @@ class MacroAssembler: public Assembler {
// MIPS64 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
// Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+ void Cvt_d_uw(FPURegister fd, FPURegister fs);
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert unsigned long to double.
+ void Cvt_d_ul(FPURegister fd, FPURegister fs);
+ void Cvt_d_ul(FPURegister fd, Register rs);
+
+ // Convert unsigned long to float.
+ void Cvt_s_ul(FPURegister fd, FPURegister fs);
+ void Cvt_s_ul(FPURegister fd, Register rs);
// Convert double to unsigned long.
void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
@@ -833,6 +837,18 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert double to unsigned long.
+ void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
+ Register result = no_reg);
+ void Trunc_ul_d(FPURegister fd, Register rs, FPURegister scratch,
+ Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch,
+ Register result = no_reg);
+ void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
+ Register result = no_reg);
+
void Trunc_w_d(FPURegister fd, FPURegister fs);
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
@@ -979,8 +995,15 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -993,7 +1016,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -1011,15 +1034,19 @@ class MacroAssembler: public Assembler {
// JavaScript invokes.
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1060,9 +1087,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
-
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
@@ -1071,12 +1095,11 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// -------------------------------------------------------------------------
// Support functions.
@@ -1256,6 +1279,24 @@ class MacroAssembler: public Assembler {
const Operand& right, Register overflow_dst,
Register scratch);
+ inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void DaddBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void DaddBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
void DsubuAndCheckForOverflow(Register dst, Register left, Register right,
Register overflow_dst, Register scratch = at);
@@ -1263,6 +1304,24 @@ class MacroAssembler: public Assembler {
const Operand& right, Register overflow_dst,
Register scratch);
+ inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void DsubBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void DsubBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
void BranchOnOverflow(Label* label,
Register overflow_check,
BranchDelaySlot bd = PROTECT) {
@@ -1312,16 +1371,24 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
BranchDelaySlot bd = PROTECT) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles, bd);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles, bd);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ BranchDelaySlot bd = PROTECT) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles, bd);
}
// Convenience function: call an external reference.
@@ -1329,17 +1396,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
int num_arguments,
BranchDelaySlot bd = PROTECT);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -1395,13 +1453,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in a1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1589,6 +1640,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1727,16 +1782,32 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
int num_reg_arguments,
int num_double_arguments);
- void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
+ inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+ inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+ void BranchShortHelperR6(int32_t offset, Label* L);
+ void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+ bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+ bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+ const Operand& rt, BranchDelaySlot bdslot);
+
+ void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+ void BranchAndLinkShortHelper(int16_t offset, Label* L,
+ BranchDelaySlot bdslot);
+ void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot = PROTECT);
- void J(Label* L, BranchDelaySlot bdslot);
- void Jal(Label* L, BranchDelaySlot bdslot);
+ bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt);
+ bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot);
+ void BranchLong(Label* L, BranchDelaySlot bdslot);
+ void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
void Jr(Label* L, BranchDelaySlot bdslot);
void Jalr(Label* L, BranchDelaySlot bdslot);
@@ -1753,8 +1824,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1808,8 +1877,7 @@ class CodePatcher {
DONT_FLUSH
};
- CodePatcher(byte* address,
- int instructions,
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
@@ -1824,7 +1892,7 @@ class CodePatcher {
// Change the condition part of an instruction leaving the rest of the current
// instruction unchanged.
- void ChangeBranchCondition(Condition cond);
+ void ChangeBranchCondition(Instr current_instr, uint32_t new_opcode);
private:
byte* address_; // The address of the code being patched.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 4a7fd7c10f..7fa96442f9 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -146,7 +146,7 @@ void MipsDebugger::Stop(Instruction* instr) {
#else // GENERATED_CODE_COVERAGE
-#define UNSUPPORTED() printf("Unsupported instruction.\n");
+#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
static void InitializeCoverage() {}
@@ -519,7 +519,7 @@ void MipsDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -898,7 +898,12 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
for (int i = 0; i < kNumFPURegisters; i++) {
FPUregisters_[i] = 0;
}
- FCSR_ = 0;
+
+ if (kArchVariant == kMips64r6) {
+ FCSR_ = kFCSRNaN2008FlagMask;
+ } else {
+ FCSR_ = 0;
+ }
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
@@ -926,12 +931,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@@ -947,14 +952,13 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -999,9 +1003,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -1256,6 +1261,8 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(double original, double rounded) {
bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1273,7 +1280,7 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1315,11 +1322,135 @@ bool Simulator::set_fcsr_round_error(float original, float rounded) {
return ret;
}
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ }
+}
+
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ if (FCSR_ & kFCSRNaN2008FlagMask) {
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
+ double max_int64 = std::numeric_limits<int64_t>::max();
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ }
+}
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(float original, float rounded) {
bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
+ // loading the most accurate representation into max_int64, which is 2^63.
double max_int64 = std::numeric_limits<int64_t>::max();
double min_int64 = std::numeric_limits<int64_t>::min();
@@ -1337,7 +1468,7 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
ret = true;
}
- if (rounded > max_int64 || rounded < min_int64) {
+ if (rounded >= max_int64 || rounded < min_int64) {
set_fcsr_bit(kFCSROverflowFlagBit, true);
// The reference is not really clear but it seems this is required:
set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -2259,10 +2390,12 @@ void Simulator::DecodeTypeRegisterSRsType() {
set_fpu_register_float(fd_reg(), -fs);
break;
case SQRT_S:
- set_fpu_register_float(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_float(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_S: {
- float result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ float result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_float(fd_reg(), result);
break;
}
@@ -2369,7 +2502,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2379,7 +2512,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2388,7 +2521,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case TRUNC_L_S: { // Mips64r2 instruction.
@@ -2396,7 +2529,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2410,7 +2543,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2425,7 +2558,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t i64 = static_cast<int64_t>(result);
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2434,7 +2567,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2444,7 +2577,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
} break;
case CEIL_W_S: // Round double to word towards positive infinity.
@@ -2453,7 +2586,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CEIL_L_S: { // Mips64r2 instruction.
@@ -2461,7 +2594,7 @@ void Simulator::DecodeTypeRegisterSRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2764,10 +2897,12 @@ void Simulator::DecodeTypeRegisterDRsType() {
set_fpu_register_double(fd_reg(), -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg(), fast_sqrt(fs));
+ lazily_initialize_fast_sqrt(isolate_);
+ set_fpu_register_double(fd_reg(), fast_sqrt(fs, isolate_));
break;
case RSQRT_D: {
- double result = 1.0 / fast_sqrt(fs);
+ lazily_initialize_fast_sqrt(isolate_);
+ double result = 1.0 / fast_sqrt(fs, isolate_);
set_fpu_register_double(fd_reg(), result);
break;
}
@@ -2803,7 +2938,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_word_invalid_result(fs, rounded);
}
break;
}
@@ -2818,7 +2953,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
@@ -2827,7 +2962,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case FLOOR_W_D: // Round double to word towards negative infinity.
@@ -2836,7 +2971,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CEIL_W_D: // Round double to word towards positive infinity.
@@ -2845,7 +2980,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int32_t result = static_cast<int32_t>(rounded);
set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPUInvalidResult);
+ set_fpu_register_invalid_result(fs, rounded);
}
} break;
case CVT_S_D: // Convert double to float (single).
@@ -2857,7 +2992,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
round64_according_to_fcsr(fs, rounded, result, fs);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2872,7 +3007,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t i64 = static_cast<int64_t>(result);
set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2881,7 +3016,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2890,7 +3025,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -2899,7 +3034,7 @@ void Simulator::DecodeTypeRegisterDRsType() {
int64_t result = static_cast<int64_t>(rounded);
set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ set_fpu_register_invalid_result64(fs, rounded);
}
break;
}
@@ -3184,11 +3319,18 @@ void Simulator::DecodeTypeRegisterCOP1() {
case MFHC1:
set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
- case CTC1:
+ case CTC1: {
// At the moment only FCSR is supported.
DCHECK(fs_reg() == kFCSRRegister);
- FCSR_ = static_cast<uint32_t>(rt());
+ uint32_t reg = static_cast<uint32_t>(rt());
+ if (kArchVariant == kMips64r6) {
+ FCSR_ = reg | kFCSRNaN2008FlagMask;
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ FCSR_ = reg & ~kFCSRNaN2008FlagMask;
+ }
break;
+ }
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
set_fpu_register_hi_word(fs_reg(), 0);
@@ -3352,6 +3494,20 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case DSRAV:
SetResult(rd_reg(), rt() >> rs());
break;
+ case LSA: {
+ DCHECK(kArchVariant == kMips64r6);
+ int8_t sa = lsa_sa() + 1;
+ int32_t _rt = static_cast<int32_t>(rt());
+ int32_t _rs = static_cast<int32_t>(rs());
+ int32_t res = _rs << sa;
+ res += _rt;
+ SetResult(rd_reg(), static_cast<int64_t>(res));
+ break;
+ }
+ case DLSA:
+ DCHECK(kArchVariant == kMips64r6);
+ SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt());
+ break;
case MFHI: // MFHI == CLZ on R6.
if (kArchVariant != kMips64r6) {
DCHECK(sa() == 0);
@@ -3692,7 +3848,19 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
void Simulator::DecodeTypeRegisterSPECIAL3() {
int64_t alu_out;
switch (get_instr()->FunctionFieldRaw()) {
- case INS: { // Mips32r2 instruction.
+ case INS: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int32_t>((rt_u() & ~(mask << lsb)) |
+ ((rs_u() & mask) << lsb));
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINS: { // Mips64r2 instruction.
// Interpret rd field as 5-bit msb of insert.
uint16_t msb = rd_reg();
// Interpret sa field as 5-bit lsb of insert.
@@ -3703,7 +3871,7 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rt_reg(), alu_out);
break;
}
- case EXT: { // Mips32r2 instruction.
+ case EXT: { // Mips64r2 instruction.
// Interpret rd field as 5-bit msb of extract.
uint16_t msb = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
@@ -3714,11 +3882,33 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
SetResult(rt_reg(), alu_out);
break;
}
- case DEXT: { // Mips32r2 instruction.
+ case DEXT: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa();
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DEXTM: {
// Interpret rd field as 5-bit msb of extract.
uint16_t msb = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
+ uint16_t size = msb + 33;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DEXTU: {
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa() + 32;
uint16_t size = msb + 1;
uint64_t mask = (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
@@ -3905,27 +4095,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
}
-// Branch instructions common part.
-#define BranchAndLinkHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- set_register(31, current_pc + kBranchReturnOffset); \
- } else { \
- next_pc = current_pc + kBranchReturnOffset; \
- }
-
-
-#define BranchHelper(do_branch) \
- execute_branch_delay_instruction = true; \
- if (do_branch) { \
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
- } else { \
- next_pc = current_pc + kBranchReturnOffset; \
- }
-
-
-// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq).
+// Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
@@ -3936,21 +4106,15 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int64_t rt = get_register(rt_reg);
int16_t imm16 = instr->Imm16Value();
int32_t imm18 = instr->Imm18Value();
- int32_t imm21 = instr->Imm21Value();
- int32_t imm26 = instr->Imm26Value();
int32_t ft_reg = instr->FtValue(); // Destination register.
- int64_t ft = get_fpu_register(ft_reg);
// Zero extended immediate.
uint64_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
int64_t se_imm16 = imm16;
int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xfffffffffffc0000 : 0);
- int64_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfffffffffc000000 : 0);
- // Get current pc.
- int64_t current_pc = get_pc();
// Next pc.
int64_t next_pc = bad_ra;
@@ -3965,7 +4129,57 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Alignment for 32-bit integers used in LWL, LWR, etc.
const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
- // ---------- Configuration (and execution for REGIMM).
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](
+ bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int64_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + 2 * Instruction::kInstrSize);
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchHelper = [this, instr, &next_pc,
+ &execute_branch_delay_instruction](bool do_branch) {
+ execute_branch_delay_instruction = true;
+ int64_t current_pc = get_pc();
+ if (do_branch) {
+ int16_t imm16 = instr->Imm16Value();
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
+ }
+ };
+
+ auto BranchAndLinkCompactHelper = [this, instr, &next_pc](bool do_branch,
+ int bits) {
+ int64_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = current_pc + (imm << 2) + Instruction::kInstrSize;
+ set_register(31, current_pc + Instruction::kInstrSize);
+ }
+ };
+
+ auto BranchCompactHelper = [&next_pc, this, instr](bool do_branch, int bits) {
+ int64_t current_pc = get_pc();
+ CheckForbiddenSlot(current_pc);
+ if (do_branch) {
+ int32_t imm = instr->ImmValue(bits);
+ imm <<= 32 - bits;
+ imm >>= 32 - bits;
+ next_pc = get_pc() + (imm << 2) + Instruction::kInstrSize;
+ }
+ };
+
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
@@ -3975,32 +4189,14 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
uint32_t cc_value = test_fcsr_bit(fcsr_cc);
bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(do_branch);
break;
}
case BC1EQZ:
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (!(ft & 0x1)) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
break;
case BC1NEZ:
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (ft & 0x1) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
+ BranchHelper(get_fpu_register(ft_reg) & 0x1);
break;
default:
UNREACHABLE();
@@ -4021,6 +4217,12 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BGEZAL:
BranchAndLinkHelper(rs >= 0);
break;
+ case DAHI:
+ SetResult(rs_reg, rs + (se_imm16 << 32));
+ break;
+ case DATI:
+ SetResult(rs_reg, rs + (se_imm16 << 48));
+ break;
default:
UNREACHABLE();
}
@@ -4034,55 +4236,156 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case BNE:
BranchHelper(rs != rt);
break;
- case BLEZ:
- BranchHelper(rs <= 0);
- break;
- case BGTZ:
- BranchHelper(rs > 0);
- break;
- case POP66: {
- if (rs_reg) { // BEQZC
- int32_t se_imm21 =
- static_cast<int32_t>(imm21 << (kOpcodeBits + kRsBits));
- se_imm21 = se_imm21 >> (kOpcodeBits + kRsBits);
- if (rs == 0)
- next_pc = current_pc + 4 + (se_imm21 << 2);
- else
- next_pc = current_pc + 4;
+ case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZALC
+ BranchAndLinkCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZALC
+ BranchAndLinkCompactHelper(rt >= 0, 16);
+ } else { // BGEUC
+ BranchCompactHelper(
+ static_cast<uint64_t>(rs) >= static_cast<uint64_t>(rt), 16);
+ }
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ } else { // BLEZ
+ BranchHelper(rs <= 0);
+ }
+ break;
+ case POP07: // BGTZALC, BLTZALC, BLTUC, BGTZ (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZALC
+ BranchAndLinkCompactHelper(rt > 0, 16);
+ } else {
+ if (rt_reg == rs_reg) { // BLTZALC
+ BranchAndLinkCompactHelper(rt < 0, 16);
+ } else { // BLTUC
+ BranchCompactHelper(
+ static_cast<uint64_t>(rs) < static_cast<uint64_t>(rt), 16);
+ }
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ } else { // BGTZ
+ BranchHelper(rs > 0);
+ }
+ break;
+ case POP26: // BLEZC, BGEZC, BGEC/BLEC / BLEZL (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BLEZC
+ BranchCompactHelper(rt <= 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BGEZC
+ BranchCompactHelper(rt >= 0, 16);
+ } else { // BGEC/BLEC
+ BranchCompactHelper(rs >= rt, 16);
+ }
+ }
+ }
+ } else { // BLEZL
+ BranchAndLinkHelper(rs <= 0);
+ }
+ break;
+ case POP27: // BGTZC, BLTZC, BLTC/BGTC / BGTZL (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rt_reg != 0) {
+ if (rs_reg == 0) { // BGTZC
+ BranchCompactHelper(rt > 0, 16);
+ } else {
+ if (rs_reg == rt_reg) { // BLTZC
+ BranchCompactHelper(rt < 0, 16);
+ } else { // BLTC/BGTC
+ BranchCompactHelper(rs < rt, 16);
+ }
+ }
+ }
+ } else { // BGTZL
+ BranchAndLinkHelper(rs > 0);
+ }
+ break;
+ case POP66: // BEQZC, JIC
+ if (rs_reg != 0) { // BEQZC
+ BranchCompactHelper(rs == 0, 21);
} else { // JIC
next_pc = rt + imm16;
}
break;
- }
- case BC: {
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case POP76: // BNEZC, JIALC
+ if (rs_reg != 0) { // BNEZC
+ BranchCompactHelper(rs != 0, 21);
+ } else { // JIALC
+ int64_t current_pc = get_pc();
+ set_register(31, current_pc + Instruction::kInstrSize);
+ next_pc = rt + imm16;
+ }
break;
- }
- case BALC: {
- set_register(31, current_pc + 4);
- next_pc = current_pc + 4 + (se_imm26 << 2);
- set_pc(next_pc);
- pc_modified_ = true;
+ case BC:
+ BranchCompactHelper(true, 26);
break;
- }
- // ------------- Arithmetic instructions.
- case ADDI:
- case DADDI:
- if (HaveSameSign(rs, se_imm16)) {
- if (rs > 0) {
- if (rs > Registers::kMaxValue - se_imm16) {
- SignalException(kIntegerOverflow);
+ case BALC:
+ BranchAndLinkCompactHelper(true, 26);
+ break;
+ case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rs_reg >= rt_reg) { // BOVC
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ BranchCompactHelper(rs > Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs < Registers::kMinValue - rt, 16);
+ }
}
- } else if (rs < 0) {
- if (rs < Registers::kMinValue - se_imm16) {
- SignalException(kIntegerUnderflow);
+ } else {
+ if (rs_reg == 0) { // BEQZALC
+ BranchAndLinkCompactHelper(rt == 0, 16);
+ } else { // BEQC
+ BranchCompactHelper(rt == rs, 16);
}
}
+ } else { // ADDI
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ if (rs <= Registers::kMaxValue - se_imm16) {
+ SignalException(kIntegerOverflow);
+ }
+ } else if (rs < 0) {
+ if (rs >= Registers::kMinValue - se_imm16) {
+ SignalException(kIntegerUnderflow);
+ }
+ }
+ }
+ SetResult(rt_reg, rs + se_imm16);
}
- SetResult(rt_reg, rs + se_imm16);
break;
+ case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6)
+ if (kArchVariant == kMips64r6) {
+ if (rs_reg >= rt_reg) { // BNVC
+ if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) {
+ BranchCompactHelper(true, 16);
+ } else {
+ if (rs > 0) {
+ BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16);
+ } else if (rs < 0) {
+ BranchCompactHelper(rs >= Registers::kMinValue - rt, 16);
+ }
+ }
+ } else {
+ if (rs_reg == 0) { // BNEZALC
+ BranchAndLinkCompactHelper(rt != 0, 16);
+ } else { // BNEC
+ BranchCompactHelper(rt != rs, 16);
+ }
+ }
+ }
+ break;
+ // ------------- Arithmetic instructions.
case ADDIU: {
int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
// Sign-extend result of 32bit operation into 64bit register.
@@ -4107,12 +4410,24 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case XORI:
SetResult(rt_reg, rs ^ oe_imm16);
break;
- case LUI: {
- int32_t alu32_out = static_cast<int32_t>(oe_imm16 << 16);
- // Sign-extend result of 32bit operation into 64bit register.
- SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+ case LUI:
+ if (rs_reg != 0) {
+ // AUI instruction.
+ DCHECK(kArchVariant == kMips64r6);
+ int32_t alu32_out = static_cast<int32_t>(rs + (se_imm16 << 16));
+ SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+ } else {
+ // LUI instruction.
+ int32_t alu32_out = static_cast<int32_t>(oe_imm16 << 16);
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rt_reg, static_cast<int64_t>(alu32_out));
+ }
+ break;
+ case DAUI:
+ DCHECK(kArchVariant == kMips64r6);
+ DCHECK(rs_reg != 0);
+ SetResult(rt_reg, rs + (se_imm16 << 16));
break;
- }
// ------------- Memory instructions.
case LB:
set_register(rt_reg, ReadB(rs + se_imm16));
@@ -4205,22 +4520,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case SDC1:
WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
break;
- // ------------- JIALC and BNEZC instructions.
- case POP76: {
- // Next pc.
- next_pc = rt + se_imm16;
- // The instruction after the jump is NOT executed.
- uint16_t pc_increment = Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + pc_increment);
- }
- set_pc(next_pc);
- pc_modified_ = true;
- break;
- }
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
+ int32_t imm21 = instr->Imm21Value();
+ int64_t current_pc = get_pc();
uint8_t rt = (imm21 >> kImm16Bits);
switch (rt) {
case ALUIPC:
@@ -4290,7 +4594,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
+ reinterpret_cast<Instruction*>(get_pc() + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -4300,9 +4604,6 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
-#undef BranchHelper
-#undef BranchAndLinkHelper
-
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
@@ -4393,7 +4694,7 @@ void Simulator::Execute() {
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
- if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ if (icount_ == static_cast<uint64_t>(::v8::internal::FLAG_stop_sim_at)) {
MipsDebugger dbg(this);
dbg.Debug();
} else {
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 68d518ea10..1d156d860f 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -23,7 +23,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
@@ -43,9 +43,10 @@ typedef int (*mips_regexp_matcher)(String* input,
void* return_address,
Isolate* isolate);
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ NULL, p8))
#else // O32 Abi.
@@ -60,9 +61,10 @@ typedef int (*mips_regexp_matcher)(String* input,
int32_t direct_call,
Isolate* isolate);
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
- p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
+ p7, p8))
#endif // MIPS_ABI_N64
@@ -77,11 +79,13 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
} // namespace internal
@@ -198,6 +202,12 @@ class Simulator {
void set_fpu_register_hi_word(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
int64_t get_fpu_register(int fpureg) const;
int32_t get_fpu_register_word(int fpureg) const;
int32_t get_fpu_register_signed_word(int fpureg) const;
@@ -369,6 +379,7 @@ class Simulator {
inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
inline int32_t sa() const { return currentInstr_->SaValue(); }
+ inline int32_t lsa_sa() const { return currentInstr_->LsaSaValue(); }
inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
set_register(rd_reg, alu_out);
@@ -381,6 +392,18 @@ class Simulator {
// Used for breakpoints and traps.
void SoftwareInterrupt(Instruction* instr);
+ // Compact branch guard.
+ void CheckForbiddenSlot(int64_t current_pc) {
+ Instruction* instr_after_compact_branch =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Error: Unexpected instruction 0x%08x immediately after a "
+ "compact branch instruction.",
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
+ }
+ }
+
// Stop helper functions.
bool IsWatchpoint(uint64_t code);
void PrintWatchpoint(uint64_t code);
@@ -403,7 +426,7 @@ class Simulator {
return;
}
- if (instr->IsForbiddenInBranchDelay()) {
+ if (instr->IsForbiddenAfterBranch()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
instr->OpcodeValue());
@@ -430,7 +453,8 @@ class Simulator {
void SignalException(Exception e);
// Runtime call support.
- static void* RedirectExternalReference(void* external_function,
+ static void* RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
@@ -485,24 +509,24 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, reinterpret_cast<int64_t*>(p0), \
- reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
+ FUNCTION_ADDR(entry), 5, reinterpret_cast<int64_t*>(p0), \
+ reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
#ifdef MIPS_ABI_N64
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>(Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, p0, p1, p2, p3, p4, \
- reinterpret_cast<int64_t*>(p5), p6, p7, NULL, \
- p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ static_cast<int>(Simulator::current(isolate)->Call( \
+ entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
+ NULL, p8))
#else // Must be O32 Abi.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>( \
- Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ static_cast<int>(Simulator::current(isolate)->Call( \
+ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#endif // MIPS_ABI_N64
@@ -517,13 +541,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
new file mode 100644
index 0000000000..ba3c4be52f
--- /dev/null
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -0,0 +1,565 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#define V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+
+#include "src/objects-body-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+template <int start_offset>
+int FlexibleBodyDescriptor<start_offset>::SizeOf(Map* map, HeapObject* object) {
+ return object->SizeFromMap(map);
+}
+
+
+bool BodyDescriptorBase::IsValidSlotImpl(HeapObject* obj, int offset) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ return true;
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ return helper.IsTagged(offset);
+ }
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateBodyImpl(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ IteratePointers(obj, start_offset, end_offset, v);
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kPointerSize) &&
+ IsAligned(end_offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers(obj, offset, end_of_region_offset, v);
+ }
+ offset = end_of_region_offset;
+ }
+ }
+}
+
+
+template <typename StaticVisitor>
+void BodyDescriptorBase::IterateBodyImpl(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset) {
+ if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+ IteratePointers<StaticVisitor>(heap, obj, start_offset, end_offset);
+ } else {
+ DCHECK(FLAG_unbox_double_fields);
+ DCHECK(IsAligned(start_offset, kPointerSize) &&
+ IsAligned(end_offset, kPointerSize));
+
+ LayoutDescriptorHelper helper(obj->map());
+ DCHECK(!helper.all_fields_tagged());
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ IteratePointers<StaticVisitor>(heap, obj, offset, end_of_region_offset);
+ }
+ offset = end_of_region_offset;
+ }
+ }
+}
+
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IteratePointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v) {
+ v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
+}
+
+
+template <typename StaticVisitor>
+void BodyDescriptorBase::IteratePointers(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset) {
+ StaticVisitor::VisitPointers(heap, obj,
+ HeapObject::RawField(obj, start_offset),
+ HeapObject::RawField(obj, end_offset));
+}
+
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IteratePointer(HeapObject* obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitPointer(HeapObject::RawField(obj, offset));
+}
+
+
+template <typename StaticVisitor>
+void BodyDescriptorBase::IteratePointer(Heap* heap, HeapObject* obj,
+ int offset) {
+ StaticVisitor::VisitPointer(heap, obj, HeapObject::RawField(obj, offset));
+}
+
+
+// Iterates the function object according to the visiting policy.
+template <JSFunction::BodyVisitingPolicy body_visiting_policy>
+class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kNonWeakFieldsEndOffset == kCodeEntryOffset);
+ STATIC_ASSERT(kCodeEntryOffset + kPointerSize == kNextFunctionLinkOffset);
+ STATIC_ASSERT(kNextFunctionLinkOffset + kPointerSize == kSize);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ if (offset < kSize) return true;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOffset, kNonWeakFieldsEndOffset, v);
+
+ if (body_visiting_policy & kVisitCodeEntry) {
+ v->VisitCodeEntry(obj->address() + kCodeEntryOffset);
+ }
+
+ if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers(obj, kNextFunctionLinkOffset, kSize, v);
+ }
+ IterateBodyImpl(obj, kSize, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kNonWeakFieldsEndOffset);
+
+ if (body_visiting_policy & kVisitCodeEntry) {
+ StaticVisitor::VisitCodeEntry(heap, obj,
+ obj->address() + kCodeEntryOffset);
+ }
+
+ if (body_visiting_policy & kVisitNextFunction) {
+ IteratePointers<StaticVisitor>(heap, obj, kNextFunctionLinkOffset, kSize);
+ }
+ IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+
+class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
+ STATIC_ASSERT(kBackingStoreOffset + kPointerSize == kBitFieldSlot);
+ STATIC_ASSERT(kBitFieldSlot + kPointerSize == kSize);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ if (offset < kBackingStoreOffset) return true;
+ if (offset < kSize) return false;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOffset, kBackingStoreOffset, v);
+ IterateBodyImpl(obj, kSize, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kBackingStoreOffset);
+ IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+
+class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return offset == kConstantPoolOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kConstantPoolOffset, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointer<StaticVisitor>(heap, obj, kConstantPoolOffset);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return reinterpret_cast<BytecodeArray*>(obj)->BytecodeArraySize();
+ }
+};
+
+
+class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return offset == kBasePointerOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointer(obj, kBasePointerOffset, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IteratePointer<StaticVisitor>(heap, obj, kBasePointerOffset);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ }
+};
+
+
+template <JSWeakCollection::BodyVisitingPolicy body_visiting_policy>
+class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kTableOffset + kPointerSize == kNextOffset);
+ STATIC_ASSERT(kNextOffset + kPointerSize == kSize);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ if (body_visiting_policy == kVisitStrong) {
+ IterateBodyImpl(obj, kPropertiesOffset, object_size, v);
+ } else {
+ IteratePointers(obj, kPropertiesOffset, kTableOffset, v);
+ IterateBodyImpl(obj, kSize, object_size, v);
+ }
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ if (body_visiting_policy == kVisitStrong) {
+ IterateBodyImpl<StaticVisitor>(heap, obj, kPropertiesOffset, object_size);
+ } else {
+ IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
+ kTableOffset);
+ IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
+ }
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+
+class Foreign::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ v->VisitExternalReference(reinterpret_cast<Address*>(
+ HeapObject::RawField(obj, kForeignAddressOffset)));
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ StaticVisitor::VisitExternalReference(reinterpret_cast<Address*>(
+ HeapObject::RawField(obj, kForeignAddressOffset)));
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+};
+
+
+class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ v->VisitExternalOneByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ StaticVisitor::VisitExternalOneByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+};
+
+
+class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ typedef v8::String::ExternalStringResource Resource;
+ v->VisitExternalTwoByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ typedef v8::String::ExternalStringResource Resource;
+ StaticVisitor::VisitExternalTwoByteString(reinterpret_cast<Resource**>(
+ HeapObject::RawField(obj, kResourceOffset)));
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
+};
+
+
+class Code::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ STATIC_ASSERT(kRelocationInfoOffset + kPointerSize == kHandlerTableOffset);
+ STATIC_ASSERT(kHandlerTableOffset + kPointerSize ==
+ kDeoptimizationDataOffset);
+ STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
+ kTypeFeedbackInfoOffset);
+ STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize == kNextCodeLinkOffset);
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ // Slots in code can't be invalid because we never trim code objects.
+ return true;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kDebugBreakSlotMask;
+
+ IteratePointers(obj, kRelocationInfoOffset, kNextCodeLinkOffset, v);
+ v->VisitNextCodeLink(HeapObject::RawField(obj, kNextCodeLinkOffset));
+
+ RelocIterator it(reinterpret_cast<Code*>(obj), mode_mask);
+ Isolate* isolate = obj->GetIsolate();
+ for (; !it.done(); it.next()) {
+ it.rinfo()->Visit(isolate, v);
+ }
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBody(obj, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj) {
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::kDebugBreakSlotMask;
+
+ Heap* heap = obj->GetHeap();
+ IteratePointers<StaticVisitor>(heap, obj, kRelocationInfoOffset,
+ kNextCodeLinkOffset);
+ StaticVisitor::VisitNextCodeLink(
+ heap, HeapObject::RawField(obj, kNextCodeLinkOffset));
+
+ RelocIterator it(reinterpret_cast<Code*>(obj), mode_mask);
+ for (; !it.done(); it.next()) {
+ it.rinfo()->template Visit<StaticVisitor>(heap);
+ }
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ IterateBody<StaticVisitor>(obj);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return reinterpret_cast<Code*>(object)->CodeSize();
+ }
+};
+
+
+template <typename Op, typename ReturnType, typename T1, typename T2,
+ typename T3>
+ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
+ if (type < FIRST_NONSTRING_TYPE) {
+ switch (type & kStringRepresentationMask) {
+ case kSeqStringTag:
+ return ReturnType();
+ case kConsStringTag:
+ return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3);
+ case kSlicedStringTag:
+ return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3);
+ case kExternalStringTag:
+ if ((type & kStringEncodingMask) == kOneByteStringTag) {
+ return Op::template apply<ExternalOneByteString::BodyDescriptor>(
+ p1, p2, p3);
+ } else {
+ return Op::template apply<ExternalTwoByteString::BodyDescriptor>(
+ p1, p2, p3);
+ }
+ }
+ UNREACHABLE();
+ return ReturnType();
+ }
+
+ switch (type) {
+ case FIXED_ARRAY_TYPE:
+ return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3);
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return ReturnType();
+ case TRANSITION_ARRAY_TYPE:
+ return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
+ case JS_OBJECT_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3);
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3);
+ case JS_ARRAY_BUFFER_TYPE:
+ return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3);
+ case JS_FUNCTION_TYPE:
+ return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3);
+ case ODDBALL_TYPE:
+ return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3);
+ case JS_PROXY_TYPE:
+ return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3);
+ case FOREIGN_TYPE:
+ return Op::template apply<Foreign::BodyDescriptor>(p1, p2, p3);
+ case MAP_TYPE:
+ return Op::template apply<Map::BodyDescriptor>(p1, p2, p3);
+ case CODE_TYPE:
+ return Op::template apply<Code::BodyDescriptor>(p1, p2, p3);
+ case CELL_TYPE:
+ return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3);
+ case PROPERTY_CELL_TYPE:
+ return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3);
+ case WEAK_CELL_TYPE:
+ return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3);
+ case SYMBOL_TYPE:
+ return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3);
+ case BYTECODE_ARRAY_TYPE:
+ return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3);
+
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case FILLER_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
+ return ReturnType();
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return Op::template apply<FixedTypedArrayBase::BodyDescriptor>(p1, p2, p3);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ case SHARED_FUNCTION_INFO_TYPE: {
+ return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3);
+ }
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ if (type == ALLOCATION_SITE_TYPE) {
+ return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3);
+ } else {
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
+ }
+ default:
+ PrintF("Unknown type: %d\n", type);
+ UNREACHABLE();
+ return ReturnType();
+ }
+}
+
+
+template <typename ObjectVisitor>
+void HeapObject::IterateFast(ObjectVisitor* v) {
+ BodyDescriptorBase::IteratePointer(this, kMapOffset, v);
+ IterateBodyFast(v);
+}
+
+
+template <typename ObjectVisitor>
+void HeapObject::IterateBodyFast(ObjectVisitor* v) {
+ Map* m = map();
+ IterateBodyFast(m->instance_type(), SizeFromMap(m), v);
+}
+
+
+struct CallIterateBody {
+ template <typename BodyDescriptor, typename ObjectVisitor>
+ static void apply(HeapObject* obj, int object_size, ObjectVisitor* v) {
+ BodyDescriptor::IterateBody(obj, object_size, v);
+ }
+};
+
+template <typename ObjectVisitor>
+void HeapObject::IterateBodyFast(InstanceType type, int object_size,
+ ObjectVisitor* v) {
+ BodyDescriptorApply<CallIterateBody, void>(type, this, object_size, v);
+}
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
new file mode 100644
index 0000000000..91cb8883be
--- /dev/null
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -0,0 +1,141 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_BODY_DESCRIPTORS_H_
+#define V8_OBJECTS_BODY_DESCRIPTORS_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// This is the base class for object's body descriptors.
+//
+// Each BodyDescriptor subclass must provide the following methods:
+//
+// 1) Returns true if the object contains a tagged value at given offset.
+// It is used for invalid slots filtering. If the offset points outside
+// of the object or to the map word, the result is UNDEFINED (!!!).
+//
+// static bool IsValidSlot(HeapObject* obj, int offset);
+//
+//
+// 2) Iterate object's body using stateful object visitor.
+//
+// template <typename ObjectVisitor>
+// static inline void IterateBody(HeapObject* obj, int object_size,
+// ObjectVisitor* v);
+//
+//
+// 3) Iterate object's body using stateless object visitor.
+//
+// template <typename StaticVisitor>
+// static inline void IterateBody(HeapObject* obj, int object_size);
+//
+class BodyDescriptorBase BASE_EMBEDDED {
+ public:
+ template <typename ObjectVisitor>
+ static inline void IteratePointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ static inline void IteratePointers(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset);
+
+ template <typename ObjectVisitor>
+ static inline void IteratePointer(HeapObject* obj, int offset,
+ ObjectVisitor* v);
+
+ template <typename StaticVisitor>
+ static inline void IteratePointer(Heap* heap, HeapObject* obj, int offset);
+
+ protected:
+ // Returns true for all header and internal fields.
+ static inline bool IsValidSlotImpl(HeapObject* obj, int offset);
+
+ // Treats all header and internal fields in the range as tagged.
+ template <typename ObjectVisitor>
+ static inline void IterateBodyImpl(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ // Treats all header and internal fields in the range as tagged.
+ template <typename StaticVisitor>
+ static inline void IterateBodyImpl(Heap* heap, HeapObject* obj,
+ int start_offset, int end_offset);
+};
+
+
+// This class describes a body of an object of a fixed size
+// in which all pointer fields are located in the [start_offset, end_offset)
+// interval.
+template <int start_offset, int end_offset, int size>
+class FixedBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+ static const int kEndOffset = end_offset;
+ static const int kSize = size;
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ return offset >= kStartOffset && offset < kEndOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ IterateBodyImpl(obj, start_offset, end_offset, v);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBody(obj, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj) {
+ Heap* heap = obj->GetHeap();
+ IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, end_offset);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ IterateBody(obj);
+ }
+};
+
+
+// This class describes a body of an object of a variable size
+// in which all pointer fields are located in the [start_offset, object_size)
+// interval.
+template <int start_offset>
+class FlexibleBodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static const int kStartOffset = start_offset;
+
+ static bool IsValidSlot(HeapObject* obj, int offset) {
+ if (offset < kStartOffset) return false;
+ return IsValidSlotImpl(obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateBodyImpl(obj, start_offset, object_size, v);
+ }
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {
+ Heap* heap = obj->GetHeap();
+ IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, object_size);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object);
+};
+
+
+typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_BODY_DESCRIPTORS_H_
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 242ff754ad..b6dd42553c 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -76,6 +76,9 @@ void HeapObject::HeapObjectVerify() {
case BYTECODE_ARRAY_TYPE:
BytecodeArray::cast(this)->BytecodeArrayVerify();
break;
+ case TRANSITION_ARRAY_TYPE:
+ TransitionArray::cast(this)->TransitionArrayVerify();
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpaceVerify();
break;
@@ -96,6 +99,7 @@ void HeapObject::HeapObjectVerify() {
break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_PROMISE_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
case JS_GENERATOR_OBJECT_TYPE:
@@ -110,6 +114,9 @@ void HeapObject::HeapObjectVerify() {
case JS_DATE_TYPE:
JSDate::cast(this)->JSDateVerify();
break;
+ case JS_BOUND_FUNCTION_TYPE:
+ JSBoundFunction::cast(this)->JSBoundFunctionVerify();
+ break;
case JS_FUNCTION_TYPE:
JSFunction::cast(this)->JSFunctionVerify();
break;
@@ -160,9 +167,6 @@ void HeapObject::HeapObjectVerify() {
case JS_PROXY_TYPE:
JSProxy::cast(this)->JSProxyVerify();
break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::cast(this)->JSFunctionProxyVerify();
- break;
case FOREIGN_TYPE:
Foreign::cast(this)->ForeignVerify();
break;
@@ -329,6 +333,8 @@ void Map::MapVerify() {
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
instance_size() < heap->Capacity()));
+ CHECK(GetBackPointer()->IsUndefined() ||
+ !Map::cast(GetBackPointer())->is_stable());
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
@@ -354,8 +360,7 @@ void Map::VerifyOmittedMapChecks() {
if (!is_stable() ||
is_deprecated() ||
is_dictionary_map()) {
- CHECK_EQ(0, dependent_code()->number_of_entries(
- DependentCode::kPrototypeCheckGroup));
+ CHECK(dependent_code()->IsEmpty(DependentCode::kPrototypeCheckGroup));
}
}
@@ -411,6 +416,17 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
+void TransitionArray::TransitionArrayVerify() {
+ for (int i = 0; i < length(); i++) {
+ Object* e = get(i);
+ VerifyPointer(e);
+ }
+ CHECK_LE(LengthFor(number_of_transitions()), length());
+ CHECK(next_link()->IsUndefined() || next_link()->IsSmi() ||
+ next_link()->IsTransitionArray());
+}
+
+
void JSGeneratorObject::JSGeneratorObjectVerify() {
// In an expression like "new g()", there can be a point where a generator
// object is allocated but its fields are all undefined, as it hasn't yet been
@@ -531,6 +547,20 @@ void SlicedString::SlicedStringVerify() {
}
+void JSBoundFunction::JSBoundFunctionVerify() {
+ CHECK(IsJSBoundFunction());
+ JSObjectVerify();
+ VerifyObjectField(kBoundThisOffset);
+ VerifyObjectField(kBoundTargetFunctionOffset);
+ VerifyObjectField(kBoundArgumentsOffset);
+ VerifyObjectField(kCreationContextOffset);
+ CHECK(bound_target_function()->IsCallable());
+ CHECK(creation_context()->IsNativeContext());
+ CHECK(IsCallable());
+ CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
+}
+
+
void JSFunction::JSFunctionVerify() {
CHECK(IsJSFunction());
VerifyObjectField(kPrototypeOrInitialMapOffset);
@@ -805,17 +835,14 @@ void JSRegExp::JSRegExpVerify() {
void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
+ VerifyPointer(target());
VerifyPointer(handler());
+ CHECK_EQ(target()->IsCallable(), map()->is_callable());
+ CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
CHECK(hash()->IsSmi() || hash()->IsUndefined());
-}
-
-
-void JSFunctionProxy::JSFunctionProxyVerify() {
- CHECK(IsJSFunctionProxy());
- JSProxyVerify();
- VerifyPointer(call_trap());
- VerifyPointer(construct_trap());
- CHECK(map()->is_callable());
+ CHECK(map()->prototype()->IsNull());
+ // There should be no properties on a Proxy.
+ CHECK_EQ(0, map()->NumberOfOwnDescriptors());
}
@@ -881,7 +908,6 @@ void PrototypeInfo::PrototypeInfoVerify() {
CHECK(prototype_users()->IsSmi());
}
CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
- VerifyPointer(constructor_name());
}
@@ -973,12 +999,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
}
-void TypeSwitchInfo::TypeSwitchInfoVerify() {
- CHECK(IsTypeSwitchInfo());
- VerifyPointer(types());
-}
-
-
void AllocationSite::AllocationSiteVerify() {
CHECK(IsAllocationSite());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 052fc51472..0509a80b23 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -134,6 +134,14 @@ bool Object::IsFixedArrayBase() const {
}
+bool Object::IsFixedArray() const {
+ if (!IsHeapObject()) return false;
+ InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+ return instance_type == FIXED_ARRAY_TYPE ||
+ instance_type == TRANSITION_ARRAY_TYPE;
+}
+
+
// External objects are not extensible, so the map check is enough.
bool Object::IsExternal() const {
return Object::IsHeapObject() &&
@@ -179,6 +187,13 @@ bool Object::IsUniqueName() const {
}
+bool Object::IsFunction() const {
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->instance_type() >= FIRST_FUNCTION_TYPE;
+}
+
+
bool Object::IsCallable() const {
return Object::IsHeapObject() && HeapObject::cast(this)->map()->is_callable();
}
@@ -190,12 +205,6 @@ bool Object::IsConstructor() const {
}
-bool Object::IsSpecObject() const {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
-}
-
-
bool Object::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
@@ -286,20 +295,13 @@ bool Object::KeyEquals(Object* second) {
}
-bool Object::FilterKey(PropertyAttributes filter) {
- if ((filter & SYMBOLIC) && IsSymbol()) {
- return true;
- }
-
- if ((filter & PRIVATE_SYMBOL) && IsSymbol() &&
- Symbol::cast(this)->is_private()) {
- return true;
- }
-
- if ((filter & STRING) && !IsSymbol()) {
- return true;
+bool Object::FilterKey(PropertyFilter filter) {
+ if (IsSymbol()) {
+ if (filter & SKIP_SYMBOLS) return true;
+ if (Symbol::cast(this)->is_private()) return true;
+ } else {
+ if (filter & SKIP_STRINGS) return true;
}
-
return false;
}
@@ -707,7 +709,6 @@ bool Object::IsJSProxy() const {
}
-TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
@@ -717,9 +718,9 @@ TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
bool Object::IsJSWeakCollection() const {
@@ -740,11 +741,6 @@ bool Object::IsLayoutDescriptor() const {
}
-bool Object::IsTransitionArray() const {
- return IsFixedArray();
-}
-
-
bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
@@ -752,7 +748,6 @@ bool Object::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
bool Object::IsLiteralsArray() const { return IsFixedArray(); }
-bool Object::IsBindingsArray() const { return IsFixedArray(); }
bool Object::IsDeoptimizationInputData() const {
@@ -833,6 +828,7 @@ bool Object::IsScopeInfo() const {
}
+TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
@@ -1194,21 +1190,14 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
}
-Handle<Object> Object::GetPrototype(Isolate* isolate, Handle<Object> obj) {
+MaybeHandle<Object> Object::GetPrototype(Isolate* isolate,
+ Handle<Object> receiver) {
// We don't expect access checks to be needed on JSProxy objects.
- DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
- Handle<Context> context(isolate->context());
- if (obj->IsAccessCheckNeeded() &&
- !isolate->MayAccess(context, Handle<JSObject>::cast(obj))) {
- return isolate->factory()->null_value();
- }
-
- PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
+ DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
+ PrototypeIterator iter(isolate, receiver,
+ PrototypeIterator::START_AT_RECEIVER);
do {
- iter.AdvanceIgnoringProxies();
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return PrototypeIterator::GetCurrent(iter);
- }
+ if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
} while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
return PrototypeIterator::GetCurrent(iter);
}
@@ -1490,62 +1479,6 @@ int HeapObject::Size() {
}
-HeapObjectContents HeapObject::ContentType() {
- InstanceType type = map()->instance_type();
- if (type <= LAST_NAME_TYPE) {
- if (type == SYMBOL_TYPE) {
- return HeapObjectContents::kTaggedValues;
- }
- DCHECK(type < FIRST_NONSTRING_TYPE);
- // There are four string representations: sequential strings, external
- // strings, cons strings, and sliced strings.
- // Only the former two contain raw values and no heap pointers (besides the
- // map-word).
- if (((type & kIsIndirectStringMask) != kIsIndirectStringTag))
- return HeapObjectContents::kRawValues;
- else
- return HeapObjectContents::kTaggedValues;
-#if 0
- // TODO(jochen): Enable eventually.
- } else if (type == JS_FUNCTION_TYPE) {
- return HeapObjectContents::kMixedValues;
-#endif
- } else if (type == BYTECODE_ARRAY_TYPE) {
- return HeapObjectContents::kMixedValues;
- } else if (type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
- type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
- return HeapObjectContents::kMixedValues;
- } else if (type == JS_ARRAY_BUFFER_TYPE) {
- return HeapObjectContents::kMixedValues;
- } else if (type <= LAST_DATA_TYPE) {
- // TODO(jochen): Why do we claim that Code and Map contain only raw values?
- return HeapObjectContents::kRawValues;
- } else {
- if (FLAG_unbox_double_fields) {
- LayoutDescriptorHelper helper(map());
- if (!helper.all_fields_tagged()) return HeapObjectContents::kMixedValues;
- }
- return HeapObjectContents::kTaggedValues;
- }
-}
-
-
-void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
- v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
- reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
-}
-
-
-void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
- v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
-}
-
-
-void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
- v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
-}
-
-
double HeapNumber::value() const {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
@@ -1666,7 +1599,7 @@ SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
#undef SIMD128_WRITE_LANE
-ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
+ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)
Object** FixedArray::GetFirstElementAddress() {
@@ -1840,12 +1773,12 @@ void AllocationSite::set_memento_create_count(int count) {
}
-inline bool AllocationSite::IncrementMementoFoundCount() {
+bool AllocationSite::IncrementMementoFoundCount(int increment) {
if (IsZombie()) return false;
int value = memento_found_count();
- set_memento_found_count(value + 1);
- return memento_found_count() == kPretenureMinimumCreated;
+ set_memento_found_count(value + increment);
+ return memento_found_count() >= kPretenureMinimumCreated;
}
@@ -1899,11 +1832,12 @@ inline bool AllocationSite::DigestPretenuringFeedback(
}
if (FLAG_trace_pretenuring_statistics) {
- PrintF(
- "AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
- static_cast<void*>(this), create_count, found_count, ratio,
- PretenureDecisionName(current_decision),
- PretenureDecisionName(pretenure_decision()));
+ PrintIsolate(GetIsolate(),
+ "pretenuring: AllocationSite(%p): (created, found, ratio) "
+ "(%d, %d, %f) %s => %s\n",
+ this, create_count, found_count, ratio,
+ PretenureDecisionName(current_decision),
+ PretenureDecisionName(pretenure_decision()));
}
// Clear feedback calculation fields until the next gc.
@@ -2035,18 +1969,22 @@ void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
}
-void JSObject::initialize_properties() {
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
-}
-
-
void JSObject::initialize_elements() {
FixedArrayBase* elements = map()->GetInitialElements();
WRITE_FIELD(this, kElementsOffset, elements);
}
+InterceptorInfo* JSObject::GetIndexedInterceptor() {
+ DCHECK(map()->has_indexed_interceptor());
+ JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
+ DCHECK(constructor->shared()->IsApiFunction());
+ Object* result =
+ constructor->shared()->get_api_func_data()->indexed_property_handler();
+ return InterceptorInfo::cast(result);
+}
+
+
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
@@ -2088,7 +2026,10 @@ Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
void WeakCell::clear() {
- DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT);
+ // Either the garbage collector is clearing the cell or we are simply
+ // initializing the root empty weak cell.
+ DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
+ this == GetHeap()->empty_weak_cell());
WRITE_FIELD(this, kValueOffset, Smi::FromInt(0));
}
@@ -2119,8 +2060,9 @@ void WeakCell::set_next(Object* val, WriteBarrierMode mode) {
}
-void WeakCell::clear_next(Heap* heap) {
- set_next(heap->the_hole_value(), SKIP_WRITE_BARRIER);
+void WeakCell::clear_next(Object* the_hole_value) {
+ DCHECK_EQ(GetHeap()->the_hole_value(), the_hole_value);
+ set_next(the_hole_value, SKIP_WRITE_BARRIER);
}
@@ -2144,6 +2086,8 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
return JSGlobalObject::kSize;
+ case JS_BOUND_FUNCTION_TYPE:
+ return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::kSize;
case JS_VALUE_TYPE:
@@ -2172,6 +2116,8 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
return JSWeakSet::kSize;
+ case JS_PROMISE_TYPE:
+ return JSObject::kHeaderSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -2338,8 +2284,7 @@ Object* JSObject::InObjectPropertyAtPut(int index,
}
-
-void JSObject::InitializeBody(Map* map,
+void JSObject::InitializeBody(Map* map, int start_offset,
Object* pre_allocated_value,
Object* filler_value) {
DCHECK(!filler_value->IsHeapObject() ||
@@ -2347,12 +2292,12 @@ void JSObject::InitializeBody(Map* map,
DCHECK(!pre_allocated_value->IsHeapObject() ||
!GetHeap()->InNewSpace(pre_allocated_value));
int size = map->instance_size();
- int offset = kHeaderSize;
+ int offset = start_offset;
if (filler_value != pre_allocated_value) {
- int pre_allocated =
- map->GetInObjectProperties() - map->unused_property_fields();
- DCHECK(pre_allocated * kPointerSize + kHeaderSize <= size);
- for (int i = 0; i < pre_allocated; i++) {
+ int end_of_pre_allocated_offset =
+ size - (map->unused_property_fields() * kPointerSize);
+ DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
+ while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(this, offset, pre_allocated_value);
offset += kPointerSize;
}
@@ -2364,12 +2309,6 @@ void JSObject::InitializeBody(Map* map,
}
-bool JSObject::HasFastProperties() {
- DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
- return !properties()->IsDictionary();
-}
-
-
bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
@@ -2411,7 +2350,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
void Object::VerifyApiCallResultType() {
#if DEBUG
- if (!(IsSmi() || IsString() || IsSymbol() || IsSpecObject() ||
+ if (!(IsSmi() || IsString() || IsSymbol() || IsJSReceiver() ||
IsHeapNumber() || IsSimd128Value() || IsUndefined() || IsTrue() ||
IsFalse() || IsNull())) {
FATAL("API call returned invalid object");
@@ -2447,7 +2386,7 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
- DCHECK_EQ(FIXED_ARRAY_TYPE, map()->instance_type());
+ DCHECK(IsFixedArray());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
@@ -2636,20 +2575,6 @@ void FixedArray::set(int index,
}
-void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
- int index,
- Object* value) {
- DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < array->length());
- int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(array, offset, value);
- Heap* heap = array->GetHeap();
- if (heap->InNewSpace(value)) {
- heap->RecordWrite(array->address(), offset);
- }
-}
-
-
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
@@ -3088,20 +3013,12 @@ void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
}
-void DescriptorArray::Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&) {
+void DescriptorArray::SetDescriptor(int descriptor_number, Descriptor* desc) {
// Range check.
DCHECK(descriptor_number < number_of_descriptors());
-
- NoIncrementalWriteBarrierSet(this,
- ToKeyIndex(descriptor_number),
- *desc->GetKey());
- NoIncrementalWriteBarrierSet(this,
- ToValueIndex(descriptor_number),
- *desc->GetValue());
- NoIncrementalWriteBarrierSet(this, ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
+ set(ToKeyIndex(descriptor_number), *desc->GetKey());
+ set(ToValueIndex(descriptor_number), *desc->GetValue());
+ set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
}
@@ -3142,19 +3059,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
}
-DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
- : marking_(array->GetHeap()->incremental_marking()) {
- marking_->EnterNoMarkingScope();
- DCHECK(!marking_->IsMarking() ||
- Marking::Color(array) == Marking::WHITE_OBJECT);
-}
-
-
-DescriptorArray::WhitenessWitness::~WhitenessWitness() {
- marking_->LeaveNoMarkingScope();
-}
-
-
PropertyType DescriptorArray::Entry::type() { return descs_->GetType(index_); }
@@ -3311,10 +3215,10 @@ CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(JSFunctionProxy)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
@@ -3515,75 +3419,6 @@ int LiteralsArray::literals_count() const {
}
-Object* BindingsArray::get(int index) const { return FixedArray::get(index); }
-
-
-void BindingsArray::set(int index, Object* value) {
- FixedArray::set(index, value);
-}
-
-
-void BindingsArray::set(int index, Smi* value) {
- FixedArray::set(index, value);
-}
-
-
-void BindingsArray::set(int index, Object* value, WriteBarrierMode mode) {
- FixedArray::set(index, value, mode);
-}
-
-
-int BindingsArray::length() const { return FixedArray::length(); }
-
-
-BindingsArray* BindingsArray::cast(Object* object) {
- SLOW_DCHECK(object->IsBindingsArray());
- return reinterpret_cast<BindingsArray*>(object);
-}
-
-void BindingsArray::set_feedback_vector(TypeFeedbackVector* vector) {
- set(kVectorIndex, vector);
-}
-
-
-TypeFeedbackVector* BindingsArray::feedback_vector() const {
- return TypeFeedbackVector::cast(get(kVectorIndex));
-}
-
-
-JSReceiver* BindingsArray::bound_function() const {
- return JSReceiver::cast(get(kBoundFunctionIndex));
-}
-
-
-void BindingsArray::set_bound_function(JSReceiver* function) {
- set(kBoundFunctionIndex, function);
-}
-
-
-Object* BindingsArray::bound_this() const { return get(kBoundThisIndex); }
-
-
-void BindingsArray::set_bound_this(Object* bound_this) {
- set(kBoundThisIndex, bound_this);
-}
-
-
-Object* BindingsArray::binding(int binding_index) const {
- return get(kFirstBindingIndex + binding_index);
-}
-
-
-void BindingsArray::set_binding(int binding_index, Object* binding) {
- set(kFirstBindingIndex + binding_index, binding);
-}
-
-
-int BindingsArray::bindings_count() const {
- return length() - kFirstBindingIndex;
-}
-
-
void HandlerTable::SetRangeStart(int index, int value) {
set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
}
@@ -4173,11 +4008,6 @@ Address ByteArray::GetDataStartAddress() {
}
-void BytecodeArray::BytecodeArrayIterateBody(ObjectVisitor* v) {
- IteratePointer(v, kConstantPoolOffset);
-}
-
-
byte BytecodeArray::get(int index) {
DCHECK(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -4524,11 +4354,10 @@ int Map::GetInObjectPropertyOffset(int index) {
}
-Handle<Map> Map::CopyInstallDescriptorsForTesting(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor) {
- return CopyInstallDescriptors(map, new_descriptor, descriptors,
- layout_descriptor);
+Handle<Map> Map::AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ return AddMissingTransitions(split_map, descriptors, full_layout_descriptor);
}
@@ -4537,8 +4366,10 @@ int HeapObject::SizeFromMap(Map* map) {
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
InstanceType instance_type = map->instance_type();
- if (instance_type == FIXED_ARRAY_TYPE) {
- return FixedArray::BodyDescriptor::SizeOf(map, this);
+ if (instance_type == FIXED_ARRAY_TYPE ||
+ instance_type == TRANSITION_ARRAY_TYPE) {
+ return FixedArray::SizeFor(
+ reinterpret_cast<FixedArray*>(this)->synchronized_length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
@@ -4639,12 +4470,8 @@ bool Map::has_non_instance_prototype() {
}
-void Map::set_is_constructor(bool value) {
- if (value) {
- set_bit_field(bit_field() | (1 << kIsConstructor));
- } else {
- set_bit_field(bit_field() & ~(1 << kIsConstructor));
- }
+void Map::set_is_constructor() {
+ set_bit_field(bit_field() | (1 << kIsConstructor));
}
@@ -4843,12 +4670,22 @@ bool Map::is_strong() {
}
-void Map::set_counter(int value) {
- set_bit_field3(Counter::update(bit_field3(), value));
+void Map::set_new_target_is_base(bool value) {
+ set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
+}
+
+
+bool Map::new_target_is_base() { return NewTargetIsBase::decode(bit_field3()); }
+
+
+void Map::set_construction_counter(int value) {
+ set_bit_field3(ConstructionCounter::update(bit_field3(), value));
}
-int Map::counter() { return Counter::decode(bit_field3()); }
+int Map::construction_counter() {
+ return ConstructionCounter::decode(bit_field3());
+}
void Map::mark_unstable() {
@@ -4902,6 +4739,10 @@ bool Map::IsPrimitiveMap() {
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
return instance_type() <= LAST_PRIMITIVE_TYPE;
}
+bool Map::IsJSReceiverMap() {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ return instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
bool Map::IsJSObjectMap() {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return instance_type() >= FIRST_JS_OBJECT_TYPE;
@@ -4909,10 +4750,7 @@ bool Map::IsJSObjectMap() {
bool Map::IsJSArrayMap() { return instance_type() == JS_ARRAY_TYPE; }
bool Map::IsJSFunctionMap() { return instance_type() == JS_FUNCTION_TYPE; }
bool Map::IsStringMap() { return instance_type() < FIRST_NONSTRING_TYPE; }
-bool Map::IsJSProxyMap() {
- InstanceType type = instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
-}
+bool Map::IsJSProxyMap() { return instance_type() == JS_PROXY_TYPE; }
bool Map::IsJSGlobalProxyMap() {
return instance_type() == JS_GLOBAL_PROXY_TYPE;
}
@@ -4920,6 +4758,7 @@ bool Map::IsJSGlobalObjectMap() {
return instance_type() == JS_GLOBAL_OBJECT_TYPE;
}
bool Map::IsJSTypedArrayMap() { return instance_type() == JS_TYPED_ARRAY_TYPE; }
+bool Map::IsJSDataViewMap() { return instance_type() == JS_DATA_VIEW_TYPE; }
bool Map::CanOmitMapChecks() {
@@ -4927,14 +4766,38 @@ bool Map::CanOmitMapChecks() {
}
-int DependentCode::number_of_entries(DependencyGroup group) {
- if (length() == 0) return 0;
- return Smi::cast(get(group))->value();
+DependentCode* DependentCode::next_link() {
+ return DependentCode::cast(get(kNextLinkIndex));
+}
+
+
+void DependentCode::set_next_link(DependentCode* next) {
+ set(kNextLinkIndex, next);
+}
+
+
+int DependentCode::flags() { return Smi::cast(get(kFlagsIndex))->value(); }
+
+
+void DependentCode::set_flags(int flags) {
+ set(kFlagsIndex, Smi::FromInt(flags));
+}
+
+
+int DependentCode::count() { return CountField::decode(flags()); }
+
+void DependentCode::set_count(int value) {
+ set_flags(CountField::update(flags(), value));
+}
+
+
+DependentCode::DependencyGroup DependentCode::group() {
+ return static_cast<DependencyGroup>(GroupField::decode(flags()));
}
-void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
- set(group, Smi::FromInt(value));
+void DependentCode::set_group(DependentCode::DependencyGroup group) {
+ set_flags(GroupField::update(flags(), static_cast<int>(group)));
}
@@ -4958,16 +4821,6 @@ void DependentCode::copy(int from, int to) {
}
-void DependentCode::ExtendGroup(DependencyGroup group) {
- GroupStartIndexes starts(this);
- for (int g = kGroupCount - 1; g > group; g--) {
- if (starts.at(g) < starts.at(g + 1)) {
- copy(starts.at(g), starts.at(g + 1));
- }
- }
-}
-
-
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
WRITE_INT_FIELD(this, kFlagsOffset, flags);
@@ -5226,21 +5079,6 @@ bool Code::back_edges_patched_for_osr() {
uint16_t Code::to_boolean_state() { return extra_ic_state(); }
-bool Code::has_function_cache() {
- DCHECK(kind() == STUB);
- return HasFunctionCacheField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_has_function_cache(bool flag) {
- DCHECK(kind() == STUB);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = HasFunctionCacheField::update(previous, flag);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
bool Code::marked_for_deoptimization() {
DCHECK(kind() == OPTIMIZED_FUNCTION);
return MarkedForDeoptimizationField::decode(
@@ -5389,8 +5227,6 @@ Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
bool Code::CanContainWeakObjects() {
- // is_turbofanned() implies !can_have_weak_objects().
- DCHECK(!is_optimized_code() || !is_turbofanned() || !can_have_weak_objects());
return is_optimized_code() && can_have_weak_objects();
}
@@ -5410,16 +5246,12 @@ bool Code::IsWeakObjectInOptimizedCode(Object* object) {
} else if (object->IsPropertyCell()) {
object = PropertyCell::cast(object)->value();
}
- if (object->IsJSObject() || object->IsJSProxy()) {
- // JSProxy is handled like JSObject because it can morph into one.
+ if (object->IsJSReceiver()) {
return FLAG_weak_embedded_objects_in_optimized_code;
}
- if (object->IsFixedArray()) {
+ if (object->IsContext()) {
// Contexts of inlined functions are embedded in optimized code.
- Map* map = HeapObject::cast(object)->map();
- Heap* heap = map->GetHeap();
- return FLAG_weak_embedded_objects_in_optimized_code &&
- map == heap->function_context_map();
+ return FLAG_weak_embedded_objects_in_optimized_code;
}
return false;
}
@@ -5582,8 +5414,7 @@ void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
+ DCHECK((value->IsMap() && GetBackPointer()->IsUndefined()));
DCHECK(!value->IsMap() ||
Map::cast(value)->GetConstructor() == constructor_or_backpointer());
set_constructor_or_backpointer(value, mode);
@@ -5621,8 +5452,16 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
}
+ACCESSORS(JSBoundFunction, length, Object, kLengthOffset)
+ACCESSORS(JSBoundFunction, name, Object, kNameOffset)
+ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
+ kBoundTargetFunctionOffset)
+ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
+ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
+ACCESSORS(JSBoundFunction, creation_context, Context, kCreationContextOffset)
+
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction, literals, LiteralsArray, kLiteralsOffset)
ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
@@ -5645,7 +5484,6 @@ ACCESSORS(Box, value, Object, kValueOffset)
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
-ACCESSORS(PrototypeInfo, constructor_name, Object, kConstructorNameOffset)
ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
kScopeInfoOffset)
@@ -5674,6 +5512,7 @@ BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
+ACCESSORS(CallHandlerInfo, fast_handler, Object, kFastHandlerOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
@@ -5703,8 +5542,6 @@ ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
kInternalFieldCountOffset)
-ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
-
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
SMI_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
@@ -5773,8 +5610,8 @@ SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
- kOptimizedCodeMapOffset)
+ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
+ kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
kFeedbackVectorOffset)
@@ -5967,7 +5804,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
@@ -6221,6 +6057,31 @@ bool SharedFunctionInfo::IsBuiltin() {
bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
+bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
+ return optimized_code_map() == GetHeap()->cleared_optimized_code_map();
+}
+
+
+// static
+void SharedFunctionInfo::AddToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<Code> code, Handle<LiteralsArray> literals, BailoutId osr_ast_id) {
+ AddToOptimizedCodeMapInternal(shared, native_context, code, literals,
+ osr_ast_id);
+}
+
+
+// static
+void SharedFunctionInfo::AddLiteralsToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<LiteralsArray> literals) {
+ Isolate* isolate = shared->GetIsolate();
+ Handle<Oddball> undefined = isolate->factory()->undefined_value();
+ AddToOptimizedCodeMapInternal(shared, native_context, undefined, literals,
+ BailoutId::None());
+}
+
+
bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
@@ -6244,9 +6105,25 @@ bool JSFunction::IsInOptimizationQueue() {
}
-bool JSFunction::IsInobjectSlackTrackingInProgress() {
- return has_initial_map() &&
- initial_map()->counter() >= Map::kSlackTrackingCounterEnd;
+void JSFunction::CompleteInobjectSlackTrackingIfActive() {
+ if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
+ initial_map()->CompleteInobjectSlackTracking();
+ }
+}
+
+
+bool Map::IsInobjectSlackTrackingInProgress() {
+ return construction_counter() != Map::kNoSlackTracking;
+}
+
+
+void Map::InobjectSlackTrackingStep() {
+ if (!IsInobjectSlackTrackingInProgress()) return;
+ int counter = construction_counter();
+ set_construction_counter(counter - 1);
+ if (counter == kSlackTrackingCounterEnd) {
+ CompleteInobjectSlackTracking();
+ }
}
@@ -6307,6 +6184,9 @@ JSObject* JSFunction::global_proxy() {
}
+Context* JSFunction::native_context() { return context()->native_context(); }
+
+
void JSFunction::set_context(Object* value) {
DCHECK(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
@@ -6368,53 +6248,16 @@ bool JSFunction::is_compiled() {
}
-LiteralsArray* JSFunction::literals() {
- DCHECK(!shared()->bound());
- return LiteralsArray::cast(literals_or_bindings());
-}
-
-
-void JSFunction::set_literals(LiteralsArray* literals) {
- DCHECK(!shared()->bound());
- set_literals_or_bindings(literals);
-}
-
-
-BindingsArray* JSFunction::function_bindings() {
- DCHECK(shared()->bound());
- return BindingsArray::cast(literals_or_bindings());
-}
-
-
-void JSFunction::set_function_bindings(BindingsArray* bindings) {
- DCHECK(shared()->bound());
- // Bound function literal may be initialized to the empty fixed array
- // before the bindings are set.
- DCHECK(bindings == GetHeap()->empty_fixed_array() ||
- bindings->map() == GetHeap()->fixed_array_map());
- set_literals_or_bindings(bindings);
-}
-
-
int JSFunction::NumberOfLiterals() {
- DCHECK(!shared()->bound());
return literals()->length();
}
+ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
ACCESSORS(JSProxy, hash, Object, kHashOffset)
-ACCESSORS(JSFunctionProxy, call_trap, JSReceiver, kCallTrapOffset)
-ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
-
-
-void JSProxy::InitializeBody(int object_size, Object* value) {
- DCHECK(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
- for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
- WRITE_FIELD(this, offset, value);
- }
-}
+bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
ACCESSORS(JSCollection, table, Object, kTableOffset)
@@ -6684,32 +6527,6 @@ void JSArrayBuffer::set_is_shared(bool value) {
}
-// static
-template <typename StaticVisitor>
-void JSArrayBuffer::JSArrayBufferIterateBody(Heap* heap, HeapObject* obj) {
- StaticVisitor::VisitPointers(
- heap, obj,
- HeapObject::RawField(obj, JSArrayBuffer::BodyDescriptor::kStartOffset),
- HeapObject::RawField(obj,
- JSArrayBuffer::kByteLengthOffset + kPointerSize));
- StaticVisitor::VisitPointers(
- heap, obj, HeapObject::RawField(obj, JSArrayBuffer::kSize),
- HeapObject::RawField(obj, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
-void JSArrayBuffer::JSArrayBufferIterateBody(HeapObject* obj,
- ObjectVisitor* v) {
- v->VisitPointers(
- HeapObject::RawField(obj, JSArrayBuffer::BodyDescriptor::kStartOffset),
- HeapObject::RawField(obj,
- JSArrayBuffer::kByteLengthOffset + kPointerSize));
- v->VisitPointers(
- HeapObject::RawField(obj, JSArrayBuffer::kSize),
- HeapObject::RawField(obj, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
Object* JSArrayBufferView::byte_offset() const {
if (WasNeutered()) return Smi::FromInt(0);
return Object::cast(READ_FIELD(this, kByteOffsetOffset));
@@ -6935,13 +6752,6 @@ bool JSObject::HasIndexedInterceptor() {
}
-NameDictionary* JSObject::property_dictionary() {
- DCHECK(!HasFastProperties());
- DCHECK(!IsJSGlobalObject());
- return NameDictionary::cast(properties());
-}
-
-
GlobalDictionary* JSObject::global_dictionary() {
DCHECK(!HasFastProperties());
DCHECK(IsJSGlobalObject());
@@ -7255,6 +7065,31 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<JSReceiver> holder,
}
+void JSReceiver::initialize_properties() {
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_properties_dictionary()));
+ if (map()->is_dictionary_map()) {
+ WRITE_FIELD(this, kPropertiesOffset,
+ GetHeap()->empty_properties_dictionary());
+ } else {
+ WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
+ }
+}
+
+
+bool JSReceiver::HasFastProperties() {
+ DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
+ return !properties()->IsDictionary();
+}
+
+
+NameDictionary* JSReceiver::property_dictionary() {
+ DCHECK(!HasFastProperties());
+ DCHECK(!IsJSGlobalObject());
+ return NameDictionary::cast(properties());
+}
+
+
Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
Handle<Name> name) {
LookupIterator it =
@@ -7265,9 +7100,16 @@ Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Handle<Name> name) {
- LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, LookupIterator::HIDDEN);
- return HasProperty(&it);
+ if (object->IsJSObject()) { // Shortcut
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ object->GetIsolate(), object, name, LookupIterator::HIDDEN);
+ return HasProperty(&it);
+ }
+
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetOwnPropertyAttributes(object, name);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ return Just(attributes.FromJust() != ABSENT);
}
@@ -7293,14 +7135,6 @@ Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
}
-Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
- uint32_t index) {
- LookupIterator it(object->GetIsolate(), object, index,
- LookupIterator::HIDDEN);
- return HasProperty(&it);
-}
-
-
Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
Handle<JSReceiver> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
@@ -7842,189 +7676,6 @@ Relocatable::~Relocatable() {
}
-// static
-template <int start_offset>
-int FlexibleBodyDescriptor<start_offset>::SizeOf(Map* map, HeapObject* object) {
- return map->instance_size();
-}
-
-
-// static
-int FixedArray::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
- return SizeFor(reinterpret_cast<FixedArray*>(object)->synchronized_length());
-}
-
-
-void Foreign::ForeignIterateBody(ObjectVisitor* v) {
- v->VisitExternalReference(
- reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
-}
-
-
-template<typename StaticVisitor>
-void Foreign::ForeignIterateBody() {
- StaticVisitor::VisitExternalReference(
- reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
-}
-
-
-void FixedTypedArrayBase::FixedTypedArrayBaseIterateBody(ObjectVisitor* v) {
- v->VisitPointer(
- reinterpret_cast<Object**>(FIELD_ADDR(this, kBasePointerOffset)));
-}
-
-
-template <typename StaticVisitor>
-void FixedTypedArrayBase::FixedTypedArrayBaseIterateBody() {
- StaticVisitor::VisitPointer(
- reinterpret_cast<Object**>(FIELD_ADDR(this, kBasePointerOffset)));
-}
-
-
-void ExternalOneByteString::ExternalOneByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalOneByteStringResource Resource;
- v->VisitExternalOneByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template <typename StaticVisitor>
-void ExternalOneByteString::ExternalOneByteStringIterateBody() {
- typedef v8::String::ExternalOneByteStringResource Resource;
- StaticVisitor::VisitExternalOneByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalStringResource Resource;
- v->VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-template<typename StaticVisitor>
-void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
- typedef v8::String::ExternalStringResource Resource;
- StaticVisitor::VisitExternalTwoByteString(
- reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
-}
-
-
-void BodyDescriptorBase::IterateBodyImpl(HeapObject* obj, int start_offset,
- int end_offset, ObjectVisitor* v) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
- IteratePointers(obj, start_offset, end_offset, v);
- } else {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
-
- LayoutDescriptorHelper helper(obj->map());
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
- IteratePointers(obj, offset, end_of_region_offset, v);
- }
- offset = end_of_region_offset;
- }
- }
-}
-
-
-template <typename StaticVisitor>
-void BodyDescriptorBase::IterateBodyImpl(Heap* heap, HeapObject* obj,
- int start_offset, int end_offset) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
- IteratePointers<StaticVisitor>(heap, obj, start_offset, end_offset);
- } else {
- DCHECK(FLAG_unbox_double_fields);
- DCHECK(IsAligned(start_offset, kPointerSize) &&
- IsAligned(end_offset, kPointerSize));
-
- LayoutDescriptorHelper helper(obj->map());
- DCHECK(!helper.all_fields_tagged());
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
- IteratePointers<StaticVisitor>(heap, obj, offset, end_of_region_offset);
- }
- offset = end_of_region_offset;
- }
- }
-}
-
-
-void BodyDescriptorBase::IteratePointers(HeapObject* obj, int start_offset,
- int end_offset, ObjectVisitor* v) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
-}
-
-
-template <typename StaticVisitor>
-void BodyDescriptorBase::IteratePointers(Heap* heap, HeapObject* obj,
- int start_offset, int end_offset) {
- StaticVisitor::VisitPointers(heap, obj,
- HeapObject::RawField(obj, start_offset),
- HeapObject::RawField(obj, end_offset));
-}
-
-
-// Iterates the function object according to the visiting policy.
-template <JSFunction::BodyVisitingPolicy body_visiting_policy>
-class JSFunction::BodyDescriptorImpl : public BodyDescriptorBase {
- public:
- STATIC_ASSERT(kNonWeakFieldsEndOffset == kCodeEntryOffset);
- STATIC_ASSERT(kCodeEntryOffset + kPointerSize == kNextFunctionLinkOffset);
- STATIC_ASSERT(kNextFunctionLinkOffset + kPointerSize == kSize);
-
- static inline void IterateBody(HeapObject* obj, int object_size,
- ObjectVisitor* v) {
- IteratePointers(obj, kPropertiesOffset, kNonWeakFieldsEndOffset, v);
-
- if (body_visiting_policy & kVisitCodeEntry) {
- v->VisitCodeEntry(obj->address() + kCodeEntryOffset);
- }
-
- if (body_visiting_policy & kVisitNextFunction) {
- IteratePointers(obj, kNextFunctionLinkOffset, kSize, v);
- }
-
- // TODO(ishell): v8:4531, fix when JFunctions are allowed to have in-object
- // properties
- // IterateBodyImpl(obj, kSize, object_size, v);
- }
-
- template <typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size) {
- Heap* heap = obj->GetHeap();
- IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
- kNonWeakFieldsEndOffset);
-
- if (body_visiting_policy & kVisitCodeEntry) {
- StaticVisitor::VisitCodeEntry(heap, obj,
- obj->address() + kCodeEntryOffset);
- }
-
- if (body_visiting_policy & kVisitNextFunction) {
- IteratePointers<StaticVisitor>(heap, obj, kNextFunctionLinkOffset, kSize);
- }
-
- // TODO(ishell): v8:4531, fix when JFunctions are allowed to have in-object
- // properties
- // IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
- }
-
- static inline int SizeOf(Map* map, HeapObject* object) {
- // TODO(ishell): v8:4531, fix when JFunctions are allowed to have in-object
- // properties
- return JSFunction::kSize;
- }
-};
-
-
template<class Derived, class TableType>
Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType* table(TableType::cast(this->table()));
@@ -8108,6 +7759,14 @@ String::SubStringRange::iterator String::SubStringRange::end() {
}
+// Predictably converts HeapObject* or Address to uint32 by calculating
+// offset of the address in respective MemoryChunk.
+static inline uint32_t ObjectAddressForHashing(void* object) {
+ uint32_t value = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object));
+ return value & MemoryChunk::kAlignmentMask;
+}
+
+
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index a845e06f23..db716505de 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -76,6 +76,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case BYTECODE_ARRAY_TYPE:
BytecodeArray::cast(this)->BytecodeArrayPrint(os);
break;
+ case TRANSITION_ARRAY_TYPE:
+ TransitionArray::cast(this)->TransitionArrayPrint(os);
+ break;
case FREE_SPACE_TYPE:
FreeSpace::cast(this)->FreeSpacePrint(os);
break;
@@ -95,6 +98,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_ARRAY_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_PROMISE_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
case JS_REGEXP_TYPE:
@@ -106,6 +110,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_MODULE_TYPE:
JSModule::cast(this)->JSModulePrint(os);
break;
+ case JS_BOUND_FUNCTION_TYPE:
+ JSBoundFunction::cast(this)->JSBoundFunctionPrint(os);
+ break;
case JS_FUNCTION_TYPE:
JSFunction::cast(this)->JSFunctionPrint(os);
break;
@@ -127,9 +134,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_PROXY_TYPE:
JSProxy::cast(this)->JSProxyPrint(os);
break;
- case JS_FUNCTION_PROXY_TYPE:
- JSFunctionProxy::cast(this)->JSFunctionProxyPrint(os);
- break;
case JS_SET_TYPE:
JSSet::cast(this)->JSSetPrint(os);
break;
@@ -427,17 +431,6 @@ void JSModule::JSModulePrint(std::ostream& os) { // NOLINT
}
-static const char* TypeToString(InstanceType type) {
- switch (type) {
-#define TYPE_TO_STRING(TYPE) case TYPE: return #TYPE;
- INSTANCE_TYPE_LIST(TYPE_TO_STRING)
-#undef TYPE_TO_STRING
- }
- UNREACHABLE();
- return "UNKNOWN"; // Keep the compiler happy.
-}
-
-
void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Symbol");
os << " - hash: " << Hash();
@@ -452,7 +445,7 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
void Map::MapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Map");
- os << " - type: " << TypeToString(instance_type()) << "\n";
+ os << " - type: " << instance_type() << "\n";
os << " - instance size: " << instance_size() << "\n";
if (IsJSObjectMap()) {
os << " - inobject properties: " << GetInObjectProperties() << "\n";
@@ -494,6 +487,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
os << "\n - constructor: " << Brief(GetConstructor());
os << "\n - code cache: " << Brief(code_cache());
os << "\n - dependent code: " << Brief(dependent_code());
+ os << "\n - construction counter: " << construction_counter();
os << "\n";
}
@@ -552,6 +546,19 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
}
+void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "TransitionArray");
+ os << " - capacity: " << length();
+ for (int i = 0; i < length(); i++) {
+ os << "\n [" << i << "]: " << Brief(get(i));
+ if (i == kNextLinkIndex) os << " (next link)";
+ if (i == kPrototypeTransitionsIndex) os << " (prototype transitions)";
+ if (i == kTransitionLengthIndex) os << " (number of transitions)";
+ }
+ os << "\n";
+}
+
+
void TypeFeedbackMetadata::Print() {
OFStream os(stdout);
TypeFeedbackMetadataPrint(os);
@@ -593,9 +600,6 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
- os << "\n - ics with type info: " << ic_with_type_info_count();
- os << "\n - generic ics: " << ic_generic_count();
-
TypeFeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackVectorSlot slot = iter.Next();
@@ -733,24 +737,13 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSProxy");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - handler = ";
- handler()->Print(os);
+ os << " - map = " << reinterpret_cast<void*>(map());
+ os << "\n - target = ";
+ target()->ShortPrint(os);
+ os << "\n - handler = ";
+ handler()->ShortPrint(os);
os << "\n - hash = ";
- hash()->Print(os);
- os << "\n";
-}
-
-
-void JSFunctionProxy::JSFunctionProxyPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSFunctionProxy");
- os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
- os << " - handler = ";
- handler()->Print(os);
- os << "\n - call_trap = ";
- call_trap()->Print(os);
- os << "\n - construct_trap = ";
- construct_trap()->Print(os);
+ hash()->ShortPrint(os);
os << "\n";
}
@@ -854,21 +847,26 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
}
+void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSBoundFunction");
+ os << "\n - bound_target_function = " << Brief(bound_target_function());
+ os << "\n - bound_this = " << Brief(bound_this());
+ os << "\n - bound_arguments = " << Brief(bound_arguments());
+ JSObjectPrintBody(os, this);
+}
+
+
void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "Function");
os << "\n - initial_map = ";
if (has_initial_map()) os << Brief(initial_map());
os << "\n - shared_info = " << Brief(shared());
- os << "\n - name = " << Brief(shared()->name());
+ os << "\n - name = " << Brief(shared()->name());
if (shared()->is_generator()) {
os << "\n - generator";
}
os << "\n - context = " << Brief(context());
- if (shared()->bound()) {
- os << "\n - bindings = " << Brief(function_bindings());
- } else {
- os << "\n - literals = " << Brief(literals());
- }
+ os << "\n - literals = " << Brief(literals());
os << "\n - code = " << Brief(code());
JSObjectPrintBody(os, this);
}
@@ -992,7 +990,6 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
os << "\n - validity cell: " << Brief(validity_cell());
- os << "\n - constructor name: " << Brief(constructor_name());
os << "\n";
}
@@ -1079,13 +1076,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
}
-void TypeSwitchInfo::TypeSwitchInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "TypeSwitchInfo");
- os << "\n - types: " << Brief(types());
- os << "\n";
-}
-
-
void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AllocationSite");
os << " - weak_next: " << Brief(weak_next());
@@ -1304,6 +1294,10 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
} else if (key == heap->elements_transition_symbol()) {
os << "(transition to " << ElementsKindToString(target->elements_kind())
<< ")";
+ } else if (key == heap->strict_function_transition_symbol()) {
+ os << " (transition to strict function)";
+ } else if (key == heap->strong_function_transition_symbol()) {
+ os << " (transition to strong function)";
} else if (key == heap->observed_symbol()) {
os << " (transition to Object.observe)";
} else {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 7f5ce5a091..ef846d6c42 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -28,6 +28,7 @@
#include "src/field-index-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
+#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
#include "src/key-accumulator.h"
@@ -37,14 +38,17 @@
#include "src/macro-assembler.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/objects-body-descriptors-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
+#include "src/regexp/jsregexp.h"
#include "src/safepoint-table.h"
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
#include "src/utils.h"
+#include "src/zone.h"
#ifdef ENABLE_DISASSEMBLER
#include "src/disasm.h"
@@ -54,6 +58,19 @@
namespace v8 {
namespace internal {
+std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
+ switch (instance_type) {
+#define WRITE_TYPE(TYPE) \
+ case TYPE: \
+ return os << #TYPE;
+ INSTANCE_TYPE_LIST(WRITE_TYPE)
+#undef WRITE_TYPE
+ }
+ UNREACHABLE();
+ return os << "UNKNOWN"; // Keep the compiler happy.
+}
+
+
Handle<HeapType> Object::OptimalType(Isolate* isolate,
Representation representation) {
if (representation.IsNone()) return HeapType::None(isolate);
@@ -61,9 +78,7 @@ Handle<HeapType> Object::OptimalType(Isolate* isolate,
if (representation.IsHeapObject() && IsHeapObject()) {
// We can track only JavaScript objects with stable maps.
Handle<Map> map(HeapObject::cast(this)->map(), isolate);
- if (map->is_stable() &&
- map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
- map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE) {
+ if (map->is_stable() && map->IsJSReceiverMap()) {
return HeapType::Class(map, isolate);
}
}
@@ -608,6 +623,23 @@ MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
}
+Maybe<bool> Object::IsArray(Handle<Object> object) {
+ if (object->IsJSArray()) return Just(true);
+ if (object->IsJSProxy()) {
+ Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
+ Isolate* isolate = proxy->GetIsolate();
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked,
+ isolate->factory()->NewStringFromAsciiChecked("IsArray")));
+ return Nothing<bool>();
+ }
+ return Object::IsArray(handle(proxy->target(), isolate));
+ }
+ return Just(false);
+}
+
+
bool Object::IsPromise(Handle<Object> object) {
if (!object->IsJSObject()) return false;
auto js_object = Handle<JSObject>::cast(object);
@@ -633,9 +665,8 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
return isolate->factory()->undefined_value();
}
if (!func->IsCallable()) {
- // TODO(bmeurer): Better error message here?
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCalledNonCallable, func),
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kPropertyNotFunction,
+ func, name, receiver),
Object);
}
return func;
@@ -643,6 +674,72 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
// static
+MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+ // 1. ReturnIfAbrupt(object).
+ // 2. (default elementTypes -- not applicable.)
+ // 3. If Type(obj) is not Object, throw a TypeError exception.
+ if (!object->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "CreateListFromArrayLike")),
+ FixedArray);
+ }
+ // 4. Let len be ? ToLength(? Get(obj, "length")).
+ Handle<Object> raw_length_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, raw_length_obj,
+ JSReceiver::GetProperty(object, isolate->factory()->length_string()),
+ FixedArray);
+ Handle<Object> raw_length_number;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
+ Object::ToLength(isolate, raw_length_obj),
+ FixedArray);
+ uint32_t len;
+ if (!raw_length_number->ToUint32(&len) ||
+ len > static_cast<uint32_t>(FixedArray::kMaxLength)) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayLength),
+ FixedArray);
+ }
+ // 5. Let list be an empty List.
+ Handle<FixedArray> list = isolate->factory()->NewFixedArray(len);
+ // 6. Let index be 0.
+ // 7. Repeat while index < len:
+ for (uint32_t index = 0; index < len; ++index) {
+ // 7a. Let indexName be ToString(index).
+ // 7b. Let next be ? Get(obj, indexName).
+ Handle<Object> next;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, next, Object::GetElement(isolate, object, index), FixedArray);
+ switch (element_types) {
+ case ElementTypes::kAll:
+ // Nothing to do.
+ break;
+ case ElementTypes::kStringAndSymbol: {
+ // 7c. If Type(next) is not an element of elementTypes, throw a
+ // TypeError exception.
+ if (!next->IsName()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNotPropertyName, next),
+ FixedArray);
+ }
+ // 7d. Append next as the last element of list.
+ // Internalize on the fly so we can use pointer identity later.
+ next = isolate->factory()->InternalizeName(Handle<Name>::cast(next));
+ break;
+ }
+ }
+ list->set(index, *next);
+ // 7e. Set index to index + 1. (See loop header.)
+ }
+ // 8. Return list.
+ return list;
+}
+
+
+// static
Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -651,8 +748,8 @@ Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
UNREACHABLE();
case LookupIterator::JSPROXY:
// Call the "has" trap on proxies.
- return JSProxy::HasPropertyWithHandler(it->GetHolder<JSProxy>(),
- it->GetName());
+ return JSProxy::HasProperty(it->isolate(), it->GetHolder<JSProxy>(),
+ it->GetName());
case LookupIterator::INTERCEPTOR: {
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -688,8 +785,9 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- return JSProxy::GetPropertyWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName());
+ return JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
+ it->GetName(), it->GetReceiver(),
+ language_mode);
case LookupIterator::INTERCEPTOR: {
bool done;
Handle<Object> result;
@@ -714,6 +812,104 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
}
+#define STACK_CHECK(result_value) \
+ do { \
+ StackLimitCheck stack_check(isolate); \
+ if (stack_check.HasOverflowed()) { \
+ isolate->Throw(*isolate->factory()->NewRangeError( \
+ MessageTemplate::kStackOverflow)); \
+ return result_value; \
+ } \
+ } while (false)
+
+
+// static
+MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ Handle<Object> receiver,
+ LanguageMode language_mode) {
+ if (receiver->IsJSGlobalObject()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kReadGlobalReferenceThroughProxy, name),
+ Object);
+ }
+
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(MaybeHandle<Object>());
+ Handle<Name> trap_name = isolate->factory()->get_string();
+ // 1. Assert: IsPropertyKey(P) is true.
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
+ Object);
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "get").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name), Object);
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 7.a Return target.[[Get]](P, Receiver).
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, name, target);
+ return Object::GetProperty(&it, language_mode);
+ }
+ // 8. Let trapResult be ? Call(trap, handler, «target, P, Receiver»).
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name, receiver};
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args), Object);
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN_NULL(target_found);
+ // 10. If targetDesc is not undefined, then
+ if (target_found.FromJust()) {
+ // 10.a. If IsDataDescriptor(targetDesc) and targetDesc.[[Configurable]] is
+ // false and targetDesc.[[Writable]] is false, then
+ // 10.a.i. If SameValue(trapResult, targetDesc.[[Value]]) is false,
+ // throw a TypeError exception.
+ bool inconsistent = PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ !target_desc.writable() &&
+ !trap_result->SameValue(*target_desc.value());
+ if (inconsistent) {
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kProxyGetNonConfigurableData,
+ name, target_desc.value(), trap_result),
+ Object);
+ }
+ // 10.b. If IsAccessorDescriptor(targetDesc) and targetDesc.[[Configurable]]
+ // is false and targetDesc.[[Get]] is undefined, then
+ // 10.b.i. If trapResult is not undefined, throw a TypeError exception.
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.get()->IsUndefined() &&
+ !trap_result->IsUndefined();
+ if (inconsistent) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetNonConfigurableAccessor, name,
+ trap_result),
+ Object);
+ }
+ }
+ // 11. Return trap_result
+ return trap_result;
+}
+
+
Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
Handle<Name> name) {
LookupIterator it(object, name,
@@ -730,7 +926,9 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess()) continue;
+ // Support calling this method without an active context, but refuse
+ // access to access-checked objects in that case.
+ if (it->isolate()->context() != nullptr && it->HasAccess()) continue;
// Fall through.
case LookupIterator::JSPROXY:
it->NotFound();
@@ -831,6 +1029,33 @@ Object* FunctionTemplateInfo::GetCompatibleReceiver(Isolate* isolate,
}
+// static
+MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site) {
+ // If called through new, new.target can be:
+ // - a subclass of constructor,
+ // - a proxy wrapper around constructor, or
+ // - the constructor itself.
+ // If called through Reflect.construct, it's guaranteed to be a constructor.
+ Isolate* const isolate = constructor->GetIsolate();
+ DCHECK(constructor->IsConstructor());
+ DCHECK(new_target->IsConstructor());
+ DCHECK(!constructor->has_initial_map() ||
+ constructor->initial_map()->instance_type() != JS_FUNCTION_TYPE);
+
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, constructor, new_target), JSObject);
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObjectFromMap(initial_map, NOT_TENURED, site);
+ isolate->counters()->constructed_objects()->Increment();
+ isolate->counters()->constructed_objects_runtime()->Increment();
+ return result;
+}
+
+
Handle<FixedArray> JSObject::EnsureWritableFastElements(
Handle<JSObject> object) {
DCHECK(object->HasFastSmiOrObjectElements());
@@ -845,17 +1070,64 @@ Handle<FixedArray> JSObject::EnsureWritableFastElements(
}
-MaybeHandle<Object> JSProxy::GetPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name) {
+// ES6 9.5.1
+// static
+MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
+ Handle<String> trap_name = isolate->factory()->getPrototypeOf_string();
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return isolate->factory()->undefined_value();
+ STACK_CHECK(MaybeHandle<Object>());
- Handle<Object> args[] = { receiver, name };
- return CallTrap(
- proxy, "get", isolate->derived_get_trap(), arraysize(args), args);
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot.
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyRevoked, trap_name),
+ Object);
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ // 5. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, trap, GetMethod(handler, trap_name),
+ Object);
+ // 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
+ if (trap->IsUndefined()) {
+ return Object::GetPrototype(isolate, target);
+ }
+ // 7. Let handlerProto be ? Call(trap, handler, «target»).
+ Handle<Object> argv[] = {target};
+ Handle<Object> handler_proto;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, handler_proto,
+ Execution::Call(isolate, trap, handler, arraysize(argv), argv), Object);
+ // 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError.
+ if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull())) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid),
+ Object);
+ }
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN_NULL(is_extensible);
+ // 10. If extensibleTarget is true, return handlerProto.
+ if (is_extensible.FromJust()) return handler_proto;
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ Handle<Object> target_proto;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, target_proto,
+ Object::GetPrototype(isolate, target), Object);
+ // 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError.
+ if (!handler_proto->SameValue(*target_proto)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyGetPrototypeOfNonExtensible),
+ Object);
+ }
+ // 13. Return handlerProto.
+ return handler_proto;
}
@@ -992,11 +1264,6 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
return MaybeHandle<Object>();
}
- Debug* debug = isolate->debug();
- // Handle stepping into a getter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->is_active()) debug->HandleStepIn(getter, false);
-
return Execution::Call(isolate, getter, receiver, 0, NULL);
}
@@ -1007,11 +1274,6 @@ Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
ShouldThrow should_throw) {
Isolate* isolate = setter->GetIsolate();
- Debug* debug = isolate->debug();
- // Handle stepping into a setter if step into is active.
- // TODO(rossberg): should this apply to getters that are function proxies?
- if (debug->is_active()) debug->HandleStepIn(setter, false);
-
Handle<Object> argv[] = { value };
RETURN_ON_EXCEPTION_VALUE(isolate, Execution::Call(isolate, setter, receiver,
arraysize(argv), argv),
@@ -1021,6 +1283,18 @@ Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
// static
+bool Object::IsErrorObject(Isolate* isolate, Handle<Object> object) {
+ if (!object->IsJSObject()) return false;
+ // Use stack_trace_symbol as proxy for [[ErrorData]].
+ Handle<Name> symbol = isolate->factory()->stack_trace_symbol();
+ Maybe<bool> has_stack_trace =
+ JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol);
+ DCHECK(!has_stack_trace.IsNothing());
+ return has_stack_trace.FromJust();
+}
+
+
+// static
bool JSObject::AllCanRead(LookupIterator* it) {
// Skip current iteration, it's in state ACCESS_CHECK or INTERCEPTOR, both of
// which have already been checked.
@@ -1034,6 +1308,9 @@ bool JSObject::AllCanRead(LookupIterator* it) {
}
} else if (it->state() == LookupIterator::INTERCEPTOR) {
if (it->GetInterceptor()->all_can_read()) return true;
+ } else if (it->state() == LookupIterator::JSPROXY) {
+ // Stop lookupiterating. And no, AllCanNotRead.
+ return false;
}
}
return false;
@@ -1089,7 +1366,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
// static
bool JSObject::AllCanWrite(LookupIterator* it) {
- for (; it->IsFound(); it->Next()) {
+ for (; it->IsFound() && it->state() != LookupIterator::JSPROXY; it->Next()) {
if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
@@ -1160,12 +1437,13 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
-bool Object::HasInPrototypeChain(Isolate* isolate, Object* target) {
- PrototypeIterator iter(isolate, this, PrototypeIterator::START_AT_RECEIVER);
+Maybe<bool> Object::HasInPrototypeChain(Isolate* isolate, Handle<Object> object,
+ Handle<Object> proto) {
+ PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
while (true) {
- iter.AdvanceIgnoringProxies();
- if (iter.IsAtEnd()) return false;
- if (iter.IsAtEnd(target)) return true;
+ if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
+ if (iter.IsAtEnd()) return Just(false);
+ if (iter.IsAtEnd(proto)) return Just(true);
}
}
@@ -1329,6 +1607,56 @@ bool Object::SameValueZero(Object* other) {
}
+MaybeHandle<Object> Object::ArraySpeciesConstructor(
+ Isolate* isolate, Handle<Object> original_array) {
+ Handle<Context> native_context = isolate->native_context();
+ if (!FLAG_harmony_species) {
+ return Handle<Object>(native_context->array_function(), isolate);
+ }
+ Handle<Object> constructor = isolate->factory()->undefined_value();
+ Maybe<bool> is_array = Object::IsArray(original_array);
+ MAYBE_RETURN_NULL(is_array);
+ if (is_array.FromJust()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor,
+ Object::GetProperty(original_array,
+ isolate->factory()->constructor_string()),
+ Object);
+ if (constructor->IsConstructor()) {
+ Handle<Context> constructor_context;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor_context,
+ JSReceiver::GetFunctionRealm(Handle<JSReceiver>::cast(constructor)),
+ Object);
+ if (*constructor_context != *native_context &&
+ *constructor == constructor_context->array_function()) {
+ constructor = isolate->factory()->undefined_value();
+ }
+ }
+ if (constructor->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, constructor,
+ Object::GetProperty(constructor,
+ isolate->factory()->species_symbol()),
+ Object);
+ if (constructor->IsNull()) {
+ constructor = isolate->factory()->undefined_value();
+ }
+ }
+ }
+ if (constructor->IsUndefined()) {
+ return Handle<Object>(native_context->array_function(), isolate);
+ } else {
+ if (!constructor->IsConstructor()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kSpeciesNotConstructor),
+ Object);
+ }
+ return constructor;
+ }
+}
+
+
void Object::ShortPrint(FILE* out) {
OFStream os(out);
os << Brief(this);
@@ -1420,6 +1748,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
+ DCHECK(!resource->IsCompressible());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -1482,6 +1811,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
+ DCHECK(!resource->IsCompressible());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
@@ -1619,6 +1949,22 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
break;
}
+ case JS_BOUND_FUNCTION_TYPE: {
+ JSBoundFunction* bound_function = JSBoundFunction::cast(this);
+ Object* name = bound_function->name();
+ accumulator->Add("<JS BoundFunction");
+ if (name->IsString()) {
+ String* str = String::cast(name);
+ if (str->length() > 0) {
+ accumulator->Add(" ");
+ accumulator->Put(str);
+ }
+ }
+ accumulator->Add(
+ " (BoundTargetFunction %p)>",
+ reinterpret_cast<void*>(bound_function->bound_target_function()));
+ break;
+ }
case JS_WEAK_MAP_TYPE: {
accumulator->Add("<JS WeakMap>");
break;
@@ -1745,9 +2091,7 @@ MaybeHandle<JSFunction> Map::GetConstructorFunction(
void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
PropertyAttributes attributes) {
OFStream os(file);
- os << "[reconfiguring ";
- constructor_name()->PrintOn(file);
- os << "] ";
+ os << "[reconfiguring]";
Name* name = instance_descriptors()->GetKey(modify_index);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
@@ -1772,9 +2116,7 @@ void Map::PrintGeneralization(FILE* file,
HeapType* old_field_type,
HeapType* new_field_type) {
OFStream os(file);
- os << "[generalizing ";
- constructor_name()->PrintOn(file);
- os << "] ";
+ os << "[generalizing]";
Name* name = instance_descriptors()->GetKey(modify_index);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
@@ -1806,9 +2148,7 @@ void Map::PrintGeneralization(FILE* file,
void JSObject::PrintInstanceMigration(FILE* file,
Map* original_map,
Map* new_map) {
- PrintF(file, "[migrating ");
- map()->constructor_name()->PrintOn(file);
- PrintF(file, "] ");
+ PrintF(file, "[migrating]");
DescriptorArray* o = original_map->instance_descriptors();
DescriptorArray* n = new_map->instance_descriptors();
for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
@@ -1877,6 +2217,10 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case BYTECODE_ARRAY_TYPE:
os << "<BytecodeArray[" << BytecodeArray::cast(this)->length() << "]>";
break;
+ case TRANSITION_ARRAY_TYPE:
+ os << "<TransitionArray[" << TransitionArray::cast(this)->length()
+ << "]>";
+ break;
case FREE_SPACE_TYPE:
os << "<FreeSpace[" << FreeSpace::cast(this)->size() << "]>";
break;
@@ -1961,9 +2305,6 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case JS_PROXY_TYPE:
os << "<JSProxy>";
break;
- case JS_FUNCTION_PROXY_TYPE:
- os << "<JSFunctionProxy>";
- break;
case FOREIGN_TYPE:
os << "<Foreign>";
break;
@@ -1999,12 +2340,33 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
}
-void HeapObject::Iterate(ObjectVisitor* v) {
- // Handle header
- IteratePointer(v, kMapOffset);
- // Handle object body
+void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
+
+
+void HeapObject::IterateBody(ObjectVisitor* v) {
Map* m = map();
- IterateBody(m->instance_type(), SizeFromMap(m), v);
+ IterateBodyFast<ObjectVisitor>(m->instance_type(), SizeFromMap(m), v);
+}
+
+
+void HeapObject::IterateBody(InstanceType type, int object_size,
+ ObjectVisitor* v) {
+ IterateBodyFast<ObjectVisitor>(type, object_size, v);
+}
+
+
+struct CallIsValidSlot {
+ template <typename BodyDescriptor>
+ static bool apply(HeapObject* obj, int offset, int) {
+ return BodyDescriptor::IsValidSlot(obj, offset);
+ }
+};
+
+
+bool HeapObject::IsValidSlot(int offset) {
+ DCHECK_NE(0, offset);
+ return BodyDescriptorApply<CallIsValidSlot, bool>(map()->instance_type(),
+ this, offset, 0);
}
@@ -2126,7 +2488,7 @@ void Simd128Value::CopyBits(void* destination) const {
String* JSReceiver::class_name() {
- if (IsJSFunction() || IsJSFunctionProxy()) {
+ if (IsFunction()) {
return GetHeap()->Function_string();
}
Object* maybe_constructor = map()->GetConstructor();
@@ -2139,31 +2501,89 @@ String* JSReceiver::class_name() {
}
-String* Map::constructor_name() {
- if (is_prototype_map() && prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo* proto_info = PrototypeInfo::cast(prototype_info());
- if (proto_info->constructor_name()->IsString()) {
- return String::cast(proto_info->constructor_name());
+MaybeHandle<String> JSReceiver::BuiltinStringTag(Handle<JSReceiver> object) {
+ Maybe<bool> is_array = Object::IsArray(object);
+ MAYBE_RETURN(is_array, MaybeHandle<String>());
+ Isolate* const isolate = object->GetIsolate();
+ if (is_array.FromJust()) {
+ return isolate->factory()->Array_string();
+ }
+ // TODO(adamk): According to ES2015, we should return "Function" when
+ // object has a [[Call]] internal method (corresponds to IsCallable).
+ // But this is well cemented in layout tests and might cause webbreakage.
+ // if (object->IsCallable()) {
+ // return isolate->factory()->Function_string();
+ // }
+ // TODO(adamk): class_name() is expensive, replace with instance type
+ // checks where possible.
+ return handle(object->class_name(), isolate);
+}
+
+
+// static
+Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
+ Isolate* isolate = receiver->GetIsolate();
+
+ // If the object was instantiated simply with base == new.target, the
+ // constructor on the map provides the most accurate name.
+ // Don't provide the info for prototypes, since their constructors are
+ // reclaimed and replaced by Object in OptimizeAsPrototype.
+ if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
+ !receiver->map()->is_prototype_map()) {
+ Object* maybe_constructor = receiver->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ String* name = String::cast(constructor->shared()->name());
+ if (name->length() == 0) name = constructor->shared()->inferred_name();
+ if (name->length() != 0 &&
+ !name->Equals(isolate->heap()->Object_string())) {
+ return handle(name, isolate);
+ }
}
}
- Object* maybe_constructor = GetConstructor();
+
+ if (FLAG_harmony_tostring) {
+ Handle<Object> maybe_tag = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->to_string_tag_symbol());
+ if (maybe_tag->IsString()) return Handle<String>::cast(maybe_tag);
+ }
+
+ PrototypeIterator iter(isolate, receiver);
+ if (iter.IsAtEnd()) return handle(receiver->class_name());
+ Handle<JSReceiver> start = PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ LookupIterator it(receiver, isolate->factory()->constructor_string(), start,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
+ Handle<String> result = isolate->factory()->Object_string();
if (maybe_constructor->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ JSFunction* constructor = JSFunction::cast(*maybe_constructor);
String* name = String::cast(constructor->shared()->name());
- if (name->length() > 0) return name;
- String* inferred_name = constructor->shared()->inferred_name();
- if (inferred_name->length() > 0) return inferred_name;
- Object* proto = prototype();
- if (proto->IsJSObject()) return JSObject::cast(proto)->constructor_name();
+ if (name->length() == 0) name = constructor->shared()->inferred_name();
+ if (name->length() > 0) result = handle(name, isolate);
}
- // TODO(rossberg): what about proxies?
- // If the constructor is not present, return "Object".
- return GetHeap()->Object_string();
+
+ return result.is_identical_to(isolate->factory()->Object_string())
+ ? handle(receiver->class_name())
+ : result;
}
-String* JSReceiver::constructor_name() {
- return map()->constructor_name();
+Context* JSReceiver::GetCreationContext() {
+ if (IsJSBoundFunction()) {
+ return JSBoundFunction::cast(this)->creation_context();
+ }
+ Object* constructor = map()->GetConstructor();
+ JSFunction* function;
+ if (constructor->IsJSFunction()) {
+ function = JSFunction::cast(constructor);
+ } else {
+ // Functions have null as a constructor,
+ // but any JSFunction knows its context immediately.
+ CHECK(IsJSFunction());
+ function = JSFunction::cast(this);
+ }
+
+ return function->context()->native_context();
}
@@ -2271,21 +2691,6 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
}
-Context* JSObject::GetCreationContext() {
- Object* constructor = this->map()->GetConstructor();
- JSFunction* function;
- if (!constructor->IsJSFunction()) {
- // Functions have null as a constructor,
- // but any JSFunction knows its context immediately.
- function = JSFunction::cast(this);
- } else {
- function = JSFunction::cast(constructor);
- }
-
- return function->context()->native_context();
-}
-
-
MaybeHandle<Object> JSObject::EnqueueChangeRecord(Handle<JSObject> object,
const char* type_str,
Handle<Name> name,
@@ -2355,9 +2760,10 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
}
-static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
- Handle<Map> new_map,
- Isolate* isolate) {
+// static
+void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate) {
if (!FLAG_track_prototype_users) return;
if (!old_map->is_prototype_map()) return;
DCHECK(new_map->is_prototype_map());
@@ -2724,25 +3130,13 @@ static inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
}
-// Invalidates a transition target at |key|, and installs |new_descriptors| over
-// the current instance_descriptors to ensure proper sharing of descriptor
-// arrays.
-// Returns true if the transition target at given key was deprecated.
-bool Map::DeprecateTarget(PropertyKind kind, Name* key,
- PropertyAttributes attributes,
- DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor) {
- bool transition_target_deprecated = false;
- Map* maybe_transition =
- TransitionArray::SearchTransition(this, kind, key, attributes);
- if (maybe_transition != NULL) {
- maybe_transition->DeprecateTransitionTree();
- transition_target_deprecated = true;
- }
-
+// Installs |new_descriptors| over the current instance_descriptors to ensure
+// proper sharing of descriptor arrays.
+void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
+ LayoutDescriptor* new_layout_descriptor) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined()) {
- return transition_target_deprecated;
+ return;
}
DescriptorArray* to_replace = instance_descriptors();
@@ -2756,7 +3150,6 @@ bool Map::DeprecateTarget(PropertyKind kind, Name* key,
current = Map::cast(next);
}
set_owns_descriptors(false);
- return transition_target_deprecated;
}
@@ -3422,9 +3815,6 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
int split_nof = split_map->NumberOfOwnDescriptors();
DCHECK_NE(old_nof, split_nof);
- Handle<LayoutDescriptor> new_layout_descriptor =
- LayoutDescriptor::New(split_map, new_descriptors, old_nof);
-
PropertyKind split_kind;
PropertyAttributes split_attributes;
if (modify_index == split_nof) {
@@ -3435,14 +3825,19 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
split_kind = split_prop_details.kind();
split_attributes = split_prop_details.attributes();
}
- bool transition_target_deprecated = split_map->DeprecateTarget(
- split_kind, old_descriptors->GetKey(split_nof), split_attributes,
- *new_descriptors, *new_layout_descriptor);
- // If |transition_target_deprecated| is true then the transition array
- // already contains entry for given descriptor. This means that the transition
+ // Invalidate a transition target at |key|.
+ Map* maybe_transition = TransitionArray::SearchTransition(
+ *split_map, split_kind, old_descriptors->GetKey(split_nof),
+ split_attributes);
+ if (maybe_transition != NULL) {
+ maybe_transition->DeprecateTransitionTree();
+ }
+
+ // If |maybe_transition| is not NULL then the transition array already
+ // contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
- if (!transition_target_deprecated &&
+ if (maybe_transition == NULL &&
!TransitionArray::CanHaveMoreTransitions(split_map)) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
new_kind, new_attributes,
@@ -3473,13 +3868,16 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
*old_field_type, *new_field_type);
}
- // Add missing transitions.
- Handle<Map> new_map = split_map;
- for (int i = split_nof; i < old_nof; ++i) {
- new_map = CopyInstallDescriptors(new_map, i, new_descriptors,
- new_layout_descriptor);
- }
- new_map->set_owns_descriptors(true);
+ Handle<LayoutDescriptor> new_layout_descriptor =
+ LayoutDescriptor::New(split_map, new_descriptors, old_nof);
+
+ Handle<Map> new_map =
+ AddMissingTransitions(split_map, new_descriptors, new_layout_descriptor);
+
+ // Deprecated part of the transition tree is no longer reachable, so replace
+ // current instance descriptors in the "survived" part of the tree with
+ // the new descriptors to maintain descriptors sharing invariant.
+ split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
return new_map;
}
@@ -3622,6 +4020,7 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
result = args.Call(setter, index, v8::Utils::ToLocal(value));
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
return Just(false);
@@ -3643,6 +4042,8 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
result_internal->VerifyApiCallResultType();
#endif
return Just(true);
+ // TODO(neis): In the future, we may want to actually return the interceptor's
+ // result, which then should be a boolean.
}
@@ -3684,21 +4085,8 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
should_throw);
case LookupIterator::JSPROXY:
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- return JSProxy::SetPropertyWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(), value,
- should_throw);
- } else {
- // TODO(verwaest): Use the MaybeHandle to indicate result.
- bool has_result = false;
- Maybe<bool> maybe_result =
- JSProxy::SetPropertyViaPrototypesWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName(),
- value, should_throw, &has_result);
- if (has_result) return maybe_result;
- done = true;
- }
- break;
+ return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
+ value, it->GetReceiver(), language_mode);
case LookupIterator::INTERCEPTOR:
if (it->HolderIsReceiverOrHiddenPrototype()) {
@@ -3767,12 +4155,16 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
+ RETURN_FAILURE(it->isolate(), should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
bool found = false;
Maybe<bool> result =
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
return AddDataProperty(it, value, NONE, should_throw, store_mode);
}
@@ -3782,6 +4174,11 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
StoreFromKeyed store_mode) {
ShouldThrow should_throw =
is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ Isolate* isolate = it->isolate();
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
bool found = false;
Maybe<bool> result =
@@ -3794,12 +4191,12 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
if (!it->GetReceiver()->IsJSReceiver()) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
LookupIterator::Configuration c = LookupIterator::OWN;
LookupIterator own_lookup =
- it->IsElement()
- ? LookupIterator(it->isolate(), it->GetReceiver(), it->index(), c)
- : LookupIterator(it->GetReceiver(), it->name(), c);
+ it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
+ : LookupIterator(receiver, it->name(), c);
for (; own_lookup.IsFound(); own_lookup.Next()) {
switch (own_lookup.state()) {
@@ -3811,7 +4208,8 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
break;
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
+ case LookupIterator::ACCESSOR:
+ return RedefineIncompatibleProperty(isolate, it->GetName(), value,
should_throw);
case LookupIterator::DATA: {
@@ -3822,18 +4220,26 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
return SetDataProperty(&own_lookup, value);
}
- case LookupIterator::ACCESSOR: {
- return RedefineIncompatibleProperty(it->isolate(), it->GetName(), value,
- should_throw);
- }
-
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY: {
- bool found = false;
- Maybe<bool> result = SetPropertyInternal(
- &own_lookup, value, language_mode, store_mode, &found);
- if (found) return result;
- break;
+ PropertyDescriptor desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(&own_lookup, &desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (!owned.FromJust()) {
+ return JSReceiver::CreateDataProperty(&own_lookup, value,
+ should_throw);
+ }
+ if (PropertyDescriptor::IsAccessorDescriptor(&desc) ||
+ !desc.writable()) {
+ return RedefineIncompatibleProperty(isolate, it->GetName(), value,
+ should_throw);
+ }
+
+ PropertyDescriptor value_desc;
+ value_desc.set_value(value);
+ return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
+ &value_desc, should_throw);
}
case LookupIterator::NOT_FOUND:
@@ -3913,8 +4319,8 @@ Maybe<bool> Object::RedefineIncompatibleProperty(Isolate* isolate,
Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
- // Proxies are handled on the WithHandler path. Other non-JSObjects cannot
- // have own properties.
+ // Proxies are handled elsewhere. Other non-JSObjects cannot have own
+ // properties.
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
// Store on the holder which may be hidden behind the receiver.
@@ -4372,19 +4778,16 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
return handle(native_context->fast_aliased_arguments_map());
}
- } else {
- Object* maybe_array_maps = map->is_strong()
- ? native_context->js_array_strong_maps()
- : native_context->js_array_maps();
+ } else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
// Reuse map transitions for JSArrays.
- if (maybe_array_maps->IsFixedArray()) {
- DisallowHeapAllocation no_gc;
- FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
- if (array_maps->get(from_kind) == *map) {
- Object* maybe_transitioned_map = array_maps->get(to_kind);
- if (maybe_transitioned_map->IsMap()) {
- return handle(Map::cast(maybe_transitioned_map));
- }
+ DisallowHeapAllocation no_gc;
+ Strength strength = map->is_strong() ? Strength::STRONG : Strength::WEAK;
+ if (native_context->get(Context::ArrayMapIndex(from_kind, strength)) ==
+ *map) {
+ Object* maybe_transitioned_map =
+ native_context->get(Context::ArrayMapIndex(to_kind, strength));
+ if (maybe_transitioned_map->IsMap()) {
+ return handle(Map::cast(maybe_transitioned_map), isolate);
}
}
}
@@ -4433,281 +4836,291 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
}
-Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Name> name) {
+void JSProxy::Revoke(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
+ if (!proxy->IsRevoked()) proxy->set_handler(isolate->heap()->null_value());
+ DCHECK(proxy->IsRevoked());
+}
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return Just(false);
- Handle<Object> args[] = { name };
- Handle<Object> result;
+Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Name> name) {
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(Nothing<bool>());
+ // 1. (Assert)
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate->factory()->has_string()));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "has").
+ Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result, CallTrap(proxy, "has", isolate->derived_has_trap(),
- arraysize(args), args),
+ isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->has_string()),
Nothing<bool>());
-
- return Just(result->BooleanValue());
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 7a. Return target.[[HasProperty]](P).
+ return JSReceiver::HasProperty(target, name);
+ }
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, P»)).
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ bool boolean_trap_result = trap_result_obj->BooleanValue();
+ // 9. If booleanTrapResult is false, then:
+ if (!boolean_trap_result) {
+ // 9a. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, target, name, &target_desc);
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 9b. If targetDesc is not undefined, then:
+ if (target_found.FromJust()) {
+ // 9b i. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ if (!target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyHasNonConfigurable, name));
+ return Nothing<bool>();
+ }
+ // 9b ii. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 9b iii. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyHasNonExtensible, name));
+ return Nothing<bool>();
+ }
+ }
+ }
+ // 10. Return booleanTrapResult.
+ return Just(boolean_trap_result);
}
-Maybe<bool> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name,
- Handle<Object> value,
- ShouldThrow should_throw) {
+Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
+ Handle<Object> value, Handle<Object> receiver,
+ LanguageMode language_mode) {
+ DCHECK(!name->IsPrivate());
Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->set_string();
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return Just(true);
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
- Handle<Object> args[] = { receiver, name, value };
- RETURN_ON_EXCEPTION_VALUE(isolate,
- CallTrap(proxy, "set", isolate->derived_set_trap(),
- arraysize(args), args),
- Nothing<bool>());
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ LookupIterator it =
+ LookupIterator::PropertyOrElement(isolate, receiver, name, target);
+ return Object::SetSuperProperty(&it, value, language_mode,
+ Object::MAY_BE_STORE_FROM_KEYED);
+ }
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name, value, receiver};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, name));
+ }
+
+ // Enforce the invariant.
+ PropertyDescriptor target_desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ bool inconsistent = PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ !target_desc.writable() &&
+ !value->SameValue(*target_desc.value());
+ if (inconsistent) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenData, name));
+ return Nothing<bool>();
+ }
+ inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
+ !target_desc.configurable() &&
+ target_desc.set()->IsUndefined();
+ if (inconsistent) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetFrozenAccessor, name));
+ return Nothing<bool>();
+ }
+ }
return Just(true);
- // TODO(neis): This needs to be made spec-conformant by looking at the
- // trap's result.
}
-Maybe<bool> JSProxy::SetPropertyViaPrototypesWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, ShouldThrow should_throw, bool* done) {
+Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
+ Handle<Name> name,
+ LanguageMode language_mode) {
+ DCHECK(!name->IsPrivate());
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
Isolate* isolate = proxy->GetIsolate();
- Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy.
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->deleteProperty_string();
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) {
- *done = false; // Return value will be ignored.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
return Nothing<bool>();
}
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
- *done = true; // except where redefined...
- Handle<Object> args[] = { name };
- Handle<Object> result;
+ Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result, CallTrap(proxy, "getPropertyDescriptor",
- Handle<Object>(), arraysize(args), args),
- Nothing<bool>());
-
- if (result->IsUndefined()) {
- *done = false; // Return value will be ignored.
- return Nothing<bool>();
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ return JSReceiver::DeletePropertyOrElement(target, name, language_mode);
}
- // Emulate [[GetProperty]] semantics for proxies.
- Handle<Object> argv[] = { result };
- Handle<Object> desc;
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target, name};
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, desc,
- Execution::Call(isolate, isolate->to_complete_property_descriptor(),
- result, arraysize(argv), argv),
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
Nothing<bool>());
-
- // [[GetProperty]] requires to check that all properties are configurable.
- Handle<String> configurable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("configurable_"));
- Handle<Object> configurable =
- Object::GetProperty(desc, configurable_name).ToHandleChecked();
- DCHECK(configurable->IsBoolean());
- if (configurable->IsFalse()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyPropNotConfigurable, handler, name,
- isolate->factory()->NewStringFromAsciiChecked(
- "getPropertyDescriptor")));
+ if (!trap_result->BooleanValue()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, name));
+ }
+
+ // Enforce the invariant.
+ PropertyDescriptor target_desc;
+ Maybe<bool> owned =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust() && !target_desc.configurable()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
return Nothing<bool>();
}
- DCHECK(configurable->IsTrue());
-
- // Check for DataDescriptor.
- Handle<String> hasWritable_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("hasWritable_"));
- Handle<Object> hasWritable =
- Object::GetProperty(desc, hasWritable_name).ToHandleChecked();
- DCHECK(hasWritable->IsBoolean());
- if (hasWritable->IsTrue()) {
- Handle<String> writable_name = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("writable_"));
- Handle<Object> writable =
- Object::GetProperty(desc, writable_name).ToHandleChecked();
- DCHECK(writable->IsBoolean());
- *done = writable->IsFalse();
- if (!*done) return Nothing<bool>(); // Return value will be ignored.
- return WriteToReadOnlyProperty(isolate, receiver, name, value,
- should_throw);
- }
-
- // We have an AccessorDescriptor.
- Handle<String> set_name =
- isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("set_"));
- Handle<Object> setter = Object::GetProperty(desc, set_name).ToHandleChecked();
- if (!setter->IsUndefined()) {
- // TODO(rossberg): nicer would be to cast to some JSCallable here...
- return SetPropertyWithDefinedSetter(
- receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
- }
-
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kNoSetterInCallback, name, proxy));
+ return Just(true);
}
-MaybeHandle<Object> JSProxy::DeletePropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode) {
- Isolate* isolate = proxy->GetIsolate();
+// static
+MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
+ Handle<Object> handler) {
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
+ JSProxy);
+ }
+ if (target->IsJSProxy() && JSProxy::cast(*target)->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
+ JSProxy);
+ }
+ if (!handler->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
+ JSProxy);
+ }
+ if (handler->IsJSProxy() && JSProxy::cast(*handler)->IsRevoked()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
+ JSProxy);
+ }
+ return isolate->factory()->NewJSProxy(Handle<JSReceiver>::cast(target),
+ Handle<JSReceiver>::cast(handler));
+}
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return isolate->factory()->false_value();
- Handle<Object> args[] = { name };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- CallTrap(proxy,
- "delete",
- Handle<Object>(),
- arraysize(args),
- args),
- Object);
-
- bool result_bool = result->BooleanValue();
- if (is_strict(language_mode) && !result_bool) {
- Handle<Object> handler(proxy->handler(), isolate);
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kProxyHandlerDeleteFailed, handler),
- Object);
+// static
+MaybeHandle<Context> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
+ DCHECK(proxy->map()->is_constructor());
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR(proxy->GetIsolate(),
+ NewTypeError(MessageTemplate::kProxyRevoked), Context);
}
- return isolate->factory()->ToBoolean(result_bool);
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()));
+ return JSReceiver::GetFunctionRealm(target);
}
-Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name) {
- Isolate* isolate = proxy->GetIsolate();
- HandleScope scope(isolate);
+// static
+MaybeHandle<Context> JSBoundFunction::GetFunctionRealm(
+ Handle<JSBoundFunction> function) {
+ DCHECK(function->map()->is_constructor());
+ return JSReceiver::GetFunctionRealm(
+ handle(function->bound_target_function()));
+}
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return Just(ABSENT);
- Handle<Object> args[] = { name };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result, proxy->CallTrap(proxy, "getPropertyDescriptor",
- Handle<Object>(), arraysize(args), args),
- Nothing<PropertyAttributes>());
+// static
+Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
+ DCHECK(function->map()->is_constructor());
+ return handle(function->context()->native_context());
+}
- if (result->IsUndefined()) return Just(ABSENT);
- Handle<Object> argv[] = { result };
- Handle<Object> desc;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, desc,
- Execution::Call(isolate, isolate->to_complete_property_descriptor(),
- result, arraysize(argv), argv),
- Nothing<PropertyAttributes>());
-
- // Convert result to PropertyAttributes.
- Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("enumerable_"));
- Handle<Object> enumerable;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, enumerable,
- Object::GetProperty(desc, enum_n),
- Nothing<PropertyAttributes>());
- Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("configurable_"));
- Handle<Object> configurable;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, configurable,
- Object::GetProperty(desc, conf_n),
- Nothing<PropertyAttributes>());
- Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("writable_"));
- Handle<Object> writable;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, writable,
- Object::GetProperty(desc, writ_n),
- Nothing<PropertyAttributes>());
- if (!writable->BooleanValue()) {
- Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("set_"));
- Handle<Object> setter;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, setter,
- Object::GetProperty(desc, set_n),
- Nothing<PropertyAttributes>());
- writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
- }
-
- if (configurable->IsFalse()) {
- Handle<Object> handler(proxy->handler(), isolate);
- Handle<String> trap = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("getPropertyDescriptor"));
- Handle<Object> error = isolate->factory()->NewTypeError(
- MessageTemplate::kProxyPropNotConfigurable, handler, name, trap);
- isolate->Throw(*error);
- return Nothing<PropertyAttributes>();
- }
-
- int attributes = NONE;
- if (!enumerable->BooleanValue()) attributes |= DONT_ENUM;
- if (!configurable->BooleanValue()) attributes |= DONT_DELETE;
- if (!writable->BooleanValue()) attributes |= READ_ONLY;
- return Just(static_cast<PropertyAttributes>(attributes));
-}
-
-
-void JSProxy::Fix(Handle<JSProxy> proxy) {
- Isolate* isolate = proxy->GetIsolate();
+// static
+MaybeHandle<Context> JSObject::GetFunctionRealm(Handle<JSObject> object) {
+ DCHECK(object->map()->is_constructor());
+ DCHECK(!object->IsJSFunction());
+ return handle(object->GetCreationContext());
+}
- // Save identity hash.
- Handle<Object> hash(proxy->GetIdentityHash(), isolate);
- if (proxy->IsJSFunctionProxy()) {
- isolate->factory()->BecomeJSFunction(proxy);
- // Code will be set on the JavaScript side.
- } else {
- isolate->factory()->BecomeJSObject(proxy);
+// static
+MaybeHandle<Context> JSReceiver::GetFunctionRealm(Handle<JSReceiver> receiver) {
+ if (receiver->IsJSProxy()) {
+ return JSProxy::GetFunctionRealm(Handle<JSProxy>::cast(receiver));
}
- DCHECK(proxy->IsJSObject());
- // Inherit identity, if it was present.
- if (hash->IsSmi()) {
- JSObject::SetIdentityHash(Handle<JSObject>::cast(proxy),
- Handle<Smi>::cast(hash));
+ if (receiver->IsJSFunction()) {
+ return JSFunction::GetFunctionRealm(Handle<JSFunction>::cast(receiver));
}
-}
+ if (receiver->IsJSBoundFunction()) {
+ return JSBoundFunction::GetFunctionRealm(
+ Handle<JSBoundFunction>::cast(receiver));
+ }
-MaybeHandle<Object> JSProxy::CallTrap(Handle<JSProxy> proxy,
- const char* name,
- Handle<Object> derived,
- int argc,
- Handle<Object> argv[]) {
- Isolate* isolate = proxy->GetIsolate();
- Handle<Object> handler(proxy->handler(), isolate);
+ return JSObject::GetFunctionRealm(Handle<JSObject>::cast(receiver));
+}
- Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name);
- Handle<Object> trap;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, trap,
- Object::GetPropertyOrElement(handler, trap_name),
- Object);
- if (trap->IsUndefined()) {
- if (derived.is_null()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kProxyHandlerTrapMissing,
- handler, trap_name),
- Object);
- }
- trap = Handle<Object>(derived);
- }
-
- return Execution::Call(isolate, trap, handler, argc, argv);
+Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
+ Isolate* isolate = it->isolate();
+ HandleScope scope(isolate);
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
+ isolate, it->GetHolder<JSProxy>(), it->GetName(), &desc);
+ MAYBE_RETURN(found, Nothing<PropertyAttributes>());
+ if (!found.FromJust()) return Just(ABSENT);
+ return Just(desc.ToAttributes());
}
@@ -4963,28 +5376,6 @@ MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
}
-Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
- Handle<Object> value) {
- DCHECK(it->GetReceiver()->IsJSObject());
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(it);
- if (maybe.IsNothing()) return Nothing<bool>();
-
- if (it->IsFound()) {
- if (!it->IsConfigurable()) return Just(false);
- } else {
- if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver())))
- return Just(false);
- }
-
- RETURN_ON_EXCEPTION_VALUE(
- it->isolate(),
- DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
- Nothing<bool>());
-
- return Just(true);
-}
-
-
Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
LookupIterator* it) {
Isolate* isolate = it->isolate();
@@ -5012,6 +5403,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
result = args.Call(query, index);
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
v8::GenericNamedPropertyQueryCallback query =
v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
interceptor->query());
@@ -5037,7 +5429,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
result = args.Call(getter, index);
} else {
Handle<Name> name = it->name();
-
+ DCHECK(!name->IsPrivate());
v8::GenericNamedPropertyGetterCallback getter =
v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
interceptor->getter());
@@ -5061,8 +5453,7 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::JSPROXY:
- return JSProxy::GetPropertyAttributesWithHandler(
- it->GetHolder<JSProxy>(), it->GetReceiver(), it->GetName());
+ return JSProxy::GetPropertyAttributes(it);
case LookupIterator::INTERCEPTOR: {
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -5451,7 +5842,7 @@ void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
dictionary->set_requires_slow_elements();
// TODO(verwaest): Remove this hack.
if (map()->is_prototype_map()) {
- GetHeap()->ClearAllKeyedStoreICs();
+ TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
}
}
@@ -5582,7 +5973,6 @@ Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
}
-
Isolate* isolate = object->GetIsolate();
Handle<Object> maybe_hash(object->GetIdentityHash(), isolate);
@@ -5757,8 +6147,7 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
}
-MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
- LookupIterator* it) {
+Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -5766,7 +6155,7 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->deleter()->IsUndefined()) return MaybeHandle<Object>();
+ if (interceptor->deleter()->IsUndefined()) return Nothing<bool>();
Handle<JSObject> holder = it->GetHolder<JSObject>();
@@ -5781,9 +6170,10 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
ApiIndexedPropertyAccess("interceptor-indexed-delete", *holder, index));
result = args.Call(deleter, index);
} else if (it->name()->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return MaybeHandle<Object>();
+ return Nothing<bool>();
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
v8::GenericNamedPropertyDeleterCallback deleter =
v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
interceptor->deleter());
@@ -5792,25 +6182,26 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
result = args.Call(deleter, v8::Utils::ToLocal(name));
}
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.IsEmpty()) return MaybeHandle<Object>();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.IsEmpty()) return Nothing<bool>();
DCHECK(result->IsBoolean());
Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
result_internal->VerifyApiCallResultType();
// Rebox CustomArguments::kReturnValueOffset before returning.
- return handle(*result_internal, isolate);
+ return Just(result_internal->BooleanValue());
}
-void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name, int entry) {
+void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
+ Handle<Name> name, int entry) {
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
- Handle<GlobalDictionary> dictionary(object->global_dictionary());
+ Handle<GlobalDictionary> dictionary(
+ JSObject::cast(*object)->global_dictionary());
DCHECK_NE(GlobalDictionary::kNotFound, entry);
auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
@@ -5830,15 +6221,23 @@ void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
}
-// ECMA-262, 3rd, 8.6.2.5
-MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
- LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
+ LanguageMode language_mode) {
Isolate* isolate = it->isolate();
+
if (it->state() == LookupIterator::JSPROXY) {
- return JSProxy::DeletePropertyWithHandler(it->GetHolder<JSProxy>(),
- it->GetName(), language_mode);
+ return JSProxy::DeletePropertyOrElement(it->GetHolder<JSProxy>(),
+ it->GetName(), language_mode);
}
+ if (it->GetReceiver()->IsJSProxy()) {
+ if (it->state() != LookupIterator::NOT_FOUND) {
+ DCHECK_EQ(LookupIterator::DATA, it->state());
+ DCHECK(it->GetName()->IsPrivate());
+ it->Delete();
+ }
+ return Just(true);
+ }
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
bool is_observed =
@@ -5856,19 +6255,20 @@ MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
case LookupIterator::ACCESS_CHECK:
if (it->HasAccess()) break;
isolate->ReportFailedAccessCheck(it->GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return it->factory()->false_value();
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(false);
case LookupIterator::INTERCEPTOR: {
- MaybeHandle<Object> maybe_result =
- JSObject::DeletePropertyWithInterceptor(it);
- // Delete with interceptor succeeded. Return result.
- if (!maybe_result.is_null()) return maybe_result;
+ Maybe<bool> result = JSObject::DeletePropertyWithInterceptor(it);
// An exception was thrown in the interceptor. Propagate.
- if (isolate->has_pending_exception()) return maybe_result;
+ if (isolate->has_pending_exception()) return Nothing<bool>();
+ // Delete with interceptor succeeded. Return result.
+ // TODO(neis): In strict mode, we should probably throw if the
+ // interceptor returns false.
+ if (result.IsJust()) return result;
break;
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return it->factory()->true_value();
+ return Just(true);
case LookupIterator::DATA:
if (is_observed) {
old_value = it->GetDataValue();
@@ -5882,49 +6282,50 @@ MaybeHandle<Object> JSReceiver::DeleteProperty(LookupIterator* it,
receiver->map()->is_strong()
? MessageTemplate::kStrongDeleteProperty
: MessageTemplate::kStrictDeleteProperty;
- THROW_NEW_ERROR(
- isolate, NewTypeError(templ, it->GetName(), receiver), Object);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ templ, it->GetName(), receiver));
+ return Nothing<bool>();
}
- return it->factory()->false_value();
+ return Just(false);
}
it->Delete();
if (is_observed) {
- RETURN_ON_EXCEPTION(isolate,
- JSObject::EnqueueChangeRecord(
- receiver, "delete", it->GetName(), old_value),
- Object);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, JSObject::EnqueueChangeRecord(receiver, "delete",
+ it->GetName(), old_value),
+ Nothing<bool>());
}
- return it->factory()->true_value();
+ return Just(true);
}
}
}
- return it->factory()->true_value();
+ return Just(true);
}
-MaybeHandle<Object> JSReceiver::DeleteElement(Handle<JSReceiver> object,
- uint32_t index,
- LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
+ LanguageMode language_mode) {
LookupIterator it(object->GetIsolate(), object, index,
LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
}
-MaybeHandle<Object> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
- Handle<Name> name,
- LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
LookupIterator it(object, name, LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
}
-MaybeHandle<Object> JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver> object, Handle<Name> name, LanguageMode language_mode) {
+Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
+ Handle<Name> name,
+ LanguageMode language_mode) {
LookupIterator it = LookupIterator::PropertyOrElement(
name->GetIsolate(), object, name, LookupIterator::HIDDEN);
return DeleteProperty(&it, language_mode);
@@ -5961,8 +6362,7 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
Handle<Object> key,
Handle<Object> attributes) {
// 1. If Type(O) is not Object, throw a TypeError exception.
- // TODO(jkummerow): Implement Proxy support, change to "IsSpecObject".
- if (!object->IsJSObject()) {
+ if (!object->IsJSReceiver()) {
Handle<String> fun_name =
isolate->factory()->InternalizeUtf8String("Object.defineProperty");
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -5978,11 +6378,11 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
return isolate->heap()->exception();
}
// 6. Let success be DefinePropertyOrThrow(O,key, desc).
- bool success = DefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
- &desc, THROW_ON_ERROR);
+ Maybe<bool> success = DefineOwnProperty(
+ isolate, Handle<JSReceiver>::cast(object), key, &desc, THROW_ON_ERROR);
// 7. ReturnIfAbrupt(success).
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- CHECK(success == true);
+ MAYBE_RETURN(success, isolate->heap()->exception());
+ CHECK(success.FromJust());
// 8. Return O.
return *object;
}
@@ -5990,32 +6390,35 @@ Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
// ES6 19.1.2.3.1
// static
-Object* JSReceiver::DefineProperties(Isolate* isolate, Handle<Object> object,
- Handle<Object> properties) {
+MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> properties) {
// 1. If Type(O) is not Object, throw a TypeError exception.
- // TODO(jkummerow): Implement Proxy support, change to "IsSpecObject".
- if (!object->IsJSObject()) {
+ if (!object->IsJSReceiver()) {
Handle<String> fun_name =
isolate->factory()->InternalizeUtf8String("Object.defineProperties");
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name));
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledOnNonObject, fun_name),
+ Object);
}
// 2. Let props be ToObject(Properties).
// 3. ReturnIfAbrupt(props).
Handle<JSReceiver> props;
if (!Object::ToObject(isolate, properties).ToHandle(&props)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+ Object);
}
// 4. Let keys be props.[[OwnPropertyKeys]]().
// 5. ReturnIfAbrupt(keys).
Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ ASSIGN_RETURN_ON_EXCEPTION(
isolate, keys,
- JSReceiver::GetKeys(props, JSReceiver::OWN_ONLY, INCLUDE_SYMBOLS));
+ JSReceiver::GetKeys(props, JSReceiver::OWN_ONLY, ALL_PROPERTIES), Object);
// 6. Let descriptors be an empty List.
int capacity = keys->length();
std::vector<PropertyDescriptor> descriptors(capacity);
+ size_t descriptors_index = 0;
// 7. Repeat for each element nextKey of keys in List order,
for (int i = 0; i < keys->length(); ++i) {
Handle<Object> next_key(keys->get(i), isolate);
@@ -6025,58 +6428,60 @@ Object* JSReceiver::DefineProperties(Isolate* isolate, Handle<Object> object,
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, props, next_key, &success, LookupIterator::HIDDEN);
DCHECK(success);
- // TODO(jkummerow): Support JSProxies. Make sure we call the correct
- // getOwnPropertyDescriptor trap, and convert the result object to a
- // PropertyDescriptor.
- Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return isolate->heap()->exception();
+ Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
PropertyAttributes attrs = maybe.FromJust();
// 7c. If propDesc is not undefined and propDesc.[[Enumerable]] is true:
if (attrs == ABSENT) continue;
- // GetKeys() only returns enumerable keys.
- DCHECK((attrs & DONT_ENUM) == 0);
+ if (attrs & DONT_ENUM) continue;
// 7c i. Let descObj be Get(props, nextKey).
// 7c ii. ReturnIfAbrupt(descObj).
Handle<Object> desc_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, desc_obj,
- JSObject::GetProperty(&it));
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, desc_obj, Object::GetProperty(&it),
+ Object);
// 7c iii. Let desc be ToPropertyDescriptor(descObj).
- success = PropertyDescriptor::ToPropertyDescriptor(isolate, desc_obj,
- &descriptors[i]);
+ success = PropertyDescriptor::ToPropertyDescriptor(
+ isolate, desc_obj, &descriptors[descriptors_index]);
// 7c iv. ReturnIfAbrupt(desc).
- if (!success) return isolate->heap()->exception();
+ if (!success) return MaybeHandle<Object>();
// 7c v. Append the pair (a two element List) consisting of nextKey and
// desc to the end of descriptors.
- descriptors[i].set_name(next_key);
+ descriptors[descriptors_index].set_name(next_key);
+ descriptors_index++;
}
// 8. For each pair from descriptors in list order,
- for (size_t i = 0; i < descriptors.size(); ++i) {
+ for (size_t i = 0; i < descriptors_index; ++i) {
PropertyDescriptor* desc = &descriptors[i];
// 8a. Let P be the first element of pair.
// 8b. Let desc be the second element of pair.
// 8c. Let status be DefinePropertyOrThrow(O, P, desc).
- bool status = DefineOwnProperty(isolate, Handle<JSObject>::cast(object),
- desc->name(), desc, THROW_ON_ERROR);
+ Maybe<bool> status =
+ DefineOwnProperty(isolate, Handle<JSReceiver>::cast(object),
+ desc->name(), desc, THROW_ON_ERROR);
// 8d. ReturnIfAbrupt(status).
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- CHECK(status == true);
+ if (!status.IsJust()) return MaybeHandle<Object>();
+ CHECK(status.FromJust());
}
// 9. Return o.
- return *object;
+ return object;
}
// static
-bool JSReceiver::DefineOwnProperty(Isolate* isolate, Handle<JSReceiver> object,
- Handle<Object> key, PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
if (object->IsJSArray()) {
return JSArray::DefineOwnProperty(isolate, Handle<JSArray>::cast(object),
key, desc, should_throw);
}
+ if (object->IsJSProxy()) {
+ return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
+ key, desc, should_throw);
+ }
// TODO(jkummerow): Support Modules (ES6 9.4.6.6)
- // TODO(jkummerow): Support Proxies (ES6 9.5.6)
- if (!object->IsJSObject()) return true;
// OrdinaryDefineOwnProperty, by virtue of calling
// DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
@@ -6088,11 +6493,11 @@ bool JSReceiver::DefineOwnProperty(Isolate* isolate, Handle<JSReceiver> object,
// static
-bool JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> key,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
bool success = false;
DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -6103,8 +6508,8 @@ bool JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
if (it.state() == LookupIterator::ACCESS_CHECK) {
if (!it.HasAccess()) {
isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, false);
- return false;
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(true);
}
it.Next();
}
@@ -6115,18 +6520,15 @@ bool JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
// ES6 9.1.6.1
// static
-bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
Isolate* isolate = it->isolate();
- // == OrdinaryDefineOwnProperty (O, P, Desc) ==
// 1. Let current be O.[[GetOwnProperty]](P).
// 2. ReturnIfAbrupt(current).
PropertyDescriptor current;
- if (!GetOwnPropertyDescriptor(it, &current) &&
- isolate->has_pending_exception()) {
- return false;
- }
+ MAYBE_RETURN(GetOwnPropertyDescriptor(it, &current), Nothing<bool>());
+
// TODO(jkummerow/verwaest): It would be nice if we didn't have to reset
// the iterator every time. Currently, the reasons why we need it are:
// - handle interceptors correctly
@@ -6136,22 +6538,47 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
bool extensible = JSObject::IsExtensible(object);
+ return ValidateAndApplyPropertyDescriptor(isolate, it, extensible, desc,
+ &current, should_throw);
+}
+
+
+// ES6 9.1.6.2
+// static
+Maybe<bool> JSReceiver::IsCompatiblePropertyDescriptor(
+ Isolate* isolate, bool extensible, PropertyDescriptor* desc,
+ PropertyDescriptor* current, Handle<Name> property_name,
+ ShouldThrow should_throw) {
+ // 1. Return ValidateAndApplyPropertyDescriptor(undefined, undefined,
+ // Extensible, Desc, Current).
+ return ValidateAndApplyPropertyDescriptor(
+ isolate, NULL, extensible, desc, current, should_throw, property_name);
+}
+
+
+// ES6 9.1.6.3
+// static
+Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
+ Isolate* isolate, LookupIterator* it, bool extensible,
+ PropertyDescriptor* desc, PropertyDescriptor* current,
+ ShouldThrow should_throw, Handle<Name> property_name) {
+ // We either need a LookupIterator, or a property name.
+ DCHECK((it == NULL) != property_name.is_null());
+ Handle<JSObject> object;
+ if (it != NULL) object = Handle<JSObject>::cast(it->GetReceiver());
bool desc_is_data_descriptor = PropertyDescriptor::IsDataDescriptor(desc);
bool desc_is_accessor_descriptor =
PropertyDescriptor::IsAccessorDescriptor(desc);
bool desc_is_generic_descriptor =
PropertyDescriptor::IsGenericDescriptor(desc);
-
- // == ValidateAndApplyPropertyDescriptor (O, P, extensible, Desc, current) ==
+ // 1. (Assert)
// 2. If current is undefined, then
- if (current.is_empty()) {
+ if (current->is_empty()) {
// 2a. If extensible is false, return false.
if (!extensible) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kDefineDisallowed, it->GetName()));
- }
- return false;
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kDefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
// 2c. If IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true, then:
// (This is equivalent to !IsAccessorDescriptor(desc).)
@@ -6163,7 +6590,7 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
// [[Configurable]] attribute values are described by Desc. If the value
// of an attribute field of Desc is absent, the attribute of the newly
// created property is set to its default value.
- if (!object->IsUndefined()) {
+ if (it != NULL) {
if (!desc->has_writable()) desc->set_writable(false);
if (!desc->has_enumerable()) desc->set_enumerable(false);
if (!desc->has_configurable()) desc->set_configurable(false);
@@ -6174,7 +6601,7 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
MaybeHandle<Object> result =
JSObject::DefineOwnPropertyIgnoreAttributes(
it, value, desc->ToAttributes(), JSObject::DONT_FORCE_FIELD);
- if (result.is_null()) return false;
+ if (result.is_null()) return Nothing<bool>();
}
} else {
// 2d. Else Desc must be an accessor Property Descriptor,
@@ -6184,7 +6611,7 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
// [[Configurable]] attribute values are described by Desc. If the value
// of an attribute field of Desc is absent, the attribute of the newly
// created property is set to its default value.
- if (!object->IsUndefined()) {
+ if (it != NULL) {
if (!desc->has_enumerable()) desc->set_enumerable(false);
if (!desc->has_configurable()) desc->set_configurable(false);
Handle<Object> getter(
@@ -6197,53 +6624,50 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
: Handle<Object>::cast(isolate->factory()->null_value()));
MaybeHandle<Object> result =
JSObject::DefineAccessor(it, getter, setter, desc->ToAttributes());
- if (result.is_null()) return false;
+ if (result.is_null()) return Nothing<bool>();
}
}
// 2e. Return true.
- return true;
+ return Just(true);
}
// 3. Return true, if every field in Desc is absent.
// 4. Return true, if every field in Desc also occurs in current and the
// value of every field in Desc is the same value as the corresponding field
// in current when compared using the SameValue algorithm.
- if ((!desc->has_enumerable() || desc->enumerable() == current.enumerable()) &&
+ if ((!desc->has_enumerable() ||
+ desc->enumerable() == current->enumerable()) &&
(!desc->has_configurable() ||
- desc->configurable() == current.configurable()) &&
+ desc->configurable() == current->configurable()) &&
(!desc->has_value() ||
- (current.has_value() && current.value()->SameValue(*desc->value()))) &&
+ (current->has_value() && current->value()->SameValue(*desc->value()))) &&
(!desc->has_writable() ||
- (current.has_writable() && current.writable() == desc->writable())) &&
+ (current->has_writable() && current->writable() == desc->writable())) &&
(!desc->has_get() ||
- (current.has_get() && current.get()->SameValue(*desc->get()))) &&
+ (current->has_get() && current->get()->SameValue(*desc->get()))) &&
(!desc->has_set() ||
- (current.has_set() && current.set()->SameValue(*desc->set())))) {
- return true;
+ (current->has_set() && current->set()->SameValue(*desc->set())))) {
+ return Just(true);
}
// 5. If the [[Configurable]] field of current is false, then
- if (!current.configurable()) {
+ if (!current->configurable()) {
// 5a. Return false, if the [[Configurable]] field of Desc is true.
if (desc->has_configurable() && desc->configurable()) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
// 5b. Return false, if the [[Enumerable]] field of Desc is present and the
// [[Enumerable]] fields of current and Desc are the Boolean negation of
// each other.
- if (desc->has_enumerable() && desc->enumerable() != current.enumerable()) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ if (desc->has_enumerable() && desc->enumerable() != current->enumerable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
}
bool current_is_data_descriptor =
- PropertyDescriptor::IsDataDescriptor(&current);
+ PropertyDescriptor::IsDataDescriptor(current);
// 6. If IsGenericDescriptor(Desc) is true, no further validation is required.
if (desc_is_generic_descriptor) {
// Nothing to see here.
@@ -6252,12 +6676,10 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
// different results, then:
} else if (current_is_data_descriptor != desc_is_data_descriptor) {
// 7a. Return false, if the [[Configurable]] field of current is false.
- if (!current.configurable()) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ if (!current->configurable()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
// 7b. If IsDataDescriptor(current) is true, then:
if (current_is_data_descriptor) {
@@ -6280,70 +6702,63 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
// true, then:
} else if (current_is_data_descriptor && desc_is_data_descriptor) {
// 8a. If the [[Configurable]] field of current is false, then:
- if (!current.configurable()) {
+ if (!current->configurable()) {
// [Strong mode] Disallow changing writable -> readonly for
// non-configurable properties.
- if (current.writable() && desc->has_writable() && !desc->writable() &&
- object->map()->is_strong()) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kStrongRedefineDisallowed, object,
- it->GetName()));
- }
- return false;
+ if (it != NULL && current->writable() && desc->has_writable() &&
+ !desc->writable() && object->map()->is_strong()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrongRedefineDisallowed,
+ object, it->GetName()));
}
// 8a i. Return false, if the [[Writable]] field of current is false and
// the [[Writable]] field of Desc is true.
- if (!current.writable() && desc->has_writable() && desc->writable()) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ if (!current->writable() && desc->has_writable() && desc->writable()) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
// 8a ii. If the [[Writable]] field of current is false, then:
- if (!current.writable()) {
+ if (!current->writable()) {
// 8a ii 1. Return false, if the [[Value]] field of Desc is present and
// SameValue(Desc.[[Value]], current.[[Value]]) is false.
- if (desc->has_value() && !desc->value()->SameValue(*current.value())) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ if (desc->has_value() && !desc->value()->SameValue(*current->value())) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
}
}
} else {
// 9. Else IsAccessorDescriptor(current) and IsAccessorDescriptor(Desc)
// are both true,
- DCHECK(PropertyDescriptor::IsAccessorDescriptor(&current) &&
+ DCHECK(PropertyDescriptor::IsAccessorDescriptor(current) &&
desc_is_accessor_descriptor);
// 9a. If the [[Configurable]] field of current is false, then:
- if (!current.configurable()) {
+ if (!current->configurable()) {
// 9a i. Return false, if the [[Set]] field of Desc is present and
// SameValue(Desc.[[Set]], current.[[Set]]) is false.
- if (desc->has_set() && !desc->set()->SameValue(*current.set())) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ if (desc->has_set() && !desc->set()->SameValue(*current->set())) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
// 9a ii. Return false, if the [[Get]] field of Desc is present and
// SameValue(Desc.[[Get]], current.[[Get]]) is false.
- if (desc->has_get() && !desc->get()->SameValue(*current.get())) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed, it->GetName()));
- }
- return false;
+ if (desc->has_get() && !desc->get()->SameValue(*current->get())) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ it != NULL ? it->GetName() : property_name));
}
}
}
// 10. If O is not undefined, then:
- if (!object->IsUndefined()) {
+ if (it != NULL) {
// 10a. For each field of Desc that is present, set the corresponding
// attribute of the property named P of object O to the value of the field.
PropertyAttributes attrs = NONE;
@@ -6353,14 +6768,14 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
attrs | (desc->enumerable() ? NONE : DONT_ENUM));
} else {
attrs = static_cast<PropertyAttributes>(
- attrs | (current.enumerable() ? NONE : DONT_ENUM));
+ attrs | (current->enumerable() ? NONE : DONT_ENUM));
}
if (desc->has_configurable()) {
attrs = static_cast<PropertyAttributes>(
attrs | (desc->configurable() ? NONE : DONT_DELETE));
} else {
attrs = static_cast<PropertyAttributes>(
- attrs | (current.configurable() ? NONE : DONT_DELETE));
+ attrs | (current->configurable() ? NONE : DONT_DELETE));
}
if (desc_is_data_descriptor ||
(desc_is_generic_descriptor && current_is_data_descriptor)) {
@@ -6369,41 +6784,85 @@ bool JSReceiver::OrdinaryDefineOwnProperty(LookupIterator* it,
attrs | (desc->writable() ? NONE : READ_ONLY));
} else {
attrs = static_cast<PropertyAttributes>(
- attrs | (current.writable() ? NONE : READ_ONLY));
+ attrs | (current->writable() ? NONE : READ_ONLY));
}
Handle<Object> value(
desc->has_value() ? desc->value()
- : current.has_value()
- ? current.value()
+ : current->has_value()
+ ? current->value()
: Handle<Object>::cast(
isolate->factory()->undefined_value()));
MaybeHandle<Object> result = JSObject::DefineOwnPropertyIgnoreAttributes(
it, value, attrs, JSObject::DONT_FORCE_FIELD);
- if (result.is_null()) return false;
+ if (result.is_null()) return Nothing<bool>();
} else {
DCHECK(desc_is_accessor_descriptor ||
(desc_is_generic_descriptor &&
- PropertyDescriptor::IsAccessorDescriptor(&current)));
+ PropertyDescriptor::IsAccessorDescriptor(current)));
Handle<Object> getter(
desc->has_get()
? desc->get()
- : current.has_get()
- ? current.get()
+ : current->has_get()
+ ? current->get()
: Handle<Object>::cast(isolate->factory()->null_value()));
Handle<Object> setter(
desc->has_set()
? desc->set()
- : current.has_set()
- ? current.set()
+ : current->has_set()
+ ? current->set()
: Handle<Object>::cast(isolate->factory()->null_value()));
MaybeHandle<Object> result =
JSObject::DefineAccessor(it, getter, setter, attrs);
- if (result.is_null()) return false;
+ if (result.is_null()) return Nothing<bool>();
}
}
// 11. Return true.
- return true;
+ return Just(true);
+}
+
+
+// static
+Maybe<bool> JSReceiver::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value,
+ ShouldThrow should_throw) {
+ DCHECK(!it->check_prototype_chain());
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
+ Isolate* isolate = receiver->GetIsolate();
+
+ if (receiver->IsJSObject()) {
+ return JSObject::CreateDataProperty(it, value); // Shortcut.
+ }
+
+ PropertyDescriptor new_desc;
+ new_desc.set_value(value);
+ new_desc.set_writable(true);
+ new_desc.set_enumerable(true);
+ new_desc.set_configurable(true);
+
+ return JSReceiver::DefineOwnProperty(isolate, receiver, it->GetName(),
+ &new_desc, should_throw);
+}
+
+
+Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
+ Handle<Object> value) {
+ DCHECK(it->GetReceiver()->IsJSObject());
+ MAYBE_RETURN(JSReceiver::GetPropertyAttributes(it), Nothing<bool>());
+
+ if (it->IsFound()) {
+ if (!it->IsConfigurable()) return Just(false);
+ } else {
+ if (!JSObject::IsExtensible(Handle<JSObject>::cast(it->GetReceiver())))
+ return Just(false);
+ }
+
+ RETURN_ON_EXCEPTION_VALUE(
+ it->isolate(),
+ DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
+ Nothing<bool>());
+
+ return Just(true);
}
@@ -6424,9 +6883,10 @@ bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
// ES6 9.4.2.1
// static
-bool JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
- Handle<Object> name, PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
+ Handle<Object> name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
// 1. Assert: IsPropertyKey(P) is true. ("P" is |name|.)
// 2. If P is "length", then:
// TODO(jkummerow): Check if we need slow string comparison.
@@ -6439,10 +6899,10 @@ bool JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
if (PropertyKeyToArrayIndex(name, &index)) {
// 3a. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
PropertyDescriptor old_len_desc;
- bool success = GetOwnPropertyDescriptor(
+ Maybe<bool> success = GetOwnPropertyDescriptor(
isolate, o, isolate->factory()->length_string(), &old_len_desc);
// 3b. (Assert)
- DCHECK(success);
+ DCHECK(success.FromJust());
USE(success);
// 3c. Let oldLen be oldLenDesc.[[Value]].
uint32_t old_len = 0;
@@ -6454,30 +6914,31 @@ bool JSArray::DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
// return false.
if (index >= old_len && old_len_desc.has_writable() &&
!old_len_desc.writable()) {
- if (should_throw == THROW_ON_ERROR) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kDefineDisallowed, name));
- }
- return false;
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kDefineDisallowed, name));
}
// 3g. Let succeeded be OrdinaryDefineOwnProperty(A, P, Desc).
- bool succeeded =
+ Maybe<bool> succeeded =
OrdinaryDefineOwnProperty(isolate, o, name, desc, should_throw);
- // 3h. (Assert)
+ // 3h. Assert: succeeded is not an abrupt completion.
+ // In our case, if should_throw == THROW_ON_ERROR, it can be!
// 3i. If succeeded is false, return false.
- if (!succeeded) return false;
+ if (succeeded.IsNothing() || !succeeded.FromJust()) return succeeded;
// 3j. If index >= oldLen, then:
if (index >= old_len) {
// 3j i. Set oldLenDesc.[[Value]] to index + 1.
old_len_desc.set_value(isolate->factory()->NewNumberFromUint(index + 1));
// 3j ii. Let succeeded be
// OrdinaryDefineOwnProperty(A, "length", oldLenDesc).
- OrdinaryDefineOwnProperty(isolate, o, isolate->factory()->length_string(),
- &old_len_desc, should_throw);
- // 3j iii. (Assert)
+ succeeded = OrdinaryDefineOwnProperty(isolate, o,
+ isolate->factory()->length_string(),
+ &old_len_desc, should_throw);
+ // 3j iii. Assert: succeeded is true.
+ DCHECK(succeeded.FromJust());
+ USE(succeeded);
}
// 3k. Return true.
- return true;
+ return Just(true);
}
// 4. Return OrdinaryDefineOwnProperty(A, P, Desc).
@@ -6524,9 +6985,9 @@ bool JSArray::AnythingToArrayLength(Isolate* isolate,
// ES6 9.4.2.4
// static
-bool JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
- PropertyDescriptor* desc,
- ShouldThrow should_throw) {
+Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
// 1. If the [[Value]] field of Desc is absent, then
if (!desc->has_value()) {
// 1a. Return OrdinaryDefineOwnProperty(A, "length", Desc).
@@ -6539,20 +7000,17 @@ bool JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
// 3. - 7. Convert Desc.[[Value]] to newLen.
uint32_t new_len = 0;
if (!AnythingToArrayLength(isolate, desc->value(), &new_len)) {
- if (should_throw == THROW_ON_ERROR && !isolate->has_pending_exception()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kCannotConvertToPrimitive));
- }
- return false;
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
}
// 8. Set newLenDesc.[[Value]] to newLen.
// (Done below, if needed.)
// 9. Let oldLenDesc be OrdinaryGetOwnProperty(A, "length").
PropertyDescriptor old_len_desc;
- bool success = GetOwnPropertyDescriptor(
+ Maybe<bool> success = GetOwnPropertyDescriptor(
isolate, a, isolate->factory()->length_string(), &old_len_desc);
// 10. (Assert)
- DCHECK(success);
+ DCHECK(success.FromJust());
USE(success);
// 11. Let oldLen be oldLenDesc.[[Value]].
uint32_t old_len = 0;
@@ -6568,11 +7026,9 @@ bool JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
}
// 13. If oldLenDesc.[[Writable]] is false, return false.
if (!old_len_desc.writable()) {
- if (should_throw == THROW_ON_ERROR)
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kRedefineDisallowed,
- isolate->factory()->length_string()));
- return false;
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed,
+ isolate->factory()->length_string()));
}
// 14. If newLenDesc.[[Writable]] is absent or has the value true,
// let newWritable be true.
@@ -6590,33 +7046,186 @@ bool JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
// Most of steps 16 through 19 is implemented by JSArray::SetLength.
if (JSArray::ObservableSetLength(a, new_len).is_null()) {
DCHECK(isolate->has_pending_exception());
- return false;
+ return Nothing<bool>();
}
// Steps 19d-ii, 20.
if (!new_writable) {
PropertyDescriptor readonly;
readonly.set_writable(false);
- OrdinaryDefineOwnProperty(isolate, a, isolate->factory()->length_string(),
- &readonly, should_throw);
+ Maybe<bool> success = OrdinaryDefineOwnProperty(
+ isolate, a, isolate->factory()->length_string(), &readonly,
+ should_throw);
+ DCHECK(success.FromJust());
+ USE(success);
}
uint32_t actual_new_len = 0;
CHECK(a->length()->ToArrayLength(&actual_new_len));
// Steps 19d-v, 21. Return false if there were non-deletable elements.
- success = actual_new_len == new_len;
- if (!success && should_throw == THROW_ON_ERROR) {
+ bool result = actual_new_len == new_len;
+ if (!result) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kStrictDeleteProperty,
+ isolate->factory()->NewNumberFromUint(actual_new_len - 1),
+ a));
+ }
+ return Just(result);
+}
+
+
+// ES6 9.5.6
+// static
+Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ STACK_CHECK(Nothing<bool>());
+ if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
+ return AddPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
+ should_throw);
+ }
+ Handle<String> trap_name = isolate->factory()->defineProperty_string();
+ // 1. Assert: IsPropertyKey(P) is true.
+ DCHECK(key->IsName() || key->IsNumber());
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kStrictDeleteProperty,
- isolate->factory()->NewNumberFromUint(actual_new_len - 1), a));
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
}
- return success;
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "defineProperty").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then:
+ if (trap->IsUndefined()) {
+ // 7a. Return target.[[DefineOwnProperty]](P, Desc).
+ return JSReceiver::DefineOwnProperty(isolate, target, key, desc,
+ should_throw);
+ }
+ // 8. Let descObj be FromPropertyDescriptor(Desc).
+ Handle<Object> desc_obj = desc->ToObject(isolate);
+ // 9. Let booleanTrapResult be
+ // ToBoolean(? Call(trap, handler, «target, P, descObj»)).
+ Handle<Name> property_name =
+ key->IsName()
+ ? Handle<Name>::cast(key)
+ : Handle<Name>::cast(isolate->factory()->NumberToString(key));
+ // Do not leak private property names.
+ DCHECK(!property_name->IsPrivate());
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, property_name, desc_obj};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 10. If booleanTrapResult is false, return false.
+ if (!trap_result_obj->BooleanValue()) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsishFor,
+ trap_name, property_name));
+ }
+ // 11. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> target_found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, key, &target_desc);
+ MAYBE_RETURN(target_found, Nothing<bool>());
+ // 12. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 13. If Desc has a [[Configurable]] field and if Desc.[[Configurable]]
+ // is false, then:
+ // 13a. Let settingConfigFalse be true.
+ // 14. Else let settingConfigFalse be false.
+ bool setting_config_false = desc->has_configurable() && !desc->configurable();
+ // 15. If targetDesc is undefined, then
+ if (!target_found.FromJust()) {
+ // 15a. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonExtensible, property_name));
+ return Nothing<bool>();
+ }
+ // 15b. If settingConfigFalse is true, throw a TypeError exception.
+ if (setting_config_false) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
+ return Nothing<bool>();
+ }
+ } else {
+ // 16. Else targetDesc is not undefined,
+ // 16a. If IsCompatiblePropertyDescriptor(extensibleTarget, Desc,
+ // targetDesc) is false, throw a TypeError exception.
+ Maybe<bool> valid =
+ IsCompatiblePropertyDescriptor(isolate, extensible_target, desc,
+ &target_desc, property_name, DONT_THROW);
+ MAYBE_RETURN(valid, Nothing<bool>());
+ if (!valid.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyIncompatible, property_name));
+ return Nothing<bool>();
+ }
+ // 16b. If settingConfigFalse is true and targetDesc.[[Configurable]] is
+ // true, throw a TypeError exception.
+ if (setting_config_false && target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
+ return Nothing<bool>();
+ }
+ }
+ // 17. Return true.
+ return Just(true);
}
// static
-bool JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key,
- PropertyDescriptor* desc) {
+Maybe<bool> JSProxy::AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // Despite the generic name, this can only add private data properties.
+ if (!PropertyDescriptor::IsDataDescriptor(desc) ||
+ desc->ToAttributes() != DONT_ENUM) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
+ DCHECK(proxy->map()->is_dictionary_map());
+ Handle<Object> value =
+ desc->has_value()
+ ? desc->value()
+ : Handle<Object>::cast(isolate->factory()->undefined_value());
+
+ LookupIterator it(proxy, private_name);
+
+ if (it.IsFound()) {
+ DCHECK_EQ(LookupIterator::DATA, it.state());
+ DCHECK_EQ(DONT_ENUM, it.property_details().attributes());
+ it.WriteDataValue(value);
+ return Just(true);
+ }
+
+ Handle<NameDictionary> dict(proxy->property_dictionary());
+ PropertyDetails details(DONT_ENUM, DATA, 0, PropertyCellType::kNoCell);
+ Handle<NameDictionary> result =
+ NameDictionary::Add(dict, private_name, value, details);
+ if (!dict.is_identical_to(result)) proxy->set_properties(*result);
+ return Just(true);
+}
+
+
+// static
+Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ PropertyDescriptor* desc) {
bool success = false;
DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -6626,25 +7235,25 @@ bool JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
}
-// TODO(jkummerow): Any chance to unify this with
-// "MaybeHandle<Object> GetOwnProperty()" in runtime-object.cc?
-
-// TODO(jkummerow/verwaest): Proxy support: call getOwnPropertyDescriptor trap
-// and convert the result (if it's an object) with ToPropertyDescriptor.
-
// ES6 9.1.5.1
-// Returns true on success; false if there was an exception or no property.
+// Returns true on success, false if the property didn't exist, nothing if
+// an exception was thrown.
// static
-bool JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
- PropertyDescriptor* desc) {
+Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
+ PropertyDescriptor* desc) {
Isolate* isolate = it->isolate();
+ // "Virtual" dispatch.
+ if (it->IsFound() && it->GetHolder<JSReceiver>()->IsJSProxy()) {
+ return JSProxy::GetOwnPropertyDescriptor(isolate, it->GetHolder<JSProxy>(),
+ it->GetName(), desc);
+ }
+
// 1. (Assert)
// 2. If O does not have an own property with key P, return undefined.
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(it);
-
- if (!maybe.IsJust()) return false;
+ MAYBE_RETURN(maybe, Nothing<bool>());
PropertyAttributes attrs = maybe.FromJust();
- if (attrs == ABSENT) return false;
+ if (attrs == ABSENT) return Just(false);
DCHECK(!isolate->has_pending_exception());
// 3. Let D be a newly created Property Descriptor with no fields.
@@ -6658,7 +7267,7 @@ bool JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
Handle<Object> value;
if (!JSObject::GetProperty(it).ToHandle(&value)) {
DCHECK(isolate->has_pending_exception());
- return false;
+ return Nothing<bool>();
}
desc->set_value(value);
// 5b. Set D.[[Writable]] to the value of X's [[Writable]] attribute
@@ -6680,7 +7289,123 @@ bool JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
// 9. Return D.
DCHECK(PropertyDescriptor::IsAccessorDescriptor(desc) !=
PropertyDescriptor::IsDataDescriptor(desc));
- return true;
+ return Just(true);
+}
+
+
+// ES6 9.5.5
+// static
+Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name,
+ PropertyDescriptor* desc) {
+ DCHECK(!name->IsPrivate());
+ STACK_CHECK(Nothing<bool>());
+
+ Handle<String> trap_name =
+ isolate->factory()->getOwnPropertyDescriptor_string();
+ // 1. (Assert)
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "getOwnPropertyDescriptor").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 7a. Return target.[[GetOwnProperty]](P).
+ return JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, desc);
+ }
+ // 8. Let trapResultObj be ? Call(trap, handler, «target, P»).
+ Handle<Object> trap_result_obj;
+ Handle<Object> args[] = {target, name};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_obj,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 9. If Type(trapResultObj) is neither Object nor Undefined, throw a
+ // TypeError exception.
+ if (!trap_result_obj->IsJSReceiver() && !trap_result_obj->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorInvalid, name));
+ return Nothing<bool>();
+ }
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ PropertyDescriptor target_desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 11. If trapResultObj is undefined, then
+ if (trap_result_obj->IsUndefined()) {
+ // 11a. If targetDesc is undefined, return undefined.
+ if (!found.FromJust()) return Just(false);
+ // 11b. If targetDesc.[[Configurable]] is false, throw a TypeError
+ // exception.
+ if (!target_desc.configurable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorUndefined, name));
+ return Nothing<bool>();
+ }
+ // 11c. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 11d. (Assert)
+ // 11e. If extensibleTarget is false, throw a TypeError exception.
+ if (!extensible_target.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorNonExtensible, name));
+ return Nothing<bool>();
+ }
+ // 11f. Return undefined.
+ return Just(false);
+ }
+ // 12. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> extensible_target = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible_target, Nothing<bool>());
+ // 13. Let resultDesc be ? ToPropertyDescriptor(trapResultObj).
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, trap_result_obj,
+ desc)) {
+ DCHECK(isolate->has_pending_exception());
+ return Nothing<bool>();
+ }
+ // 14. Call CompletePropertyDescriptor(resultDesc).
+ PropertyDescriptor::CompletePropertyDescriptor(isolate, desc);
+ // 15. Let valid be IsCompatiblePropertyDescriptor (extensibleTarget,
+ // resultDesc, targetDesc).
+ Maybe<bool> valid =
+ IsCompatiblePropertyDescriptor(isolate, extensible_target.FromJust(),
+ desc, &target_desc, name, DONT_THROW);
+ MAYBE_RETURN(valid, Nothing<bool>());
+ // 16. If valid is false, throw a TypeError exception.
+ if (!valid.FromJust()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorIncompatible, name));
+ return Nothing<bool>();
+ }
+ // 17. If resultDesc.[[Configurable]] is false, then
+ if (!desc->configurable()) {
+ // 17a. If targetDesc is undefined or targetDesc.[[Configurable]] is true:
+ if (target_desc.is_empty() || target_desc.configurable()) {
+ // 17a i. Throw a TypeError exception.
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyGetOwnPropertyDescriptorNonConfigurable,
+ name));
+ return Nothing<bool>();
+ }
+ }
+ // 18. Return resultDesc.
+ return Just(true);
}
@@ -6819,15 +7544,162 @@ bool JSObject::ReferencesObject(Object* obj) {
}
+Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
+ IntegrityLevel level,
+ ShouldThrow should_throw) {
+ DCHECK(level == SEALED || level == FROZEN);
+
+ if (receiver->IsJSObject()) {
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+ if (!object->HasSloppyArgumentsElements() &&
+ !object->map()->is_observed() &&
+ (!object->map()->is_strong() || level == SEALED)) { // Fast path.
+ if (level == SEALED) {
+ return JSObject::PreventExtensionsWithTransition<SEALED>(object,
+ should_throw);
+ } else {
+ return JSObject::PreventExtensionsWithTransition<FROZEN>(object,
+ should_throw);
+ }
+ }
+ }
+
+ Isolate* isolate = receiver->GetIsolate();
+
+ MAYBE_RETURN(JSReceiver::PreventExtensions(receiver, should_throw),
+ Nothing<bool>());
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys, JSReceiver::OwnPropertyKeys(receiver), Nothing<bool>());
+
+ PropertyDescriptor no_conf;
+ no_conf.set_configurable(false);
+
+ PropertyDescriptor no_conf_no_write;
+ no_conf_no_write.set_configurable(false);
+ no_conf_no_write.set_writable(false);
+
+ if (level == SEALED) {
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ MAYBE_RETURN(
+ DefineOwnProperty(isolate, receiver, key, &no_conf, THROW_ON_ERROR),
+ Nothing<bool>());
+ }
+ return Just(true);
+ }
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ PropertyDescriptor current_desc;
+ Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, receiver, key, &current_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ PropertyDescriptor desc =
+ PropertyDescriptor::IsAccessorDescriptor(&current_desc)
+ ? no_conf
+ : no_conf_no_write;
+ MAYBE_RETURN(
+ DefineOwnProperty(isolate, receiver, key, &desc, THROW_ON_ERROR),
+ Nothing<bool>());
+ }
+ }
+ return Just(true);
+}
+
+
+Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> object,
+ IntegrityLevel level) {
+ DCHECK(level == SEALED || level == FROZEN);
+ Isolate* isolate = object->GetIsolate();
+
+ Maybe<bool> extensible = JSReceiver::IsExtensible(object);
+ MAYBE_RETURN(extensible, Nothing<bool>());
+ if (extensible.FromJust()) return Just(false);
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys, JSReceiver::OwnPropertyKeys(object), Nothing<bool>());
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Object> key(keys->get(i), isolate);
+ PropertyDescriptor current_desc;
+ Maybe<bool> owned = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, object, key, &current_desc);
+ MAYBE_RETURN(owned, Nothing<bool>());
+ if (owned.FromJust()) {
+ if (current_desc.configurable()) return Just(false);
+ if (level == FROZEN &&
+ PropertyDescriptor::IsDataDescriptor(&current_desc) &&
+ current_desc.writable()) {
+ return Just(false);
+ }
+ }
+ }
+ return Just(true);
+}
+
+
Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
ShouldThrow should_throw) {
- if (!object->IsJSObject()) return Just(false);
- // TODO(neis): Deal with proxies.
+ if (object->IsJSProxy()) {
+ return JSProxy::PreventExtensions(Handle<JSProxy>::cast(object),
+ should_throw);
+ }
+ DCHECK(object->IsJSObject());
return JSObject::PreventExtensions(Handle<JSObject>::cast(object),
should_throw);
}
+Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
+ ShouldThrow should_throw) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->preventExtensions_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ return JSReceiver::PreventExtensions(target, should_throw);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ if (!trap_result->BooleanValue()) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+
+ // Enforce the invariant.
+ Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(target_result, Nothing<bool>());
+ if (target_result.FromJust()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyPreventExtensionsExtensible));
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+
Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
ShouldThrow should_throw) {
Isolate* isolate = object->GetIsolate();
@@ -6840,7 +7712,6 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
!isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- UNREACHABLE();
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kNoAccess));
}
@@ -6885,6 +7756,55 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
}
+Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
+ if (object->IsJSProxy()) {
+ return JSProxy::IsExtensible(Handle<JSProxy>::cast(object));
+ }
+ return Just(JSObject::IsExtensible(Handle<JSObject>::cast(object)));
+}
+
+
+Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Factory* factory = isolate->factory();
+ Handle<String> trap_name = factory->isExtensible_string();
+
+ if (proxy->IsRevoked()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ Handle<JSReceiver> handler(JSReceiver::cast(proxy->handler()), isolate);
+
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
+ if (trap->IsUndefined()) {
+ return JSReceiver::IsExtensible(target);
+ }
+
+ Handle<Object> trap_result;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+
+ // Enforce the invariant.
+ Maybe<bool> target_result = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(target_result, Nothing<bool>());
+ if (target_result.FromJust() != trap_result->BooleanValue()) {
+ isolate->Throw(
+ *factory->NewTypeError(MessageTemplate::kProxyIsExtensibleInconsistent,
+ factory->ToBoolean(target_result.FromJust())));
+ return Nothing<bool>();
+ }
+ return target_result;
+}
+
+
bool JSObject::IsExtensible(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
@@ -6939,7 +7859,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
!isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- UNREACHABLE();
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kNoAccess));
}
@@ -7046,20 +7965,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
-MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
- MAYBE_RETURN_NULL(
- PreventExtensionsWithTransition<FROZEN>(object, THROW_ON_ERROR));
- return object;
-}
-
-
-MaybeHandle<Object> JSObject::Seal(Handle<JSObject> object) {
- MAYBE_RETURN_NULL(
- PreventExtensionsWithTransition<SEALED>(object, THROW_ON_ERROR));
- return object;
-}
-
-
void JSObject::SetObserved(Handle<JSObject> object) {
DCHECK(!object->IsJSGlobalProxy());
DCHECK(!object->IsJSGlobalObject());
@@ -7205,22 +8110,20 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
} else {
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(copy->NumberOfOwnProperties());
- copy->GetOwnPropertyNames(*names, 0);
+ // Only deep copy fields from the object literal expression.
+ // In particular, don't try to copy the length attribute of
+ // an array.
+ PropertyFilter filter = static_cast<PropertyFilter>(
+ ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
+ KeyAccumulator accumulator(isolate, filter);
+ accumulator.NextPrototype();
+ copy->CollectOwnPropertyNames(&accumulator, filter);
+ Handle<FixedArray> names = accumulator.GetKeys();
for (int i = 0; i < names->length(); i++) {
- DCHECK(names->get(i)->IsString());
- Handle<String> key_string(String::cast(names->get(i)));
- Maybe<PropertyAttributes> maybe =
- JSReceiver::GetOwnPropertyAttributes(copy, key_string);
- DCHECK(maybe.IsJust());
- PropertyAttributes attributes = maybe.FromJust();
- // Only deep copy fields from the object literal expression.
- // In particular, don't try to copy the length attribute of
- // an array.
- if (attributes != NONE) continue;
+ DCHECK(names->get(i)->IsName());
+ Handle<Name> name(Name::cast(names->get(i)));
Handle<Object> value =
- Object::GetProperty(copy, key_string).ToHandleChecked();
+ Object::GetProperty(copy, name).ToHandleChecked();
if (value->IsJSObject()) {
Handle<JSObject> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -7229,7 +8132,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
JSObject);
if (copying) {
// Creating object copy for literals. No strict mode needed.
- JSObject::SetProperty(copy, key_string, result, SLOPPY).Assert();
+ JSObject::SetProperty(copy, name, result, SLOPPY).Assert();
}
}
}
@@ -7409,6 +8312,71 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
}
+// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
+bool HasEnumerableElements(JSObject* object) {
+ if (object->IsJSValue()) {
+ Object* value = JSValue::cast(object)->value();
+ if (value->IsString()) {
+ if (String::cast(value)->length() > 0) return true;
+ }
+ }
+ switch (object->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ int length = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : object->elements()->length();
+ return length > 0;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ FixedArray* elements = FixedArray::cast(object->elements());
+ int length = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : elements->length();
+ for (int i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) return true;
+ }
+ return false;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ int length = object->IsJSArray()
+ ? Smi::cast(JSArray::cast(object)->length())->value()
+ : object->elements()->length();
+ // Zero-length arrays would use the empty FixedArray...
+ if (length == 0) return false;
+ // ...so only cast to FixedDoubleArray otherwise.
+ FixedDoubleArray* elements = FixedDoubleArray::cast(object->elements());
+ for (int i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) return true;
+ }
+ return false;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ int length = object->elements()->length();
+ return length > 0;
+ }
+ case DICTIONARY_ELEMENTS: {
+ SeededNumberDictionary* elements =
+ SeededNumberDictionary::cast(object->elements());
+ return elements->NumberOfElementsFilterAttributes(ONLY_ENUMERABLE) > 0;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ // We're approximating non-empty arguments objects here.
+ return true;
+ }
+ UNREACHABLE();
+ return true;
+}
+
+
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that
// it is no proxy, has no interceptors and needs no access checks).
@@ -7425,7 +8393,7 @@ bool JSReceiver::IsSimpleEnum() {
if (current->IsAccessCheckNeeded()) return false;
DCHECK(!current->HasNamedInterceptor());
DCHECK(!current->HasIndexedInterceptor());
- if (current->NumberOfEnumElements() > 0) return false;
+ if (HasEnumerableElements(current)) return false;
if (current != this && enum_length != 0) return false;
}
return true;
@@ -7433,7 +8401,7 @@ bool JSReceiver::IsSimpleEnum() {
int Map::NumberOfDescribedProperties(DescriptorFlag which,
- PropertyAttributes filter) {
+ PropertyFilter filter) {
int result = 0;
DescriptorArray* descs = instance_descriptors();
int limit = which == ALL_DESCRIPTORS
@@ -7498,14 +8466,14 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
// If the enum length of the given map is set to kInvalidEnumCache, this
// means that the map itself has never used the present enum cache. The
// first step to using the cache is to set the enum length of the map by
- // counting the number of own descriptors that are not DONT_ENUM or
- // SYMBOLIC.
+ // counting the number of own descriptors that are ENUMERABLE_STRINGS.
if (own_property_count == kInvalidEnumCacheSentinel) {
own_property_count =
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_SHOW);
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
} else {
- DCHECK(own_property_count ==
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_SHOW));
+ DCHECK(
+ own_property_count ==
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
}
if (descs->HasEnumCache()) {
@@ -7592,103 +8560,368 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
}
-MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
- KeyCollectionType type,
- KeyFilter filter,
- GetKeysConversion getConversion) {
- USE(ContainsOnlyValidKeys);
- Isolate* isolate = object->GetIsolate();
- KeyAccumulator accumulator(isolate, filter);
- Handle<JSFunction> arguments_function(
- JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
- PrototypeIterator::WhereToEnd end = type == OWN_ONLY
+enum IndexedOrNamed { kIndexed, kNamed };
+
+
+// Returns |true| on success, |nothing| on exception.
+template <class Callback, IndexedOrNamed type>
+static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ PropertyFilter filter,
+ KeyAccumulator* accumulator) {
+ if (type == kIndexed) {
+ if (!object->HasIndexedInterceptor()) return Just(true);
+ } else {
+ if (!object->HasNamedInterceptor()) return Just(true);
+ }
+ Handle<InterceptorInfo> interceptor(type == kIndexed
+ ? object->GetIndexedInterceptor()
+ : object->GetNamedInterceptor(),
+ isolate);
+ if ((filter & ONLY_ALL_CAN_READ) && !interceptor->all_can_read()) {
+ return Just(true);
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *object);
+ v8::Local<v8::Object> result;
+ if (!interceptor->enumerator()->IsUndefined()) {
+ Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
+ const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
+ : "interceptor-named-enum";
+ LOG(isolate, ApiObjectAccess(log_tag, *object));
+ result = args.Call(enum_fun);
+ }
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.IsEmpty()) return Just(true);
+ DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
+ (v8::Utils::OpenHandle(*result)->IsJSObject() &&
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*result))
+ ->HasSloppyArgumentsElements()));
+ // The accumulator takes care of string/symbol filtering.
+ if (type == kIndexed) {
+ accumulator->AddElementKeysFromInterceptor(
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+ } else {
+ accumulator->AddKeys(
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+ }
+ return Just(true);
+}
+
+
+// Returns |true| on success, |false| if prototype walking should be stopped,
+// |nothing| if an exception was thrown.
+static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ PropertyFilter* filter,
+ JSReceiver::KeyCollectionType type,
+ KeyAccumulator* accumulator) {
+ accumulator->NextPrototype();
+ // Check access rights if required.
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), object)) {
+ // The cross-origin spec says that [[Enumerate]] shall return an empty
+ // iterator when it doesn't have access...
+ if (type == JSReceiver::INCLUDE_PROTOS) {
+ return Just(false);
+ }
+ // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
+ DCHECK(type == JSReceiver::OWN_ONLY);
+ *filter = static_cast<PropertyFilter>(*filter | ONLY_ALL_CAN_READ);
+ }
+
+ JSObject::CollectOwnElementKeys(object, accumulator, *filter);
+
+ // Add the element keys from the interceptor.
+ Maybe<bool> success =
+ GetKeysFromInterceptor<v8::IndexedPropertyEnumeratorCallback, kIndexed>(
+ isolate, receiver, object, *filter, accumulator);
+ MAYBE_RETURN(success, Nothing<bool>());
+
+ if (*filter == ENUMERABLE_STRINGS) {
+ // We can cache the computed property keys if access checks are
+ // not needed and no interceptors are involved.
+ //
+ // We do not use the cache if the object has elements and
+ // therefore it does not make sense to cache the property names
+ // for arguments objects. Arguments objects will always have
+ // elements.
+ // Wrapped strings have elements, but don't have an elements
+ // array or dictionary. So the fast inline test for whether to
+ // use the cache says yes, so we should not create a cache.
+ Handle<JSFunction> arguments_function(
+ JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
+ bool cache_enum_length =
+ ((object->map()->GetConstructor() != *arguments_function) &&
+ !object->IsJSValue() && !object->IsAccessCheckNeeded() &&
+ !object->HasNamedInterceptor() && !object->HasIndexedInterceptor());
+ // Compute the property keys and cache them if possible.
+ Handle<FixedArray> enum_keys =
+ JSObject::GetEnumPropertyKeys(object, cache_enum_length);
+ accumulator->AddKeys(enum_keys);
+ } else {
+ object->CollectOwnPropertyNames(accumulator, *filter);
+ }
+
+ // Add the property keys from the interceptor.
+ success = GetKeysFromInterceptor<v8::GenericNamedPropertyEnumeratorCallback,
+ kNamed>(isolate, receiver, object, *filter,
+ accumulator);
+ MAYBE_RETURN(success, Nothing<bool>());
+ return Just(true);
+}
+
+
+// Helper function for JSReceiver::GetKeys() below. Can be called recursively.
+// Returns |true| or |nothing|.
+static Maybe<bool> GetKeys_Internal(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSReceiver> object,
+ JSReceiver::KeyCollectionType type,
+ PropertyFilter filter,
+ KeyAccumulator* accumulator) {
+ PrototypeIterator::WhereToEnd end = type == JSReceiver::OWN_ONLY
? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
- // Only collect keys if access is permitted.
for (PrototypeIterator iter(isolate, object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(end); iter.Advance()) {
- accumulator.NextPrototype();
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- Handle<JSProxy> proxy = PrototypeIterator::GetCurrent<JSProxy>(iter);
- Handle<Object> args[] = { proxy };
- Handle<Object> names;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, names,
- Execution::Call(isolate,
- isolate->proxy_enumerate(),
- object,
- arraysize(args),
- args),
- FixedArray);
- accumulator.AddKeysFromProxy(Handle<JSObject>::cast(names));
- break;
- }
-
- Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
-
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), current)) {
- if (iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- isolate->ReportFailedAccessCheck(current);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ Handle<JSReceiver> current =
+ PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ Maybe<bool> result = Just(false); // Dummy initialization.
+ if (current->IsJSProxy()) {
+ if (type == JSReceiver::OWN_ONLY) {
+ result = JSProxy::OwnPropertyKeys(isolate, receiver,
+ Handle<JSProxy>::cast(current),
+ filter, accumulator);
+ } else {
+ DCHECK(type == JSReceiver::INCLUDE_PROTOS);
+ result = JSProxy::Enumerate(
+ isolate, receiver, Handle<JSProxy>::cast(current), accumulator);
}
- break;
+ } else {
+ DCHECK(current->IsJSObject());
+ result = GetKeysFromJSObject(isolate, receiver,
+ Handle<JSObject>::cast(current), &filter,
+ type, accumulator);
}
+ MAYBE_RETURN(result, Nothing<bool>());
+ if (!result.FromJust()) break; // |false| means "stop iterating".
+ }
+ return Just(true);
+}
- JSObject::CollectOwnElementKeys(current, &accumulator,
- static_cast<PropertyAttributes>(DONT_ENUM));
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForIndexedInterceptor(current, object)
- .ToHandle(&result)) {
- accumulator.AddElementKeysFromInterceptor(result);
- }
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
- }
+// ES6 9.5.11
+// Returns false in case of exception.
+// static
+Maybe<bool> JSProxy::Enumerate(Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy,
+ KeyAccumulator* accumulator) {
+ STACK_CHECK(Nothing<bool>());
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked,
+ isolate->factory()->enumerate_string()));
+ return Nothing<bool>();
+ }
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 5. Let trap be ? GetMethod(handler, "enumerate").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->enumerate_string()),
+ Nothing<bool>());
+ // 6. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 6a. Return target.[[Enumerate]]().
+ return GetKeys_Internal(isolate, receiver, target, INCLUDE_PROTOS,
+ ENUMERABLE_STRINGS, accumulator);
+ }
+ // The "proxy_enumerate" helper calls the trap (steps 7 - 9), which returns
+ // a generator; it then iterates over that generator until it's exhausted
+ // and returns an array containing the generated values.
+ Handle<Object> trap_result_array;
+ Handle<Object> args[] = {trap, handler, target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_array,
+ Execution::Call(isolate, isolate->proxy_enumerate(),
+ isolate->factory()->undefined_value(), arraysize(args),
+ args),
+ Nothing<bool>());
+ accumulator->NextPrototype();
+ accumulator->AddKeysFromProxy(Handle<JSObject>::cast(trap_result_array));
+ return Just(true);
+}
- if (filter == SKIP_SYMBOLS) {
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- bool cache_enum_length =
- ((current->map()->GetConstructor() != *arguments_function) &&
- !current->IsJSValue() && !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() &&
- !current->HasIndexedInterceptor());
- // Compute the property keys and cache them if possible.
- Handle<FixedArray> enum_keys =
- JSObject::GetEnumPropertyKeys(current, cache_enum_length);
- accumulator.AddKeys(enum_keys);
+
+// ES6 9.5.12
+// Returns |true| on success, |nothing| in case of exception.
+// static
+Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy,
+ PropertyFilter filter,
+ KeyAccumulator* accumulator) {
+ STACK_CHECK(Nothing<bool>());
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate->factory()->ownKeys_string()));
+ return Nothing<bool>();
+ }
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 5. Let trap be ? GetMethod(handler, "ownKeys").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->ownKeys_string()),
+ Nothing<bool>());
+ // 6. If trap is undefined, then
+ if (trap->IsUndefined()) {
+ // 6a. Return target.[[OwnPropertyKeys]]().
+ return GetKeys_Internal(isolate, receiver, target, OWN_ONLY, filter,
+ accumulator);
+ }
+ // 7. Let trapResultArray be Call(trap, handler, «target»).
+ Handle<Object> trap_result_array;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result_array,
+ Execution::Call(isolate, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
+ // «String, Symbol»).
+ Handle<FixedArray> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Object::CreateListFromArrayLike(isolate, trap_result_array,
+ ElementTypes::kStringAndSymbol),
+ Nothing<bool>());
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
+ Handle<FixedArray> target_keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_keys,
+ JSReceiver::OwnPropertyKeys(target),
+ Nothing<bool>());
+ // 11. (Assert)
+ // 12. Let targetConfigurableKeys be an empty List.
+ // To save memory, we're re-using target_keys and will modify it in-place.
+ Handle<FixedArray> target_configurable_keys = target_keys;
+ // 13. Let targetNonconfigurableKeys be an empty List.
+ Handle<FixedArray> target_nonconfigurable_keys =
+ isolate->factory()->NewFixedArray(target_keys->length());
+ int nonconfigurable_keys_length = 0;
+ // 14. Repeat, for each element key of targetKeys:
+ for (int i = 0; i < target_keys->length(); ++i) {
+ // 14a. Let desc be ? target.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, target, handle(target_keys->get(i), isolate), &desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
+ if (found.FromJust() && !desc.configurable()) {
+ // 14b i. Append key as an element of targetNonconfigurableKeys.
+ target_nonconfigurable_keys->set(nonconfigurable_keys_length,
+ target_keys->get(i));
+ nonconfigurable_keys_length++;
+ // The key was moved, null it out in the original list.
+ target_keys->set(i, Smi::FromInt(0));
} else {
- DCHECK(filter == INCLUDE_SYMBOLS);
- PropertyAttributes attr_filter =
- static_cast<PropertyAttributes>(DONT_ENUM | PRIVATE_SYMBOL);
- current->CollectOwnPropertyNames(&accumulator, attr_filter);
- }
-
- // Add the property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForNamedInterceptor(current, object)
- .ToHandle(&result)) {
- accumulator.AddKeys(result);
- }
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
+ // 14c. Else,
+ // 14c i. Append key as an element of targetConfigurableKeys.
+ // (No-op, just keep it in |target_keys|.)
+ }
+ }
+ accumulator->NextPrototype(); // Prepare for accumulating keys.
+ // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
+ // then:
+ if (extensible_target && nonconfigurable_keys_length == 0) {
+ // 15a. Return trapResult.
+ return accumulator->AddKeysFromProxy(proxy, trap_result);
+ }
+ // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
+ Zone set_zone;
+ const int kPresent = 1;
+ const int kGone = 0;
+ IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
+ int unchecked_result_keys_size = trap_result->length();
+ for (int i = 0; i < trap_result->length(); ++i) {
+ DCHECK(trap_result->get(i)->IsUniqueName());
+ unchecked_result_keys.Set(trap_result->get(i), kPresent);
+ }
+ // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
+ for (int i = 0; i < nonconfigurable_keys_length; ++i) {
+ Object* key = target_nonconfigurable_keys->get(i);
+ // 17a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ int* found = unchecked_result_keys.Find(key);
+ if (found == nullptr || *found == kGone) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
+ return Nothing<bool>();
+ }
+ // 17b. Remove key from uncheckedResultKeys.
+ *found = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 18. If extensibleTarget is true, return trapResult.
+ if (extensible_target) {
+ return accumulator->AddKeysFromProxy(proxy, trap_result);
+ }
+ // 19. Repeat, for each key that is an element of targetConfigurableKeys:
+ for (int i = 0; i < target_configurable_keys->length(); ++i) {
+ Object* key = target_configurable_keys->get(i);
+ if (key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
+ // 19a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ int* found = unchecked_result_keys.Find(key);
+ if (found == nullptr || *found == kGone) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
+ return Nothing<bool>();
}
+ // 19b. Remove key from uncheckedResultKeys.
+ *found = kGone;
+ unchecked_result_keys_size--;
}
+ // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
+ if (unchecked_result_keys_size != 0) {
+ DCHECK_GT(unchecked_result_keys_size, 0);
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysNonExtensible));
+ return Nothing<bool>();
+ }
+ // 21. Return trapResult.
+ return accumulator->AddKeysFromProxy(proxy, trap_result);
+}
+
- Handle<FixedArray> keys = accumulator.GetKeys(getConversion);
+MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
+ KeyCollectionType type,
+ PropertyFilter filter,
+ GetKeysConversion keys_conversion) {
+ USE(ContainsOnlyValidKeys);
+ Isolate* isolate = object->GetIsolate();
+ KeyAccumulator accumulator(isolate, filter);
+ MAYBE_RETURN(
+ GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
+ MaybeHandle<FixedArray>());
+ Handle<FixedArray> keys = accumulator.GetKeys(keys_conversion);
DCHECK(ContainsOnlyValidKeys(keys));
return keys;
}
@@ -7942,7 +9175,8 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
if (!map->is_dictionary_map()) {
new_bit_field3 = IsUnstable::update(new_bit_field3, false);
}
- new_bit_field3 = Counter::update(new_bit_field3, kRetainingCounterStart);
+ new_bit_field3 =
+ ConstructionCounter::update(new_bit_field3, kNoSlackTracking);
result->set_bit_field3(new_bit_field3);
return result;
}
@@ -8042,9 +9276,17 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
int in_object_properties,
int unused_property_fields) {
#ifdef DEBUG
+ Isolate* isolate = map->GetIsolate();
+ // Strict and strong function maps have Function as a constructor but the
+ // Function's initial map is a sloppy function map. Same holds for
+ // GeneratorFunction and its initial map.
Object* constructor = map->GetConstructor();
DCHECK(constructor->IsJSFunction());
- DCHECK_EQ(*map, JSFunction::cast(constructor)->initial_map());
+ DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
+ *map == *isolate->strict_function_map() ||
+ *map == *isolate->strong_function_map() ||
+ *map == *isolate->strict_generator_function_map() ||
+ *map == *isolate->strong_generator_function_map());
#endif
// Initial maps must always own their descriptors and it's descriptor array
// does not contain descriptors that do not belong to the map.
@@ -8077,9 +9319,10 @@ Handle<Map> Map::CopyDropDescriptors(Handle<Map> map) {
Handle<Map> result = RawCopy(map, map->instance_size());
// Please note instance_type and instance_size are set when allocated.
- result->SetInObjectProperties(map->GetInObjectProperties());
- result->set_unused_property_fields(map->unused_property_fields());
-
+ if (map->IsJSObjectMap()) {
+ result->SetInObjectProperties(map->GetInObjectProperties());
+ result->set_unused_property_fields(map->unused_property_fields());
+ }
result->ClearCodeCache(map->GetHeap());
map->NotifyLeafMapLayoutChange();
return result;
@@ -8228,48 +9471,85 @@ Handle<Map> Map::CopyReplaceDescriptors(
}
-// Since this method is used to rewrite an existing transition tree, it can
-// always insert transitions without checking.
-Handle<Map> Map::CopyInstallDescriptors(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
+// Creates transition tree starting from |split_map| and adding all descriptors
+// starting from descriptor with index |split_map|.NumberOfOwnDescriptors().
+// The way how it is done is tricky because of GC and special descriptors
+// marking logic.
+Handle<Map> Map::AddMissingTransitions(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor) {
DCHECK(descriptors->IsSortedNoDuplicates());
+ int split_nof = split_map->NumberOfOwnDescriptors();
+ int nof_descriptors = descriptors->number_of_descriptors();
+ DCHECK_LT(split_nof, nof_descriptors);
+
+ // Start with creating last map which will own full descriptors array.
+ // This is necessary to guarantee that GC will mark the whole descriptor
+ // array if any of the allocations happening below fail.
+ // Number of unused properties is temporarily incorrect and the layout
+ // descriptor could unnecessarily be in slow mode but we will fix after
+ // all the other intermediate maps are created.
+ Handle<Map> last_map = CopyDropDescriptors(split_map);
+ last_map->InitializeDescriptors(*descriptors, *full_layout_descriptor);
+ last_map->set_unused_property_fields(0);
+
+ // During creation of intermediate maps we violate descriptors sharing
+ // invariant since the last map is not yet connected to the transition tree
+ // we create here. But it is safe because GC never trims map's descriptors
+ // if there are no dead transitions from that map and this is exactly the
+ // case for all the intermediate maps we create here.
+ Handle<Map> map = split_map;
+ for (int i = split_nof; i < nof_descriptors - 1; ++i) {
+ Handle<Map> new_map = CopyDropDescriptors(map);
+ InstallDescriptors(map, new_map, i, descriptors, full_layout_descriptor);
+ map = new_map;
+ }
+ map->NotifyLeafMapLayoutChange();
+ InstallDescriptors(map, last_map, nof_descriptors - 1, descriptors,
+ full_layout_descriptor);
+ return last_map;
+}
- Handle<Map> result = CopyDropDescriptors(map);
- result->set_instance_descriptors(*descriptors);
- result->SetNumberOfOwnDescriptors(new_descriptor + 1);
+// Since this method is used to rewrite an existing transition tree, it can
+// always insert transitions without checking.
+void Map::InstallDescriptors(Handle<Map> parent, Handle<Map> child,
+ int new_descriptor,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor) {
+ DCHECK(descriptors->IsSortedNoDuplicates());
- int unused_property_fields = map->unused_property_fields();
+ child->set_instance_descriptors(*descriptors);
+ child->SetNumberOfOwnDescriptors(new_descriptor + 1);
+
+ int unused_property_fields = parent->unused_property_fields();
PropertyDetails details = descriptors->GetDetails(new_descriptor);
if (details.location() == kField) {
- unused_property_fields = map->unused_property_fields() - 1;
+ unused_property_fields = parent->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
}
}
- result->set_unused_property_fields(unused_property_fields);
+ child->set_unused_property_fields(unused_property_fields);
if (FLAG_unbox_double_fields) {
Handle<LayoutDescriptor> layout_descriptor =
- LayoutDescriptor::AppendIfFastOrUseFull(map, details,
+ LayoutDescriptor::AppendIfFastOrUseFull(parent, details,
full_layout_descriptor);
- result->set_layout_descriptor(*layout_descriptor);
+ child->set_layout_descriptor(*layout_descriptor);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+ CHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
}
#else
- SLOW_DCHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+ SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
#endif
- result->set_visitor_id(Heap::GetStaticVisitorIdForMap(*result));
+ child->set_visitor_id(Heap::GetStaticVisitorIdForMap(*child));
}
Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
- ConnectTransition(map, result, name, SIMPLE_PROPERTY_TRANSITION);
-
- return result;
+ ConnectTransition(parent, child, name, SIMPLE_PROPERTY_TRANSITION);
}
@@ -8308,6 +9588,58 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
}
+Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
+ LanguageMode language_mode, FunctionKind kind) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ // Initial map for sloppy mode function is stored in the function
+ // constructor. Initial maps for strict and strong modes are cached as
+ // special transitions using |strict_function_transition_symbol| and
+ // |strong_function_transition_symbol| respectively as a key.
+ if (language_mode == SLOPPY) return initial_map;
+ Isolate* isolate = initial_map->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<Symbol> transition_symbol;
+
+ int map_index = Context::FunctionMapIndex(language_mode, kind);
+ Handle<Map> function_map(
+ Map::cast(isolate->native_context()->get(map_index)));
+
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ switch (language_mode) {
+ case STRICT:
+ transition_symbol = factory->strict_function_transition_symbol();
+ break;
+ case STRONG:
+ transition_symbol = factory->strong_function_transition_symbol();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ Map* maybe_transition =
+ TransitionArray::SearchSpecial(*initial_map, *transition_symbol);
+ if (maybe_transition != NULL) {
+ return handle(maybe_transition, isolate);
+ }
+ initial_map->NotifyLeafMapLayoutChange();
+
+ // Create new map taking descriptors from the |function_map| and all
+ // the other details from the |initial_map|.
+ Handle<Map> map =
+ Map::CopyInitialMap(function_map, initial_map->instance_size(),
+ initial_map->GetInObjectProperties(),
+ initial_map->unused_property_fields());
+ map->SetConstructor(initial_map->GetConstructor());
+ map->set_prototype(initial_map->prototype());
+
+ if (TransitionArray::CanHaveMoreTransitions(initial_map)) {
+ Map::ConnectTransition(initial_map, map, transition_symbol,
+ SPECIAL_TRANSITION);
+ }
+ return map;
+}
+
+
Handle<Map> Map::CopyForObserved(Handle<Map> map) {
DCHECK(!map->is_observed());
@@ -8427,25 +9759,6 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
}
-Handle<Map> Map::FixProxy(Handle<Map> map, InstanceType type, int size) {
- DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
- DCHECK(map->IsJSProxyMap());
-
- Isolate* isolate = map->GetIsolate();
-
- // Allocate fresh map.
- // TODO(rossberg): Once we optimize proxies, cache these maps.
- Handle<Map> new_map = isolate->factory()->NewMap(type, size);
-
- Handle<Object> prototype(map->prototype(), isolate);
- Map::SetPrototype(new_map, prototype);
-
- map->NotifyLeafMapLayoutChange();
-
- return new_map;
-}
-
-
bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
PropertyDetails details = GetDetails(descriptor);
switch (details.type()) {
@@ -8726,7 +10039,6 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
Handle<DescriptorArray> descriptors =
DescriptorArray::Allocate(desc->GetIsolate(), size, slack);
- DescriptorArray::WhitenessWitness witness(*descriptors);
if (attributes != NONE) {
for (int i = 0; i < size; ++i) {
@@ -8734,7 +10046,7 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
Name* key = desc->GetKey(i);
PropertyDetails details = desc->GetDetails(i);
// Bulk attribute changes never affect private properties.
- if (!key->IsSymbol() || !Symbol::cast(key)->is_private()) {
+ if (!key->IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
if (details.type() != ACCESSOR_CONSTANT || !value->IsAccessorPair()) {
@@ -8745,11 +10057,11 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
}
Descriptor inner_desc(
handle(key), handle(value, desc->GetIsolate()), details);
- descriptors->Set(i, &inner_desc, witness);
+ descriptors->SetDescriptor(i, &inner_desc);
}
} else {
for (int i = 0; i < size; ++i) {
- descriptors->CopyFrom(i, *desc, witness);
+ descriptors->CopyFrom(i, *desc);
}
}
@@ -8759,6 +10071,22 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
}
+bool DescriptorArray::IsEqualUpTo(DescriptorArray* desc, int nof_descriptors) {
+ for (int i = 0; i < nof_descriptors; i++) {
+ if (GetKey(i) != desc->GetKey(i) || GetValue(i) != desc->GetValue(i)) {
+ return false;
+ }
+ PropertyDetails details = GetDetails(i);
+ PropertyDetails other_details = desc->GetDetails(i);
+ if (details.type() != other_details.type() ||
+ !details.representation().Equals(other_details.representation())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor,
@@ -9440,6 +10768,12 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
}
+bool ArrayList::IsFull() {
+ int capacity = length();
+ return kFirstIndex + Length() == capacity;
+}
+
+
Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
int capacity = array->length();
bool empty = (capacity == 0);
@@ -9465,7 +10799,7 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int size = number_of_descriptors + slack;
if (size == 0) return factory->empty_descriptor_array();
// Allocate the array of keys.
- Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size));
+ Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size), TENURED);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
result->set(kEnumCacheIndex, Smi::FromInt(0));
@@ -9508,21 +10842,16 @@ void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
}
-void DescriptorArray::CopyFrom(int index, DescriptorArray* src,
- const WhitenessWitness& witness) {
+void DescriptorArray::CopyFrom(int index, DescriptorArray* src) {
Object* value = src->GetValue(index);
PropertyDetails details = src->GetDetails(index);
Descriptor desc(handle(src->GetKey(index)),
handle(value, src->GetIsolate()),
details);
- Set(index, &desc, witness);
+ SetDescriptor(index, &desc);
}
-// We need the whiteness witness since sort will reshuffle the entries in the
-// descriptor array. If the descriptor array were to be black, the shuffling
-// would move a slot that was already recorded as pointing into an evacuation
-// candidate. This would result in missing updates upon evacuation.
void DescriptorArray::Sort() {
// In-place heap sort.
int len = number_of_descriptors();
@@ -9628,47 +10957,6 @@ Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
}
-// static
-Handle<BindingsArray> BindingsArray::New(Isolate* isolate,
- Handle<TypeFeedbackVector> vector,
- Handle<JSReceiver> bound_function,
- Handle<Object> bound_this,
- int number_of_bindings) {
- Handle<FixedArray> bindings = isolate->factory()->NewFixedArray(
- number_of_bindings + kFirstBindingIndex);
- Handle<BindingsArray> casted_bindings = Handle<BindingsArray>::cast(bindings);
- casted_bindings->set_feedback_vector(*vector);
- casted_bindings->set_bound_function(*bound_function);
- casted_bindings->set_bound_this(*bound_this);
- return casted_bindings;
-}
-
-
-// static
-Handle<JSArray> BindingsArray::CreateBoundArguments(
- Handle<BindingsArray> bindings) {
- int bound_argument_count = bindings->bindings_count();
- Factory* factory = bindings->GetIsolate()->factory();
- Handle<FixedArray> arguments = factory->NewFixedArray(bound_argument_count);
- bindings->CopyTo(kFirstBindingIndex, *arguments, 0, bound_argument_count);
- return factory->NewJSArrayWithElements(arguments);
-}
-
-
-// static
-Handle<JSArray> BindingsArray::CreateRuntimeBindings(
- Handle<BindingsArray> bindings) {
- Factory* factory = bindings->GetIsolate()->factory();
- // A runtime bindings array consists of
- // [bound function, bound this, [arg0, arg1, ...]].
- Handle<FixedArray> runtime_bindings =
- factory->NewFixedArray(2 + bindings->bindings_count());
- bindings->CopyTo(kBoundFunctionIndex, *runtime_bindings, 0,
- 2 + bindings->bindings_count());
- return factory->NewJSArrayWithElements(runtime_bindings);
-}
-
-
int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out,
CatchPrediction* prediction_out) {
int innermost_handler = -1, innermost_start = -1;
@@ -10336,8 +11624,10 @@ static void CalculateLineEndsImpl(Isolate* isolate,
if (src_len > 0 && cache->IsLineTerminatorSequence(src[src_len - 1], 0)) {
line_ends->Add(src_len - 1);
- } else if (include_ending_line) {
- // Even if the last line misses a line end, it is counted.
+ }
+ if (include_ending_line) {
+ // Include one character beyond the end of script. The rewriter uses that
+ // position for the implicit return statement.
line_ends->Add(src_len);
}
}
@@ -10933,12 +12223,6 @@ void String::PrintOn(FILE* file) {
}
-inline static uint32_t ObjectAddressForHashing(Object* object) {
- uint32_t value = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object));
- return value & MemoryChunk::kAlignmentMask;
-}
-
-
int Map::Hash() {
// For performance reasons we only hash the 3 most variable fields of a map:
// constructor, prototype and bit_field2. For predictability reasons we
@@ -10973,7 +12257,15 @@ bool CheckEquivalent(Map* first, Map* second) {
bool Map::EquivalentToForTransition(Map* other) {
- return CheckEquivalent(this, other);
+ if (!CheckEquivalent(this, other)) return false;
+ if (instance_type() == JS_FUNCTION_TYPE) {
+ // JSFunctions require more checks to ensure that sloppy function is
+ // not equvalent to strict function.
+ int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
+ return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
+ nof);
+ }
+ return true;
}
@@ -11054,14 +12346,19 @@ void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Isolate* isolate = shared->GetIsolate();
if (isolate->serializer_enabled()) return;
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- Handle<Object> value(shared->optimized_code_map(), isolate);
- if (value->IsSmi()) return; // Empty code maps are unsupported.
- Handle<FixedArray> code_map = Handle<FixedArray>::cast(value);
- code_map->set(kSharedCodeIndex, *code);
+ // Empty code maps are unsupported.
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ Handle<WeakCell> cell = isolate->factory()->NewWeakCell(code);
+ // A collection may have occured and cleared the optimized code map in the
+ // allocation above.
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ shared->optimized_code_map()->set(kSharedCodeIndex, *cell);
+ }
+ }
}
-void SharedFunctionInfo::AddToOptimizedCodeMap(
+void SharedFunctionInfo::AddToOptimizedCodeMapInternal(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
Handle<HeapObject> code, Handle<LiteralsArray> literals,
BailoutId osr_ast_id) {
@@ -11074,86 +12371,110 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 4);
Handle<FixedArray> new_code_map;
- Handle<Object> value(shared->optimized_code_map(), isolate);
int entry;
- if (value->IsSmi()) {
- // No optimized code map.
- DCHECK_EQ(0, Smi::cast(*value)->value());
+
+ if (shared->OptimizedCodeMapIsCleared()) {
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
+ new_code_map->set(kSharedCodeIndex, *isolate->factory()->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
entry = kEntriesStart;
} else {
- Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
+ Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
if (entry > kSharedCodeIndex) {
- // Found an existing context-specific entry, it must not contain any code.
- DCHECK_EQ(isolate->heap()->undefined_value(),
- old_code_map->get(entry + kCachedCodeOffset));
+ // Found an existing context-specific entry. If the user provided valid
+ // code, it must not contain any code.
+ DCHECK(code->IsUndefined() ||
+ WeakCell::cast(old_code_map->get(entry + kCachedCodeOffset))
+ ->cleared());
+
// Just set the code and literals to the entry.
- old_code_map->set(entry + kCachedCodeOffset, *code);
- old_code_map->set(entry + kLiteralsOffset, *literals);
+ if (!code->IsUndefined()) {
+ Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+ old_code_map->set(entry + kCachedCodeOffset, *code_cell);
+ }
+ Handle<WeakCell> literals_cell =
+ isolate->factory()->NewWeakCell(literals);
+ old_code_map->set(entry + kLiteralsOffset, *literals_cell);
return;
}
- // Copy old optimized code map and append one new entry.
- new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
- old_code_map, kEntryLength, TENURED);
- // TODO(mstarzinger): Temporary workaround. The allocation above might have
- // flushed the optimized code map and the copy we created is full of holes.
- // For now we just give up on adding the entry and pretend it got flushed.
- if (shared->optimized_code_map()->IsSmi()) return;
- entry = old_code_map->length();
+ // Can we reuse an entry?
+ DCHECK(entry < kEntriesStart);
+ int length = old_code_map->length();
+ for (int i = kEntriesStart; i < length; i += kEntryLength) {
+ if (WeakCell::cast(old_code_map->get(i + kContextOffset))->cleared()) {
+ new_code_map = old_code_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < kEntriesStart) {
+ // Copy old optimized code map and append one new entry.
+ new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
+ old_code_map, kEntryLength, TENURED);
+ // TODO(mstarzinger): Temporary workaround. The allocation above might
+ // have flushed the optimized code map and the copy we created is full of
+ // holes. For now we just give up on adding the entry and pretend it got
+ // flushed.
+ if (shared->OptimizedCodeMapIsCleared()) return;
+ entry = old_code_map->length();
+ }
}
- new_code_map->set(entry + kContextOffset, *native_context);
- new_code_map->set(entry + kCachedCodeOffset, *code);
- new_code_map->set(entry + kLiteralsOffset, *literals);
+
+ Handle<WeakCell> code_cell = code->IsUndefined()
+ ? isolate->factory()->empty_weak_cell()
+ : isolate->factory()->NewWeakCell(code);
+ Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ WeakCell* context_cell = native_context->self_weak_cell();
+
+ new_code_map->set(entry + kContextOffset, context_cell);
+ new_code_map->set(entry + kCachedCodeOffset, *code_cell);
+ new_code_map->set(entry + kLiteralsOffset, *literals_cell);
new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
- DCHECK(new_code_map->get(i + kContextOffset)->IsNativeContext());
- Object* code = new_code_map->get(i + kCachedCodeOffset);
- if (code != isolate->heap()->undefined_value()) {
- DCHECK(code->IsCode());
- DCHECK(Code::cast(code)->kind() == Code::OPTIMIZED_FUNCTION);
- }
- DCHECK(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+ WeakCell* cell = WeakCell::cast(new_code_map->get(i + kContextOffset));
+ DCHECK(cell->cleared() || cell->value()->IsNativeContext());
+ cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
+ DCHECK(cell->cleared() ||
+ (cell->value()->IsCode() &&
+ Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
+ cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
+ DCHECK(cell->cleared() || cell->value()->IsFixedArray());
DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
- // Zap any old optimized code map.
- if (!shared->optimized_code_map()->IsSmi()) {
- FixedArray* old_code_map = FixedArray::cast(shared->optimized_code_map());
- old_code_map->FillWithHoles(0, old_code_map->length());
+ FixedArray* old_code_map = shared->optimized_code_map();
+ if (old_code_map != *new_code_map) {
+ shared->set_optimized_code_map(*new_code_map);
}
-
- shared->set_optimized_code_map(*new_code_map);
}
void SharedFunctionInfo::ClearOptimizedCodeMap() {
- // Zap any old optimized code map.
- if (!optimized_code_map()->IsSmi()) {
- FixedArray* old_code_map = FixedArray::cast(optimized_code_map());
- old_code_map->FillWithHoles(0, old_code_map->length());
- }
-
- set_optimized_code_map(Smi::FromInt(0));
+ FixedArray* cleared_map = GetHeap()->cleared_optimized_code_map();
+ set_optimized_code_map(cleared_map, SKIP_WRITE_BARRIER);
}
void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
const char* reason) {
DisallowHeapAllocation no_gc;
- if (optimized_code_map()->IsSmi()) return;
+ if (OptimizedCodeMapIsCleared()) return;
Heap* heap = GetHeap();
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ FixedArray* code_map = optimized_code_map();
int dst = kEntriesStart;
int length = code_map->length();
for (int src = kEntriesStart; src < length; src += kEntryLength) {
- DCHECK(code_map->get(src)->IsNativeContext());
- if (code_map->get(src + kCachedCodeOffset) == optimized_code) {
+ DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
+ WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
+ if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
+ optimized_code) {
BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
@@ -11170,7 +12491,8 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
// In case of non-OSR entry just clear the code in order to proceed
// sharing literals.
- code_map->set_undefined(src + kCachedCodeOffset);
+ code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
}
// Keep the src entry by copying it to the dst entry.
@@ -11185,9 +12507,11 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
dst += kEntryLength;
}
- if (code_map->get(kSharedCodeIndex) == optimized_code) {
+ if (WeakCell::cast(code_map->get(kSharedCodeIndex))->value() ==
+ optimized_code) {
// Evict context-independent code as well.
- code_map->set_undefined(kSharedCodeIndex);
+ code_map->set(kSharedCodeIndex, heap->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
@@ -11199,7 +12523,7 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
length - dst);
if (code_map->length() == kEntriesStart &&
- code_map->get(kSharedCodeIndex)->IsUndefined()) {
+ WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
ClearOptimizedCodeMap();
}
}
@@ -11207,14 +12531,14 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ FixedArray* code_map = optimized_code_map();
DCHECK(shrink_by % kEntryLength == 0);
DCHECK(shrink_by <= code_map->length() - kEntriesStart);
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
shrink_by);
if (code_map->length() == kEntriesStart &&
- code_map->get(kSharedCodeIndex)->IsUndefined()) {
+ WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
ClearOptimizedCodeMap();
}
}
@@ -11239,18 +12563,11 @@ static void ShrinkInstanceSize(Map* map, void* data) {
}
-void JSFunction::CompleteInobjectSlackTracking() {
- DCHECK(has_initial_map());
- initial_map()->CompleteInobjectSlackTracking();
-}
-
-
void Map::CompleteInobjectSlackTracking() {
// Has to be an initial map.
DCHECK(GetBackPointer()->IsUndefined());
- DCHECK_GE(counter(), kSlackTrackingCounterEnd - 1);
- set_counter(kRetainingCounterStart);
+ set_construction_counter(kNoSlackTracking);
int slack = unused_property_fields();
TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
@@ -11284,7 +12601,6 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
PrototypeOptimizationMode mode) {
if (object->IsJSGlobalObject()) return;
- if (object->IsJSGlobalProxy()) return;
if (mode == FAST_PROTOTYPE && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
@@ -11310,13 +12626,9 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
if (!constructor->shared()->IsApiFunction() &&
object->class_name() == isolate->heap()->Object_string()) {
- Handle<String> constructor_name(object->constructor_name(), isolate);
Context* context = constructor->context()->native_context();
JSFunction* object_function = context->object_function();
object->map()->SetConstructor(object_function);
- Handle<PrototypeInfo> proto_info =
- Map::GetOrCreatePrototypeInfo(object, isolate);
- proto_info->set_constructor_name(*constructor_name);
}
}
}
@@ -11346,7 +12658,6 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
break;
}
Handle<Object> maybe_proto = PrototypeIterator::GetCurrent(iter);
- if (maybe_proto->IsJSGlobalProxy()) continue;
// Proxies on the prototype chain are not supported. They make it
// impossible to make any assumptions about the prototype chain anyway.
if (maybe_proto->IsJSProxy()) return;
@@ -11381,17 +12692,18 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(user->is_prototype_map());
// If it doesn't have a PrototypeInfo, it was never registered.
if (!user->prototype_info()->IsPrototypeInfo()) return false;
- // If it doesn't have a prototype, it can't be registered.
- if (!user->prototype()->IsJSObject()) return false;
+ // If it had no prototype before, see if it had users that might expect
+ // registration.
+ if (!user->prototype()->IsJSObject()) {
+ Object* users =
+ PrototypeInfo::cast(user->prototype_info())->prototype_users();
+ return users->IsWeakFixedArray();
+ }
Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
Handle<PrototypeInfo> user_info =
Map::GetOrCreatePrototypeInfo(user, isolate);
int slot = user_info->registry_slot();
if (slot == PrototypeInfo::UNREGISTERED) return false;
- if (prototype->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, prototype);
- prototype = PrototypeIterator::GetCurrent<JSObject>(iter);
- }
DCHECK(prototype->map()->is_prototype_map());
Object* maybe_proto_info = prototype->map()->prototype_info();
// User knows its registry slot, prototype info and user registry must exist.
@@ -11440,10 +12752,6 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
void JSObject::InvalidatePrototypeChains(Map* map) {
if (!FLAG_eliminate_prototype_chain_checks) return;
DisallowHeapAllocation no_gc;
- if (map->IsJSGlobalProxyMap()) {
- PrototypeIterator iter(map);
- map = iter.GetCurrent<JSObject>()->map();
- }
InvalidatePrototypeChainsInternal(map);
}
@@ -11480,10 +12788,6 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Handle<Object> maybe_prototype(map->prototype(), isolate);
if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
- if (prototype->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, prototype);
- prototype = PrototypeIterator::GetCurrent<JSObject>(iter);
- }
// Ensure the prototype is registered with its own prototypes so its cell
// will be invalidated when necessary.
JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
@@ -11523,33 +12827,26 @@ Handle<Object> CacheInitialJSArrayMaps(
Handle<Context> native_context, Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
// the appropriate transitioned elements kind maps.
- Factory* factory = native_context->GetIsolate()->factory();
- Handle<FixedArray> maps = factory->NewFixedArrayWithHoles(
- kElementsKindCount, TENURED);
-
+ Strength strength =
+ initial_map->is_strong() ? Strength::STRONG : Strength::WEAK;
Handle<Map> current_map = initial_map;
ElementsKind kind = current_map->elements_kind();
- DCHECK(kind == GetInitialFastElementsKind());
- maps->set(kind, *current_map);
+ DCHECK_EQ(GetInitialFastElementsKind(), kind);
+ native_context->set(Context::ArrayMapIndex(kind, strength), *current_map);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- Map* maybe_elements_transition = current_map->ElementsTransitionMap();
- if (maybe_elements_transition != NULL) {
+ if (Map* maybe_elements_transition = current_map->ElementsTransitionMap()) {
new_map = handle(maybe_elements_transition);
- DCHECK(new_map->elements_kind() == next_kind);
} else {
new_map = Map::CopyAsElementsKind(
current_map, next_kind, INSERT_TRANSITION);
}
- maps->set(next_kind, *new_map);
+ DCHECK_EQ(next_kind, new_map->elements_kind());
+ native_context->set(Context::ArrayMapIndex(next_kind, strength), *new_map);
current_map = new_map;
}
- if (initial_map->is_strong())
- native_context->set_js_array_strong_maps(*maps);
- else
- native_context->set_js_array_maps(*maps);
return initial_map;
}
@@ -11567,9 +12864,7 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
// copy containing the new prototype. Also complete any in-object
// slack tracking that is in progress at this point because it is
// still tracking the old copy.
- if (function->IsInobjectSlackTrackingInProgress()) {
- function->CompleteInobjectSlackTracking();
- }
+ function->CompleteInobjectSlackTrackingIfActive();
Handle<Map> initial_map(function->initial_map(), isolate);
@@ -11689,10 +12984,85 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
}
+#ifdef DEBUG
+namespace {
+
+bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
+ switch (instance_type) {
+ case JS_OBJECT_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_DATE_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_FUNCTION_TYPE:
+ return true;
+
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_PROXY_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case FIXED_ARRAY_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ case ODDBALL_TYPE:
+ case FOREIGN_TYPE:
+ case MAP_TYPE:
+ case CODE_TYPE:
+ case CELL_TYPE:
+ case PROPERTY_CELL_TYPE:
+ case WEAK_CELL_TYPE:
+ case SYMBOL_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case FILLER_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE:
+#undef TYPED_ARRAY_CASE
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ // We must not end up here for these instance types at all.
+ UNREACHABLE();
+ // Fall through.
+ default:
+ return false;
+ }
+}
+
+} // namespace
+#endif
+
+
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
+ DCHECK(function->IsConstructor() || function->shared()->is_generator());
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
+ // The constructor should be compiled for the optimization hints to be
+ // available.
+ Compiler::Compile(function, CLEAR_EXCEPTION);
+
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
@@ -11725,78 +13095,109 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// Finally link initial map and constructor function.
DCHECK(prototype->IsJSReceiver());
JSFunction::SetInitialMap(function, map, prototype);
-
- if (!function->shared()->is_generator()) {
- function->StartInobjectSlackTracking();
- }
+ map->StartInobjectSlackTracking();
}
-Handle<Map> JSFunction::EnsureDerivedHasInitialMap(
- Handle<JSFunction> original_constructor, Handle<JSFunction> constructor) {
- DCHECK(constructor->has_initial_map());
- Isolate* isolate = constructor->GetIsolate();
- Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
- if (*original_constructor == *constructor) return constructor_initial_map;
- if (original_constructor->has_initial_map()) {
- // Check that |original_constructor|'s initial map still in sync with
- // the |constructor|, otherwise we must create a new initial map for
- // |original_constructor|.
- if (original_constructor->initial_map()->GetConstructor() == *constructor) {
- return handle(original_constructor->initial_map(), isolate);
- }
- }
-
- // First create a new map with the size and number of in-object properties
- // suggested by the function.
- DCHECK(!original_constructor->shared()->is_generator());
- DCHECK(!constructor->shared()->is_generator());
+// static
+MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target) {
+ EnsureHasInitialMap(constructor);
- // Fetch or allocate prototype.
+ Handle<Map> constructor_initial_map(constructor->initial_map(), isolate);
+ if (*new_target == *constructor) return constructor_initial_map;
+
+ // Fast case, new.target is a subclass of constructor. The map is cacheable
+ // (and may already have been cached). new.target.prototype is guaranteed to
+ // be a JSReceiver.
+ if (new_target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
+
+ // Check that |function|'s initial map still in sync with the |constructor|,
+ // otherwise we must create a new initial map for |function|.
+ if (function->has_initial_map() &&
+ function->initial_map()->GetConstructor() == *constructor) {
+ return handle(function->initial_map(), isolate);
+ }
+
+ // Create a new map with the size and number of in-object properties
+ // suggested by |function|.
+
+ // Link initial map and constructor function if the new.target is actually a
+ // subclass constructor.
+ if (IsSubclassConstructor(function->shared()->kind())) {
+ Handle<Object> prototype(function->instance_prototype(), isolate);
+ InstanceType instance_type = constructor_initial_map->instance_type();
+ DCHECK(CanSubclassHaveInobjectProperties(instance_type));
+ int internal_fields =
+ JSObject::GetInternalFieldCount(*constructor_initial_map);
+ int pre_allocated = constructor_initial_map->GetInObjectProperties() -
+ constructor_initial_map->unused_property_fields();
+ int instance_size;
+ int in_object_properties;
+ function->CalculateInstanceSizeForDerivedClass(
+ instance_type, internal_fields, &instance_size,
+ &in_object_properties);
+
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map =
+ Map::CopyInitialMap(constructor_initial_map, instance_size,
+ in_object_properties, unused_property_fields);
+ map->set_new_target_is_base(false);
+
+ JSFunction::SetInitialMap(function, map, prototype);
+ map->SetConstructor(*constructor);
+ map->StartInobjectSlackTracking();
+ return map;
+ }
+ }
+
+ // Slow path, new.target is either a proxy or can't cache the map.
+ // new.target.prototype is not guaranteed to be a JSReceiver, and may need to
+ // fall back to the intrinsicDefaultProto.
Handle<Object> prototype;
- if (original_constructor->has_instance_prototype()) {
- prototype = handle(original_constructor->instance_prototype(), isolate);
+ if (new_target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(new_target);
+ // Make sure the new.target.prototype is cached.
+ EnsureHasInitialMap(function);
+ prototype = handle(function->prototype(), isolate);
} else {
- prototype = isolate->factory()->NewFunctionPrototype(original_constructor);
- }
-
- // Finally link initial map and constructor function if the original
- // constructor is actually a subclass constructor.
- if (IsSubclassConstructor(original_constructor->shared()->kind())) {
-// TODO(ishell): v8:4531, allow ES6 built-ins subclasses to have
-// in-object properties.
-#if 0
- InstanceType instance_type = constructor_initial_map->instance_type();
- int internal_fields =
- JSObject::GetInternalFieldCount(*constructor_initial_map);
- int pre_allocated = constructor_initial_map->GetInObjectProperties() -
- constructor_initial_map->unused_property_fields();
- int instance_size;
- int in_object_properties;
- original_constructor->CalculateInstanceSizeForDerivedClass(
- instance_type, internal_fields, &instance_size, &in_object_properties);
-
- int unused_property_fields = in_object_properties - pre_allocated;
- Handle<Map> map =
- Map::CopyInitialMap(constructor_initial_map, instance_size,
- in_object_properties, unused_property_fields);
-#endif
- Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
-
- JSFunction::SetInitialMap(original_constructor, map, prototype);
- map->SetConstructor(*constructor);
- original_constructor->StartInobjectSlackTracking();
- return map;
-
- } else {
- Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
- DCHECK(prototype->IsJSReceiver());
- if (map->prototype() != *prototype) {
- Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
- }
- map->SetConstructor(*constructor);
- return map;
+ Handle<String> prototype_string = isolate->factory()->prototype_string();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ JSReceiver::GetProperty(new_target, prototype_string), Map);
+ // The above prototype lookup might change the constructor and its
+ // prototype, hence we have to reload the initial map.
+ EnsureHasInitialMap(constructor);
+ constructor_initial_map = handle(constructor->initial_map(), isolate);
+ }
+
+ // If prototype is not a JSReceiver, fetch the intrinsicDefaultProto from the
+ // correct realm. Rather than directly fetching the .prototype, we fetch the
+ // constructor that points to the .prototype. This relies on
+ // constructor.prototype being FROZEN for those constructors.
+ if (!prototype->IsJSReceiver()) {
+ Handle<Context> context;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, context,
+ JSReceiver::GetFunctionRealm(new_target), Map);
+ DCHECK(context->IsNativeContext());
+ Handle<Object> maybe_index = JSReceiver::GetDataProperty(
+ constructor, isolate->factory()->native_context_index_symbol());
+ int index = maybe_index->IsSmi() ? Smi::cast(*maybe_index)->value()
+ : Context::OBJECT_FUNCTION_INDEX;
+ Handle<JSFunction> realm_constructor(JSFunction::cast(context->get(index)));
+ prototype = handle(realm_constructor->prototype(), isolate);
+ }
+
+ Handle<Map> map = Map::CopyInitialMap(constructor_initial_map);
+ map->set_new_target_is_base(false);
+ DCHECK(prototype->IsJSReceiver());
+ if (map->prototype() != *prototype) {
+ Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
}
+ map->SetConstructor(*constructor);
+ return map;
}
@@ -11843,7 +13244,7 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
}
-Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
Handle<Object> name =
JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
@@ -11852,6 +13253,94 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
}
+Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<Object> name = JSReceiver::GetDataProperty(
+ function, isolate->factory()->display_name_string());
+ if (name->IsString()) return Handle<String>::cast(name);
+ return JSFunction::GetName(function);
+}
+
+
+namespace {
+
+char const kNativeCodeSource[] = "function () { [native code] }";
+
+
+Handle<String> NativeCodeFunctionSourceString(
+ Handle<SharedFunctionInfo> shared_info) {
+ Isolate* const isolate = shared_info->GetIsolate();
+ if (shared_info->name()->IsString()) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(handle(String::cast(shared_info->name()), isolate));
+ builder.AppendCString("() { [native code] }");
+ return builder.Finish().ToHandleChecked();
+ }
+ return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+}
+
+} // namespace
+
+
+// static
+Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
+}
+
+
+// static
+Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
+ Isolate* const isolate = function->GetIsolate();
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+
+ // Check if {function} should hide its source code.
+ if (!shared_info->script()->IsScript() ||
+ Script::cast(shared_info->script())->hide_source()) {
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+
+ // Check if we should print {function} as a class.
+ Handle<Object> class_start_position = JSReceiver::GetDataProperty(
+ function, isolate->factory()->class_start_position_symbol());
+ if (class_start_position->IsSmi()) {
+ Handle<Object> class_end_position = JSReceiver::GetDataProperty(
+ function, isolate->factory()->class_end_position_symbol());
+ Handle<String> script_source(
+ String::cast(Script::cast(shared_info->script())->source()), isolate);
+ return isolate->factory()->NewSubString(
+ script_source, Handle<Smi>::cast(class_start_position)->value(),
+ Handle<Smi>::cast(class_end_position)->value());
+ }
+
+ // Check if we have source code for the {function}.
+ if (!shared_info->HasSourceCode()) {
+ return NativeCodeFunctionSourceString(shared_info);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ if (!shared_info->is_arrow()) {
+ if (shared_info->is_concise_method()) {
+ if (shared_info->is_generator()) builder.AppendCharacter('*');
+ } else {
+ if (shared_info->is_generator()) {
+ builder.AppendCString("function* ");
+ } else {
+ builder.AppendCString("function ");
+ }
+ }
+ if (shared_info->name_should_print_as_anonymous()) {
+ builder.AppendCString("anonymous");
+ } else {
+ builder.AppendString(handle(String::cast(shared_info->name()), isolate));
+ }
+ }
+ builder.AppendString(Handle<String>::cast(shared_info->GetSourceCode()));
+ return builder.Finish().ToHandleChecked();
+}
+
+
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -12295,6 +13784,10 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_dont_crankshaft(lit->flags() &
AstProperties::kDontCrankshaft);
shared_info->set_kind(lit->kind());
+ if (!IsConstructable(lit->kind(), lit->language_mode())) {
+ shared_info->set_construct_stub(
+ *shared_info->GetIsolate()->builtins()->ConstructedNonConstructable());
+ }
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
}
@@ -12311,18 +13804,16 @@ bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
}
-void JSFunction::StartInobjectSlackTracking() {
- DCHECK(has_initial_map() && !IsInobjectSlackTrackingInProgress());
-
- Map* map = initial_map();
+void Map::StartInobjectSlackTracking() {
+ DCHECK(!IsInobjectSlackTrackingInProgress());
// No tracking during the snapshot construction phase.
Isolate* isolate = GetIsolate();
if (isolate->serializer_enabled()) return;
- if (map->unused_property_fields() == 0) return;
+ if (unused_property_fields() == 0) return;
- map->set_counter(Map::kSlackTrackingCounterStart);
+ set_construction_counter(Map::kSlackTrackingCounterStart);
}
@@ -12349,18 +13840,19 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id) {
DisallowHeapAllocation no_gc;
DCHECK(native_context->IsNativeContext());
- Object* value = optimized_code_map();
- if (!value->IsSmi()) {
- FixedArray* optimized_code_map = FixedArray::cast(value);
+ if (!OptimizedCodeMapIsCleared()) {
+ FixedArray* optimized_code_map = this->optimized_code_map();
int length = optimized_code_map->length();
Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (optimized_code_map->get(i + kContextOffset) == native_context &&
+ if (WeakCell::cast(optimized_code_map->get(i + kContextOffset))
+ ->value() == native_context &&
optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
return i;
}
}
- Object* shared_code = optimized_code_map->get(kSharedCodeIndex);
+ Object* shared_code =
+ WeakCell::cast(optimized_code_map->get(kSharedCodeIndex))->value();
if (shared_code->IsCode() && osr_ast_id.IsNone()) {
return kSharedCodeIndex;
}
@@ -12374,18 +13866,27 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
CodeAndLiterals result = {nullptr, nullptr};
int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
if (entry != kNotFound) {
- FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ FixedArray* code_map = optimized_code_map();
if (entry == kSharedCodeIndex) {
- result = {Code::cast(code_map->get(kSharedCodeIndex)), nullptr};
-
+ // We know the weak cell isn't cleared because we made sure of it in
+ // SearchOptimizedCodeMapEntry and performed no allocations since that
+ // call.
+ result = {
+ Code::cast(WeakCell::cast(code_map->get(kSharedCodeIndex))->value()),
+ nullptr};
} else {
DCHECK_LE(entry + kEntryLength, code_map->length());
- Object* code = code_map->get(entry + kCachedCodeOffset);
- result = {code->IsUndefined() ? nullptr : Code::cast(code),
- LiteralsArray::cast(code_map->get(entry + kLiteralsOffset))};
+ WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
+ WeakCell* literals_cell =
+ WeakCell::cast(code_map->get(entry + kLiteralsOffset));
+
+ result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
+ literals_cell->cleared()
+ ? nullptr
+ : LiteralsArray::cast(literals_cell->value())};
}
}
- if (FLAG_trace_opt && !optimized_code_map()->IsSmi() &&
+ if (FLAG_trace_opt && !OptimizedCodeMapIsCleared() &&
result.code == nullptr) {
PrintF("[didn't find optimized code in optimized code map for ");
ShortPrint();
@@ -12784,7 +14285,6 @@ void Code::ClearInlineCaches(Code::Kind kind) {
void Code::ClearInlineCaches(Code::Kind* kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -13122,6 +14622,17 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
+ case Translation::INTERPRETED_FRAME: {
+ int bytecode_offset = iterator.Next();
+ int shared_info_id = iterator.Next();
+ unsigned height = iterator.Next();
+ Object* shared_info = LiteralArray()->get(shared_info_id);
+ os << "{bytecode_offset=" << bytecode_offset << ", function="
+ << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << ", height=" << height << "}";
+ break;
+ }
+
case Translation::JS_FRAME_FUNCTION: {
os << "{function}";
break;
@@ -13217,8 +14728,10 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
}
case Translation::LITERAL: {
- unsigned literal_index = iterator.Next();
- os << "{literal_id=" << literal_index << "}";
+ int literal_index = iterator.Next();
+ Object* literal_value = LiteralArray()->get(literal_index);
+ os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
+ << ")}";
break;
}
@@ -13494,15 +15007,24 @@ void BytecodeArray::Disassemble(std::ostream& os) {
SNPrintF(buf, "%p", bytecode_start);
os << buf.start() << " : ";
interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
- if (interpreter::Bytecodes::IsJump(bytecode)) {
- int offset = static_cast<int8_t>(bytecode_start[1]);
+
+ if (interpreter::Bytecodes::IsJumpConstantWide(bytecode)) {
+ DCHECK_EQ(bytecode_size, 3);
+ int index = static_cast<int>(ReadUnalignedUInt16(bytecode_start + 1));
+ int offset = Smi::cast(constant_pool()->get(index))->value();
SNPrintF(buf, " (%p)", bytecode_start + offset);
os << buf.start();
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+ DCHECK_EQ(bytecode_size, 2);
int index = static_cast<int>(bytecode_start[1]);
int offset = Smi::cast(constant_pool()->get(index))->value();
SNPrintF(buf, " (%p)", bytecode_start + offset);
os << buf.start();
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+ DCHECK_EQ(bytecode_size, 2);
+ int offset = static_cast<int8_t>(bytecode_start[1]);
+ SNPrintF(buf, " (%p)", bytecode_start + offset);
+ os << buf.start();
}
os << "\n";
}
@@ -13566,8 +15088,7 @@ MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
uint32_t old_length = 0;
CHECK(old_length_handle->ToArrayLength(&old_length));
- static const PropertyAttributes kNoAttrFilter = NONE;
- int num_elements = array->NumberOfOwnElements(kNoAttrFilter);
+ int num_elements = array->NumberOfOwnElements(ALL_PROPERTIES);
if (num_elements > 0) {
if (old_length == static_cast<uint32_t>(num_elements)) {
// Simple case for arrays without holes.
@@ -13579,7 +15100,7 @@ MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
// TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
// the to-be-removed indices twice.
Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- array->GetOwnElementKeys(*keys, kNoAttrFilter);
+ array->GetOwnElementKeys(*keys, ALL_PROPERTIES);
while (num_elements-- > 0) {
uint32_t index = NumberToUint32(keys->get(num_elements));
if (index < new_length) break;
@@ -13649,20 +15170,6 @@ void Map::AddDependentCode(Handle<Map> map,
}
-DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
- Recompute(entries);
-}
-
-
-void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) {
- start_indexes_[0] = 0;
- for (int g = 1; g <= kGroupCount; g++) {
- int count = entries->number_of_entries(static_cast<DependencyGroup>(g - 1));
- start_indexes_[g] = start_indexes_[g - 1] + count;
- }
-}
-
-
Handle<DependentCode> DependentCode::InsertCompilationDependencies(
Handle<DependentCode> entries, DependencyGroup group,
Handle<Foreign> info) {
@@ -13680,44 +15187,54 @@ Handle<DependentCode> DependentCode::InsertWeakCode(
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Object> object) {
- GroupStartIndexes starts(*entries);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- int number_of_entries = starts.number_of_entries();
+ if (entries->length() == 0 || entries->group() > group) {
+ // There is no such group.
+ return DependentCode::New(group, object, entries);
+ }
+ if (entries->group() < group) {
+ // The group comes later in the list.
+ Handle<DependentCode> old_next(entries->next_link());
+ Handle<DependentCode> new_next = Insert(old_next, group, object);
+ if (!old_next.is_identical_to(new_next)) {
+ entries->set_next_link(*new_next);
+ }
+ return entries;
+ }
+ DCHECK_EQ(group, entries->group());
+ int count = entries->count();
// Check for existing entry to avoid duplicates.
- for (int i = start; i < end; i++) {
+ for (int i = 0; i < count; i++) {
if (entries->object_at(i) == *object) return entries;
}
- if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
+ if (entries->length() < kCodesStartIndex + count + 1) {
entries = EnsureSpace(entries);
- // The number of codes can change after Compact and GC.
- starts.Recompute(*entries);
- start = starts.at(group);
- end = starts.at(group + 1);
+ // Count could have changed, reload it.
+ count = entries->count();
}
-
- entries->ExtendGroup(group);
- entries->set_object_at(end, *object);
- entries->set_number_of_entries(group, end + 1 - start);
+ entries->set_object_at(count, *object);
+ entries->set_count(count + 1);
return entries;
}
+Handle<DependentCode> DependentCode::New(DependencyGroup group,
+ Handle<Object> object,
+ Handle<DependentCode> next) {
+ Isolate* isolate = next->GetIsolate();
+ Handle<DependentCode> result = Handle<DependentCode>::cast(
+ isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
+ result->set_next_link(*next);
+ result->set_flags(GroupField::encode(group) | CountField::encode(1));
+ result->set_object_at(0, *object);
+ return result;
+}
+
+
Handle<DependentCode> DependentCode::EnsureSpace(
Handle<DependentCode> entries) {
- Isolate* isolate = entries->GetIsolate();
- if (entries->length() == 0) {
- entries = Handle<DependentCode>::cast(
- isolate->factory()->NewFixedArray(kCodesStartIndex + 1, TENURED));
- for (int g = 0; g < kGroupCount; g++) {
- entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
- }
- return entries;
- }
if (entries->Compact()) return entries;
- GroupStartIndexes starts(*entries);
- int capacity =
- kCodesStartIndex + DependentCode::Grow(starts.number_of_entries());
+ Isolate* isolate = entries->GetIsolate();
+ int capacity = kCodesStartIndex + DependentCode::Grow(entries->count());
int grow_by = capacity - entries->length();
return Handle<DependentCode>::cast(
isolate->factory()->CopyFixedArrayAndGrow(entries, grow_by, TENURED));
@@ -13725,46 +15242,47 @@ Handle<DependentCode> DependentCode::EnsureSpace(
bool DependentCode::Compact() {
- GroupStartIndexes starts(this);
- int n = 0;
- for (int g = 0; g < kGroupCount; g++) {
- int start = starts.at(g);
- int end = starts.at(g + 1);
- int count = 0;
- DCHECK(start >= n);
- for (int i = start; i < end; i++) {
- Object* obj = object_at(i);
- if (!obj->IsWeakCell() || !WeakCell::cast(obj)->cleared()) {
- if (i != n + count) {
- copy(i, n + count);
- }
- count++;
+ int old_count = count();
+ int new_count = 0;
+ for (int i = 0; i < old_count; i++) {
+ Object* obj = object_at(i);
+ if (!obj->IsWeakCell() || !WeakCell::cast(obj)->cleared()) {
+ if (i != new_count) {
+ copy(i, new_count);
}
+ new_count++;
}
- if (count != end - start) {
- set_number_of_entries(static_cast<DependencyGroup>(g), count);
- }
- n += count;
}
- return n < starts.number_of_entries();
+ set_count(new_count);
+ for (int i = new_count; i < old_count; i++) {
+ clear_at(i);
+ }
+ return new_count < old_count;
}
void DependentCode::UpdateToFinishedCode(DependencyGroup group, Foreign* info,
WeakCell* code_cell) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ next_link()->UpdateToFinishedCode(group, info, code_cell);
+ return;
+ }
+ DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_gc;
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- for (int i = start; i < end; i++) {
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
if (object_at(i) == info) {
set_object_at(i, code_cell);
break;
}
}
-
#ifdef DEBUG
- for (int i = start; i < end; i++) {
+ for (int i = 0; i < count; i++) {
DCHECK(object_at(i) != info);
}
#endif
@@ -13773,34 +15291,36 @@ void DependentCode::UpdateToFinishedCode(DependencyGroup group, Foreign* info,
void DependentCode::RemoveCompilationDependencies(
DependentCode::DependencyGroup group, Foreign* info) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ next_link()->RemoveCompilationDependencies(group, info);
+ return;
+ }
+ DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation;
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
+ int old_count = count();
// Find compilation info wrapper.
int info_pos = -1;
- for (int i = start; i < end; i++) {
+ for (int i = 0; i < old_count; i++) {
if (object_at(i) == info) {
info_pos = i;
break;
}
}
if (info_pos == -1) return; // Not found.
- int gap = info_pos;
- // Use the last of each group to fill the gap in the previous group.
- for (int i = group; i < kGroupCount; i++) {
- int last_of_group = starts.at(i + 1) - 1;
- DCHECK(last_of_group >= gap);
- if (last_of_group == gap) continue;
- copy(last_of_group, gap);
- gap = last_of_group;
- }
- DCHECK(gap == starts.number_of_entries() - 1);
- clear_at(gap); // Clear last gap.
- set_number_of_entries(group, end - start - 1);
+ // Use the last code to fill the gap.
+ if (info_pos < old_count - 1) {
+ copy(old_count - 1, info_pos);
+ }
+ clear_at(old_count - 1);
+ set_count(old_count - 1);
#ifdef DEBUG
- for (int i = start; i < end - 1; i++) {
+ for (int i = 0; i < old_count - 1; i++) {
DCHECK(object_at(i) != info);
}
#endif
@@ -13808,30 +15328,55 @@ void DependentCode::RemoveCompilationDependencies(
bool DependentCode::Contains(DependencyGroup group, WeakCell* code_cell) {
- GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- for (int i = start; i < end; i++) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return false;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->Contains(group, code_cell);
+ }
+ DCHECK_EQ(group, this->group());
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
if (object_at(i) == code_cell) return true;
}
return false;
}
+bool DependentCode::IsEmpty(DependencyGroup group) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return true;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->IsEmpty(group);
+ }
+ DCHECK_EQ(group, this->group());
+ return count() == 0;
+}
+
+
bool DependentCode::MarkCodeForDeoptimization(
Isolate* isolate,
DependentCode::DependencyGroup group) {
+ if (this->length() == 0 || this->group() > group) {
+ // There is no such group.
+ return false;
+ }
+ if (this->group() < group) {
+ // The group comes later in the list.
+ return next_link()->MarkCodeForDeoptimization(isolate, group);
+ }
+ DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation_scope;
- DependentCode::GroupStartIndexes starts(this);
- int start = starts.at(group);
- int end = starts.at(group + 1);
- int code_entries = starts.number_of_entries();
- if (start == end) return false;
-
// Mark all the code that needs to be deoptimized.
bool marked = false;
bool invalidate_embedded_objects = group == kWeakCodeGroup;
- for (int i = start; i < end; i++) {
+ int count = this->count();
+ for (int i = 0; i < count; i++) {
Object* obj = object_at(i);
if (obj->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(obj);
@@ -13852,16 +15397,10 @@ bool DependentCode::MarkCodeForDeoptimization(
info->Abort();
}
}
- // Compact the array by moving all subsequent groups to fill in the new holes.
- for (int src = end, dst = start; src < code_entries; src++, dst++) {
- copy(src, dst);
- }
- // Now the holes are at the end of the array, zap them for heap-verifier.
- int removed = end - start;
- for (int i = code_entries - removed; i < code_entries; i++) {
+ for (int i = 0; i < count; i++) {
clear_at(i);
}
- set_number_of_entries(group, 0);
+ set_count(0);
return marked;
}
@@ -13932,13 +15471,85 @@ Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
- if (!object->IsJSObject()) return Just(false);
- // TODO(neis): Deal with proxies.
+ if (object->IsJSProxy()) {
+ return JSProxy::SetPrototype(Handle<JSProxy>::cast(object), value,
+ from_javascript, should_throw);
+ }
return JSObject::SetPrototype(Handle<JSObject>::cast(object), value,
from_javascript, should_throw);
}
+// ES6: 9.5.2 [[SetPrototypeOf]] (V)
+// static
+Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw) {
+ Isolate* isolate = proxy->GetIsolate();
+ STACK_CHECK(Nothing<bool>());
+ Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
+ // 1. Assert: Either Type(V) is Object or Type(V) is Null.
+ DCHECK(value->IsJSReceiver() || value->IsNull());
+ // 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, trap_name));
+ return Nothing<bool>();
+ }
+ // 5. Let target be the value of the [[ProxyTarget]] internal slot.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 6. Let trap be ? GetMethod(handler, "getPrototypeOf").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
+ Nothing<bool>());
+ // 7. If trap is undefined, then return target.[[SetPrototypeOf]]().
+ if (trap->IsUndefined()) {
+ return JSReceiver::SetPrototype(target, value, from_javascript,
+ should_throw);
+ }
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «target, V»)).
+ Handle<Object> argv[] = {target, value};
+ Handle<Object> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(argv), argv),
+ Nothing<bool>());
+ bool bool_trap_result = trap_result->BooleanValue();
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
+ if (is_extensible.IsNothing()) return Nothing<bool>();
+ // 10. If extensibleTarget is true, return booleanTrapResult.
+ if (is_extensible.FromJust()) {
+ if (bool_trap_result) return Just(true);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+ // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ Handle<Object> target_proto;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_proto,
+ Object::GetPrototype(isolate, target),
+ Nothing<bool>());
+ // 12. If booleanTrapResult is true and SameValue(V, targetProto) is false,
+ // throw a TypeError exception.
+ if (bool_trap_result && !value->SameValue(*target_proto)) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxySetPrototypeOfNonExtensible));
+ return Nothing<bool>();
+ }
+ // 13. Return booleanTrapResult.
+ if (bool_trap_result) return Just(true);
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+}
+
+
Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
@@ -13947,7 +15558,9 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
const bool observed = from_javascript && object->map()->is_observed();
Handle<Object> old_value;
if (observed) {
- old_value = Object::GetPrototype(isolate, object);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, old_value,
+ Object::GetPrototype(isolate, object),
+ Nothing<bool>());
}
Maybe<bool> result =
@@ -13955,7 +15568,10 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
MAYBE_RETURN(result, Nothing<bool>());
if (result.FromJust() && observed) {
- Handle<Object> new_value = Object::GetPrototype(isolate, object);
+ Handle<Object> new_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, new_value,
+ Object::GetPrototype(isolate, object),
+ Nothing<bool>());
if (!new_value->SameValue(*old_value)) {
RETURN_ON_EXCEPTION_VALUE(
isolate, JSObject::EnqueueChangeRecord(
@@ -13984,7 +15600,8 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
!isolate->MayAccess(handle(isolate->context()), object)) {
isolate->ReportFailedAccessCheck(object);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- UNREACHABLE();
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kNoAccess));
}
} else {
DCHECK(!object->IsAccessCheckNeeded());
@@ -14064,7 +15681,7 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
// If the prototype chain didn't previously have element callbacks, then
// KeyedStoreICs need to be cleared to ensure any that involve this
// map go generic.
- object->GetHeap()->ClearAllKeyedStoreICs();
+ TypeFeedbackVector::ClearAllKeyedStoreICs(isolate);
}
heap->ClearInstanceofCache();
@@ -14513,8 +16130,8 @@ int JSObject::GetFastElementsUsage() {
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
- // Only JSArray have packed elements.
- return Smi::cast(JSArray::cast(this)->length())->value();
+ return IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value()
+ : store->length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
store = FixedArray::cast(FixedArray::cast(store)->get(1));
// Fall through.
@@ -14589,16 +16206,6 @@ InterceptorInfo* JSObject::GetNamedInterceptor() {
}
-InterceptorInfo* JSObject::GetIndexedInterceptor() {
- DCHECK(map()->has_indexed_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
- DCHECK(constructor->shared()->IsApiFunction());
- Object* result =
- constructor->shared()->get_api_func_data()->indexed_property_handler();
- return InterceptorInfo::cast(result);
-}
-
-
MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
bool* done) {
*done = false;
@@ -14627,6 +16234,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
result = args.Call(getter, index);
} else {
Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
return isolate->factory()->undefined_value();
@@ -14650,52 +16258,6 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
}
-// Compute the property keys from the interceptor.
-MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
- Handle<JSObject> object, Handle<JSReceiver> receiver) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Local<v8::Object> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::GenericNamedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
- result = args.Call(enum_fun);
- }
- if (result.IsEmpty()) return MaybeHandle<JSObject>();
- DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
- // Rebox before returning.
- return handle(*v8::Utils::OpenHandle(*result), isolate);
-}
-
-
-// Compute the element keys from the interceptor.
-MaybeHandle<JSObject> JSObject::GetKeysForIndexedInterceptor(
- Handle<JSObject> object, Handle<JSReceiver> receiver) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
- PropertyCallbackArguments
- args(isolate, interceptor->data(), *receiver, *object);
- v8::Local<v8::Object> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- v8::IndexedPropertyEnumeratorCallback enum_fun =
- v8::ToCData<v8::IndexedPropertyEnumeratorCallback>(
- interceptor->enumerator());
- LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object));
- result = args.Call(enum_fun);
- }
- if (result.IsEmpty()) return MaybeHandle<JSObject>();
- DCHECK(v8::Utils::OpenHandle(*result)->IsJSArray() ||
- v8::Utils::OpenHandle(*result)->HasSloppyArgumentsElements());
- // Rebox before returning.
- return handle(*v8::Utils::OpenHandle(*result), isolate);
-}
-
-
Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -14723,25 +16285,6 @@ Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
}
-int JSObject::NumberOfOwnProperties(PropertyAttributes filter) {
- if (HasFastProperties()) {
- Map* map = this->map();
- if (filter == NONE) return map->NumberOfOwnDescriptors();
- if (filter == DONT_SHOW) {
- // The cached enum length was computed with filter == DONT_SHOW, so
- // that's the only filter for which it's valid to retrieve it.
- int result = map->EnumLength();
- if (result != kInvalidEnumCacheSentinel) return result;
- }
- return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
- } else if (IsJSGlobalObject()) {
- return global_dictionary()->NumberOfElementsFilterAttributes(filter);
- } else {
- return property_dictionary()->NumberOfElementsFilterAttributes(filter);
- }
-}
-
-
void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
Object* temp = get(i);
set(i, get(j));
@@ -14855,55 +16398,33 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
}
-// Fill in the names of own properties into the supplied storage. The main
-// purpose of this function is to provide reflection information for the object
-// mirrors.
-int JSObject::GetOwnPropertyNames(FixedArray* storage, int index,
- PropertyAttributes filter) {
- DCHECK(storage->length() >= (NumberOfOwnProperties(filter) - index));
+void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
+ PropertyFilter filter) {
if (HasFastProperties()) {
- int start_index = index;
- int real_size = map()->NumberOfOwnDescriptors();
- DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < real_size; i++) {
- if ((descs->GetDetails(i).attributes() & filter) == 0 &&
- !descs->GetKey(i)->FilterKey(filter)) {
- storage->set(index++, descs->GetKey(i));
- }
- }
- return index - start_index;
- } else if (IsJSGlobalObject()) {
- return global_dictionary()->CopyKeysTo(storage, index, filter,
- GlobalDictionary::UNSORTED);
- } else {
- return property_dictionary()->CopyKeysTo(storage, index, filter,
- NameDictionary::UNSORTED);
- }
-}
-
-
-int JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
- PropertyAttributes filter) {
- if (HasFastProperties()) {
- int nof_keys = keys->length();
int real_size = map()->NumberOfOwnDescriptors();
Handle<DescriptorArray> descs(map()->instance_descriptors());
for (int i = 0; i < real_size; i++) {
- if ((descs->GetDetails(i).attributes() & filter) != 0) continue;
+ PropertyDetails details = descs->GetDetails(i);
+ if ((details.attributes() & filter) != 0) continue;
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = descs->GetValue(i);
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ }
Name* key = descs->GetKey(i);
if (key->FilterKey(filter)) continue;
keys->AddKey(key);
}
- return nof_keys - keys->length();
} else if (IsJSGlobalObject()) {
- return global_dictionary()->CollectKeysTo(keys, filter);
+ GlobalDictionary::CollectKeysTo(handle(global_dictionary()), keys, filter);
} else {
- return property_dictionary()->CollectKeysTo(keys, filter);
+ NameDictionary::CollectKeysTo(handle(property_dictionary()), keys, filter);
}
}
-int JSObject::NumberOfOwnElements(PropertyAttributes filter) {
+int JSObject::NumberOfOwnElements(PropertyFilter filter) {
// Fast case for objects with no elements.
if (!IsJSValue() && HasFastElements()) {
uint32_t length =
@@ -14918,14 +16439,10 @@ int JSObject::NumberOfOwnElements(PropertyAttributes filter) {
}
-int JSObject::NumberOfEnumElements() {
- return NumberOfOwnElements(static_cast<PropertyAttributes>(DONT_ENUM));
-}
-
-
void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
KeyAccumulator* keys,
- PropertyAttributes filter) {
+ PropertyFilter filter) {
+ if (filter & SKIP_STRINGS) return;
uint32_t string_keys = 0;
// If this is a String wrapper, add the string indices first,
@@ -14933,7 +16450,7 @@ void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
// and ascending order is required by ECMA-262, 6th, 9.1.12.
if (object->IsJSValue()) {
Object* val = JSValue::cast(*object)->value();
- if (val->IsString()) {
+ if (val->IsString() && (filter & ONLY_ALL_CAN_READ) == 0) {
String* str = String::cast(val);
string_keys = str->length();
for (uint32_t i = 0; i < string_keys; i++) {
@@ -14946,8 +16463,7 @@ void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
}
-int JSObject::GetOwnElementKeys(FixedArray* storage,
- PropertyAttributes filter) {
+int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
int counter = 0;
// If this is a String wrapper, add the string indices first,
@@ -15076,6 +16592,39 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
}
+MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
+ Handle<Object> object) {
+ if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
+ if (object->IsNull()) return isolate->factory()->null_to_string();
+
+ Handle<JSReceiver> receiver;
+ CHECK(Object::ToObject(isolate, object).ToHandle(&receiver));
+
+ Handle<String> tag;
+ if (FLAG_harmony_tostring) {
+ Handle<Object> to_string_tag;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, to_string_tag,
+ GetProperty(receiver, isolate->factory()->to_string_tag_symbol()),
+ String);
+ if (to_string_tag->IsString()) {
+ tag = Handle<String>::cast(to_string_tag);
+ }
+ }
+
+ if (tag.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, tag,
+ JSReceiver::BuiltinStringTag(receiver), String);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("[object ");
+ builder.AppendString(tag);
+ builder.AppendCharacter(']');
+ return builder.Finish();
+}
+
+
const char* Symbol::PrivateSymbolToName() const {
Heap* heap = GetIsolate()->heap();
#define SYMBOL_CHECK_AND_PRINT(name) \
@@ -15191,12 +16740,197 @@ class StringSharedKey : public HashTableKey {
};
+namespace {
+
+JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
+ JSRegExp::Flags value = JSRegExp::kNone;
+ int length = flags->length();
+ // A longer flags string cannot be valid.
+ if (length > 5) return JSRegExp::Flags(0);
+ for (int i = 0; i < length; i++) {
+ JSRegExp::Flag flag = JSRegExp::kNone;
+ switch (flags->Get(i)) {
+ case 'g':
+ flag = JSRegExp::kGlobal;
+ break;
+ case 'i':
+ flag = JSRegExp::kIgnoreCase;
+ break;
+ case 'm':
+ flag = JSRegExp::kMultiline;
+ break;
+ case 'u':
+ if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
+ flag = JSRegExp::kUnicode;
+ break;
+ case 'y':
+ if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
+ flag = JSRegExp::kSticky;
+ break;
+ default:
+ return JSRegExp::Flags(0);
+ }
+ // Duplicate flag.
+ if (value & flag) return JSRegExp::Flags(0);
+ value |= flag;
+ }
+ *success = true;
+ return value;
+}
+
+} // namespace
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern, Flags flags) {
+ Isolate* isolate = pattern->GetIsolate();
+ Handle<JSFunction> constructor = isolate->regexp_function();
+ Handle<JSRegExp> regexp =
+ Handle<JSRegExp>::cast(isolate->factory()->NewJSObject(constructor));
+
+ return JSRegExp::Initialize(regexp, pattern, flags);
+}
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern,
+ Handle<String> flags_string) {
+ Isolate* isolate = pattern->GetIsolate();
+ bool success = false;
+ Flags flags = RegExpFlagsFromString(flags_string, &success);
+ if (!success) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
+ JSRegExp);
+ }
+ return New(pattern, flags);
+}
+
+
+// static
+Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
+ Isolate* const isolate = regexp->GetIsolate();
+ return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
+}
+
+
+template <typename Char>
+inline int CountRequiredEscapes(Handle<String> source) {
+ DisallowHeapAllocation no_gc;
+ int escapes = 0;
+ Vector<const Char> src = source->GetCharVector<Char>();
+ for (int i = 0; i < src.length(); i++) {
+ if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
+ }
+ return escapes;
+}
+
+
+template <typename Char, typename StringType>
+inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
+ Handle<StringType> result) {
+ DisallowHeapAllocation no_gc;
+ Vector<const Char> src = source->GetCharVector<Char>();
+ Vector<Char> dst(result->GetChars(), result->length());
+ int s = 0;
+ int d = 0;
+ while (s < src.length()) {
+ if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
+ dst[d++] = src[s++];
+ }
+ DCHECK_EQ(result->length(), d);
+ return result;
+}
+
+
+MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
+ Handle<String> source) {
+ String::Flatten(source);
+ if (source->length() == 0) return isolate->factory()->query_colon_string();
+ bool one_byte = source->IsOneByteRepresentationUnderneath();
+ int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
+ : CountRequiredEscapes<uc16>(source);
+ if (escapes == 0) return source;
+ int length = source->length() + escapes;
+ if (one_byte) {
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ isolate->factory()->NewRawOneByteString(length),
+ String);
+ return WriteEscapedRegExpSource<uint8_t>(source, result);
+ } else {
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ isolate->factory()->NewRawTwoByteString(length),
+ String);
+ return WriteEscapedRegExpSource<uc16>(source, result);
+ }
+}
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string) {
+ Isolate* isolate = source->GetIsolate();
+ bool success = false;
+ Flags flags = RegExpFlagsFromString(flags_string, &success);
+ if (!success) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
+ JSRegExp);
+ }
+ return Initialize(regexp, source, flags);
+}
+
+
+// static
+MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source, Flags flags) {
+ Isolate* isolate = regexp->GetIsolate();
+ Factory* factory = isolate->factory();
+ // If source is the empty string we set it to "(?:)" instead as
+ // suggested by ECMA-262, 5th, section 15.10.4.1.
+ if (source->length() == 0) source = factory->query_colon_string();
+
+ Handle<String> escaped_source;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
+ EscapeRegExpSource(isolate, source), JSRegExp);
+
+ regexp->set_source(*escaped_source);
+ regexp->set_flags(Smi::FromInt(flags));
+
+ Map* map = regexp->map();
+ Object* constructor = map->GetConstructor();
+ if (constructor->IsJSFunction() &&
+ JSFunction::cast(constructor)->initial_map() == map) {
+ // If we still have the original map, set in-object properties directly.
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ } else {
+ // Map has changed, so use generic, but slower, method.
+ PropertyAttributes writable =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ regexp, factory->last_index_string(),
+ Handle<Smi>(Smi::FromInt(0), isolate), writable)
+ .Check();
+ }
+
+ RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
+ JSRegExp);
+
+ return regexp;
+}
+
+
// RegExpKey carries the source and flags of a regular expression as key.
class RegExpKey : public HashTableKey {
public:
RegExpKey(Handle<String> string, JSRegExp::Flags flags)
- : string_(string),
- flags_(Smi::FromInt(flags.value())) { }
+ : string_(string), flags_(Smi::FromInt(flags)) {}
// Rather than storing the key in the hash table, a pointer to the
// stored value is stored where the key should be. IsMatch then
@@ -15298,15 +17032,14 @@ class InternalizedStringKey : public HashTableKey {
template<typename Derived, typename Shape, typename Key>
void HashTable<Derived, Shape, Key>::IteratePrefix(ObjectVisitor* v) {
- IteratePointers(v, 0, kElementsStartOffset);
+ BodyDescriptorBase::IteratePointers(this, 0, kElementsStartOffset, v);
}
template<typename Derived, typename Shape, typename Key>
void HashTable<Derived, Shape, Key>::IterateElements(ObjectVisitor* v) {
- IteratePointers(v,
- kElementsStartOffset,
- kHeaderSize + length() * kPointerSize);
+ BodyDescriptorBase::IteratePointers(this, kElementsStartOffset,
+ kHeaderSize + length() * kPointerSize, v);
}
@@ -15493,14 +17226,8 @@ Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
Isolate* isolate = table->GetIsolate();
int capacity = table->Capacity();
int nof = table->NumberOfElements() + n;
- int nod = table->NumberOfDeletedElements();
- // Return if:
- // 50% is still free after adding n elements and
- // at most 50% of the free elements are deleted elements.
- if (nod <= (capacity - nof) >> 1) {
- int needed_free = nof >> 1;
- if (nof + needed_free <= capacity) return table;
- }
+
+ if (table->HasSufficientCapacity(n)) return table;
const int kMinCapacityForPretenure = 256;
bool should_pretenure = pretenure == TENURED ||
@@ -15517,6 +17244,22 @@ Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
}
+template <typename Derived, typename Shape, typename Key>
+bool HashTable<Derived, Shape, Key>::HasSufficientCapacity(int n) {
+ int capacity = Capacity();
+ int nof = NumberOfElements() + n;
+ int nod = NumberOfDeletedElements();
+ // Return true if:
+ // 50% is still free after adding n elements and
+ // at most 50% of the free elements are deleted elements.
+ if (nod <= (capacity - nof) >> 1) {
+ int needed_free = nof >> 1;
+ if (nof + needed_free <= capacity) return true;
+ }
+ return false;
+}
+
+
template<typename Derived, typename Shape, typename Key>
Handle<Derived> HashTable<Derived, Shape, Key>::Shrink(Handle<Derived> table,
Key key) {
@@ -15683,6 +17426,9 @@ template Handle<UnseededNumberDictionary>
Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
EnsureCapacity(Handle<UnseededNumberDictionary>, int, uint32_t);
+template void Dictionary<NameDictionary, NameDictionaryShape,
+ Handle<Name> >::SetRequiresCopyOnCapacityChange();
+
template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
@@ -16411,7 +18157,17 @@ Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices(
}
-template<typename Derived, typename Shape, typename Key>
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::SetRequiresCopyOnCapacityChange() {
+ DCHECK_EQ(0, DerivedHashTable::NumberOfElements());
+ DCHECK_EQ(0, DerivedHashTable::NumberOfDeletedElements());
+ // Make sure that HashTable::EnsureCapacity will create a copy.
+ DerivedHashTable::SetNumberOfDeletedElements(DerivedHashTable::Capacity());
+ DCHECK(!DerivedHashTable::HasSufficientCapacity(1));
+}
+
+
+template <typename Derived, typename Shape, typename Key>
Handle<Derived> Dictionary<Derived, Shape, Key>::EnsureCapacity(
Handle<Derived> dictionary, int n, Key key) {
// Check whether there are enough enumeration indices to add n elements.
@@ -16515,7 +18271,7 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
if (key > kRequiresSlowElementsLimit) {
if (used_as_prototype) {
// TODO(verwaest): Remove this hack.
- GetHeap()->ClearAllKeyedStoreICs();
+ TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
}
set_requires_slow_elements();
return;
@@ -16594,7 +18350,7 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
template <typename Derived, typename Shape, typename Key>
int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
- PropertyAttributes filter) {
+ PropertyFilter filter) {
int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
@@ -16615,12 +18371,12 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !k->FilterKey(NONE)) {
+ if (this->IsKey(k) && !k->FilterKey(ALL_PROPERTIES)) {
if (this->IsDeleted(i)) continue;
PropertyDetails details = this->DetailsAt(i);
if (details.type() == ACCESSOR_CONSTANT) return true;
PropertyAttributes attr = details.attributes();
- if (attr & (READ_ONLY | DONT_DELETE | DONT_ENUM)) return true;
+ if (attr & ALL_ATTRIBUTES_MASK) return true;
}
}
return false;
@@ -16667,7 +18423,7 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(FixedArray* storage) {
template <typename Derived, typename Shape, typename Key>
int Dictionary<Derived, Shape, Key>::CopyKeysTo(
- FixedArray* storage, int index, PropertyAttributes filter,
+ FixedArray* storage, int index, PropertyFilter filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
int start_index = index;
@@ -16690,20 +18446,44 @@ int Dictionary<Derived, Shape, Key>::CopyKeysTo(
template <typename Derived, typename Shape, typename Key>
-int Dictionary<Derived, Shape, Key>::CollectKeysTo(KeyAccumulator* keys,
- PropertyAttributes filter) {
- int capacity = this->Capacity();
- int keyLength = keys->length();
- for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (!this->IsKey(k) || k->FilterKey(filter)) continue;
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) != 0) continue;
- keys->AddKey(k);
+void Dictionary<Derived, Shape, Key>::CollectKeysTo(
+ Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
+ PropertyFilter filter) {
+ int capacity = dictionary->Capacity();
+ Handle<FixedArray> array =
+ keys->isolate()->factory()->NewFixedArray(dictionary->NumberOfElements());
+ int array_size = 0;
+
+ {
+ DisallowHeapAllocation no_gc;
+ Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
+ for (int i = 0; i < capacity; i++) {
+ Object* k = raw_dict->KeyAt(i);
+ if (!raw_dict->IsKey(k) || k->FilterKey(filter)) continue;
+ if (raw_dict->IsDeleted(i)) continue;
+ PropertyDetails details = raw_dict->DetailsAt(i);
+ if ((details.attributes() & filter) != 0) continue;
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = raw_dict->ValueAt(i);
+ if (accessors->IsPropertyCell()) {
+ accessors = PropertyCell::cast(accessors)->value();
+ }
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ }
+ array->set(array_size++, Smi::FromInt(i));
+ }
+
+ EnumIndexComparator<Derived> cmp(static_cast<Derived*>(raw_dict));
+ Smi** start = reinterpret_cast<Smi**>(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+
+ for (int i = 0; i < array_size; i++) {
+ int index = Smi::cast(array->get(i))->value();
+ keys->AddKey(dictionary->KeyAt(index));
}
- return keyLength - keys->length();
}
@@ -17510,6 +19290,39 @@ int BreakPointInfo::GetBreakPointCount() {
}
+// static
+MaybeHandle<JSDate> JSDate::New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target, double tv) {
+ Isolate* const isolate = constructor->GetIsolate();
+ Handle<JSObject> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ JSObject::New(constructor, new_target), JSDate);
+ if (-DateCache::kMaxTimeInMs <= tv && tv <= DateCache::kMaxTimeInMs) {
+ tv = DoubleToInteger(tv) + 0.0;
+ } else {
+ tv = std::numeric_limits<double>::quiet_NaN();
+ }
+ Handle<Object> value = isolate->factory()->NewNumber(tv);
+ Handle<JSDate>::cast(result)->SetValue(*value, std::isnan(tv));
+ return Handle<JSDate>::cast(result);
+}
+
+
+// static
+double JSDate::CurrentTimeValue(Isolate* isolate) {
+ if (FLAG_log_timer_events || FLAG_prof_cpp) LOG(isolate, CurrentTimeEvent());
+
+ // According to ECMA-262, section 15.9.1, page 117, the precision of
+ // the number in a Date object representing a particular instant in
+ // time is milliseconds. Therefore, we floor the result of getting
+ // the OS time.
+ return Floor(FLAG_verify_predictable
+ ? isolate->heap()->MonotonicallyIncreasingTimeInMs()
+ : base::OS::TimeCurrentMillis());
+}
+
+
+// static
Object* JSDate::GetField(Object* object, Smi* index) {
return JSDate::cast(object)->DoGetField(
static_cast<FieldIndex>(index->value()));
@@ -17602,6 +19415,16 @@ Object* JSDate::GetUTCField(FieldIndex index,
}
+// static
+Handle<Object> JSDate::SetValue(Handle<JSDate> date, double v) {
+ Isolate* const isolate = date->GetIsolate();
+ Handle<Object> value = isolate->factory()->NewNumber(v);
+ bool value_is_nan = std::isnan(v);
+ date->SetValue(*value, value_is_nan);
+ return value;
+}
+
+
void JSDate::SetValue(Object* value, bool is_value_nan) {
set_value(value);
if (is_value_nan) {
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 93f57333a1..c55c5c9780 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,6 +11,7 @@
#include "src/assert-scope.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
+#include "src/base/flags.h"
#include "src/base/smart-pointers.h"
#include "src/builtins.h"
#include "src/checks.h"
@@ -50,6 +51,7 @@
// - JSArrayBufferView
// - JSTypedArray
// - JSDataView
+// - JSBoundFunction
// - JSCollection
// - JSSet
// - JSMap
@@ -68,14 +70,12 @@
// - JSDate
// - JSMessageObject
// - JSProxy
-// - JSFunctionProxy
// - FixedArrayBase
// - ByteArray
// - BytecodeArray
// - FixedArray
// - DescriptorArray
// - LiteralsArray
-// - BindingsArray
// - HashTable
// - Dictionary
// - StringTable
@@ -143,7 +143,6 @@
// - FunctionTemplateInfo
// - ObjectTemplateInfo
// - Script
-// - TypeSwitchInfo
// - DebugInfo
// - BreakPointInfo
// - CodeCache
@@ -179,7 +178,7 @@ enum class ToPrimitiveHint { kDefault, kNumber, kString };
enum class OrdinaryToPrimitiveHint { kNumber, kString };
-enum TypeofMode { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum MutableMode {
@@ -411,6 +410,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
V(WEAK_CELL_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
\
V(JS_MESSAGE_OBJECT_TYPE) \
\
@@ -434,10 +434,11 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_ITERATOR_RESULT_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
+ V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
\
+ V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE) \
- V(JS_FUNCTION_PROXY_TYPE) \
V(DEBUG_INFO_TYPE) \
V(BREAK_POINT_INFO_TYPE)
@@ -512,7 +513,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
V(ALLOCATION_SITE, AllocationSite, allocation_site) \
V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
@@ -701,6 +701,7 @@ enum InstanceType {
SHARED_FUNCTION_INFO_TYPE,
CELL_TYPE,
WEAK_CELL_TYPE,
+ TRANSITION_ARRAY_TYPE,
PROPERTY_CELL_TYPE,
PROTOTYPE_INFO_TYPE,
SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE,
@@ -708,11 +709,9 @@ enum InstanceType {
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
- // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
- // NONCALLABLE_JS_OBJECT range.
- JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
- JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE
- JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
+ // compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
+ JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
+ JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_MESSAGE_OBJECT_TYPE,
JS_DATE_TYPE,
JS_OBJECT_TYPE,
@@ -732,7 +731,9 @@ enum InstanceType {
JS_ITERATOR_RESULT_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
+ JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
+ JS_BOUND_FUNCTION_TYPE,
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
// Pseudo-types
@@ -745,6 +746,8 @@ enum InstanceType {
FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
FIRST_PRIMITIVE_TYPE = FIRST_NAME_TYPE,
LAST_PRIMITIVE_TYPE = ODDBALL_TYPE,
+ FIRST_FUNCTION_TYPE = JS_BOUND_FUNCTION_TYPE,
+ LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
// Boundaries for testing for a fixed typed array.
FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_UINT8_CLAMPED_ARRAY_TYPE,
@@ -755,23 +758,11 @@ enum InstanceType {
// are not continuous in this enum! The enum ranges instead reflect the
// external class names, where proxies are treated as either ordinary objects,
// or functions.
- FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
+ FIRST_JS_RECEIVER_TYPE = JS_PROXY_TYPE,
LAST_JS_RECEIVER_TYPE = LAST_TYPE,
// Boundaries for testing the types represented as JSObject
FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
LAST_JS_OBJECT_TYPE = LAST_TYPE,
- // Boundaries for testing the types represented as JSProxy
- FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
- LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
- // Boundaries for testing whether the type is a JavaScript object.
- FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
- LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
- // Boundaries for testing the types for which typeof is "object".
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
- // Note that the types for which typeof is "function" are not continuous.
- // Define this so that we can put assertions on discrete checks.
- NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
};
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -780,6 +771,9 @@ STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
+std::ostream& operator<<(std::ostream& os, InstanceType instance_type);
+
+
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
V(FAST_ELEMENTS_SUB_TYPE) \
V(DICTIONARY_ELEMENTS_SUB_TYPE) \
@@ -788,14 +782,13 @@ STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
V(MAP_CODE_CACHE_SUB_TYPE) \
V(SCOPE_INFO_SUB_TYPE) \
V(STRING_TABLE_SUB_TYPE) \
- V(DESCRIPTOR_ARRAY_SUB_TYPE) \
- V(TRANSITION_ARRAY_SUB_TYPE)
+ V(DESCRIPTOR_ARRAY_SUB_TYPE)
enum FixedArraySubInstanceType {
#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_FIXED_ARRAY_SUB_TYPE = TRANSITION_ARRAY_SUB_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = DESCRIPTOR_ARRAY_SUB_TYPE
};
@@ -863,6 +856,7 @@ class StringStream;
class TypeFeedbackInfo;
class TypeFeedbackVector;
class WeakCell;
+class TransitionArray;
// We cannot just say "class HeapType;" if it is created from a template... =8-?
template<class> class TypeImpl;
@@ -940,7 +934,6 @@ template <class C> inline bool Is(Object* obj);
V(LayoutDescriptor) \
V(Map) \
V(DescriptorArray) \
- V(BindingsArray) \
V(TransitionArray) \
V(LiteralsArray) \
V(TypeFeedbackMetadata) \
@@ -957,6 +950,7 @@ template <class C> inline bool Is(Object* obj);
V(ScriptContextTable) \
V(NativeContext) \
V(ScopeInfo) \
+ V(JSBoundFunction) \
V(JSFunction) \
V(Code) \
V(Oddball) \
@@ -973,7 +967,6 @@ template <class C> inline bool Is(Object* obj);
V(JSTypedArray) \
V(JSDataView) \
V(JSProxy) \
- V(JSFunctionProxy) \
V(JSSet) \
V(JSMap) \
V(JSSetIterator) \
@@ -1003,6 +996,9 @@ template <class C> inline bool Is(Object* obj);
V(WeakHashTable) \
V(OrderedHashTable)
+// The element types selection for CreateListFromArrayLike.
+enum class ElementTypes { kAll, kStringAndSymbol };
+
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@@ -1055,13 +1051,18 @@ class Object {
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
#undef DECLARE_STRUCT_PREDICATE
+ // ES6, section 7.2.2 IsArray. NOT to be confused with %_IsArray.
+ MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object);
+
+ // Test for JSBoundFunction or JSFunction.
+ INLINE(bool IsFunction() const);
+
// ES6, section 7.2.3 IsCallable.
INLINE(bool IsCallable() const);
// ES6, section 7.2.4 IsConstructor.
INLINE(bool IsConstructor() const);
- INLINE(bool IsSpecObject()) const;
INLINE(bool IsTemplateInfo()) const;
INLINE(bool IsNameDictionary() const);
INLINE(bool IsGlobalDictionary() const);
@@ -1102,7 +1103,7 @@ class Object {
// 1 all refer to the same property, so this helper will return true.
inline bool KeyEquals(Object* other);
- inline bool FilterKey(PropertyAttributes filter);
+ inline bool FilterKey(PropertyFilter filter);
Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
@@ -1174,6 +1175,13 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Object> GetMethod(
Handle<JSReceiver> receiver, Handle<Name> name);
+ // ES6 section 7.3.17 CreateListFromArrayLike
+ MUST_USE_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types);
+
+ // Check whether |object| is an instance of Error or NativeError.
+ static bool IsErrorObject(Isolate* isolate, Handle<Object> object);
+
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
@@ -1306,10 +1314,12 @@ class Object {
Handle<Object> value, LanguageMode language_mode);
// Get the first non-hidden prototype.
- static inline Handle<Object> GetPrototype(Isolate* isolate,
- Handle<Object> receiver);
+ static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
+ Handle<Object> receiver);
- bool HasInPrototypeChain(Isolate* isolate, Object* object);
+ MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> proto);
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
@@ -1336,6 +1346,10 @@ class Object {
// by ES6 Map and Set.
bool SameValueZero(Object* other);
+ // ES6 section 9.4.2.3 ArraySpeciesCreate (part of it)
+ MUST_USE_RESULT static MaybeHandle<Object> ArraySpeciesConstructor(
+ Isolate* isolate, Handle<Object> original_array);
+
// Tries to convert an object to an array length. Returns true and sets the
// output parameter if it succeeds.
inline bool ToArrayLength(uint32_t* index);
@@ -1357,6 +1371,10 @@ class Object {
inline void VerifyApiCallResultType();
+ // ES6 19.1.3.6 Object.prototype.toString
+ MUST_USE_RESULT static MaybeHandle<String> ObjectProtoToString(
+ Isolate* isolate, Handle<Object> object);
+
// Prints this object without details.
void ShortPrint(FILE* out = stdout);
@@ -1504,13 +1522,6 @@ class MapWord BASE_EMBEDDED {
};
-// The content of an heap object (except for the map pointer). kTaggedValues
-// objects can contain both heap pointers and Smis, kMixedValues can contain
-// heap pointers, Smis, and raw values (e.g. doubles or strings), and kRawValues
-// objects can contain raw values and Smis.
-enum class HeapObjectContents { kTaggedValues, kMixedValues, kRawValues };
-
-
// HeapObject is the superclass for all classes describing heap allocated
// objects.
class HeapObject: public Object {
@@ -1555,21 +1566,38 @@ class HeapObject: public Object {
return reinterpret_cast<Address>(this) - kHeapObjectTag;
}
- // Iterates over pointers contained in the object (including the Map)
+ // Iterates over pointers contained in the object (including the Map).
+ // If it's not performance critical iteration use the non-templatized
+ // version.
void Iterate(ObjectVisitor* v);
+ template <typename ObjectVisitor>
+ inline void IterateFast(ObjectVisitor* v);
+
// Iterates over all pointers contained in the object except the
// first map pointer. The object type is given in the first
// parameter. This function does not access the map pointer in the
// object, and so is safe to call while the map pointer is modified.
+ // If it's not performance critical iteration use the non-templatized
+ // version.
+ void IterateBody(ObjectVisitor* v);
void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
+ template <typename ObjectVisitor>
+ inline void IterateBodyFast(ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ inline void IterateBodyFast(InstanceType type, int object_size,
+ ObjectVisitor* v);
+
+ // Returns true if the object contains a tagged value at given offset.
+ // It is used for invalid slots filtering. If the offset points outside
+ // of the object or to the map word, the result is UNDEFINED (!!!).
+ bool IsValidSlot(int offset);
+
// Returns the heap object's size in bytes
inline int Size();
- // Indicates what type of values this heap object may contain.
- inline HeapObjectContents ContentType();
-
// Given a heap object's map pointer, returns the heap size in bytes
// Useful when the map pointer field is used for other purposes.
// GC internal.
@@ -1623,90 +1651,17 @@ class HeapObject: public Object {
STATIC_ASSERT(kMapOffset == Internals::kHeapObjectMapOffset);
- protected:
- // helpers for calling an ObjectVisitor to iterate over pointers in the
- // half-open range [start, end) specified as integer offsets
- inline void IteratePointers(ObjectVisitor* v, int start, int end);
- // as above, for the single element at "offset"
- inline void IteratePointer(ObjectVisitor* v, int offset);
- // as above, for the next code link of a code object.
- inline void IterateNextCodeLink(ObjectVisitor* v, int offset);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
};
-// This is the base class for object's body descriptors.
-class BodyDescriptorBase {
- protected:
- static inline void IterateBodyImpl(HeapObject* obj, int start_offset,
- int end_offset, ObjectVisitor* v);
-
- template <typename StaticVisitor>
- static inline void IterateBodyImpl(Heap* heap, HeapObject* obj,
- int start_offset, int end_offset);
-
- static inline void IteratePointers(HeapObject* obj, int start_offset,
- int end_offset, ObjectVisitor* v);
-
- template <typename StaticVisitor>
- static inline void IteratePointers(Heap* heap, HeapObject* obj,
- int start_offset, int end_offset);
-};
-
-
-// This class describes a body of an object of a fixed size
-// in which all pointer fields are located in the [start_offset, end_offset)
-// interval.
template <int start_offset, int end_offset, int size>
-class FixedBodyDescriptor : public BodyDescriptorBase {
- public:
- static const int kStartOffset = start_offset;
- static const int kEndOffset = end_offset;
- static const int kSize = size;
-
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
- IterateBodyImpl(obj, start_offset, end_offset, v);
- }
-
- template <typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj) {
- Heap* heap = obj->GetHeap();
- IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, end_offset);
- }
-};
-
-
-// This base class describes a body of an object of a variable size
-// in which all pointer fields are located in the [start_offset, object_size)
-// interval.
-template <int start_offset>
-class FlexibleBodyDescriptorBase : public BodyDescriptorBase {
- public:
- static const int kStartOffset = start_offset;
-
- static inline void IterateBody(HeapObject* obj, int object_size,
- ObjectVisitor* v) {
- IterateBodyImpl(obj, start_offset, object_size, v);
- }
+class FixedBodyDescriptor;
- template <typename StaticVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size) {
- Heap* heap = obj->GetHeap();
- IterateBodyImpl<StaticVisitor>(heap, obj, start_offset, object_size);
- }
-};
-
-// This class describes a body of an object of a variable size
-// in which all pointer fields are located in the [start_offset, object_size)
-// interval. The size of the object is taken from the map.
template <int start_offset>
-class FlexibleBodyDescriptor : public FlexibleBodyDescriptorBase<start_offset> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
-};
+class FlexibleBodyDescriptor;
// The HeapNumber class describes heap allocated numbers that cannot be
@@ -1840,9 +1795,6 @@ enum AccessorComponent {
};
-enum KeyFilter { SKIP_SYMBOLS, INCLUDE_SYMBOLS };
-
-
enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
@@ -1850,6 +1802,19 @@ enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
public:
+ // [properties]: Backing storage for properties.
+ // properties is a FixedArray in the fast case and a Dictionary in the
+ // slow case.
+ DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
+ inline void initialize_properties();
+ inline bool HasFastProperties();
+ // Gets slow properties for non-global objects.
+ inline NameDictionary* property_dictionary();
+
+ // Deletes an existing named property in a normalized object.
+ static void DeleteNormalizedProperty(Handle<JSReceiver> object,
+ Handle<Name> name, int entry);
+
DECLARE_CAST(JSReceiver)
// ES6 section 7.1.1 ToPrimitive
@@ -1859,27 +1824,28 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
+
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(Handle<JSReceiver>,
- Handle<Name> name);
MUST_USE_RESULT static inline Maybe<bool> HasElement(
Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static inline Maybe<bool> HasOwnElement(
- Handle<JSReceiver> object, uint32_t index);
- // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
- MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyOrElement(
+ MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
+ Handle<JSReceiver> object, Handle<Name> name);
+
+ // Implementation of ES6 [[Delete]]
+ MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
+ MUST_USE_RESULT static Maybe<bool> DeleteProperty(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = SLOPPY);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
- LookupIterator* it, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteElement(
+ MUST_USE_RESULT static Maybe<bool> DeleteProperty(LookupIterator* it,
+ LanguageMode language_mode);
+ MUST_USE_RESULT static Maybe<bool> DeleteElement(
Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode = SLOPPY);
@@ -1887,45 +1853,76 @@ class JSReceiver: public HeapObject {
Handle<Object> object,
Handle<Object> name,
Handle<Object> attributes);
- MUST_USE_RESULT static Object* DefineProperties(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> properties);
+ MUST_USE_RESULT static MaybeHandle<Object> DefineProperties(
+ Isolate* isolate, Handle<Object> object, Handle<Object> properties);
// "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
- static bool DefineOwnProperty(Isolate* isolate, Handle<JSReceiver> object,
- Handle<Object> key, PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
- static bool OrdinaryDefineOwnProperty(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Object> key,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
- static bool OrdinaryDefineOwnProperty(LookupIterator* it,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
-
- static bool GetOwnPropertyDescriptor(Isolate* isolate,
- Handle<JSReceiver> object,
- Handle<Object> key,
- PropertyDescriptor* desc);
- static bool GetOwnPropertyDescriptor(LookupIterator* it,
- PropertyDescriptor* desc);
+ // ES6 7.3.4 (when passed DONT_THROW)
+ MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
+ LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
- // Disallow further properties to be added to the object. This is
- // ES6's [[PreventExtensions]] when passed DONT_THROW.
+ // ES6 9.1.6.1
+ MUST_USE_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ LookupIterator* it, PropertyDescriptor* desc, ShouldThrow should_throw);
+ // ES6 9.1.6.2
+ MUST_USE_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
+ Isolate* isolate, bool extensible, PropertyDescriptor* desc,
+ PropertyDescriptor* current, Handle<Name> property_name,
+ ShouldThrow should_throw);
+ // ES6 9.1.6.3
+ // |it| can be NULL in cases where the ES spec passes |undefined| as the
+ // receiver. Exactly one of |it| and |property_name| must be provided.
+ MUST_USE_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
+ Isolate* isolate, LookupIterator* it, bool extensible,
+ PropertyDescriptor* desc, PropertyDescriptor* current,
+ ShouldThrow should_throw, Handle<Name> property_name = Handle<Name>());
+
+ MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
+ PropertyDescriptor* desc);
+ MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ LookupIterator* it, PropertyDescriptor* desc);
+
+ typedef PropertyAttributes IntegrityLevel;
+
+ // ES6 7.3.14 (when passed DONT_THROW)
+ // 'level' must be SEALED or FROZEN.
+ MUST_USE_RESULT static Maybe<bool> SetIntegrityLevel(
+ Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
+
+ // ES6 7.3.15
+ // 'level' must be SEALED or FROZEN.
+ MUST_USE_RESULT static Maybe<bool> TestIntegrityLevel(
+ Handle<JSReceiver> object, IntegrityLevel lvl);
+
+ // ES6 [[PreventExtensions]] (when passed DONT_THROW)
MUST_USE_RESULT static Maybe<bool> PreventExtensions(
Handle<JSReceiver> object, ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSReceiver> object);
+
// Tests for the fast common case for property enumeration.
bool IsSimpleEnum();
// Returns the class name ([[Class]] property in the specification).
String* class_name();
+ // Returns the builtin string tag used in Object.prototype.toString.
+ MUST_USE_RESULT static MaybeHandle<String> BuiltinStringTag(
+ Handle<JSReceiver> object);
+
// Returns the constructor name (the name (possibly, inferred name) of the
// function that was used to instantiate the object).
- String* constructor_name();
+ static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
+
+ Context* GetCreationContext();
MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name);
@@ -1963,12 +1960,22 @@ class JSReceiver: public HeapObject {
enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
+ // ES6 [[OwnPropertyKeys]] (modulo return type)
+ MUST_USE_RESULT static MaybeHandle<FixedArray> OwnPropertyKeys(
+ Handle<JSReceiver> object) {
+ return GetKeys(object, JSReceiver::OWN_ONLY, ALL_PROPERTIES,
+ CONVERT_TO_STRING);
+ }
+
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
- Handle<JSReceiver> object, KeyCollectionType type,
- KeyFilter filter = SKIP_SYMBOLS,
- GetKeysConversion getConversion = KEEP_NUMBERS);
+ Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
+ GetKeysConversion keys_conversion = KEEP_NUMBERS);
+
+ // Layout description.
+ static const int kPropertiesOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
@@ -1981,17 +1988,15 @@ class JSReceiver: public HeapObject {
// caching.
class JSObject: public JSReceiver {
public:
- // [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case and a Dictionary in the
- // slow case.
- DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
- inline void initialize_properties();
- inline bool HasFastProperties();
- // Gets slow properties for non-global objects.
- inline NameDictionary* property_dictionary();
+ static MUST_USE_RESULT MaybeHandle<JSObject> New(
+ Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site = Handle<AllocationSite>::null());
+
// Gets global object properties.
inline GlobalDictionary* global_dictionary();
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSObject> object);
+
// [elements]: The elements (properties with names that are integers).
//
// Elements can be in two general modes: fast and slow. Each mode
@@ -2145,6 +2150,9 @@ class JSObject: public JSReceiver {
PrototypeOptimizationMode mode);
static void ReoptimizeIfPrototype(Handle<JSObject> object);
static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
+ static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
+ Handle<Map> new_map,
+ Isolate* isolate);
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void InvalidatePrototypeChains(Map* map);
@@ -2156,7 +2164,7 @@ class JSObject: public JSReceiver {
// Retrieve interceptors.
InterceptorInfo* GetNamedInterceptor();
- InterceptorInfo* GetIndexedInterceptor();
+ inline InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
MUST_USE_RESULT static Maybe<PropertyAttributes>
@@ -2263,15 +2271,6 @@ class JSObject: public JSReceiver {
inline bool HasNamedInterceptor();
inline bool HasIndexedInterceptor();
- // Computes the enumerable keys from interceptors. Used for debug mirrors and
- // by JSReceiver::GetKeys.
- MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForNamedInterceptor(
- Handle<JSObject> object,
- Handle<JSReceiver> receiver);
- MUST_USE_RESULT static MaybeHandle<JSObject> GetKeysForIndexedInterceptor(
- Handle<JSObject> object,
- Handle<JSReceiver> receiver);
-
// Support functions for v8 api (needed for correct interceptor behavior).
MUST_USE_RESULT static Maybe<bool> HasRealNamedProperty(
Handle<JSObject> object, Handle<Name> name);
@@ -2292,33 +2291,21 @@ class JSObject: public JSReceiver {
inline void SetInternalField(int index, Object* value);
inline void SetInternalField(int index, Smi* value);
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- int NumberOfOwnProperties(PropertyAttributes filter = NONE);
- // Fill in details for properties into storage starting at the specified
- // index. Returns the number of properties added.
- int GetOwnPropertyNames(FixedArray* storage, int index,
- PropertyAttributes filter = NONE);
- int CollectOwnPropertyNames(KeyAccumulator* keys,
- PropertyAttributes filter = NONE);
+ void CollectOwnPropertyNames(KeyAccumulator* keys,
+ PropertyFilter filter = ALL_PROPERTIES);
// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
- int NumberOfOwnElements(PropertyAttributes filter);
- // Returns the number of enumerable elements (ignoring interceptors).
- int NumberOfEnumElements();
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int NumberOfOwnElements(PropertyFilter filter);
// Returns the number of elements on this object filtering out elements
// with the specified attributes (ignoring interceptors).
- int GetOwnElementKeys(FixedArray* storage, PropertyAttributes filter);
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int GetOwnElementKeys(FixedArray* storage, PropertyFilter filter);
+
static void CollectOwnElementKeys(Handle<JSObject> object,
KeyAccumulator* keys,
- PropertyAttributes filter);
- // Count and fill in the enumerable elements into storage.
- // (storage->length() == NumberOfEnumElements()).
- // If storage is NULL, will count the elements without adding
- // them to any storage.
- // Returns the number of enumerable elements.
- int GetEnumElementKeys(FixedArray* storage);
+ PropertyFilter filter);
static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
bool cache_result);
@@ -2384,14 +2371,13 @@ class JSObject: public JSReceiver {
bool from_javascript,
ShouldThrow should_throw);
- // Initializes the body after properties slot, properties slot is
- // initialized by set_properties. Fill the pre-allocated fields with
+ // Initializes the body starting at |start_offset|. It is responsibility of
+ // the caller to initialize object header. Fill the pre-allocated fields with
// pre_allocated_value and the rest with filler_value.
// Note: this call does not update write barrier, the caller is responsible
// to ensure that |filler_value| can be collected without WB here.
- inline void InitializeBody(Map* map,
- Object* pre_allocated_value,
- Object* filler_value);
+ inline void InitializeBody(Map* map, int start_offset,
+ Object* pre_allocated_value, Object* filler_value);
// Check whether this object references another object
bool ReferencesObject(Object* obj);
@@ -2401,12 +2387,6 @@ class JSObject: public JSReceiver {
static bool IsExtensible(Handle<JSObject> object);
- // ES5 Object.seal
- MUST_USE_RESULT static MaybeHandle<Object> Seal(Handle<JSObject> object);
-
- // ES5 Object.freeze
- MUST_USE_RESULT static MaybeHandle<Object> Freeze(Handle<JSObject> object);
-
// Called the first time an object is observed with ES7 Object.observe.
static void SetObserved(Handle<JSObject> object);
@@ -2503,15 +2483,12 @@ class JSObject: public JSReceiver {
static const int kFieldsAdded = 3;
// Layout description.
- static const int kPropertiesOffset = HeapObject::kHeaderSize;
- static const int kElementsOffset = kPropertiesOffset + kPointerSize;
+ static const int kElementsOffset = JSReceiver::kHeaderSize;
static const int kHeaderSize = kElementsOffset + kPointerSize;
STATIC_ASSERT(kHeaderSize == Internals::kJSObjectHeaderSize);
- typedef FlexibleBodyDescriptor<kPropertiesOffset> BodyDescriptor;
-
- Context* GetCreationContext();
+ typedef FlexibleBodyDescriptor<JSReceiver::kPropertiesOffset> BodyDescriptor;
// Enqueue change record for Object.observe. May cause GC.
MUST_USE_RESULT static MaybeHandle<Object> EnqueueChangeRecord(
@@ -2521,10 +2498,6 @@ class JSObject: public JSReceiver {
// Gets the number of currently used elements.
int GetFastElementsUsage();
- // Deletes an existing named property in a normalized object.
- static void DeleteNormalizedProperty(Handle<JSObject> object,
- Handle<Name> name, int entry);
-
static bool AllCanRead(LookupIterator* it);
static bool AllCanWrite(LookupIterator* it);
@@ -2550,7 +2523,7 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
- MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
+ MUST_USE_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
LookupIterator* it);
bool ReferencesObjectFromElements(FixedArray* elements,
@@ -2688,10 +2661,7 @@ class FixedArray: public FixedArrayBase {
// object, the prefix of this array is sorted.
void SortPairs(FixedArray* numbers, uint32_t len);
- class BodyDescriptor : public FlexibleBodyDescriptorBase<kHeaderSize> {
- public:
- static inline int SizeOf(Map* map, HeapObject* object);
- };
+ typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
protected:
// Set operation on FixedArray without using write barriers. Can
@@ -2700,13 +2670,6 @@ class FixedArray: public FixedArrayBase {
int index,
Object* value);
- // Set operation on FixedArray without incremental write barrier. Can
- // only be used if the object is guaranteed to be white (whiteness witness
- // is present).
- static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
- int index,
- Object* value);
-
private:
STATIC_ASSERT(kHeaderSize == Internals::kFixedArrayHeaderSize);
@@ -2845,6 +2808,7 @@ class ArrayList : public FixedArray {
inline Object** Slot(int index);
inline void Set(int index, Object* obj);
inline void Clear(int index, Object* undefined);
+ bool IsFull();
DECLARE_CAST(ArrayList)
private:
@@ -2955,6 +2919,8 @@ class DescriptorArray: public FixedArray {
// necessary.
INLINE(int SearchWithCache(Name* name, Map* map));
+ bool IsEqualUpTo(DescriptorArray* desc, int nof_descriptors);
+
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
static Handle<DescriptorArray> Allocate(Isolate* isolate,
@@ -3015,23 +2981,6 @@ class DescriptorArray: public FixedArray {
}
private:
- // WhitenessWitness is used to prove that a descriptor array is white
- // (unmarked), so incremental write barriers can be skipped because the
- // marking invariant cannot be broken and slots pointing into evacuation
- // candidates will be discovered when the object is scanned. A witness is
- // always stack-allocated right after creating an array. By allocating a
- // witness, incremental marking is globally disabled. The witness is then
- // passed along wherever needed to statically prove that the array is known to
- // be white.
- class WhitenessWitness {
- public:
- inline explicit WhitenessWitness(DescriptorArray* array);
- inline ~WhitenessWitness();
-
- private:
- IncrementalMarking* marking_;
- };
-
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
public:
@@ -3067,11 +3016,9 @@ class DescriptorArray: public FixedArray {
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
- void CopyFrom(int index, DescriptorArray* src, const WhitenessWitness&);
+ void CopyFrom(int index, DescriptorArray* src);
- inline void Set(int descriptor_number,
- Descriptor* desc,
- const WhitenessWitness&);
+ inline void SetDescriptor(int descriptor_number, Descriptor* desc);
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
@@ -3274,6 +3221,9 @@ class HashTable : public HashTableBase {
Key key,
PretenureFlag pretenure = NOT_TENURED);
+ // Returns true if this table has sufficient capacity for adding n elements.
+ bool HasSufficientCapacity(int n);
+
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
@@ -3429,12 +3379,13 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
- int NumberOfElementsFilterAttributes(PropertyAttributes filter);
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int NumberOfElementsFilterAttributes(PropertyFilter filter);
// Returns the number of enumerable elements in the dictionary.
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
int NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
+ return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
}
// Returns true if the dictionary contains any elements that are non-writable,
@@ -3445,10 +3396,13 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Fill in details for properties into storage.
// Returns the number of properties added.
- int CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
+ // TODO(jkummerow): Deprecated, only used by Object.observe.
+ int CopyKeysTo(FixedArray* storage, int index, PropertyFilter filter,
SortMode sort_mode);
- // Collect the unsorted keys into the given KeyAccumulator.
- int CollectKeysTo(KeyAccumulator* keys, PropertyAttributes filter);
+ // Collect the keys into the given KeyAccumulator, in ascending chronological
+ // order of property creation.
+ static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key> > dictionary,
+ KeyAccumulator* keys, PropertyFilter filter);
// Copies enumerable keys to preallocated fixed array.
void CopyEnumKeysTo(FixedArray* storage);
@@ -3469,6 +3423,9 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
int at_least_space_for,
PretenureFlag pretenure = NOT_TENURED);
+ // Ensures that a new dictionary is created when the capacity is checked.
+ void SetRequiresCopyOnCapacityChange();
+
// Ensure enough space for n additional elements.
static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
@@ -4078,6 +4035,9 @@ class ScopeInfo : public FixedArray {
// or context-allocated?
bool HasAllocatedReceiver();
+ // Does this scope declare a "new.target" binding?
+ bool HasNewTarget();
+
// Is this scope the scope of a named function expression?
bool HasFunctionName();
@@ -4286,9 +4246,10 @@ class ScopeInfo : public FixedArray {
class ReceiverVariableField
: public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
2> {};
+ class HasNewTargetField
+ : public BitField<bool, ReceiverVariableField::kNext, 1> {};
class FunctionVariableField
- : public BitField<VariableAllocationInfo, ReceiverVariableField::kNext,
- 2> {};
+ : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
class FunctionVariableMode
: public BitField<VariableMode, FunctionVariableField::kNext, 3> {};
class AsmModuleField : public BitField<bool, FunctionVariableMode::kNext, 1> {
@@ -4423,7 +4384,6 @@ class BytecodeArray : public FixedArrayBase {
// Dispatched behavior.
inline int BytecodeArraySize();
- inline void BytecodeArrayIterateBody(ObjectVisitor* v);
DECLARE_PRINTER(BytecodeArray)
DECLARE_VERIFIER(BytecodeArray)
@@ -4443,6 +4403,8 @@ class BytecodeArray : public FixedArrayBase {
// Maximal length of a single BytecodeArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArray);
};
@@ -4508,11 +4470,6 @@ class FixedTypedArrayBase: public FixedArrayBase {
DECL_ACCESSORS(external_pointer, void)
// Dispatched behavior.
- inline void FixedTypedArrayBaseIterateBody(ObjectVisitor* v);
-
- template <typename StaticVisitor>
- inline void FixedTypedArrayBaseIterateBody();
-
DECLARE_CAST(FixedTypedArrayBase)
static const int kBasePointerOffset = FixedArrayBase::kHeaderSize;
@@ -4522,6 +4479,8 @@ class FixedTypedArrayBase: public FixedArrayBase {
static const int kDataOffset = kHeaderSize;
+ class BodyDescriptor;
+
inline int size();
static inline int TypedArraySize(InstanceType type, int length);
@@ -4734,48 +4693,6 @@ class LiteralsArray : public FixedArray {
};
-// A bindings array contains the bindings for a bound function. It also holds
-// the type feedback vector.
-class BindingsArray : public FixedArray {
- public:
- inline TypeFeedbackVector* feedback_vector() const;
- inline void set_feedback_vector(TypeFeedbackVector* vector);
-
- inline JSReceiver* bound_function() const;
- inline void set_bound_function(JSReceiver* function);
- inline Object* bound_this() const;
- inline void set_bound_this(Object* bound_this);
-
- inline Object* binding(int binding_index) const;
- inline void set_binding(int binding_index, Object* binding);
- inline int bindings_count() const;
-
- static Handle<BindingsArray> New(Isolate* isolate,
- Handle<TypeFeedbackVector> vector,
- Handle<JSReceiver> bound_function,
- Handle<Object> bound_this,
- int number_of_bindings);
-
- static Handle<JSArray> CreateBoundArguments(Handle<BindingsArray> bindings);
- static Handle<JSArray> CreateRuntimeBindings(Handle<BindingsArray> bindings);
-
- DECLARE_CAST(BindingsArray)
-
- private:
- static const int kVectorIndex = 0;
- static const int kBoundFunctionIndex = 1;
- static const int kBoundThisIndex = 2;
- static const int kFirstBindingIndex = 3;
-
- inline Object* get(int index) const;
- inline void set(int index, Object* value);
- inline void set(int index, Smi* value);
- inline void set(int index, Object* value, WriteBarrierMode mode);
-
- inline int length() const;
-};
-
-
// HandlerTable is a fixed array containing entries for exception handlers in
// the code object it is associated with. The tables comes in two flavors:
// 1) Based on ranges: Used for unoptimized code. Contains one entry per
@@ -5062,12 +4979,6 @@ class Code: public HeapObject {
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline uint16_t to_boolean_state();
- // [has_function_cache]: For kind STUB tells whether there is a function
- // cache is passed to the stub.
- inline bool has_function_cache();
- inline void set_has_function_cache(bool flag);
-
-
// [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
// the code is going to be deoptimized because of dead embedded maps.
inline bool marked_for_deoptimization();
@@ -5192,10 +5103,6 @@ class Code: public HeapObject {
// Dispatched behavior.
inline int CodeSize();
- inline void CodeIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void CodeIterateBody(Heap* heap);
DECLARE_PRINTER(Code)
DECLARE_VERIFIER(Code)
@@ -5294,6 +5201,8 @@ class Code: public HeapObject {
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
+ class BodyDescriptor;
+
// Byte offsets within kKindSpecificFlags1Offset.
static const int kFullCodeFlags = kKindSpecificFlags1Offset;
class FullCodeFlagsHasDeoptimizationSupportField:
@@ -5315,9 +5224,8 @@ class Code: public HeapObject {
// KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
static const int kStackSlotsBitCount = 24;
- static const int kHasFunctionCacheBit =
+ static const int kMarkedForDeoptimizationBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
static const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1;
static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
@@ -5326,10 +5234,8 @@ class Code: public HeapObject {
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
- class HasFunctionCacheField : public BitField<bool, kHasFunctionCacheBit, 1> {
- }; // NOLINT
class MarkedForDeoptimizationField
- : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
+ : public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT
class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
}; // NOLINT
class CanHaveWeakObjectsField
@@ -5389,24 +5295,24 @@ class Code: public HeapObject {
};
-// This class describes the layout of dependent codes array of a map. The
-// array is partitioned into several groups of dependent codes. Each group
-// contains codes with the same dependency on the map. The array has the
-// following layout for n dependency groups:
+// Dependent code is a singly linked list of fixed arrays. Each array contains
+// code objects in weak cells for one dependent group. The suffix of the array
+// can be filled with the undefined value if the number of codes is less than
+// the length of the array.
//
-// +----+----+-----+----+---------+----------+-----+---------+-----------+
-// | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined |
-// +----+----+-----+----+---------+----------+-----+---------+-----------+
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// | next | count & group 1 | code 1 | code 2 | ... | code n | undefined | ... |
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// |
+// V
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// | next | count & group 2 | code 1 | code 2 | ... | code m | undefined | ... |
+// +------+-----------------+--------+--------+-----+--------+-----------+-----+
+// |
+// V
+// empty_fixed_array()
//
-// The first n elements are Smis, each of them specifies the number of codes
-// in the corresponding group. The subsequent elements contain grouped code
-// objects in weak cells. The suffix of the array can be filled with the
-// undefined value if the number of codes is less than the length of the
-// array. The order of the code objects within a group is not preserved.
-//
-// All code indexes used in the class are counted starting from the first
-// code object of the first group. In other words, code index 0 corresponds
-// to array index n = kCodesStartIndex.
+// The list of fixed arrays is ordered by dependency groups.
class DependentCode: public FixedArray {
public:
@@ -5441,19 +5347,8 @@ class DependentCode: public FixedArray {
static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
- // Array for holding the index of the first code object of each group.
- // The last element stores the total number of code objects.
- class GroupStartIndexes {
- public:
- explicit GroupStartIndexes(DependentCode* entries);
- void Recompute(DependentCode* entries);
- int at(int i) { return start_indexes_[i]; }
- int number_of_entries() { return start_indexes_[kGroupCount]; }
- private:
- int start_indexes_[kGroupCount + 1];
- };
-
bool Contains(DependencyGroup group, WeakCell* code_cell);
+ bool IsEmpty(DependencyGroup group);
static Handle<DependentCode> InsertCompilationDependencies(
Handle<DependentCode> entries, DependencyGroup group,
@@ -5477,8 +5372,12 @@ class DependentCode: public FixedArray {
// The following low-level accessors should only be used by this class
// and the mark compact collector.
- inline int number_of_entries(DependencyGroup group);
- inline void set_number_of_entries(DependencyGroup group, int value);
+ inline DependentCode* next_link();
+ inline void set_next_link(DependentCode* next);
+ inline int count();
+ inline void set_count(int value);
+ inline DependencyGroup group();
+ inline void set_group(DependencyGroup group);
inline Object* object_at(int i);
inline void set_object_at(int i, Object* object);
inline void clear_at(int i);
@@ -5492,10 +5391,9 @@ class DependentCode: public FixedArray {
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Object> object);
+ static Handle<DependentCode> New(DependencyGroup group, Handle<Object> object,
+ Handle<DependentCode> next);
static Handle<DependentCode> EnsureSpace(Handle<DependentCode> entries);
- // Make a room at the end of the given group by moving out the first
- // code objects of the subsequent groups.
- inline void ExtendGroup(DependencyGroup group);
// Compact by removing cleared weak cells and return true if there was
// any cleared weak cell.
bool Compact();
@@ -5503,7 +5401,14 @@ class DependentCode: public FixedArray {
if (number_of_entries < 5) return number_of_entries + 1;
return number_of_entries * 5 / 4;
}
- static const int kCodesStartIndex = kGroupCount;
+ inline int flags();
+ inline void set_flags(int flags);
+ class GroupField : public BitField<int, 0, 3> {};
+ class CountField : public BitField<int, 3, 27> {};
+ STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
+ static const int kNextLinkIndex = 0;
+ static const int kFlagsIndex = 1;
+ static const int kCodesStartIndex = 2;
};
@@ -5575,18 +5480,62 @@ class Map: public HeapObject {
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
class IsStrong : public BitField<bool, 26, 1> {};
- // Bit 27 is free.
+ class NewTargetIsBase : public BitField<bool, 27, 1> {};
+ // Bit 28 is free.
// Keep this bit field at the very end for better code in
// Builtins::kJSConstructStubGeneric stub.
- // This counter is used for in-object slack tracking and for map aging.
+ // This counter is used for in-object slack tracking.
// The in-object slack tracking is considered enabled when the counter is
- // in the range [kSlackTrackingCounterStart, kSlackTrackingCounterEnd].
- class Counter : public BitField<int, 28, 4> {};
- static const int kSlackTrackingCounterStart = 14;
- static const int kSlackTrackingCounterEnd = 8;
- static const int kRetainingCounterStart = kSlackTrackingCounterEnd - 1;
- static const int kRetainingCounterEnd = 0;
+ // non zero.
+ class ConstructionCounter : public BitField<int, 29, 3> {};
+ static const int kSlackTrackingCounterStart = 7;
+ static const int kSlackTrackingCounterEnd = 1;
+ static const int kNoSlackTracking = 0;
+ STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounter::kMax);
+
+
+ // Inobject slack tracking is the way to reclaim unused inobject space.
+ //
+ // The instance size is initially determined by adding some slack to
+ // expected_nof_properties (to allow for a few extra properties added
+ // after the constructor). There is no guarantee that the extra space
+ // will not be wasted.
+ //
+ // Here is the algorithm to reclaim the unused inobject space:
+ // - Detect the first constructor call for this JSFunction.
+ // When it happens enter the "in progress" state: initialize construction
+ // counter in the initial_map.
+ // - While the tracking is in progress initialize unused properties of a new
+ // object with one_pointer_filler_map instead of undefined_value (the "used"
+ // part is initialized with undefined_value as usual). This way they can
+ // be resized quickly and safely.
+ // - Once enough objects have been created compute the 'slack'
+ // (traverse the map transition tree starting from the
+ // initial_map and find the lowest value of unused_property_fields).
+ // - Traverse the transition tree again and decrease the instance size
+ // of every map. Existing objects will resize automatically (they are
+ // filled with one_pointer_filler_map). All further allocations will
+ // use the adjusted instance size.
+ // - SharedFunctionInfo's expected_nof_properties left unmodified since
+ // allocations made using different closures could actually create different
+ // kind of objects (see prototype inheritance pattern).
+ //
+ // Important: inobject slack tracking is not attempted during the snapshot
+ // creation.
+
+ static const int kGenerousAllocationCount =
+ kSlackTrackingCounterStart - kSlackTrackingCounterEnd + 1;
+
+ // Starts the tracking by initializing object constructions countdown counter.
+ void StartInobjectSlackTracking();
+
+ // True if the object constructions countdown counter is a range
+ // [kSlackTrackingCounterEnd, kSlackTrackingCounterStart].
+ inline bool IsInobjectSlackTrackingInProgress();
+
+ // Does the tracking step.
+ inline void InobjectSlackTrackingStep();
// Completes inobject slack tracking for the transition tree starting at this
// initial map.
@@ -5602,7 +5551,7 @@ class Map: public HeapObject {
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
- inline void set_is_constructor(bool value);
+ inline void set_is_constructor();
inline bool is_constructor() const;
// Tells whether the instance with this map should be ignored by the
@@ -5638,6 +5587,8 @@ class Map: public HeapObject {
inline void set_is_strong();
inline bool is_strong();
+ inline void set_new_target_is_base(bool value);
+ inline bool new_target_is_base();
inline void set_is_extensible(bool value);
inline bool is_extensible();
inline void set_is_prototype_map(bool value);
@@ -5728,10 +5679,6 @@ class Map: public HeapObject {
static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
const char* reason);
- // Returns the constructor name (the name (possibly, inferred name) of the
- // function that was used to instantiate the object).
- String* constructor_name();
-
// Tells whether the map is used for JSObjects in dictionary mode (ie
// normalized objects, ie objects for which HasFastProperties returns false).
// A map can never be used for both dictionary mode and fast mode JSObjects.
@@ -5814,8 +5761,8 @@ class Map: public HeapObject {
inline bool is_stable();
inline void set_migration_target(bool value);
inline bool is_migration_target();
- inline void set_counter(int value);
- inline int counter();
+ inline void set_construction_counter(int value);
+ inline int construction_counter();
inline void deprecate();
inline bool is_deprecated();
inline bool CanBeDeprecated();
@@ -5866,6 +5813,11 @@ class Map: public HeapObject {
ElementsKind kind,
TransitionFlag flag);
+ static Handle<Map> AsLanguageMode(Handle<Map> initial_map,
+ LanguageMode language_mode,
+ FunctionKind kind);
+
+
static Handle<Map> CopyForObserved(Handle<Map> map);
static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
@@ -5911,7 +5863,7 @@ class Map: public HeapObject {
// Returns the number of properties described in instance_descriptors
// filtering out properties with the specified attributes.
int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
- PropertyAttributes filter = NONE);
+ PropertyFilter filter = ALL_PROPERTIES);
DECLARE_CAST(Map)
@@ -5963,6 +5915,7 @@ class Map: public HeapObject {
inline bool IsBooleanMap();
inline bool IsPrimitiveMap();
+ inline bool IsJSReceiverMap();
inline bool IsJSObjectMap();
inline bool IsJSArrayMap();
inline bool IsJSFunctionMap();
@@ -5971,6 +5924,7 @@ class Map: public HeapObject {
inline bool IsJSGlobalProxyMap();
inline bool IsJSGlobalObjectMap();
inline bool IsJSTypedArrayMap();
+ inline bool IsJSDataViewMap();
inline bool CanOmitMapChecks();
@@ -6109,9 +6063,9 @@ class Map: public HeapObject {
static void TraceAllTransitions(Map* map);
#endif
- static inline Handle<Map> CopyInstallDescriptorsForTesting(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor);
+ static inline Handle<Map> AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
private:
static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
@@ -6122,9 +6076,13 @@ class Map: public HeapObject {
static Handle<Map> ShareDescriptor(Handle<Map> map,
Handle<DescriptorArray> descriptors,
Descriptor* descriptor);
- static Handle<Map> CopyInstallDescriptors(
- Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor);
+ static Handle<Map> AddMissingTransitions(
+ Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
+ static void InstallDescriptors(
+ Handle<Map> parent_map, Handle<Map> child_map, int new_descriptor,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
static Handle<Map> CopyAddDescriptor(Handle<Map> map,
Descriptor* descriptor,
TransitionFlag flag);
@@ -6152,10 +6110,10 @@ class Map: public HeapObject {
inline void NotifyLeafMapLayoutChange();
void DeprecateTransitionTree();
- bool DeprecateTarget(PropertyKind kind, Name* key,
- PropertyAttributes attributes,
- DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor);
+
+ void ReplaceDescriptors(DescriptorArray* new_descriptors,
+ LayoutDescriptor* new_layout_descriptor);
+
Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
@@ -6235,8 +6193,6 @@ class PrototypeInfo : public Struct {
// given receiver embed the currently valid cell for that receiver's prototype
// during their compilation and check it on execution.
DECL_ACCESSORS(validity_cell, Object)
- // [constructor_name]: User-friendly name of the original constructor.
- DECL_ACCESSORS(constructor_name, Object)
DECLARE_CAST(PrototypeInfo)
@@ -6528,8 +6484,8 @@ class SharedFunctionInfo: public HeapObject {
inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
- // and a shared literals array or Smi(0) if none.
- DECL_ACCESSORS(optimized_code_map, Object)
+ // and a shared literals array.
+ DECL_ACCESSORS(optimized_code_map, FixedArray)
// Returns entry from optimized code map for specified context and OSR entry.
// Note that {code == nullptr, literals == nullptr} indicates no matching
@@ -6541,6 +6497,11 @@ class SharedFunctionInfo: public HeapObject {
// Clear optimized code map.
void ClearOptimizedCodeMap();
+ // We have a special root FixedArray with the right shape and values
+ // to represent the cleared optimized code map. This predicate checks
+ // if that root is installed.
+ inline bool OptimizedCodeMapIsCleared() const;
+
// Removes a specific optimized code object from the optimized code map.
// In case of non-OSR the code reference is cleared from the cache entry but
// the entry itself is left in the map in order to proceed sharing literals.
@@ -6554,13 +6515,17 @@ class SharedFunctionInfo: public HeapObject {
Handle<Code> code);
// Add a new entry to the optimized code map for context-dependent code.
- // |code| is either a code object or an undefined value. In the latter case
- // the entry just maps |native_context, osr_ast_id| pair to |literals| array.
- static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<HeapObject> code,
- Handle<LiteralsArray> literals,
- BailoutId osr_ast_id);
+ inline static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<Code> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id);
+
+ // We may already have cached the code, but want to store literals in the
+ // cache.
+ inline static void AddLiteralsToOptimizedCodeMap(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<LiteralsArray> literals);
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
@@ -6761,10 +6726,6 @@ class SharedFunctionInfo: public HeapObject {
// see a binding for it.
DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
- // Indicates whether the function is a bound function created using
- // the bind function.
- DECL_BOOLEAN_ACCESSORS(bound)
-
// Indicates that the function is anonymous (the name field can be set
// through the API, which does not change this flag).
DECL_BOOLEAN_ACCESSORS(is_anonymous)
@@ -7054,7 +7015,7 @@ class SharedFunctionInfo: public HeapObject {
// byte 1
kHasDuplicateParameters,
kForceInline,
- kBoundFunction,
+ kIsAsmFunction,
kIsAnonymous,
kNameShouldPrintAsAnonymous,
kIsFunction,
@@ -7071,7 +7032,6 @@ class SharedFunctionInfo: public HeapObject {
kIsBaseConstructor,
kIsInObjectLiteral,
// byte 3
- kIsAsmFunction,
kDeserialized,
kNeverCompiled,
kCompilerHintsCount, // Pseudo entry
@@ -7125,7 +7085,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrongModeBit =
kStrongModeFunction + kCompilerHintsSmiTagSize;
static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
- static const int kBoundBit = kBoundFunction + kCompilerHintsSmiTagSize;
static const int kClassConstructorBits =
FunctionKind::kClassConstructor
@@ -7137,7 +7096,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
static const int kStrongModeBitWithinByte = kStrongModeBit % kBitsPerByte;
static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
- static const int kBoundBitWithinByte = kBoundBit % kBitsPerByte;
static const int kClassConstructorBitsWithinByte =
FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
@@ -7157,7 +7115,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
static const int kStrongModeByteOffset = BYTE_OFFSET(kStrongModeFunction);
static const int kNativeByteOffset = BYTE_OFFSET(kNative);
- static const int kBoundByteOffset = BYTE_OFFSET(kBoundFunction);
static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
#undef BYTE_OFFSET
@@ -7168,6 +7125,13 @@ class SharedFunctionInfo: public HeapObject {
int SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id);
+ // If code is undefined, then existing code won't be overwritten.
+ static void AddToOptimizedCodeMapInternal(Handle<SharedFunctionInfo> shared,
+ Handle<Context> native_context,
+ Handle<HeapObject> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -7260,6 +7224,64 @@ class JSModule: public JSObject {
};
+// JSBoundFunction describes a bound function exotic object.
+class JSBoundFunction : public JSObject {
+ public:
+ // [length]: The bound function "length" property.
+ DECL_ACCESSORS(length, Object)
+
+ // [name]: The bound function "name" property.
+ DECL_ACCESSORS(name, Object)
+
+ // [bound_target_function]: The wrapped function object.
+ DECL_ACCESSORS(bound_target_function, JSReceiver)
+
+ // [bound_this]: The value that is always passed as the this value when
+ // calling the wrapped function.
+ DECL_ACCESSORS(bound_this, Object)
+
+ // [bound_arguments]: A list of values whose elements are used as the first
+ // arguments to any call to the wrapped function.
+ DECL_ACCESSORS(bound_arguments, FixedArray)
+
+ // [creation_context]: The native context in which the function was bound.
+ // TODO(bmeurer, verwaest): Can we (mis)use (unused) constructor field in
+ // the Map instead of putting this into the object? Only required for
+ // JSReceiver::GetCreationContext() anyway.
+ DECL_ACCESSORS(creation_context, Context)
+
+ static MaybeHandle<Context> GetFunctionRealm(
+ Handle<JSBoundFunction> function);
+
+ DECLARE_CAST(JSBoundFunction)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSBoundFunction)
+ DECLARE_VERIFIER(JSBoundFunction)
+
+ // The bound function's string representation implemented according
+ // to ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSBoundFunction> function);
+
+ // Layout description.
+ static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
+ static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
+ static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
+ static const int kCreationContextOffset =
+ kBoundArgumentsOffset + kPointerSize;
+ static const int kLengthOffset = kCreationContextOffset + kPointerSize;
+ static const int kNameOffset = kLengthOffset + kPointerSize;
+ static const int kSize = kNameOffset + kPointerSize;
+
+ // Indices of in-object properties.
+ static const int kLengthIndex = 0;
+ static const int kNameIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
+};
+
+
// JSFunction describes JavaScript functions.
class JSFunction: public JSObject {
public:
@@ -7274,6 +7296,9 @@ class JSFunction: public JSObject {
inline Context* context();
inline void set_context(Object* context);
inline JSObject* global_proxy();
+ inline Context* native_context();
+
+ static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
// [code]: The generated code object for this function. Executed
// when the function is invoked, e.g. foo() or new foo(). See
@@ -7303,49 +7328,10 @@ class JSFunction: public JSObject {
// Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInOptimizationQueue();
- // Inobject slack tracking is the way to reclaim unused inobject space.
- //
- // The instance size is initially determined by adding some slack to
- // expected_nof_properties (to allow for a few extra properties added
- // after the constructor). There is no guarantee that the extra space
- // will not be wasted.
- //
- // Here is the algorithm to reclaim the unused inobject space:
- // - Detect the first constructor call for this JSFunction.
- // When it happens enter the "in progress" state: initialize construction
- // counter in the initial_map.
- // - While the tracking is in progress create objects filled with
- // one_pointer_filler_map instead of undefined_value. This way they can be
- // resized quickly and safely.
- // - Once enough objects have been created compute the 'slack'
- // (traverse the map transition tree starting from the
- // initial_map and find the lowest value of unused_property_fields).
- // - Traverse the transition tree again and decrease the instance size
- // of every map. Existing objects will resize automatically (they are
- // filled with one_pointer_filler_map). All further allocations will
- // use the adjusted instance size.
- // - SharedFunctionInfo's expected_nof_properties left unmodified since
- // allocations made using different closures could actually create different
- // kind of objects (see prototype inheritance pattern).
- //
- // Important: inobject slack tracking is not attempted during the snapshot
- // creation.
-
- // True if the initial_map is set and the object constructions countdown
- // counter is not zero.
- static const int kGenerousAllocationCount =
- Map::kSlackTrackingCounterStart - Map::kSlackTrackingCounterEnd + 1;
- inline bool IsInobjectSlackTrackingInProgress();
+ // Completes inobject slack tracking on initial map if it is active.
+ inline void CompleteInobjectSlackTrackingIfActive();
- // Starts the tracking.
- // Initializes object constructions countdown counter in the initial map.
- void StartInobjectSlackTracking();
-
- // Completes the tracking.
- void CompleteInobjectSlackTracking();
-
- // [literals_or_bindings]: Fixed array holding either
- // the materialized literals or the bindings of a bound function.
+ // [literals]: Fixed array holding the materialized literals.
//
// If the function contains object, regexp or array literals, the
// literals array prefix contains the object, regexp, and array
@@ -7354,17 +7340,7 @@ class JSFunction: public JSObject {
// or array functions. Performing a dynamic lookup, we might end up
// using the functions from a new context that we should not have
// access to.
- //
- // On bound functions, the array is a (copy-on-write) fixed-array containing
- // the function that was bound, bound this-value and any bound
- // arguments. Bound functions never contain literals.
- DECL_ACCESSORS(literals_or_bindings, FixedArray)
-
- inline LiteralsArray* literals();
- inline void set_literals(LiteralsArray* literals);
-
- inline BindingsArray* function_bindings();
- inline void set_function_bindings(BindingsArray* bindings);
+ DECL_ACCESSORS(literals, LiteralsArray)
// The initial map for an object created by this constructor.
inline Map* initial_map();
@@ -7372,11 +7348,13 @@ class JSFunction: public JSObject {
Handle<Object> prototype);
inline bool has_initial_map();
static void EnsureHasInitialMap(Handle<JSFunction> function);
- // Ensures that the |original_constructor| has correct initial map and
- // returns it. If the |original_constructor| is not a subclass constructor
- // its initial map is left unmodified.
- static Handle<Map> EnsureDerivedHasInitialMap(
- Handle<JSFunction> original_constructor, Handle<JSFunction> constructor);
+
+ // Creates a map that matches the constructor's initial map, but with
+ // [[prototype]] being new.target.prototype. Because new.target can be a
+ // JSProxy, this can call back into JavaScript.
+ static MUST_USE_RESULT MaybeHandle<Map> GetDerivedMap(
+ Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target);
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
@@ -7451,8 +7429,17 @@ class JSFunction: public JSObject {
// The function's name if it is configured, otherwise shared function info
// debug name.
+ static Handle<String> GetName(Handle<JSFunction> function);
+
+ // The function's displayName if it is set, otherwise name if it is
+ // configured, otherwise shared function info
+ // debug name.
static Handle<String> GetDebugName(Handle<JSFunction> function);
+ // The function's string representation implemented according to
+ // ES6 section 19.2.3.5 Function.prototype.toString ( ).
+ static Handle<String> ToString(Handle<JSFunction> function);
+
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
@@ -7566,6 +7553,10 @@ class DateCache;
// Representation for JS date objects.
class JSDate: public JSObject {
public:
+ static MUST_USE_RESULT MaybeHandle<JSDate> New(Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ double tv);
+
// If one component is NaN, all of them are, indicating a NaN time value.
// [value]: the time value.
DECL_ACCESSORS(value, Object)
@@ -7589,10 +7580,15 @@ class JSDate: public JSObject {
DECLARE_CAST(JSDate)
+ // Returns the time value (UTC) identifying the current time.
+ static double CurrentTimeValue(Isolate* isolate);
+
// Returns the date field with the specified index.
// See FieldIndex for the list of date fields.
static Object* GetField(Object* date, Smi* index);
+ static Handle<Object> SetValue(Handle<JSDate> date, double v);
+
void SetValue(Object* value, bool is_value_nan);
// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ]
@@ -7732,31 +7728,29 @@ class JSRegExp: public JSObject {
// IRREGEXP_NATIVE: Compiled to native code with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
enum Flag {
- NONE = 0,
- GLOBAL = 1,
- IGNORE_CASE = 2,
- MULTILINE = 4,
- STICKY = 8,
- UNICODE_ESCAPES = 16
- };
-
- class Flags {
- public:
- explicit Flags(uint32_t value) : value_(value) { }
- bool is_global() { return (value_ & GLOBAL) != 0; }
- bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
- bool is_multiline() { return (value_ & MULTILINE) != 0; }
- bool is_sticky() { return (value_ & STICKY) != 0; }
- bool is_unicode() { return (value_ & UNICODE_ESCAPES) != 0; }
- uint32_t value() { return value_; }
- private:
- uint32_t value_;
+ kNone = 0,
+ kGlobal = 1 << 0,
+ kIgnoreCase = 1 << 1,
+ kMultiline = 1 << 2,
+ kSticky = 1 << 3,
+ kUnicode = 1 << 4,
};
+ typedef base::Flags<Flag> Flags;
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
DECL_ACCESSORS(source, Object)
+ static MaybeHandle<JSRegExp> New(Handle<String> source, Flags flags);
+ static MaybeHandle<JSRegExp> New(Handle<String> source, Handle<String> flags);
+ static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
+
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source, Flags flags);
+ static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
+ Handle<String> source,
+ Handle<String> flags_string);
+
inline Type TypeTag();
inline int CaptureCount();
inline Flags GetFlags();
@@ -7855,6 +7849,8 @@ class JSRegExp: public JSObject {
static const int kCodeAgeMask = 0xff;
};
+DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
+
class CompilationCacheShape : public BaseShape<HashTableKey*> {
public:
@@ -8165,7 +8161,7 @@ class AllocationSite: public Struct {
// Increments the mementos found counter and returns true when the first
// memento was found for a given allocation site.
- inline bool IncrementMementoFoundCount();
+ inline bool IncrementMementoFoundCount(int increment = 1);
inline void IncrementMementoCreateCount();
@@ -9204,11 +9200,7 @@ class ExternalOneByteString : public ExternalString {
DECLARE_CAST(ExternalOneByteString)
- // Garbage collection support.
- inline void ExternalOneByteStringIterateBody(ObjectVisitor* v);
-
- template <typename StaticVisitor>
- inline void ExternalOneByteStringIterateBody();
+ class BodyDescriptor;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
@@ -9243,11 +9235,7 @@ class ExternalTwoByteString: public ExternalString {
DECLARE_CAST(ExternalTwoByteString)
- // Garbage collection support.
- inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ExternalTwoByteStringIterateBody();
+ class BodyDescriptor;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
@@ -9541,7 +9529,7 @@ class WeakCell : public HeapObject {
DECL_ACCESSORS(next, Object)
- inline void clear_next(Heap* heap);
+ inline void clear_next(Object* the_hole_value);
inline bool next_cleared();
@@ -9565,118 +9553,108 @@ class WeakCell : public HeapObject {
// The JSProxy describes EcmaScript Harmony proxies
class JSProxy: public JSReceiver {
public:
+ MUST_USE_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
+ Handle<Object>,
+ Handle<Object>);
+
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
-
+ // [target]: The target property.
+ DECL_ACCESSORS(target, JSReceiver)
// [hash]: The hash code property (undefined if not initialized yet).
DECL_ACCESSORS(hash, Object)
- DECLARE_CAST(JSProxy)
-
- MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithHandler(
- Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name);
-
- // If the handler defines an accessor property with a setter, invoke it.
- // If it defines an accessor property without a setter, or a data property
- // that is read-only, fail. In all these cases set '*done' to true.
- // Otherwise set it to false, in which case the return value is not
- // meaningful.
- MUST_USE_RESULT
- static Maybe<bool> SetPropertyViaPrototypesWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, ShouldThrow should_throw, bool* done);
+ static MaybeHandle<Context> GetFunctionRealm(Handle<JSProxy> proxy);
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetPropertyAttributesWithHandler(Handle<JSProxy> proxy,
- Handle<Object> receiver,
- Handle<Name> name);
- MUST_USE_RESULT static Maybe<bool> SetPropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, ShouldThrow should_throw);
+ DECLARE_CAST(JSProxy)
- // Turn the proxy into an (empty) JSObject.
- static void Fix(Handle<JSProxy> proxy);
+ INLINE(bool IsRevoked() const);
+ static void Revoke(Handle<JSProxy> proxy);
- // Initializes the body after the handler slot.
- inline void InitializeBody(int object_size, Object* value);
+ // ES6 9.5.1
+ static MaybeHandle<Object> GetPrototype(Handle<JSProxy> receiver);
- // Invoke a trap by name. If the trap does not exist on this's handler,
- // but derived_trap is non-NULL, invoke that instead. May cause GC.
- MUST_USE_RESULT static MaybeHandle<Object> CallTrap(
- Handle<JSProxy> proxy,
- const char* name,
- Handle<Object> derived_trap,
- int argc,
- Handle<Object> args[]);
+ // ES6 9.5.2
+ MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSProxy> proxy,
+ Handle<Object> value,
+ bool from_javascript,
+ ShouldThrow should_throw);
+ // ES6 9.5.3
+ MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
- // Dispatched behavior.
- DECLARE_PRINTER(JSProxy)
- DECLARE_VERIFIER(JSProxy)
+ // ES6 9.5.4 (when passed DONT_THROW)
+ MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ Handle<JSProxy> proxy, ShouldThrow should_throw);
- // Layout description. We add padding so that a proxy has the same
- // size as a virgin JSObject. This is essential for becoming a JSObject
- // upon freeze.
- static const int kHandlerOffset = HeapObject::kHeaderSize;
- static const int kHashOffset = kHandlerOffset + kPointerSize;
- static const int kPaddingOffset = kHashOffset + kPointerSize;
- static const int kSize = JSObject::kHeaderSize;
- static const int kHeaderSize = kPaddingOffset;
- static const int kPaddingSize = kSize - kPaddingOffset;
+ // ES6 9.5.5
+ MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
+ PropertyDescriptor* desc);
- STATIC_ASSERT(kPaddingSize >= 0);
+ // ES6 9.5.6
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSProxy> object, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
- typedef FixedBodyDescriptor<kHandlerOffset,
- kPaddingOffset,
- kSize> BodyDescriptor;
+ // ES6 9.5.7
+ MUST_USE_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name);
- private:
- friend class JSReceiver;
+ // ES6 9.5.8
+ MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
+ Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
+ Handle<Object> receiver, LanguageMode language_mode);
- MUST_USE_RESULT static Maybe<bool> HasPropertyWithHandler(
- Handle<JSProxy> proxy, Handle<Name> name);
+ // ES6 9.5.9
+ MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
+ Handle<Name> name,
+ Handle<Object> value,
+ Handle<Object> receiver,
+ LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler(
+ // ES6 9.5.10 (when passed SLOPPY)
+ MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
- MUST_USE_RESULT Object* GetIdentityHash();
-
- static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
-};
-
-
-class JSFunctionProxy: public JSProxy {
- public:
- // [call_trap]: The call trap.
- DECL_ACCESSORS(call_trap, JSReceiver)
+ // ES6 9.5.11
+ MUST_USE_RESULT static Maybe<bool> Enumerate(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy,
+ KeyAccumulator* accumulator);
- // [construct_trap]: The construct trap.
- DECL_ACCESSORS(construct_trap, Object)
+ // ES6 9.5.12
+ MUST_USE_RESULT static Maybe<bool> OwnPropertyKeys(
+ Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
+ PropertyFilter filter, KeyAccumulator* accumulator);
- DECLARE_CAST(JSFunctionProxy)
+ MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ LookupIterator* it);
// Dispatched behavior.
- DECLARE_PRINTER(JSFunctionProxy)
- DECLARE_VERIFIER(JSFunctionProxy)
+ DECLARE_PRINTER(JSProxy)
+ DECLARE_VERIFIER(JSProxy)
// Layout description.
- static const int kCallTrapOffset = JSProxy::kPaddingOffset;
- static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
- static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
- static const int kSize = JSFunction::kSize;
- static const int kPaddingSize = kSize - kPaddingOffset;
+ static const int kTargetOffset = JSReceiver::kHeaderSize;
+ static const int kHandlerOffset = kTargetOffset + kPointerSize;
+ static const int kHashOffset = kHandlerOffset + kPointerSize;
+ static const int kSize = kHashOffset + kPointerSize;
- STATIC_ASSERT(kPaddingSize >= 0);
+ typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
+ BodyDescriptor;
- typedef FixedBodyDescriptor<kHandlerOffset,
- kConstructTrapOffset + kPointerSize,
- kSize> BodyDescriptor;
+ MUST_USE_RESULT Object* GetIdentityHash();
+
+ static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunctionProxy);
+ static Maybe<bool> AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ Handle<Symbol> private_name,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9886,6 +9864,20 @@ class JSWeakCollection: public JSObject {
static const int kNextOffset = kTableOffset + kPointerSize;
static const int kSize = kNextOffset + kPointerSize;
+ // Visiting policy defines whether the table and next collection fields
+ // should be visited or not.
+ enum BodyVisitingPolicy { kVisitStrong, kVisitWeak };
+
+ // Iterates the function object according to the visiting policy.
+ template <BodyVisitingPolicy>
+ class BodyDescriptorImpl;
+
+ // Visit the whole object.
+ typedef BodyDescriptorImpl<kVisitStrong> BodyDescriptor;
+
+ // Don't visit table and next collection fields.
+ typedef BodyDescriptorImpl<kVisitWeak> BodyDescriptorWeak;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSWeakCollection);
};
@@ -9964,11 +9956,6 @@ class JSArrayBuffer: public JSObject {
DECLARE_VERIFIER(JSArrayBuffer)
static const int kByteLengthOffset = JSObject::kHeaderSize;
-
- // NOTE: GC will visit objects fields:
- // 1. From JSObject::BodyDescriptor::kStartOffset to kByteLengthOffset +
- // kPointerSize
- // 2. From start of the internal fields and up to the end of them
static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
@@ -9981,11 +9968,9 @@ class JSArrayBuffer: public JSObject {
static const int kSizeWithInternalFields =
kSize + v8::ArrayBuffer::kInternalFieldCount * kPointerSize;
- template <typename StaticVisitor>
- static inline void JSArrayBufferIterateBody(Heap* heap, HeapObject* obj);
-
- static inline void JSArrayBufferIterateBody(HeapObject* obj,
- ObjectVisitor* v);
+ // Iterates all fields in the object including internal ones except
+ // kBackingStoreOffset and kBitFieldSlot.
+ class BodyDescriptor;
class IsExternal : public BitField<bool, 1, 1> {};
class IsNeuterable : public BitField<bool, 2, 1> {};
@@ -10091,12 +10076,6 @@ class Foreign: public HeapObject {
DECLARE_CAST(Foreign)
// Dispatched behavior.
- inline void ForeignIterateBody(ObjectVisitor* v);
-
- template<typename StaticVisitor>
- inline void ForeignIterateBody();
-
- // Dispatched behavior.
DECLARE_PRINTER(Foreign)
DECLARE_VERIFIER(Foreign)
@@ -10107,6 +10086,8 @@ class Foreign: public HeapObject {
STATIC_ASSERT(kForeignAddressOffset == Internals::kForeignAddressOffset);
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
};
@@ -10151,16 +10132,18 @@ class JSArray: public JSObject {
static inline void SetContent(Handle<JSArray> array,
Handle<FixedArrayBase> storage);
- static bool DefineOwnProperty(Isolate* isolate, Handle<JSArray> o,
- Handle<Object> name, PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ // ES6 9.4.2.1
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSArray> o, Handle<Object> name,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
static bool AnythingToArrayLength(Isolate* isolate,
Handle<Object> length_object,
uint32_t* output);
- static bool ArraySetLength(Isolate* isolate, Handle<JSArray> a,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ MUST_USE_RESULT static Maybe<bool> ArraySetLength(Isolate* isolate,
+ Handle<JSArray> a,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw);
DECLARE_CAST(JSArray)
@@ -10414,6 +10397,7 @@ class CallHandlerInfo: public Struct {
public:
DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
+ DECL_ACCESSORS(fast_handler, Object)
DECLARE_CAST(CallHandlerInfo)
@@ -10423,7 +10407,8 @@ class CallHandlerInfo: public Struct {
static const int kCallbackOffset = HeapObject::kHeaderSize;
static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+ static const int kFastHandlerOffset = kDataOffset + kPointerSize;
+ static const int kSize = kFastHandlerOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
@@ -10552,21 +10537,6 @@ class ObjectTemplateInfo: public TemplateInfo {
};
-class TypeSwitchInfo: public Struct {
- public:
- DECL_ACCESSORS(types, Object)
-
- DECLARE_CAST(TypeSwitchInfo)
-
- // Dispatched behavior.
- DECLARE_PRINTER(TypeSwitchInfo)
- DECLARE_VERIFIER(TypeSwitchInfo)
-
- static const int kTypesOffset = Struct::kHeaderSize;
- static const int kSize = kTypesOffset + kPointerSize;
-};
-
-
// The DebugInfo class holds additional information for a function being
// debugged.
class DebugInfo: public Struct {
@@ -10770,9 +10740,6 @@ class ObjectVisitor BASE_EMBEDDED {
};
-typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
-
-
// BooleanBit is a helper class for setting and getting a bit in an integer.
class BooleanBit : public AllStatic {
public:
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index c3532bdbb1..a7a67f5d2f 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -71,6 +71,7 @@ std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c) {
std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c) {
if (c.value == '\n') return os << "\\n";
if (c.value == '\r') return os << "\\r";
+ if (c.value == '\t') return os << "\\t";
if (c.value == '\"') return os << "\\\"";
return PrintUC16(os, c.value, IsOK);
}
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 6f8600e7b1..56f4aa7e45 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -36,7 +36,7 @@ class OFStreamBase : public std::streambuf {
class OFStream : public std::ostream {
public:
explicit OFStream(FILE* f);
- ~OFStream();
+ virtual ~OFStream();
private:
OFStreamBase buf_;
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
new file mode 100644
index 0000000000..fbab2056f8
--- /dev/null
+++ b/deps/v8/src/parsing/OWNERS
@@ -0,0 +1,6 @@
+set noparent
+
+adamk@chromium.org
+littledan@chromium.org
+marja@chromium.org
+rossberg@chromium.org
diff --git a/deps/v8/src/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 7392a7add8..96ccf871f4 100644
--- a/deps/v8/src/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXPRESSION_CLASSIFIER_H
-#define V8_EXPRESSION_CLASSIFIER_H
+#ifndef V8_PARSING_EXPRESSION_CLASSIFIER_H
+#define V8_PARSING_EXPRESSION_CLASSIFIER_H
#include "src/messages.h"
-#include "src/scanner.h"
-#include "src/token.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -19,10 +19,12 @@ class ExpressionClassifier {
Error()
: location(Scanner::Location::invalid()),
message(MessageTemplate::kNone),
+ type(kSyntaxError),
arg(nullptr) {}
Scanner::Location location;
- MessageTemplate::Template message;
+ MessageTemplate::Template message : 30;
+ ParseErrorType type : 2;
const char* arg;
};
@@ -36,6 +38,7 @@ class ExpressionClassifier {
StrongModeFormalParametersProduction = 1 << 6,
ArrowFormalParametersProduction = 1 << 7,
LetPatternProduction = 1 << 8,
+ CoverInitializedNameProduction = 1 << 9,
ExpressionProductions =
(ExpressionProduction | FormalParameterInitializerProduction),
@@ -45,8 +48,9 @@ class ExpressionClassifier {
StrictModeFormalParametersProduction |
StrongModeFormalParametersProduction),
StandardProductions = ExpressionProductions | PatternProductions,
- AllProductions = (StandardProductions | FormalParametersProductions |
- ArrowFormalParametersProduction)
+ AllProductions =
+ (StandardProductions | FormalParametersProductions |
+ ArrowFormalParametersProduction | CoverInitializedNameProduction)
};
enum FunctionProperties { NonSimpleParameter = 1 << 0 };
@@ -133,6 +137,13 @@ class ExpressionClassifier {
const Error& let_pattern_error() const { return let_pattern_error_; }
+ bool has_cover_initialized_name() const {
+ return !is_valid(CoverInitializedNameProduction);
+ }
+ const Error& cover_initialized_name_error() const {
+ return cover_initialized_name_error_;
+ }
+
bool is_simple_parameter_list() const {
return !(function_properties_ & NonSimpleParameter);
}
@@ -151,6 +162,17 @@ class ExpressionClassifier {
expression_error_.arg = arg;
}
+ void RecordExpressionError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ ParseErrorType type, const char* arg = nullptr) {
+ if (!is_valid_expression()) return;
+ invalid_productions_ |= ExpressionProduction;
+ expression_error_.location = loc;
+ expression_error_.message = message;
+ expression_error_.arg = arg;
+ expression_error_.type = type;
+ }
+
void RecordFormalParameterInitializerError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -181,6 +203,13 @@ class ExpressionClassifier {
assignment_pattern_error_.arg = arg;
}
+ void RecordPatternError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ RecordBindingPatternError(loc, message, arg);
+ RecordAssignmentPatternError(loc, message, arg);
+ }
+
void RecordArrowFormalParametersError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -232,6 +261,26 @@ class ExpressionClassifier {
let_pattern_error_.arg = arg;
}
+ void RecordCoverInitializedNameError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (has_cover_initialized_name()) return;
+ invalid_productions_ |= CoverInitializedNameProduction;
+ cover_initialized_name_error_.location = loc;
+ cover_initialized_name_error_.message = message;
+ cover_initialized_name_error_.arg = arg;
+ }
+
+ void ForgiveCoverInitializedNameError() {
+ invalid_productions_ &= ~CoverInitializedNameProduction;
+ cover_initialized_name_error_ = Error();
+ }
+
+ void ForgiveAssignmentPatternError() {
+ invalid_productions_ &= ~AssignmentPatternProduction;
+ assignment_pattern_error_ = Error();
+ }
+
void Accumulate(const ExpressionClassifier& inner,
unsigned productions = StandardProductions) {
// Propagate errors from inner, but don't overwrite already recorded
@@ -266,6 +315,8 @@ class ExpressionClassifier {
inner.strong_mode_formal_parameter_error_;
if (errors & LetPatternProduction)
let_pattern_error_ = inner.let_pattern_error_;
+ if (errors & CoverInitializedNameProduction)
+ cover_initialized_name_error_ = inner.cover_initialized_name_error_;
}
// As an exception to the above, the result continues to be a valid arrow
@@ -295,10 +346,11 @@ class ExpressionClassifier {
Error strict_mode_formal_parameter_error_;
Error strong_mode_formal_parameter_error_;
Error let_pattern_error_;
+ Error cover_initialized_name_error_;
DuplicateFinder* duplicate_finder_;
};
} // namespace internal
} // namespace v8
-#endif // V8_EXPRESSION_CLASSIFIER_H
+#endif // V8_PARSING_EXPRESSION_CLASSIFIER_H
diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index 5006c03eb6..12013afd28 100644
--- a/deps/v8/src/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/func-name-inferrer.h"
+#include "src/parsing/func-name-inferrer.h"
-#include "src/ast.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-value-factory.h"
#include "src/list-inl.h"
namespace v8 {
diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index c17acf52f2..ba38ffeb24 100644
--- a/deps/v8/src/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FUNC_NAME_INFERRER_H_
-#define V8_FUNC_NAME_INFERRER_H_
+#ifndef V8_PARSING_FUNC_NAME_INFERRER_H_
+#define V8_PARSING_FUNC_NAME_INFERRER_H_
#include "src/handles.h"
#include "src/zone.h"
@@ -124,4 +124,4 @@ class FuncNameInferrer : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_FUNC_NAME_INFERRER_H_
+#endif // V8_PARSING_FUNC_NAME_INFERRER_H_
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/parsing/json-parser.h
index 21889530c3..e23c73383e 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/parsing/json-parser.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_JSON_PARSER_H_
-#define V8_JSON_PARSER_H_
+#ifndef V8_PARSING_JSON_PARSER_H_
+#define V8_PARSING_JSON_PARSER_H_
#include "src/char-predicates.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/messages.h"
-#include "src/scanner.h"
-#include "src/token.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
#include "src/transitions.h"
#include "src/types.h"
@@ -761,17 +761,8 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
position_);
}
if (c0 < 0x20) return Handle<String>::null();
- if (static_cast<uint32_t>(c0) >
- unibrow::Utf16::kMaxNonSurrogateCharCode) {
- running_hash =
- StringHasher::AddCharacterCore(running_hash,
- unibrow::Utf16::LeadSurrogate(c0));
- running_hash =
- StringHasher::AddCharacterCore(running_hash,
- unibrow::Utf16::TrailSurrogate(c0));
- } else {
- running_hash = StringHasher::AddCharacterCore(running_hash, c0);
- }
+ running_hash = StringHasher::AddCharacterCore(running_hash,
+ static_cast<uint16_t>(c0));
position++;
if (position >= source_length_) return Handle<String>::null();
c0 = seq_source_->SeqOneByteStringGet(position);
@@ -848,4 +839,4 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
} // namespace internal
} // namespace v8
-#endif // V8_JSON_PARSER_H_
+#endif // V8_PARSING_JSON_PARSER_H_
diff --git a/deps/v8/src/parameter-initializer-rewriter.cc b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
index 28f741c1f6..003bbebae0 100644
--- a/deps/v8/src/parameter-initializer-rewriter.cc
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/parameter-initializer-rewriter.h"
+#include "src/parsing/parameter-initializer-rewriter.h"
-#include "src/ast.h"
-#include "src/ast-expression-visitor.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
@@ -60,8 +60,14 @@ void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
- DCHECK(!proxy->is_resolved());
- if (old_scope_->RemoveUnresolved(proxy)) {
+ if (proxy->is_resolved()) {
+ Variable* var = proxy->var();
+ DCHECK_EQ(var->mode(), TEMPORARY);
+ if (old_scope_->RemoveTemporary(var)) {
+ var->set_scope(new_scope_);
+ new_scope_->AddTemporary(var);
+ }
+ } else if (old_scope_->RemoveUnresolved(proxy)) {
new_scope_->AddUnresolved(proxy);
}
}
diff --git a/deps/v8/src/parameter-initializer-rewriter.h b/deps/v8/src/parsing/parameter-initializer-rewriter.h
index a195cb600f..255534c99e 100644
--- a/deps/v8/src/parameter-initializer-rewriter.h
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARAMETER_EXPRESSION_REWRITER_H_
-#define V8_PARAMETER_EXPRESSION_REWRITER_H_
+#ifndef V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
+#define V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
namespace v8 {
namespace internal {
@@ -19,4 +19,4 @@ void RewriteParameterInitializerScope(uintptr_t stack_limit,
} // namespace internal
} // namespace v8
-#endif // V8_PARAMETER_EXPRESSION_REWRITER_H_
+#endif // V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/parsing/parser-base.h
index c4d7ed45b3..2955b0b9d9 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PREPARSER_H
-#define V8_PREPARSER_H
+#ifndef V8_PARSING_PARSER_BASE_H
+#define V8_PARSING_PARSER_BASE_H
+#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
-#include "src/expression-classifier.h"
-#include "src/func-name-inferrer.h"
#include "src/hashmap.h"
#include "src/messages.h"
-#include "src/scanner.h"
-#include "src/scopes.h"
-#include "src/token.h"
+#include "src/parsing/expression-classifier.h"
+#include "src/parsing/func-name-inferrer.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,6 @@ struct FormalParametersBase {
bool has_rest = false;
bool is_simple = true;
int materialized_literals_count = 0;
- mutable int rest_array_literal_index = -1;
};
@@ -111,12 +110,13 @@ class ParserBase : public Traits {
allow_harmony_sloppy_(false),
allow_harmony_sloppy_function_(false),
allow_harmony_sloppy_let_(false),
- allow_harmony_rest_parameters_(false),
allow_harmony_default_parameters_(false),
- allow_harmony_destructuring_(false),
+ allow_harmony_destructuring_bind_(false),
+ allow_harmony_destructuring_assignment_(false),
allow_strong_mode_(false),
allow_legacy_const_(true),
- allow_harmony_do_expressions_(false) {}
+ allow_harmony_do_expressions_(false),
+ allow_harmony_function_name_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -127,12 +127,13 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(harmony_sloppy);
ALLOW_ACCESSORS(harmony_sloppy_function);
ALLOW_ACCESSORS(harmony_sloppy_let);
- ALLOW_ACCESSORS(harmony_rest_parameters);
ALLOW_ACCESSORS(harmony_default_parameters);
- ALLOW_ACCESSORS(harmony_destructuring);
+ ALLOW_ACCESSORS(harmony_destructuring_bind);
+ ALLOW_ACCESSORS(harmony_destructuring_assignment);
ALLOW_ACCESSORS(strong_mode);
ALLOW_ACCESSORS(legacy_const);
ALLOW_ACCESSORS(harmony_do_expressions);
+ ALLOW_ACCESSORS(harmony_function_name);
#undef ALLOW_ACCESSORS
uintptr_t stack_limit() const { return stack_limit_; }
@@ -175,6 +176,15 @@ class ParserBase : public Traits {
Scope* outer_scope_;
};
+ struct DestructuringAssignment {
+ public:
+ DestructuringAssignment(ExpressionT expression, Scope* scope)
+ : assignment(expression), scope(scope) {}
+
+ ExpressionT assignment;
+ Scope* scope;
+ };
+
class FunctionState BASE_EMBEDDED {
public:
FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
@@ -227,6 +237,15 @@ class ParserBase : public Traits {
typename Traits::Type::Factory* factory() { return factory_; }
+ const List<DestructuringAssignment>& destructuring_assignments_to_rewrite()
+ const {
+ return destructuring_assignments_to_rewrite_;
+ }
+
+ void AddDestructuringAssignment(DestructuringAssignment pair) {
+ destructuring_assignments_to_rewrite_.Add(pair);
+ }
+
private:
// Used to assign an index to each literal that needs materialization in
// the function. Includes regexp literals, and boilerplate for object and
@@ -255,6 +274,11 @@ class ParserBase : public Traits {
FunctionState* outer_function_state_;
Scope** scope_stack_;
Scope* outer_scope_;
+
+ List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
+
+ void RewriteDestructuringAssignments();
+
typename Traits::Type::Factory* factory_;
friend class ParserTraits;
@@ -460,6 +484,10 @@ class ParserBase : public Traits {
ok);
}
+ void CheckDestructuringElement(ExpressionT element,
+ ExpressionClassifier* classifier, int beg_pos,
+ int end_pos);
+
// Checking the name of a function literal. This has to be done after parsing
// the function, since the function can declare itself strict.
void CheckFunctionName(LanguageMode language_mode, IdentifierT function_name,
@@ -538,12 +566,20 @@ class ParserBase : public Traits {
void ReportClassifierError(const ExpressionClassifier::Error& error) {
Traits::ReportMessageAt(error.location, error.message, error.arg,
- kSyntaxError);
+ error.type);
}
void ValidateExpression(const ExpressionClassifier* classifier, bool* ok) {
- if (!classifier->is_valid_expression()) {
- ReportClassifierError(classifier->expression_error());
+ if (!classifier->is_valid_expression() ||
+ classifier->has_cover_initialized_name()) {
+ const Scanner::Location& a = classifier->expression_error().location;
+ const Scanner::Location& b =
+ classifier->cover_initialized_name_error().location;
+ if (a.beg_pos < 0 || (b.beg_pos >= 0 && a.beg_pos > b.beg_pos)) {
+ ReportClassifierError(classifier->cover_initialized_name_error());
+ } else {
+ ReportClassifierError(classifier->expression_error());
+ }
*ok = false;
}
}
@@ -664,9 +700,20 @@ class ParserBase : public Traits {
IdentifierT ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
bool* ok);
// Parses an identifier or a strict mode future reserved word, and indicate
- // whether it is strict mode future reserved.
- IdentifierT ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ // whether it is strict mode future reserved. Allows passing in is_generator
+ // for the case of parsing the identifier in a function expression, where the
+ // relevant "is_generator" bit is of the function being parsed, not the
+ // containing
+ // function.
+ IdentifierT ParseIdentifierOrStrictReservedWord(bool is_generator,
+ bool* is_strict_reserved,
bool* ok);
+ IdentifierT ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
+ bool* ok) {
+ return ParseIdentifierOrStrictReservedWord(this->is_generator(),
+ is_strict_reserved, ok);
+ }
+
IdentifierT ParseIdentifierName(bool* ok);
// Parses an identifier and determines whether or not it is 'get' or 'set'.
IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get, bool* is_set,
@@ -681,21 +728,37 @@ class ParserBase : public Traits {
ExpressionT ParseExpression(bool accept_IN, bool* ok);
ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
bool* ok);
+ ExpressionT ParseExpression(bool accept_IN, int flags,
+ ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
bool* is_static, bool* is_computed_name,
+ bool* is_identifier, bool* is_escaped_keyword,
ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
ObjectLiteralPropertyT ParsePropertyDefinition(
ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
bool is_static, bool* is_computed_name, bool* has_seen_constructor,
- ExpressionClassifier* classifier, bool* ok);
+ ExpressionClassifier* classifier, IdentifierT* name, bool* ok);
typename Traits::Type::ExpressionList ParseArguments(
Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
bool* ok);
- ExpressionT ParseAssignmentExpression(bool accept_IN,
+
+ enum AssignmentExpressionFlags {
+ kIsNormalAssignment = 0,
+ kIsPossiblePatternElement = 1 << 0,
+ kIsPossibleArrowFormals = 1 << 1
+ };
+
+ ExpressionT ParseAssignmentExpression(bool accept_IN, int flags,
ExpressionClassifier* classifier,
bool* ok);
+ ExpressionT ParseAssignmentExpression(bool accept_IN,
+ ExpressionClassifier* classifier,
+ bool* ok) {
+ return ParseAssignmentExpression(accept_IN, kIsNormalAssignment, classifier,
+ ok);
+ }
ExpressionT ParseYieldExpression(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseConditionalExpression(bool accept_IN,
ExpressionClassifier* classifier,
@@ -743,10 +806,40 @@ class ParserBase : public Traits {
ExpressionT CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok);
+ ExpressionT ClassifyAndRewriteReferenceExpression(
+ ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
+ int end_pos, MessageTemplate::Template message,
+ ParseErrorType type = kSyntaxError);
ExpressionT CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok);
+ bool IsValidReferenceExpression(ExpressionT expression);
+
+ bool IsAssignableIdentifier(ExpressionT expression) {
+ if (!Traits::IsIdentifier(expression)) return false;
+ if (is_strict(language_mode()) &&
+ Traits::IsEvalOrArguments(Traits::AsIdentifier(expression))) {
+ return false;
+ }
+ if (is_strong(language_mode()) &&
+ Traits::IsUndefined(Traits::AsIdentifier(expression))) {
+ return false;
+ }
+ return true;
+ }
+
+ // Keep track of eval() calls since they disable all local variable
+ // optimizations. This checks if expression is an eval call, and if yes,
+ // forwards the information to scope.
+ void CheckPossibleEvalCall(ExpressionT expression, Scope* scope) {
+ if (Traits::IsIdentifier(expression) &&
+ Traits::IsEval(Traits::AsIdentifier(expression))) {
+ scope->DeclarationScope()->RecordEvalCall();
+ scope->RecordEvalCall();
+ }
+ }
+
// Used to validate property names in object literals and class literals
enum PropertyKind {
kAccessorProperty,
@@ -832,1121 +925,16 @@ class ParserBase : public Traits {
bool allow_harmony_sloppy_;
bool allow_harmony_sloppy_function_;
bool allow_harmony_sloppy_let_;
- bool allow_harmony_rest_parameters_;
bool allow_harmony_default_parameters_;
- bool allow_harmony_destructuring_;
+ bool allow_harmony_destructuring_bind_;
+ bool allow_harmony_destructuring_assignment_;
bool allow_strong_mode_;
bool allow_legacy_const_;
bool allow_harmony_do_expressions_;
+ bool allow_harmony_function_name_;
};
-class PreParserIdentifier {
- public:
- PreParserIdentifier() : type_(kUnknownIdentifier) {}
- static PreParserIdentifier Default() {
- return PreParserIdentifier(kUnknownIdentifier);
- }
- static PreParserIdentifier Eval() {
- return PreParserIdentifier(kEvalIdentifier);
- }
- static PreParserIdentifier Arguments() {
- return PreParserIdentifier(kArgumentsIdentifier);
- }
- static PreParserIdentifier Undefined() {
- return PreParserIdentifier(kUndefinedIdentifier);
- }
- static PreParserIdentifier FutureReserved() {
- return PreParserIdentifier(kFutureReservedIdentifier);
- }
- static PreParserIdentifier FutureStrictReserved() {
- return PreParserIdentifier(kFutureStrictReservedIdentifier);
- }
- static PreParserIdentifier Let() {
- return PreParserIdentifier(kLetIdentifier);
- }
- static PreParserIdentifier Static() {
- return PreParserIdentifier(kStaticIdentifier);
- }
- static PreParserIdentifier Yield() {
- return PreParserIdentifier(kYieldIdentifier);
- }
- static PreParserIdentifier Prototype() {
- return PreParserIdentifier(kPrototypeIdentifier);
- }
- static PreParserIdentifier Constructor() {
- return PreParserIdentifier(kConstructorIdentifier);
- }
- bool IsEval() const { return type_ == kEvalIdentifier; }
- bool IsArguments() const { return type_ == kArgumentsIdentifier; }
- bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
- bool IsUndefined() const { return type_ == kUndefinedIdentifier; }
- bool IsLet() const { return type_ == kLetIdentifier; }
- bool IsStatic() const { return type_ == kStaticIdentifier; }
- bool IsYield() const { return type_ == kYieldIdentifier; }
- bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
- bool IsConstructor() const { return type_ == kConstructorIdentifier; }
- bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
- bool IsFutureStrictReserved() const {
- return type_ == kFutureStrictReservedIdentifier ||
- type_ == kLetIdentifier || type_ == kStaticIdentifier ||
- type_ == kYieldIdentifier;
- }
-
- // Allow identifier->name()[->length()] to work. The preparser
- // does not need the actual positions/lengths of the identifiers.
- const PreParserIdentifier* operator->() const { return this; }
- const PreParserIdentifier raw_name() const { return *this; }
-
- int position() const { return 0; }
- int length() const { return 0; }
-
- private:
- enum Type {
- kUnknownIdentifier,
- kFutureReservedIdentifier,
- kFutureStrictReservedIdentifier,
- kLetIdentifier,
- kStaticIdentifier,
- kYieldIdentifier,
- kEvalIdentifier,
- kArgumentsIdentifier,
- kUndefinedIdentifier,
- kPrototypeIdentifier,
- kConstructorIdentifier
- };
-
- explicit PreParserIdentifier(Type type) : type_(type) {}
- Type type_;
-
- friend class PreParserExpression;
-};
-
-
-class PreParserExpression {
- public:
- static PreParserExpression Default() {
- return PreParserExpression(TypeField::encode(kExpression));
- }
-
- static PreParserExpression Spread(PreParserExpression expression) {
- return PreParserExpression(TypeField::encode(kSpreadExpression));
- }
-
- static PreParserExpression FromIdentifier(PreParserIdentifier id) {
- return PreParserExpression(TypeField::encode(kIdentifierExpression) |
- IdentifierTypeField::encode(id.type_));
- }
-
- static PreParserExpression BinaryOperation(PreParserExpression left,
- Token::Value op,
- PreParserExpression right) {
- return PreParserExpression(
- TypeField::encode(kBinaryOperationExpression) |
- HasRestField::encode(op == Token::COMMA &&
- right->IsSpreadExpression()));
- }
-
- static PreParserExpression ObjectLiteral() {
- return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
- }
-
- static PreParserExpression ArrayLiteral() {
- return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
- }
-
- static PreParserExpression StringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression));
- }
-
- static PreParserExpression UseStrictStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrictField::encode(true));
- }
-
- static PreParserExpression UseStrongStringLiteral() {
- return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
- IsUseStrongField::encode(true));
- }
-
- static PreParserExpression This() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kThisExpression));
- }
-
- static PreParserExpression ThisProperty() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kThisPropertyExpression));
- }
-
- static PreParserExpression Property() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kPropertyExpression));
- }
-
- static PreParserExpression Call() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kCallExpression));
- }
-
- static PreParserExpression SuperCallReference() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kSuperCallReference));
- }
-
- static PreParserExpression NoTemplateTag() {
- return PreParserExpression(
- TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kNoTemplateTagExpression));
- }
-
- bool IsIdentifier() const {
- return TypeField::decode(code_) == kIdentifierExpression;
- }
-
- PreParserIdentifier AsIdentifier() const {
- DCHECK(IsIdentifier());
- return PreParserIdentifier(IdentifierTypeField::decode(code_));
- }
-
- bool IsObjectLiteral() const {
- return TypeField::decode(code_) == kObjectLiteralExpression;
- }
-
- bool IsArrayLiteral() const {
- return TypeField::decode(code_) == kArrayLiteralExpression;
- }
-
- bool IsStringLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression;
- }
-
- bool IsUseStrictLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseStrictField::decode(code_);
- }
-
- bool IsUseStrongLiteral() const {
- return TypeField::decode(code_) == kStringLiteralExpression &&
- IsUseStrongField::decode(code_);
- }
-
- bool IsThis() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kThisExpression;
- }
-
- bool IsThisProperty() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kThisPropertyExpression;
- }
-
- bool IsProperty() const {
- return TypeField::decode(code_) == kExpression &&
- (ExpressionTypeField::decode(code_) == kPropertyExpression ||
- ExpressionTypeField::decode(code_) == kThisPropertyExpression);
- }
-
- bool IsCall() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kCallExpression;
- }
-
- bool IsSuperCallReference() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kSuperCallReference;
- }
-
- bool IsValidReferenceExpression() const {
- return IsIdentifier() || IsProperty();
- }
-
- // At the moment PreParser doesn't track these expression types.
- bool IsFunctionLiteral() const { return false; }
- bool IsCallNew() const { return false; }
-
- bool IsNoTemplateTag() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
- }
-
- bool IsSpreadExpression() const {
- return TypeField::decode(code_) == kSpreadExpression;
- }
-
- bool IsArrowFunctionFormalParametersWithRestParameter() const {
- // Iff the expression classifier has determined that this expression is a
- // valid arrow fformal parameter list, return true if the formal parameter
- // list ends with a rest parameter.
- return IsSpreadExpression() ||
- (IsBinaryOperation() && HasRestField::decode(code_));
- }
-
- PreParserExpression AsFunctionLiteral() { return *this; }
-
- bool IsBinaryOperation() const {
- return TypeField::decode(code_) == kBinaryOperationExpression;
- }
-
- // Dummy implementation for making expression->somefunc() work in both Parser
- // and PreParser.
- PreParserExpression* operator->() { return this; }
-
- // More dummy implementations of things PreParser doesn't need to track:
- void set_index(int index) {} // For YieldExpressions
- void set_should_eager_compile() {}
-
- int position() const { return RelocInfo::kNoPosition; }
- void set_function_token_position(int position) {}
-
- private:
- enum Type {
- kExpression,
- kIdentifierExpression,
- kStringLiteralExpression,
- kBinaryOperationExpression,
- kSpreadExpression,
- kObjectLiteralExpression,
- kArrayLiteralExpression
- };
-
- enum ExpressionType {
- kThisExpression,
- kThisPropertyExpression,
- kPropertyExpression,
- kCallExpression,
- kSuperCallReference,
- kNoTemplateTagExpression
- };
-
- explicit PreParserExpression(uint32_t expression_code)
- : code_(expression_code) {}
-
- // The first three bits are for the Type.
- typedef BitField<Type, 0, 3> TypeField;
-
- // The rest of the bits are interpreted depending on the value
- // of the Type field, so they can share the storage.
- typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
- typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
- typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
- typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
- IdentifierTypeField;
- typedef BitField<bool, TypeField::kNext, 1> HasRestField;
-
- uint32_t code_;
-};
-
-
-// The pre-parser doesn't need to build lists of expressions, identifiers, or
-// the like.
-template <typename T>
-class PreParserList {
- public:
- // These functions make list->Add(some_expression) work (and do nothing).
- PreParserList() : length_(0) {}
- PreParserList* operator->() { return this; }
- void Add(T, void*) { ++length_; }
- int length() const { return length_; }
- private:
- int length_;
-};
-
-
-typedef PreParserList<PreParserExpression> PreParserExpressionList;
-
-
-class PreParserStatement {
- public:
- static PreParserStatement Default() {
- return PreParserStatement(kUnknownStatement);
- }
-
- static PreParserStatement Jump() {
- return PreParserStatement(kJumpStatement);
- }
-
- static PreParserStatement FunctionDeclaration() {
- return PreParserStatement(kFunctionDeclaration);
- }
-
- // Creates expression statement from expression.
- // Preserves being an unparenthesized string literal, possibly
- // "use strict".
- static PreParserStatement ExpressionStatement(
- PreParserExpression expression) {
- if (expression.IsUseStrictLiteral()) {
- return PreParserStatement(kUseStrictExpressionStatement);
- }
- if (expression.IsUseStrongLiteral()) {
- return PreParserStatement(kUseStrongExpressionStatement);
- }
- if (expression.IsStringLiteral()) {
- return PreParserStatement(kStringLiteralExpressionStatement);
- }
- return Default();
- }
-
- bool IsStringLiteral() {
- return code_ == kStringLiteralExpressionStatement;
- }
-
- bool IsUseStrictLiteral() {
- return code_ == kUseStrictExpressionStatement;
- }
-
- bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
-
- bool IsFunctionDeclaration() {
- return code_ == kFunctionDeclaration;
- }
-
- bool IsJumpStatement() {
- return code_ == kJumpStatement;
- }
-
- private:
- enum Type {
- kUnknownStatement,
- kJumpStatement,
- kStringLiteralExpressionStatement,
- kUseStrictExpressionStatement,
- kUseStrongExpressionStatement,
- kFunctionDeclaration
- };
-
- explicit PreParserStatement(Type code) : code_(code) {}
- Type code_;
-};
-
-
-typedef PreParserList<PreParserStatement> PreParserStatementList;
-
-
-class PreParserFactory {
- public:
- explicit PreParserFactory(void* unused_value_factory) {}
- PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewNumberLiteral(double number,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
- PreParserIdentifier js_flags,
- int literal_index,
- bool is_strong,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewArrayLiteral(PreParserExpressionList values,
- int literal_index,
- bool is_strong,
- int pos) {
- return PreParserExpression::ArrayLiteral();
- }
- PreParserExpression NewArrayLiteral(PreParserExpressionList values,
- int first_spread_index, int literal_index,
- bool is_strong, int pos) {
- return PreParserExpression::ArrayLiteral();
- }
- PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
- PreParserExpression value,
- ObjectLiteralProperty::Kind kind,
- bool is_static,
- bool is_computed_name) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
- PreParserExpression value,
- bool is_static,
- bool is_computed_name) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
- int literal_index,
- int boilerplate_properties,
- bool has_function,
- bool is_strong,
- int pos) {
- return PreParserExpression::ObjectLiteral();
- }
- PreParserExpression NewVariableProxy(void* variable) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewProperty(PreParserExpression obj,
- PreParserExpression key,
- int pos) {
- if (obj.IsThis()) {
- return PreParserExpression::ThisProperty();
- }
- return PreParserExpression::Property();
- }
- PreParserExpression NewUnaryOperation(Token::Value op,
- PreParserExpression expression,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewBinaryOperation(Token::Value op,
- PreParserExpression left,
- PreParserExpression right, int pos) {
- return PreParserExpression::BinaryOperation(left, op, right);
- }
- PreParserExpression NewCompareOperation(Token::Value op,
- PreParserExpression left,
- PreParserExpression right, int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewAssignment(Token::Value op,
- PreParserExpression left,
- PreParserExpression right,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewYield(PreParserExpression generator_object,
- PreParserExpression expression,
- Yield::Kind yield_kind,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewConditional(PreParserExpression condition,
- PreParserExpression then_expression,
- PreParserExpression else_expression,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewCountOperation(Token::Value op,
- bool is_prefix,
- PreParserExpression expression,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewCall(PreParserExpression expression,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Call();
- }
- PreParserExpression NewCallNew(PreParserExpression expression,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewCallRuntime(const AstRawString* name,
- const Runtime::Function* function,
- PreParserExpressionList arguments,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserStatement NewReturnStatement(PreParserExpression expression,
- int pos) {
- return PreParserStatement::Default();
- }
- PreParserExpression NewFunctionLiteral(
- PreParserIdentifier name, AstValueFactory* ast_value_factory,
- Scope* scope, PreParserStatementList body, int materialized_literal_count,
- int expected_property_count, int parameter_count,
- FunctionLiteral::ParameterFlag has_duplicate_parameters,
- FunctionLiteral::FunctionType function_type,
- FunctionLiteral::IsFunctionFlag is_function,
- FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
- int position) {
- return PreParserExpression::Default();
- }
-
- PreParserExpression NewSpread(PreParserExpression expression, int pos) {
- return PreParserExpression::Spread(expression);
- }
-
- PreParserExpression NewEmptyParentheses(int pos) {
- return PreParserExpression::Default();
- }
-
- // Return the object itself as AstVisitor and implement the needed
- // dummy method right in this class.
- PreParserFactory* visitor() { return this; }
- int* ast_properties() {
- static int dummy = 42;
- return &dummy;
- }
-};
-
-
-struct PreParserFormalParameters : FormalParametersBase {
- explicit PreParserFormalParameters(Scope* scope)
- : FormalParametersBase(scope) {}
- int arity = 0;
-
- int Arity() const { return arity; }
- PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
-};
-
-
-class PreParser;
-
-class PreParserTraits {
- public:
- struct Type {
- // TODO(marja): To be removed. The Traits object should contain all the data
- // it needs.
- typedef PreParser* Parser;
-
- // PreParser doesn't need to store generator variables.
- typedef void GeneratorVariable;
-
- typedef int AstProperties;
-
- // Return types for traversing functions.
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- typedef PreParserExpression YieldExpression;
- typedef PreParserExpression FunctionLiteral;
- typedef PreParserExpression ClassLiteral;
- typedef PreParserExpression ObjectLiteralProperty;
- typedef PreParserExpression Literal;
- typedef PreParserExpressionList ExpressionList;
- typedef PreParserExpressionList PropertyList;
- typedef PreParserIdentifier FormalParameter;
- typedef PreParserFormalParameters FormalParameters;
- typedef PreParserStatementList StatementList;
-
- // For constructing objects returned by the traversing functions.
- typedef PreParserFactory Factory;
- };
-
- explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
-
- // Helper functions for recursive descent.
- static bool IsEval(PreParserIdentifier identifier) {
- return identifier.IsEval();
- }
-
- static bool IsArguments(PreParserIdentifier identifier) {
- return identifier.IsArguments();
- }
-
- static bool IsEvalOrArguments(PreParserIdentifier identifier) {
- return identifier.IsEvalOrArguments();
- }
-
- static bool IsUndefined(PreParserIdentifier identifier) {
- return identifier.IsUndefined();
- }
-
- static bool IsPrototype(PreParserIdentifier identifier) {
- return identifier.IsPrototype();
- }
-
- static bool IsConstructor(PreParserIdentifier identifier) {
- return identifier.IsConstructor();
- }
-
- // Returns true if the expression is of type "this.foo".
- static bool IsThisProperty(PreParserExpression expression) {
- return expression.IsThisProperty();
- }
-
- static bool IsIdentifier(PreParserExpression expression) {
- return expression.IsIdentifier();
- }
-
- static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
- return expression.AsIdentifier();
- }
-
- static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
- return identifier.IsFutureStrictReserved();
- }
-
- static bool IsBoilerplateProperty(PreParserExpression property) {
- // PreParser doesn't count boilerplate properties.
- return false;
- }
-
- static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
- return false;
- }
-
- static PreParserExpression GetPropertyValue(PreParserExpression property) {
- return PreParserExpression::Default();
- }
-
- // Functions for encapsulating the differences between parsing and preparsing;
- // operations interleaved with the recursive descent.
- static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
- }
-
- static void PushPropertyName(FuncNameInferrer* fni,
- PreParserExpression expression) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
- }
-
- static void InferFunctionName(FuncNameInferrer* fni,
- PreParserExpression expression) {
- // PreParser should not use FuncNameInferrer.
- UNREACHABLE();
- }
-
- static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
- Scope* scope, PreParserExpression property, bool* has_function) {}
-
- static void CheckAssigningFunctionLiteralToProperty(
- PreParserExpression left, PreParserExpression right) {}
-
- static void CheckPossibleEvalCall(PreParserExpression expression,
- Scope* scope) {
- if (IsIdentifier(expression) && IsEval(AsIdentifier(expression))) {
- scope->DeclarationScope()->RecordEvalCall();
- scope->RecordEvalCall();
- }
- }
-
- static PreParserExpression MarkExpressionAsAssigned(
- PreParserExpression expression) {
- // TODO(marja): To be able to produce the same errors, the preparser needs
- // to start tracking which expressions are variables and which are assigned.
- return expression;
- }
-
- bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
- PreParserExpression y,
- Token::Value op,
- int pos,
- PreParserFactory* factory) {
- return false;
- }
-
- PreParserExpression BuildUnaryExpression(PreParserExpression expression,
- Token::Value op, int pos,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
- int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
- Handle<Object> arg, int pos) {
- return PreParserExpression::Default();
- }
- PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
- Handle<Object> arg, int pos) {
- return PreParserExpression::Default();
- }
-
- // Reporting errors.
- void ReportMessageAt(Scanner::Location location,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(int start_pos, int end_pos,
- MessageTemplate::Template message,
- const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
-
- // "null" return type creators.
- static PreParserIdentifier EmptyIdentifier() {
- return PreParserIdentifier::Default();
- }
- static PreParserIdentifier EmptyIdentifierString() {
- return PreParserIdentifier::Default();
- }
- static PreParserExpression EmptyExpression() {
- return PreParserExpression::Default();
- }
- static PreParserExpression EmptyLiteral() {
- return PreParserExpression::Default();
- }
- static PreParserExpression EmptyObjectLiteralProperty() {
- return PreParserExpression::Default();
- }
- static PreParserExpression EmptyFunctionLiteral() {
- return PreParserExpression::Default();
- }
- static PreParserExpressionList NullExpressionList() {
- return PreParserExpressionList();
- }
-
- // Odd-ball literal creators.
- static PreParserExpression GetLiteralTheHole(int position,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- // Producing data during the recursive descent.
- PreParserIdentifier GetSymbol(Scanner* scanner);
- PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
-
- static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
- return PreParserIdentifier::Default();
- }
-
- static PreParserExpression ThisExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::This();
- }
-
- static PreParserExpression SuperPropertyReference(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression SuperCallReference(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::SuperCallReference();
- }
-
- static PreParserExpression NewTargetExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
- int pos, int end_pos) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression ExpressionFromLiteral(
- Token::Value token, int pos, Scanner* scanner,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int start_position, int end_position,
- Scope* scope, PreParserFactory* factory) {
- return PreParserExpression::FromIdentifier(name);
- }
-
- PreParserExpression ExpressionFromString(int pos,
- Scanner* scanner,
- PreParserFactory* factory = NULL);
-
- PreParserExpression GetIterator(PreParserExpression iterable,
- PreParserFactory* factory) {
- return PreParserExpression::Default();
- }
-
- static PreParserExpressionList NewExpressionList(int size, Zone* zone) {
- return PreParserExpressionList();
- }
-
- static PreParserStatementList NewStatementList(int size, Zone* zone) {
- return PreParserStatementList();
- }
-
- static PreParserExpressionList NewPropertyList(int size, Zone* zone) {
- return PreParserExpressionList();
- }
-
- static void AddParameterInitializationBlock(
- const PreParserFormalParameters& parameters,
- PreParserStatementList list, bool* ok) {}
-
- V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok) {
- UNREACHABLE();
- }
-
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- V8_INLINE void ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters,
- PreParserExpression expression, const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok);
-
- void ReindexLiterals(const PreParserFormalParameters& paramaters) {}
-
- struct TemplateLiteralState {};
-
- TemplateLiteralState OpenTemplateLiteral(int pos) {
- return TemplateLiteralState();
- }
- void AddTemplateSpan(TemplateLiteralState*, bool) {}
- void AddTemplateExpression(TemplateLiteralState*, PreParserExpression) {}
- PreParserExpression CloseTemplateLiteral(TemplateLiteralState*, int,
- PreParserExpression tag) {
- if (IsTaggedTemplate(tag)) {
- // Emulate generation of array literals for tag callsite
- // 1st is array of cooked strings, second is array of raw strings
- MaterializeTemplateCallsiteLiterals();
- }
- return EmptyExpression();
- }
- inline void MaterializeTemplateCallsiteLiterals();
- PreParserExpression NoTemplateTag() {
- return PreParserExpression::NoTemplateTag();
- }
- static bool IsTaggedTemplate(const PreParserExpression tag) {
- return !tag.IsNoTemplateTag();
- }
-
- void AddFormalParameter(
- PreParserFormalParameters* parameters, PreParserExpression pattern,
- PreParserExpression initializer, bool is_rest) {
- ++parameters->arity;
- }
- void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
- ExpressionClassifier* classifier) {
- if (!classifier->is_simple_parameter_list()) {
- scope->SetHasNonSimpleParameters();
- }
- }
-
- void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
-
- // Temporary glue; these functions will move to ParserBase.
- PreParserExpression ParseV8Intrinsic(bool* ok);
- V8_INLINE PreParserExpression ParseDoExpression(bool* ok);
- PreParserExpression ParseFunctionLiteral(
- PreParserIdentifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
- LanguageMode language_mode, bool* ok);
-
- PreParserExpression ParseClassLiteral(PreParserIdentifier name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-
- PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
- return list;
- }
-
- inline void MaterializeUnspreadArgumentsLiterals(int count);
-
- inline PreParserExpression SpreadCall(PreParserExpression function,
- PreParserExpressionList args, int pos);
-
- inline PreParserExpression SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos);
-
- private:
- PreParser* pre_parser_;
-};
-
-
-// Preparsing checks a JavaScript program and emits preparse-data that helps
-// a later parsing to be faster.
-// See preparse-data-format.h for the data format.
-
-// The PreParser checks that the syntax follows the grammar for JavaScript,
-// and collects some information about the program along the way.
-// The grammar check is only performed in order to understand the program
-// sufficiently to deduce some information about it, that can be used
-// to speed up later parsing. Finding errors is not the goal of pre-parsing,
-// rather it is to speed up properly written and correct programs.
-// That means that contextual checks (like a label being declared where
-// it is used) are generally omitted.
-class PreParser : public ParserBase<PreParserTraits> {
- public:
- typedef PreParserIdentifier Identifier;
- typedef PreParserExpression Expression;
- typedef PreParserStatement Statement;
-
- enum PreParseResult {
- kPreParseStackOverflow,
- kPreParseSuccess
- };
-
- PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
- ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
- ast_value_factory, log, this) {}
-
- // Pre-parse the program from the character stream; returns true on
- // success (even if parsing failed, the pre-parse data successfully
- // captured the syntax error), and false if a stack-overflow happened
- // during parsing.
- PreParseResult PreParseProgram(int* materialized_literals = 0) {
- Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- PreParserFactory factory(NULL);
- FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
- &factory);
- bool ok = true;
- int start_position = scanner()->peek_location().beg_pos;
- ParseStatementList(Token::EOS, &ok);
- if (stack_overflow()) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner()->current_token());
- } else if (is_strict(scope_->language_mode())) {
- CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
- &ok);
- }
- if (materialized_literals) {
- *materialized_literals = function_state_->materialized_literal_count();
- }
- return kPreParseSuccess;
- }
-
- // Parses a single function literal, from the opening parentheses before
- // parameters to the closing brace after the body.
- // Returns a FunctionEntry describing the body of the function in enough
- // detail that it can be lazily compiled.
- // The scanner is expected to have matched the "function" or "function*"
- // keyword and parameters, and have consumed the initial '{'.
- // At return, unless an error occurred, the scanner is positioned before the
- // the final '}'.
- PreParseResult PreParseLazyFunction(
- LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
- ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
-
- private:
- friend class PreParserTraits;
-
- static const int kLazyParseTrialLimit = 200;
-
- // These types form an algebra over syntactic categories that is just
- // rich enough to let us recognize and propagate the constructs that
- // are either being counted in the preparser data, or is important
- // to throw the correct syntax error exceptions.
-
- // All ParseXXX functions take as the last argument an *ok parameter
- // which is set to false if parsing failed; it is unchanged otherwise.
- // By making the 'exception handling' explicit, we are forced to check
- // for failure at the call sites.
- Statement ParseStatementListItem(bool* ok);
- void ParseStatementList(int end_token, bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
- Statement ParseStatement(bool* ok);
- Statement ParseSubStatement(bool* ok);
- Statement ParseFunctionDeclaration(bool* ok);
- Statement ParseClassDeclaration(bool* ok);
- Statement ParseBlock(bool* ok);
- Statement ParseVariableStatement(VariableDeclarationContext var_context,
- bool* ok);
- Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
- int* num_decl,
- Scanner::Location* first_initializer_loc,
- Scanner::Location* bindings_loc,
- bool* ok);
- Statement ParseExpressionOrLabelledStatement(bool* ok);
- Statement ParseIfStatement(bool* ok);
- Statement ParseContinueStatement(bool* ok);
- Statement ParseBreakStatement(bool* ok);
- Statement ParseReturnStatement(bool* ok);
- Statement ParseWithStatement(bool* ok);
- Statement ParseSwitchStatement(bool* ok);
- Statement ParseDoWhileStatement(bool* ok);
- Statement ParseWhileStatement(bool* ok);
- Statement ParseForStatement(bool* ok);
- Statement ParseThrowStatement(bool* ok);
- Statement ParseTryStatement(bool* ok);
- Statement ParseDebuggerStatement(bool* ok);
- Expression ParseConditionalExpression(bool accept_IN, bool* ok);
- Expression ParseObjectLiteral(bool* ok);
- Expression ParseV8Intrinsic(bool* ok);
- Expression ParseDoExpression(bool* ok);
-
- V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok);
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- Expression ParseFunctionLiteral(
- Identifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_pos, FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
- LanguageMode language_mode, bool* ok);
- void ParseLazyFunctionLiteralBody(bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
-
- PreParserExpression ParseClassLiteral(PreParserIdentifier name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-};
-
-
-void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
-}
-
-
-void PreParserTraits::MaterializeUnspreadArgumentsLiterals(int count) {
- for (int i = 0; i < count; ++i) {
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- }
-}
-
-
-PreParserExpression PreParserTraits::SpreadCall(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return pre_parser_->factory()->NewCall(function, args, pos);
-}
-
-PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return pre_parser_->factory()->NewCallNew(function, args, pos);
-}
-
-
-void PreParserTraits::ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters,
- PreParserExpression params, const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok) {
- // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
- // lists that are too long.
-
- // Accomodate array literal for rest parameter.
- if (params.IsArrowFunctionFormalParametersWithRestParameter()) {
- ++parameters->materialized_literals_count;
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- }
-}
-
-
-PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
- return pre_parser_->ParseDoExpression(ok);
-}
-
-
-PreParserStatementList PreParser::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
-
- ParseStatementList(Token::RBRACE, ok);
- if (!*ok) return PreParserStatementList();
-
- Expect(Token::RBRACE, ok);
- return PreParserStatementList();
-}
-
-
-PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- return pre_parser_->ParseEagerFunctionBody(function_name, pos, parameters,
- kind, function_type, ok);
-}
-
-
template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
@@ -2016,6 +1004,11 @@ void ParserBase<Traits>::GetUnexpectedTokenMessage(
*message = MessageTemplate::kUnexpectedTemplateString;
*arg = nullptr;
break;
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_KEYWORD:
+ *message = MessageTemplate::kInvalidEscapedReservedWord;
+ *arg = nullptr;
+ break;
default:
const char* name = Token::String(token);
DCHECK(name != NULL);
@@ -2053,9 +1046,6 @@ typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
if (!*ok) return Traits::EmptyIdentifier();
ValidateBindingPattern(&classifier, ok);
if (!*ok) return Traits::EmptyIdentifier();
- } else {
- ValidateExpression(&classifier, ok);
- if (!*ok) return Traits::EmptyIdentifier();
}
return result;
@@ -2115,10 +1105,17 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
return name;
} else if (is_sloppy(language_mode()) &&
(next == Token::FUTURE_STRICT_RESERVED_WORD ||
+ next == Token::ESCAPED_STRICT_RESERVED_WORD ||
next == Token::LET || next == Token::STATIC ||
(next == Token::YIELD && !is_generator()))) {
classifier->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kUnexpectedStrictReserved);
+ if (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
+ is_strict(language_mode())) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return Traits::EmptyIdentifier();
+ }
if (next == Token::LET) {
classifier->RecordLetPatternError(scanner()->location(),
MessageTemplate::kLetInLexicalBinding);
@@ -2133,15 +1130,14 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
template <class Traits>
-typename ParserBase<Traits>::IdentifierT ParserBase<
- Traits>::ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
- bool* ok) {
+typename ParserBase<Traits>::IdentifierT
+ParserBase<Traits>::ParseIdentifierOrStrictReservedWord(
+ bool is_generator, bool* is_strict_reserved, bool* ok) {
Token::Value next = Next();
if (next == Token::IDENTIFIER) {
*is_strict_reserved = false;
} else if (next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
- next == Token::STATIC ||
- (next == Token::YIELD && !this->is_generator())) {
+ next == Token::STATIC || (next == Token::YIELD && !is_generator)) {
*is_strict_reserved = true;
} else {
ReportUnexpectedToken(next);
@@ -2161,7 +1157,9 @@ ParserBase<Traits>::ParseIdentifierName(bool* ok) {
Token::Value next = Next();
if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
next != Token::LET && next != Token::STATIC && next != Token::YIELD &&
- next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
+ next != Token::FUTURE_STRICT_RESERVED_WORD &&
+ next != Token::ESCAPED_KEYWORD &&
+ next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
this->ReportUnexpectedToken(next);
*ok = false;
return Traits::EmptyIdentifier();
@@ -2199,13 +1197,14 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
int literal_index = function_state_->NextMaterializedLiteralIndex();
IdentifierT js_pattern = this->GetNextSymbol(scanner());
- if (!scanner()->ScanRegExpFlags()) {
+ Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
+ if (flags.IsNothing()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
*ok = false;
return Traits::EmptyExpression();
}
- IdentifierT js_flags = this->GetNextSymbol(scanner());
+ int js_flags = flags.FromJust();
Next();
return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index,
is_strong(language_mode()), pos);
@@ -2278,6 +1277,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::LET:
case Token::STATIC:
case Token::YIELD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseAndClassifyIdentifier(classifier, CHECK_OK);
@@ -2303,13 +1303,13 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->ParseRegExpLiteral(false, classifier, ok);
case Token::LBRACK:
- if (!allow_harmony_destructuring()) {
+ if (!allow_harmony_destructuring_bind()) {
BindingPatternUnexpectedToken(classifier);
}
return this->ParseArrayLiteral(classifier, ok);
case Token::LBRACE:
- if (!allow_harmony_destructuring()) {
+ if (!allow_harmony_destructuring_bind()) {
BindingPatternUnexpectedToken(classifier);
}
return this->ParseObjectLiteral(classifier, ok);
@@ -2335,7 +1335,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
MessageTemplate::kUnexpectedToken,
Token::String(Token::RPAREN));
return factory()->NewEmptyParentheses(beg_pos);
- } else if (allow_harmony_rest_parameters() && Check(Token::ELLIPSIS)) {
+ } else if (Check(Token::ELLIPSIS)) {
// (...x)=>x. The continuation that looks for the => is in
// ParseAssignmentExpression.
int ellipsis_pos = position();
@@ -2343,18 +1343,8 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
MessageTemplate::kUnexpectedToken,
Token::String(Token::ELLIPSIS));
classifier->RecordNonSimpleParameter();
- Scanner::Location expr_loc = scanner()->peek_location();
- Token::Value tok = peek();
ExpressionT expr =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- // Patterns are not allowed as rest parameters. There is no way we can
- // succeed so go ahead and use the convenient ReportUnexpectedToken
- // interface.
- if (!Traits::IsIdentifier(expr)) {
- ReportUnexpectedTokenAt(expr_loc, tok);
- *ok = false;
- return this->EmptyExpression();
- }
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -2367,8 +1357,12 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
parenthesized_function_ = (peek() == Token::FUNCTION);
- ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
+ ExpressionT expr = this->ParseExpression(true, kIsPossibleArrowFormals,
+ classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
+ if (peek() != Token::ARROW) {
+ expr->set_is_parenthesized();
+ }
return expr;
}
@@ -2431,22 +1425,28 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
bool accept_IN, bool* ok) {
ExpressionClassifier classifier;
ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ result = Traits::RewriteNonPattern(result, &classifier, CHECK_OK);
return result;
}
-// Precedence = 1
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
+ return ParseExpression(accept_IN, kIsNormalAssignment, classifier, ok);
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+ bool accept_IN, int flags, ExpressionClassifier* classifier, bool* ok) {
// Expression ::
// AssignmentExpression
// Expression ',' AssignmentExpression
ExpressionClassifier binding_classifier;
- ExpressionT result =
- this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
+ ExpressionT result = this->ParseAssignmentExpression(
+ accept_IN, flags, &binding_classifier, CHECK_OK);
classifier->Accumulate(binding_classifier,
ExpressionClassifier::AllProductions);
bool is_simple_parameter_list = this->IsIdentifier(result);
@@ -2460,7 +1460,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
}
Consume(Token::COMMA);
bool is_rest = false;
- if (allow_harmony_rest_parameters() && peek() == Token::ELLIPSIS) {
+ if (peek() == Token::ELLIPSIS) {
// 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
// as the formal parameters of'(x, y, ...z) => foo', and is not itself a
// valid expression or binding pattern.
@@ -2471,7 +1471,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
}
int pos = position();
ExpressionT right = this->ParseAssignmentExpression(
- accept_IN, &binding_classifier, CHECK_OK);
+ accept_IN, flags, &binding_classifier, CHECK_OK);
if (is_rest) right = factory()->NewSpread(right, pos);
is_simple_parameter_list =
is_simple_parameter_list && this->IsIdentifier(right);
@@ -2482,6 +1482,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
if (!is_simple_parameter_list || seen_rest) {
classifier->RecordNonSimpleParameter();
}
+
return result;
}
@@ -2498,7 +1499,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
int first_spread_index = -1;
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
- bool seen_spread = false;
ExpressionT elem = this->EmptyExpression();
if (peek() == Token::COMMA) {
if (is_strong(language_mode())) {
@@ -2514,18 +1514,31 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
ExpressionT argument =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
elem = factory()->NewSpread(argument, start_pos);
- seen_spread = true;
+
if (first_spread_index < 0) {
first_spread_index = values->length();
}
+
+ if (argument->IsAssignment()) {
+ classifier->RecordPatternError(
+ Scanner::Location(start_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ } else {
+ CheckDestructuringElement(argument, classifier, start_pos,
+ scanner()->location().end_pos);
+ }
+
+ if (peek() == Token::COMMA) {
+ classifier->RecordPatternError(
+ Scanner::Location(start_pos, scanner()->location().end_pos),
+ MessageTemplate::kElementAfterRest);
+ }
} else {
- elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ elem = this->ParseAssignmentExpression(true, kIsPossiblePatternElement,
+ classifier, CHECK_OK);
}
values->Add(elem, zone_);
if (peek() != Token::RBRACK) {
- if (seen_spread) {
- BindingPatternUnexpectedToken(classifier);
- }
Expect(Token::COMMA, CHECK_OK);
}
}
@@ -2542,7 +1555,8 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
IdentifierT* name, bool* is_get, bool* is_set, bool* is_static,
- bool* is_computed_name, ExpressionClassifier* classifier, bool* ok) {
+ bool* is_computed_name, bool* is_identifier, bool* is_escaped_keyword,
+ ExpressionClassifier* classifier, bool* ok) {
Token::Value token = peek();
int pos = peek_position();
@@ -2577,17 +1591,25 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
ExpressionClassifier computed_name_classifier;
ExpressionT expression =
ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(
+ expression, &computed_name_classifier, CHECK_OK);
classifier->Accumulate(computed_name_classifier,
ExpressionClassifier::ExpressionProductions);
Expect(Token::RBRACK, CHECK_OK);
return expression;
}
+ case Token::ESCAPED_KEYWORD:
+ *is_escaped_keyword = true;
+ *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
+ break;
+
case Token::STATIC:
*is_static = true;
// Fall through.
default:
+ *is_identifier = true;
*name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
break;
}
@@ -2604,10 +1626,9 @@ typename ParserBase<Traits>::ObjectLiteralPropertyT
ParserBase<Traits>::ParsePropertyDefinition(
ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
bool is_static, bool* is_computed_name, bool* has_seen_constructor,
- ExpressionClassifier* classifier, bool* ok) {
+ ExpressionClassifier* classifier, IdentifierT* name, bool* ok) {
DCHECK(!in_class || is_static || has_seen_constructor != nullptr);
ExpressionT value = this->EmptyExpression();
- IdentifierT name = this->EmptyIdentifier();
bool is_get = false;
bool is_set = false;
bool name_is_static = false;
@@ -2616,14 +1637,21 @@ ParserBase<Traits>::ParsePropertyDefinition(
Token::Value name_token = peek();
int next_beg_pos = scanner()->peek_location().beg_pos;
int next_end_pos = scanner()->peek_location().end_pos;
+ bool is_identifier = false;
+ bool is_escaped_keyword = false;
ExpressionT name_expression = ParsePropertyName(
- &name, &is_get, &is_set, &name_is_static, is_computed_name, classifier,
+ name, &is_get, &is_set, &name_is_static, is_computed_name, &is_identifier,
+ &is_escaped_keyword, classifier,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (fni_ != nullptr && !*is_computed_name) {
- this->PushLiteralName(fni_, name);
+ this->PushLiteralName(fni_, *name);
}
+ bool escaped_static =
+ is_escaped_keyword &&
+ scanner()->is_literal_contextual_keyword(CStrVector("static"));
+
if (!in_class && !is_generator) {
DCHECK(!is_static);
@@ -2636,13 +1664,14 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
Consume(Token::COLON);
value = this->ParseAssignmentExpression(
- true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ true, kIsPossiblePatternElement, classifier,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
return factory()->NewObjectLiteralProperty(name_expression, value, false,
*is_computed_name);
}
- if (Token::IsIdentifier(name_token, language_mode(),
- this->is_generator()) &&
+ if ((is_identifier || is_escaped_keyword) &&
(peek() == Token::COMMA || peek() == Token::RBRACE ||
peek() == Token::ASSIGN)) {
// PropertyDefinition
@@ -2651,6 +1680,14 @@ ParserBase<Traits>::ParsePropertyDefinition(
//
// CoverInitializedName
// IdentifierReference Initializer?
+ if (!Token::IsIdentifier(name_token, language_mode(),
+ this->is_generator())) {
+ if (!escaped_static) {
+ ReportUnexpectedTokenAt(scanner()->location(), name_token);
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+ }
if (classifier->duplicate_finder() != nullptr &&
scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
classifier->RecordDuplicateFormalParameterError(scanner()->location());
@@ -2661,18 +1698,23 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
ExpressionT lhs = this->ExpressionFromIdentifier(
- name, next_beg_pos, next_end_pos, scope_, factory());
+ *name, next_beg_pos, next_end_pos, scope_, factory());
+ CheckDestructuringElement(lhs, classifier, next_beg_pos, next_end_pos);
if (peek() == Token::ASSIGN) {
- this->ExpressionUnexpectedToken(classifier);
Consume(Token::ASSIGN);
ExpressionClassifier rhs_classifier;
ExpressionT rhs = this->ParseAssignmentExpression(
true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ rhs = Traits::RewriteNonPattern(
+ rhs, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
classifier->Accumulate(rhs_classifier,
ExpressionClassifier::ExpressionProductions);
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
RelocInfo::kNoPosition);
+ classifier->RecordCoverInitializedNameError(
+ Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidCoverInitializedName);
} else {
value = lhs;
}
@@ -2683,6 +1725,16 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
}
+ if (in_class && escaped_static && !is_static) {
+ ReportUnexpectedTokenAt(scanner()->location(), name_token);
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
+ }
+
+ // Method definitions are never valid in patterns.
+ classifier->RecordPatternError(
+ Scanner::Location(next_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
if (is_generator || peek() == Token::LPAREN) {
// MethodDefinition
@@ -2697,7 +1749,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
: FunctionKind::kConciseMethod;
- if (in_class && !is_static && this->IsConstructor(name)) {
+ if (in_class && !is_static && this->IsConstructor(*name)) {
*has_seen_constructor = true;
kind = has_extends ? FunctionKind::kSubclassConstructor
: FunctionKind::kBaseConstructor;
@@ -2706,9 +1758,9 @@ ParserBase<Traits>::ParsePropertyDefinition(
if (!in_class) kind = WithObjectLiteralBit(kind);
value = this->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
- FunctionLiteral::NORMAL_ARITY, language_mode(),
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kNormalArity, language_mode(),
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
return factory()->NewObjectLiteralProperty(name_expression, value,
@@ -2719,21 +1771,26 @@ ParserBase<Traits>::ParsePropertyDefinition(
if (in_class && name_is_static && !is_static) {
// ClassElement (static)
// 'static' MethodDefinition
- return ParsePropertyDefinition(checker, true, has_extends, true,
- is_computed_name, nullptr, classifier, ok);
+ *name = this->EmptyIdentifier();
+ ObjectLiteralPropertyT property = ParsePropertyDefinition(
+ checker, true, has_extends, true, is_computed_name, nullptr, classifier,
+ name, ok);
+ property = Traits::RewriteNonPatternObjectLiteralProperty(property,
+ classifier, ok);
+ return property;
}
if (is_get || is_set) {
// MethodDefinition (Accessors)
// get PropertyName '(' ')' '{' FunctionBody '}'
// set PropertyName '(' PropertySetParameterList ')' '{' FunctionBody '}'
- name = this->EmptyIdentifier();
+ *name = this->EmptyIdentifier();
bool dont_care = false;
name_token = peek();
name_expression = ParsePropertyName(
- &name, &dont_care, &dont_care, &dont_care, is_computed_name, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ name, &dont_care, &dont_care, &dont_care, is_computed_name, &dont_care,
+ &dont_care, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (!*is_computed_name) {
checker->CheckProperty(name_token, kAccessorProperty, is_static,
@@ -2744,9 +1801,9 @@ ParserBase<Traits>::ParsePropertyDefinition(
FunctionKind kind = FunctionKind::kAccessorFunction;
if (!in_class) kind = WithObjectLiteralBit(kind);
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
- name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
- is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
+ *name, scanner()->location(), kSkipFunctionNameCheck, kind,
+ RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
+ is_get ? FunctionLiteral::kGetterArity : FunctionLiteral::kSetterArity,
language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
// Make sure the name expression is a string since we need a Name for
@@ -2754,7 +1811,7 @@ ParserBase<Traits>::ParsePropertyDefinition(
// statically we can skip the extra runtime check.
if (!*is_computed_name) {
name_expression =
- factory()->NewStringLiteral(name, name_expression->position());
+ factory()->NewStringLiteral(*name, name_expression->position());
}
return factory()->NewObjectLiteralProperty(
@@ -2793,9 +1850,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
const bool is_static = false;
const bool has_extends = false;
bool is_computed_name = false;
+ IdentifierT name = this->EmptyIdentifier();
ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
&checker, in_class, has_extends, is_static, &is_computed_name, NULL,
- classifier, CHECK_OK);
+ classifier, &name, CHECK_OK);
if (is_computed_name) {
has_computed_names = true;
@@ -2819,6 +1877,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
}
if (fni_ != nullptr) fni_->Infer();
+
+ if (allow_harmony_function_name()) {
+ Traits::SetFunctionNameFromPropertyName(property, name);
+ }
}
Expect(Token::RBRACE, CHECK_OK);
@@ -2854,6 +1916,8 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
ExpressionT argument = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
+ argument = Traits::RewriteNonPattern(argument, classifier,
+ CHECK_OK_CUSTOM(NullExpressionList));
if (is_spread) {
if (!spread_arg.IsValid()) {
spread_arg.beg_pos = start_pos;
@@ -2903,7 +1967,7 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
// Precedence = 2
template <class Traits>
typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
ExpressionClassifier* classifier,
bool* ok) {
// AssignmentExpression ::
@@ -2911,7 +1975,9 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
// ArrowFunction
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
-
+ bool maybe_pattern_element = flags & kIsPossiblePatternElement;
+ bool maybe_arrow_formals = flags & kIsPossibleArrowFormals;
+ bool is_destructuring_assignment = false;
int lhs_beg_pos = peek_position();
if (peek() == Token::YIELD && is_generator()) {
@@ -2944,43 +2010,78 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
parameters.is_simple = false;
}
- Scanner::Location duplicate_loc = Scanner::Location::invalid();
- this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
- &duplicate_loc, CHECK_OK);
-
checkpoint.Restore(&parameters.materialized_literals_count);
scope->set_start_position(lhs_beg_pos);
+ Scanner::Location duplicate_loc = Scanner::Location::invalid();
+ this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
+ &duplicate_loc, CHECK_OK);
if (duplicate_loc.IsValid()) {
arrow_formals_classifier.RecordDuplicateFormalParameterError(
duplicate_loc);
}
expression = this->ParseArrowFunctionLiteral(
accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
+ if (maybe_pattern_element) {
+ classifier->RecordPatternError(
+ Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
if (fni_ != nullptr) fni_->Infer();
return expression;
}
+ if (this->IsValidReferenceExpression(expression)) {
+ arrow_formals_classifier.ForgiveAssignmentPatternError();
+ }
+
// "expression" was not itself an arrow function parameter list, but it might
// form part of one. Propagate speculative formal parameter error locations.
- classifier->Accumulate(arrow_formals_classifier,
- ExpressionClassifier::StandardProductions |
- ExpressionClassifier::FormalParametersProductions);
+ classifier->Accumulate(
+ arrow_formals_classifier,
+ ExpressionClassifier::StandardProductions |
+ ExpressionClassifier::FormalParametersProductions |
+ ExpressionClassifier::CoverInitializedNameProduction);
+
+ bool maybe_pattern =
+ (expression->IsObjectLiteral() || expression->IsArrayLiteral()) &&
+ !expression->is_parenthesized();
if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
+ if (maybe_pattern_element) {
+ CheckDestructuringElement(expression, classifier, lhs_beg_pos,
+ scanner()->location().end_pos);
+ }
return expression;
}
- if (!(allow_harmony_destructuring() || allow_harmony_default_parameters())) {
+ if (!(allow_harmony_destructuring_bind() ||
+ allow_harmony_default_parameters())) {
BindingPatternUnexpectedToken(classifier);
}
- expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, scanner()->location().end_pos,
- MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
+ if (allow_harmony_destructuring_assignment() && maybe_pattern &&
+ peek() == Token::ASSIGN) {
+ classifier->ForgiveCoverInitializedNameError();
+ ValidateAssignmentPattern(classifier, CHECK_OK);
+ is_destructuring_assignment = true;
+ } else if (maybe_arrow_formals) {
+ expression = this->ClassifyAndRewriteReferenceExpression(
+ classifier, expression, lhs_beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInAssignment);
+ } else {
+ if (maybe_pattern_element) {
+ CheckDestructuringElement(expression, classifier, lhs_beg_pos,
+ scanner()->location().end_pos);
+ }
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_beg_pos, scanner()->location().end_pos,
+ MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
+ }
+
expression = this->MarkExpressionAsAssigned(expression);
Token::Value op = Next(); // Get assignment operator.
@@ -2992,10 +2093,13 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
int pos = position();
ExpressionClassifier rhs_classifier;
+
ExpressionT right =
this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
- classifier->Accumulate(rhs_classifier,
- ExpressionClassifier::ExpressionProductions);
+ right = Traits::RewriteNonPattern(right, &rhs_classifier, CHECK_OK);
+ classifier->Accumulate(
+ rhs_classifier, ExpressionClassifier::ExpressionProductions |
+ ExpressionClassifier::CoverInitializedNameProduction);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -3006,23 +2110,38 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
function_state_->AddProperty();
}
+ if (op != Token::ASSIGN && maybe_pattern_element) {
+ classifier->RecordAssignmentPatternError(
+ Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+ }
+
this->CheckAssigningFunctionLiteralToProperty(expression, right);
if (fni_ != NULL) {
// Check if the right hand side is a call to avoid inferring a
// name if we're dealing with "a = function(){...}();"-like
// expression.
- if ((op == Token::INIT_VAR
- || op == Token::INIT_CONST_LEGACY
- || op == Token::ASSIGN)
- && (!right->IsCall() && !right->IsCallNew())) {
+ if ((op == Token::INIT || op == Token::ASSIGN) &&
+ (!right->IsCall() && !right->IsCallNew())) {
fni_->Infer();
} else {
fni_->RemoveLastFunction();
}
}
- return factory()->NewAssignment(op, expression, right, pos);
+ if (op == Token::ASSIGN && allow_harmony_function_name()) {
+ Traits::SetFunctionNameFromIdentifierRef(right, expression);
+ }
+
+ ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
+
+ if (is_destructuring_assignment) {
+ result = factory()->NewRewritableAssignmentExpression(result);
+ Traits::QueueDestructuringAssignmentForRewriting(result);
+ }
+
+ return result;
}
template <class Traits>
@@ -3059,13 +2178,19 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
+ expression =
+ Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
break;
}
}
if (kind == Yield::kDelegating) {
// var iterator = subject[Symbol.iterator]();
- expression = this->GetIterator(expression, factory());
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
+ expression = this->GetIterator(expression, factory(), pos + 1);
}
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
typename Traits::Type::YieldExpression yield =
factory()->NewYield(generator_object, expression, kind, pos);
return yield;
@@ -3087,6 +2212,7 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
ExpressionT expression =
this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
ArrowFormalParametersUnexpectedToken(classifier);
BindingPatternUnexpectedToken(classifier);
Consume(Token::CONDITIONAL);
@@ -3094,9 +2220,11 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
+ left = Traits::RewriteNonPattern(left, classifier, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
ExpressionT right =
ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
+ right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
return factory()->NewConditional(expression, left, right, pos);
}
@@ -3112,6 +2240,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
+ x = Traits::RewriteNonPattern(x, classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Token::Value op = Next();
@@ -3119,6 +2248,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
int pos = position();
ExpressionT y =
ParseBinaryExpression(prec1 + 1, accept_IN, classifier, CHECK_OK);
+ y = Traits::RewriteNonPattern(y, classifier, CHECK_OK);
if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
factory())) {
@@ -3181,6 +2311,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
op = Next();
int pos = position();
ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
if (is_strong(language_mode())) {
@@ -3207,6 +2338,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
this->MarkExpressionAsAssigned(expression);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
@@ -3238,6 +2370,7 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
Token::Value next = Next();
expression =
@@ -3268,12 +2401,14 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
+ index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
case Token::LPAREN: {
+ result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -3284,7 +2419,8 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
return this->EmptyExpression();
}
int pos;
- if (scanner()->current_token() == Token::IDENTIFIER) {
+ if (scanner()->current_token() == Token::IDENTIFIER ||
+ scanner()->current_token() == Token::SUPER) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
pos = position();
@@ -3327,8 +2463,8 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
// implicit binding assignment to the 'this' variable.
if (is_super_call) {
ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
- result = factory()->NewAssignment(Token::INIT_CONST, this_expr,
- result, pos);
+ result =
+ factory()->NewAssignment(Token::INIT, this_expr, result, pos);
}
if (fni_ != NULL) fni_->RemoveLastFunction();
@@ -3400,6 +2536,7 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
} else {
result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
}
+ result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
@@ -3451,12 +2588,12 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
bool is_strict_reserved_name = false;
Scanner::Location function_name_location = Scanner::Location::invalid();
FunctionLiteral::FunctionType function_type =
- FunctionLiteral::ANONYMOUS_EXPRESSION;
+ FunctionLiteral::kAnonymousExpression;
if (peek_any_identifier()) {
- name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
- CHECK_OK);
+ name = ParseIdentifierOrStrictReservedWord(
+ is_generator, &is_strict_reserved_name, CHECK_OK);
function_name_location = scanner()->location();
- function_type = FunctionLiteral::NAMED_EXPRESSION;
+ function_type = FunctionLiteral::kNamedExpression;
}
result = this->ParseFunctionLiteral(
name, function_name_location,
@@ -3464,7 +2601,7 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
+ function_token_position, function_type, FunctionLiteral::kNormalArity,
language_mode(), CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
@@ -3499,6 +2636,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
+ index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
left = factory()->NewProperty(this_expr, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -3534,6 +2672,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
ExpressionT right =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
this->CheckAssigningFunctionLiteralToProperty(left, right);
function_state_->AddProperty();
if (fni_ != NULL) {
@@ -3614,7 +2753,7 @@ ParserBase<Traits>::ParseStrongSuperCallExpression(
// Explicit calls to the super constructor using super() perform an implicit
// binding assignment to the 'this' variable.
ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
- return factory()->NewAssignment(Token::INIT_CONST, this_expr, expr, pos);
+ return factory()->NewAssignment(Token::INIT, this_expr, expr, pos);
}
@@ -3623,8 +2762,8 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseSuperExpression(bool is_new,
ExpressionClassifier* classifier,
bool* ok) {
- int pos = position();
Expect(Token::SUPER, CHECK_OK);
+ int pos = position();
Scope* scope = scope_->ReceiverScope();
FunctionKind kind = scope->function_kind();
@@ -3690,6 +2829,7 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
+ index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -3754,7 +2894,7 @@ void ParserBase<Traits>::ParseFormalParameter(
if (!*ok) return;
if (!Traits::IsIdentifier(pattern)) {
- if (is_rest || !allow_harmony_destructuring()) {
+ if (!allow_harmony_destructuring_bind()) {
ReportUnexpectedToken(next);
*ok = false;
return;
@@ -3765,25 +2905,20 @@ void ParserBase<Traits>::ParseFormalParameter(
classifier->RecordNonSimpleParameter();
}
- if (is_rest) {
- parameters->rest_array_literal_index =
- function_state_->NextMaterializedLiteralIndex();
- ++parameters->materialized_literals_count;
- }
-
ExpressionT initializer = Traits::EmptyExpression();
if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
ExpressionClassifier init_classifier;
initializer = ParseAssignmentExpression(true, &init_classifier, ok);
if (!*ok) return;
- ValidateExpression(&init_classifier, ok);
+ initializer = Traits::RewriteNonPattern(initializer, &init_classifier, ok);
ValidateFormalParameterInitializer(&init_classifier, ok);
if (!*ok) return;
parameters->is_simple = false;
classifier->RecordNonSimpleParameter();
}
- Traits::AddFormalParameter(parameters, pattern, initializer, is_rest);
+ Traits::AddFormalParameter(parameters, pattern, initializer,
+ scanner()->location().end_pos, is_rest);
}
@@ -3813,8 +2948,7 @@ void ParserBase<Traits>::ParseFormalParameterList(
*ok = false;
return;
}
- parameters->has_rest =
- allow_harmony_rest_parameters() && Check(Token::ELLIPSIS);
+ parameters->has_rest = Check(Token::ELLIPSIS);
ParseFormalParameter(parameters, classifier, ok);
if (!*ok) return;
} while (!parameters->has_rest && Check(Token::COMMA));
@@ -3843,14 +2977,14 @@ void ParserBase<Traits>::CheckArityRestrictions(
int param_count, FunctionLiteral::ArityRestriction arity_restriction,
bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok) {
switch (arity_restriction) {
- case FunctionLiteral::GETTER_ARITY:
+ case FunctionLiteral::kGetterArity:
if (param_count != 0) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadGetterArity);
*ok = false;
}
break;
- case FunctionLiteral::SETTER_ARITY:
+ case FunctionLiteral::kSetterArity:
if (param_count != 1) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
MessageTemplate::kBadSetterArity);
@@ -3931,7 +3065,6 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
body = this->NewStatementList(0, zone());
this->SkipLazyFunctionBody(&materialized_literal_count,
&expected_property_count, CHECK_OK);
-
if (formal_parameters.materialized_literals_count > 0) {
materialized_literal_count +=
formal_parameters.materialized_literals_count;
@@ -3939,7 +3072,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
} else {
body = this->ParseEagerFunctionBody(
this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
- kArrowFunction, FunctionLiteral::ANONYMOUS_EXPRESSION, CHECK_OK);
+ kArrowFunction, FunctionLiteral::kAnonymousExpression, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -3951,7 +3084,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
ExpressionClassifier classifier;
ExpressionT expression =
ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(expression, &classifier, CHECK_OK);
body = this->NewStatementList(1, zone());
this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
body->Add(factory()->NewReturnStatement(expression, pos), zone());
@@ -3978,14 +3111,15 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
if (is_strict(language_mode()) || allow_harmony_sloppy()) {
this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
}
+
+ Traits::RewriteDestructuringAssignments();
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
- this->EmptyIdentifierString(), ast_value_factory(),
- formal_parameters.scope, body, materialized_literal_count,
- expected_property_count, num_parameters,
+ this->EmptyIdentifierString(), formal_parameters.scope, body,
+ materialized_literal_count, expected_property_count, num_parameters,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+ FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldLazyCompile, FunctionKind::kArrowFunction,
formal_parameters.scope->start_position());
@@ -4056,6 +3190,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
int expr_pos = peek_position();
ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
+ expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
Traits::AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
@@ -4109,21 +3244,33 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok) {
+ ExpressionClassifier classifier;
+ ExpressionT result = ClassifyAndRewriteReferenceExpression(
+ &classifier, expression, beg_pos, end_pos, message, type);
+ ValidateExpression(&classifier, ok);
+ if (!*ok) return this->EmptyExpression();
+ return result;
+}
+
+
+template <typename Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ClassifyAndRewriteReferenceExpression(
+ ExpressionClassifier* classifier, ExpressionT expression, int beg_pos,
+ int end_pos, MessageTemplate::Template message, ParseErrorType type) {
Scanner::Location location(beg_pos, end_pos);
if (this->IsIdentifier(expression)) {
if (is_strict(language_mode()) &&
this->IsEvalOrArguments(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, MessageTemplate::kStrictEvalArguments,
- kSyntaxError);
- *ok = false;
- return this->EmptyExpression();
+ classifier->RecordExpressionError(
+ location, MessageTemplate::kStrictEvalArguments, kSyntaxError);
+ return expression;
}
if (is_strong(language_mode()) &&
this->IsUndefined(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, MessageTemplate::kStrongUndefined,
- kSyntaxError);
- *ok = false;
- return this->EmptyExpression();
+ classifier->RecordExpressionError(
+ location, MessageTemplate::kStrongUndefined, kSyntaxError);
+ return expression;
}
}
if (expression->IsValidReferenceExpression()) {
@@ -4135,9 +3282,37 @@ ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT error = this->NewThrowReferenceError(message, pos);
return factory()->NewProperty(expression, error, pos);
} else {
- this->ReportMessageAt(location, message, type);
- *ok = false;
- return this->EmptyExpression();
+ classifier->RecordExpressionError(location, message, type);
+ return expression;
+ }
+}
+
+
+template <typename Traits>
+bool ParserBase<Traits>::IsValidReferenceExpression(ExpressionT expression) {
+ return this->IsAssignableIdentifier(expression) || expression->IsProperty();
+}
+
+
+template <typename Traits>
+void ParserBase<Traits>::CheckDestructuringElement(
+ ExpressionT expression, ExpressionClassifier* classifier, int begin,
+ int end) {
+ static const MessageTemplate::Template message =
+ MessageTemplate::kInvalidDestructuringTarget;
+ const Scanner::Location location(begin, end);
+ if (expression->IsArrayLiteral() || expression->IsObjectLiteral() ||
+ expression->IsAssignment()) {
+ if (expression->is_parenthesized()) {
+ classifier->RecordPatternError(location, message);
+ }
+ return;
+ }
+
+ if (expression->IsProperty()) {
+ classifier->RecordBindingPatternError(location, message);
+ } else if (!this->IsAssignableIdentifier(expression)) {
+ classifier->RecordPatternError(location, message);
}
}
@@ -4202,4 +3377,4 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
} // namespace internal
} // namespace v8
-#endif // V8_PREPARSER_H
+#endif // V8_PARSING_PARSER_BASE_H
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parsing/parser.cc
index 2704db3d7c..b1b8c1316b 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/api.h"
-#include "src/ast.h"
-#include "src/ast-literal-reindexer.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/ast-literal-reindexer.h"
+#include "src/ast/scopeinfo.h"
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -14,12 +16,11 @@
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/messages.h"
-#include "src/parameter-initializer-rewriter.h"
-#include "src/preparser.h"
-#include "src/rewriter.h"
+#include "src/parsing/parameter-initializer-rewriter.h"
+#include "src/parsing/parser-base.h"
+#include "src/parsing/rewriter.h"
+#include "src/parsing/scanner-character-streams.h"
#include "src/runtime/runtime.h"
-#include "src/scanner-character-streams.h"
-#include "src/scopeinfo.h"
#include "src/string-stream.h"
namespace v8 {
@@ -94,162 +95,6 @@ ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
}
-RegExpBuilder::RegExpBuilder(Zone* zone)
- : zone_(zone),
- pending_empty_(false),
- characters_(NULL),
- terms_(),
- alternatives_()
-#ifdef DEBUG
- , last_added_(ADD_NONE)
-#endif
- {}
-
-
-void RegExpBuilder::FlushCharacters() {
- pending_empty_ = false;
- if (characters_ != NULL) {
- RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
- characters_ = NULL;
- text_.Add(atom, zone());
- LAST(ADD_ATOM);
- }
-}
-
-
-void RegExpBuilder::FlushText() {
- FlushCharacters();
- int num_text = text_.length();
- if (num_text == 0) {
- return;
- } else if (num_text == 1) {
- terms_.Add(text_.last(), zone());
- } else {
- RegExpText* text = new(zone()) RegExpText(zone());
- for (int i = 0; i < num_text; i++)
- text_.Get(i)->AppendToText(text, zone());
- terms_.Add(text, zone());
- }
- text_.Clear();
-}
-
-
-void RegExpBuilder::AddCharacter(uc16 c) {
- pending_empty_ = false;
- if (characters_ == NULL) {
- characters_ = new(zone()) ZoneList<uc16>(4, zone());
- }
- characters_->Add(c, zone());
- LAST(ADD_CHAR);
-}
-
-
-void RegExpBuilder::AddEmpty() {
- pending_empty_ = true;
-}
-
-
-void RegExpBuilder::AddAtom(RegExpTree* term) {
- if (term->IsEmpty()) {
- AddEmpty();
- return;
- }
- if (term->IsTextElement()) {
- FlushCharacters();
- text_.Add(term, zone());
- } else {
- FlushText();
- terms_.Add(term, zone());
- }
- LAST(ADD_ATOM);
-}
-
-
-void RegExpBuilder::AddAssertion(RegExpTree* assert) {
- FlushText();
- terms_.Add(assert, zone());
- LAST(ADD_ASSERT);
-}
-
-
-void RegExpBuilder::NewAlternative() {
- FlushTerms();
-}
-
-
-void RegExpBuilder::FlushTerms() {
- FlushText();
- int num_terms = terms_.length();
- RegExpTree* alternative;
- if (num_terms == 0) {
- alternative = new (zone()) RegExpEmpty();
- } else if (num_terms == 1) {
- alternative = terms_.last();
- } else {
- alternative = new(zone()) RegExpAlternative(terms_.GetList(zone()));
- }
- alternatives_.Add(alternative, zone());
- terms_.Clear();
- LAST(ADD_NONE);
-}
-
-
-RegExpTree* RegExpBuilder::ToRegExp() {
- FlushTerms();
- int num_alternatives = alternatives_.length();
- if (num_alternatives == 0) return new (zone()) RegExpEmpty();
- if (num_alternatives == 1) return alternatives_.last();
- return new(zone()) RegExpDisjunction(alternatives_.GetList(zone()));
-}
-
-
-void RegExpBuilder::AddQuantifierToAtom(
- int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
- if (pending_empty_) {
- pending_empty_ = false;
- return;
- }
- RegExpTree* atom;
- if (characters_ != NULL) {
- DCHECK(last_added_ == ADD_CHAR);
- // Last atom was character.
- Vector<const uc16> char_vector = characters_->ToConstVector();
- int num_chars = char_vector.length();
- if (num_chars > 1) {
- Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
- text_.Add(new(zone()) RegExpAtom(prefix), zone());
- char_vector = char_vector.SubVector(num_chars - 1, num_chars);
- }
- characters_ = NULL;
- atom = new(zone()) RegExpAtom(char_vector);
- FlushText();
- } else if (text_.length() > 0) {
- DCHECK(last_added_ == ADD_ATOM);
- atom = text_.RemoveLast();
- FlushText();
- } else if (terms_.length() > 0) {
- DCHECK(last_added_ == ADD_ATOM);
- atom = terms_.RemoveLast();
- if (atom->max_match() == 0) {
- // Guaranteed to only match an empty string.
- LAST(ADD_TERM);
- if (min == 0) {
- return;
- }
- terms_.Add(atom, zone());
- return;
- }
- } else {
- // Only call immediately after adding an atom or character!
- UNREACHABLE();
- return;
- }
- terms_.Add(
- new(zone()) RegExpQuantifier(min, max, quantifier_type, atom), zone());
- LAST(ADD_TERM);
-}
-
-
FunctionEntry ParseData::GetFunctionEntry(int start) {
// The current pre-data entry must be a FunctionEntry with the given
// start position.
@@ -360,24 +205,29 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
if (call_super) {
- // %_DefaultConstructorCallSuper(new.target, %GetPrototype(<this-fun>))
+ // $super_constructor = %_GetSuperConstructor(<this-function>)
+ // %reflect_construct($super_constructor, arguments, new.target)
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(2, zone());
- VariableProxy* new_target_proxy = scope_->NewUnresolved(
- factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
- pos);
- args->Add(new_target_proxy, zone());
VariableProxy* this_function_proxy = scope_->NewUnresolved(
factory(), ast_value_factory()->this_function_string(),
Variable::NORMAL, pos);
ZoneList<Expression*>* tmp =
new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(this_function_proxy, zone());
- Expression* get_prototype =
- factory()->NewCallRuntime(Runtime::kGetPrototype, tmp, pos);
- args->Add(get_prototype, zone());
+ Expression* super_constructor = factory()->NewCallRuntime(
+ Runtime::kInlineGetSuperConstructor, tmp, pos);
+ args->Add(super_constructor, zone());
+ VariableProxy* arguments_proxy = scope_->NewUnresolved(
+ factory(), ast_value_factory()->arguments_string(), Variable::NORMAL,
+ pos);
+ args->Add(arguments_proxy, zone());
+ VariableProxy* new_target_proxy = scope_->NewUnresolved(
+ factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
+ pos);
+ args->Add(new_target_proxy, zone());
CallRuntime* call = factory()->NewCallRuntime(
- Runtime::kInlineDefaultConstructorCallSuper, args, pos);
+ Context::REFLECT_CONSTRUCT_INDEX, args, pos);
body->Add(factory()->NewReturnStatement(call, pos), zone());
}
@@ -386,10 +236,10 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
}
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- name, ast_value_factory(), function_scope, body,
- materialized_literal_count, expected_property_count, parameter_count,
+ name, function_scope, body, materialized_literal_count,
+ expected_property_count, parameter_count,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+ FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldLazyCompile, kind, pos);
return function_literal;
@@ -525,17 +375,6 @@ void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
}
-void ParserTraits::CheckPossibleEvalCall(Expression* expression,
- Scope* scope) {
- VariableProxy* callee = expression->AsVariableProxy();
- if (callee != NULL &&
- callee->raw_name() == parser_->ast_value_factory()->eval_string()) {
- scope->DeclarationScope()->RecordEvalCall();
- scope->RecordEvalCall();
- }
-}
-
-
Expression* ParserTraits::MarkExpressionAsAssigned(Expression* expression) {
VariableProxy* proxy =
expression != NULL ? expression->AsVariableProxy() : NULL;
@@ -551,49 +390,52 @@ bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
double y_val = y->AsLiteral()->raw_value()->AsNumber();
+ bool x_has_dot = (*x)->AsLiteral()->raw_value()->ContainsDot();
+ bool y_has_dot = y->AsLiteral()->raw_value()->ContainsDot();
+ bool has_dot = x_has_dot || y_has_dot;
switch (op) {
case Token::ADD:
- *x = factory->NewNumberLiteral(x_val + y_val, pos);
+ *x = factory->NewNumberLiteral(x_val + y_val, pos, has_dot);
return true;
case Token::SUB:
- *x = factory->NewNumberLiteral(x_val - y_val, pos);
+ *x = factory->NewNumberLiteral(x_val - y_val, pos, has_dot);
return true;
case Token::MUL:
- *x = factory->NewNumberLiteral(x_val * y_val, pos);
+ *x = factory->NewNumberLiteral(x_val * y_val, pos, has_dot);
return true;
case Token::DIV:
- *x = factory->NewNumberLiteral(x_val / y_val, pos);
+ *x = factory->NewNumberLiteral(x_val / y_val, pos, has_dot);
return true;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- *x = factory->NewNumberLiteral(value, pos);
+ *x = factory->NewNumberLiteral(value, pos, has_dot);
return true;
}
default:
@@ -617,13 +459,14 @@ Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->AsNumber();
+ bool has_dot = literal->ContainsDot();
switch (op) {
case Token::ADD:
return expression;
case Token::SUB:
- return factory->NewNumberLiteral(-value, pos);
+ return factory->NewNumberLiteral(-value, pos, has_dot);
case Token::BIT_NOT:
- return factory->NewNumberLiteral(~DoubleToInt32(value), pos);
+ return factory->NewNumberLiteral(~DoubleToInt32(value), pos, has_dot);
default:
break;
}
@@ -852,10 +695,9 @@ Expression* ParserTraits::ExpressionFromString(int pos, Scanner* scanner,
Expression* ParserTraits::GetIterator(Expression* iterable,
- AstNodeFactory* factory) {
+ AstNodeFactory* factory, int pos) {
Expression* iterator_symbol_literal =
factory->NewSymbolLiteral("iterator_symbol", RelocInfo::kNoPosition);
- int pos = iterable->position();
Expression* prop =
factory->NewProperty(iterable, iterator_symbol_literal, pos);
Zone* zone = parser_->zone();
@@ -917,12 +759,14 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
- set_allow_harmony_rest_parameters(FLAG_harmony_rest_parameters);
set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
- set_allow_harmony_destructuring(FLAG_harmony_destructuring);
+ set_allow_harmony_destructuring_bind(FLAG_harmony_destructuring_bind);
+ set_allow_harmony_destructuring_assignment(
+ FLAG_harmony_destructuring_assignment);
set_allow_strong_mode(FLAG_strong_mode);
set_allow_legacy_const(FLAG_legacy_const);
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
+ set_allow_harmony_function_name(FLAG_harmony_function_name);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -1075,7 +919,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
InsertSloppyBlockFunctionVarBindings(scope, &ok);
}
if (ok && (is_strict(language_mode()) || allow_harmony_sloppy() ||
- allow_harmony_destructuring())) {
+ allow_harmony_destructuring_bind())) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -1090,14 +934,14 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
}
if (ok) {
+ ParserTraits::RewriteDestructuringAssignments();
result = factory()->NewFunctionLiteral(
- ast_value_factory()->empty_string(), ast_value_factory(), scope_,
- body, function_state.materialized_literal_count(),
+ ast_value_factory()->empty_string(), scope_, body,
+ function_state.materialized_literal_count(),
function_state.expected_property_count(), 0,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval,
- FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction,
- 0);
+ FunctionLiteral::kGlobalOrEval, FunctionLiteral::kShouldLazyCompile,
+ FunctionKind::kNormalFunction, 0);
}
}
@@ -1183,17 +1027,28 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
DCHECK(is_sloppy(scope->language_mode()) ||
is_strict(info->language_mode()));
DCHECK(info->language_mode() == shared_info->language_mode());
- FunctionLiteral::FunctionType function_type = shared_info->is_expression()
- ? (shared_info->is_anonymous()
- ? FunctionLiteral::ANONYMOUS_EXPRESSION
- : FunctionLiteral::NAMED_EXPRESSION)
- : FunctionLiteral::DECLARATION;
+ FunctionLiteral::FunctionType function_type =
+ shared_info->is_expression()
+ ? (shared_info->is_anonymous()
+ ? FunctionLiteral::kAnonymousExpression
+ : FunctionLiteral::kNamedExpression)
+ : FunctionLiteral::kDeclaration;
bool ok = true;
if (shared_info->is_arrow()) {
+ // TODO(adamk): We should construct this scope from the ScopeInfo.
Scope* scope =
NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+
+ // These two bits only need to be explicitly set because we're
+ // not passing the ScopeInfo to the Scope constructor.
+ // TODO(adamk): Remove these calls once the above NewScope call
+ // passes the ScopeInfo.
+ if (shared_info->scope_info()->CallsEval()) {
+ scope->RecordEvalCall();
+ }
SetLanguageMode(scope, shared_info->language_mode());
+
scope->set_start_position(shared_info->start_position());
ExpressionClassifier formals_classifier;
ParserFormalParameters formals(scope);
@@ -1249,7 +1104,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck,
shared_info->kind(), RelocInfo::kNoPosition, function_type,
- FunctionLiteral::NORMAL_ARITY, shared_info->language_mode(), &ok);
+ FunctionLiteral::kNormalArity, shared_info->language_mode(), &ok);
}
// Make sure the results agree.
DCHECK(ok == (result != NULL));
@@ -1720,7 +1575,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
int pos = peek_position();
ExpressionClassifier classifier;
Expression* expr = ParseAssignmentExpression(true, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
ExpectSemicolon(CHECK_OK);
result = factory()->NewExpressionStatement(expr, pos);
@@ -2025,9 +1880,11 @@ VariableProxy* Parser::NewUnresolved(const AstRawString* name,
// scope.
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
- return DeclarationScope(mode)->NewUnresolved(
- factory(), name, Variable::NORMAL, scanner()->location().beg_pos,
- scanner()->location().end_pos);
+ Scope* scope =
+ IsLexicalVariableMode(mode) ? scope_ : scope_->DeclarationScope();
+ return scope->NewUnresolved(factory(), name, Variable::NORMAL,
+ scanner()->location().beg_pos,
+ scanner()->location().end_pos);
}
@@ -2038,6 +1895,7 @@ Variable* Parser::Declare(Declaration* declaration,
DCHECK(proxy->raw_name() != NULL);
const AstRawString* name = proxy->raw_name();
VariableMode mode = declaration->mode();
+ bool is_function_declaration = declaration->IsFunctionDeclaration();
if (scope == nullptr) scope = scope_;
Scope* declaration_scope =
IsLexicalVariableMode(mode) ? scope : scope->DeclarationScope();
@@ -2064,7 +1922,7 @@ Variable* Parser::Declare(Declaration* declaration,
// Declare the name.
Variable::Kind kind = Variable::NORMAL;
int declaration_group_start = -1;
- if (declaration->IsFunctionDeclaration()) {
+ if (is_function_declaration) {
kind = Variable::FUNCTION;
} else if (declaration->IsVariableDeclaration() &&
declaration->AsVariableDeclaration()->is_class_declaration()) {
@@ -2075,8 +1933,11 @@ Variable* Parser::Declare(Declaration* declaration,
var = declaration_scope->DeclareLocal(
name, mode, declaration->initialization(), kind, kNotAssigned,
declaration_group_start);
- } else if (IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode()) ||
+ } else if (((IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode())) &&
+ // Allow duplicate function decls for web compat, see bug 4693.
+ (is_strict(language_mode()) || !is_function_declaration ||
+ !var->is_function())) ||
((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
!declaration_scope->is_script_scope())) {
// The name was declared in this scope before; check for conflicting
@@ -2093,7 +1954,9 @@ Variable* Parser::Declare(Declaration* declaration,
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
DCHECK(IsDeclaredVariableMode(var->mode()));
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
+ if (is_strict(language_mode()) ||
+ (allow_harmony_sloppy() && mode != CONST_LEGACY &&
+ var->mode() != CONST_LEGACY)) {
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
if (declaration_kind == DeclarationDescriptor::NORMAL) {
@@ -2210,7 +2073,8 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time not when reparsing
// because of lazy compilation.
- DeclarationScope(VAR)->ForceEagerCompilation();
+ // TODO(adamk): Should this be ClosureScope()?
+ scope_->DeclarationScope()->ForceEagerCompilation();
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
@@ -2222,8 +2086,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
name, extension_, RelocInfo::kNoPosition);
return factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition),
+ factory()->NewAssignment(Token::INIT, proxy, lit, RelocInfo::kNoPosition),
pos);
}
@@ -2250,7 +2113,7 @@ Statement* Parser::ParseFunctionDeclaration(
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- pos, FunctionLiteral::DECLARATION, FunctionLiteral::NORMAL_ARITY,
+ pos, FunctionLiteral::kDeclaration, FunctionLiteral::kNormalArity,
language_mode(), CHECK_OK);
// Even if we're not at the top-level of the global or a function
@@ -2338,9 +2201,8 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
outer_class_variable->AsClassVariable()->declaration_group_start());
}
- Token::Value init_op =
- is_strong(language_mode()) ? Token::INIT_CONST : Token::INIT_LET;
- Assignment* assignment = factory()->NewAssignment(init_op, proxy, value, pos);
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
if (names) names->Add(name, zone());
@@ -2348,35 +2210,8 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
}
-Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
- return ParseScopedBlock(labels, ok);
- }
-
- // Block ::
- // '{' Statement* '}'
-
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
- // Construct block expecting 16 statements.
- Block* result =
- factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Target target(&this->target_stack_, result);
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- result->statements()->Add(stat, zone());
- }
- }
- Expect(Token::RBRACE, CHECK_OK);
- return result;
-}
-
-
-Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
- bool* ok) {
+Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels,
+ bool finalize_block_scope, bool* ok) {
// The harmony mode uses block elements instead of statements.
//
// Block ::
@@ -2402,19 +2237,16 @@ Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
}
Expect(Token::RBRACE, CHECK_OK);
block_scope->set_end_position(scanner()->location().end_pos);
- block_scope = block_scope->FinalizeBlockScope();
+ if (finalize_block_scope) {
+ block_scope = block_scope->FinalizeBlockScope();
+ }
body->set_scope(block_scope);
return body;
}
-const AstRawString* Parser::DeclarationParsingResult::SingleName() const {
- if (declarations.length() != 1) return nullptr;
- const Declaration& declaration = declarations.at(0);
- if (declaration.pattern->IsVariableProxy()) {
- return declaration.pattern->AsVariableProxy()->raw_name();
- }
- return nullptr;
+Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
+ return ParseBlock(labels, true, ok);
}
@@ -2484,8 +2316,6 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
// need initialization. 'var' declared bindings are always initialized
// immediately by their declaration nodes.
parsing_result->descriptor.needs_init = false;
- parsing_result->descriptor.is_const = false;
- parsing_result->descriptor.init_op = Token::INIT_VAR;
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
@@ -2498,28 +2328,22 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
Consume(Token::CONST);
if (is_sloppy(language_mode()) && allow_legacy_const()) {
parsing_result->descriptor.mode = CONST_LEGACY;
- parsing_result->descriptor.init_op = Token::INIT_CONST_LEGACY;
++use_counts_[v8::Isolate::kLegacyConst];
} else {
DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = CONST;
- parsing_result->descriptor.init_op = Token::INIT_CONST;
}
- parsing_result->descriptor.is_const = true;
parsing_result->descriptor.needs_init = true;
} else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = LET;
parsing_result->descriptor.needs_init = true;
- parsing_result->descriptor.init_op = Token::INIT_LET;
} else {
UNREACHABLE(); // by current callers
}
- parsing_result->descriptor.declaration_scope =
- DeclarationScope(parsing_result->descriptor.mode);
parsing_result->descriptor.scope = scope_;
parsing_result->descriptor.hoist_scope = nullptr;
@@ -2546,14 +2370,16 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
ValidateLetPattern(&pattern_classifier, ok);
if (!*ok) return;
}
- if (!allow_harmony_destructuring() && !pattern->IsVariableProxy()) {
+ if (!allow_harmony_destructuring_bind() && !pattern->IsVariableProxy()) {
ReportUnexpectedToken(next);
*ok = false;
return;
}
}
- bool is_pattern = pattern->IsObjectLiteral() || pattern->IsArrayLiteral();
+ bool is_pattern =
+ (pattern->IsObjectLiteral() || pattern->IsArrayLiteral()) &&
+ !pattern->is_parenthesized();
Scanner::Location variable_loc = scanner()->location();
const AstRawString* single_name =
@@ -2580,7 +2406,7 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
value = ParseAssignmentExpression(var_context != kForStatement,
&classifier, ok);
if (!*ok) return;
- ValidateExpression(&classifier, ok);
+ value = ParserTraits::RewriteNonPattern(value, &classifier, ok);
if (!*ok) return;
variable_loc.end_pos = scanner()->location().end_pos;
@@ -2597,6 +2423,21 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
fni_->RemoveLastFunction();
}
}
+
+ if (allow_harmony_function_name() && single_name) {
+ if (value->IsFunctionLiteral()) {
+ auto function_literal = value->AsFunctionLiteral();
+ if (function_literal->is_anonymous()) {
+ function_literal->set_raw_name(single_name);
+ }
+ } else if (value->IsClassLiteral()) {
+ auto class_literal = value->AsClassLiteral();
+ if (class_literal->raw_name() == nullptr) {
+ class_literal->set_raw_name(single_name);
+ }
+ }
+ }
+
// End position of the initializer is after the assignment expression.
initializer_position = scanner()->location().end_pos;
} else {
@@ -2676,7 +2517,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
} else {
expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
}
- ValidateExpression(&classifier, CHECK_OK);
+ expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
switch (peek()) {
case Token::SEMICOLON:
Consume(Token::SEMICOLON);
@@ -2894,7 +2735,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// Is rewritten as:
//
// return (temp = expr) === undefined ? this :
- // %_IsSpecObject(temp) ? temp : throw new TypeError(...);
+ // %_IsJSReceiver(temp) ? temp : throw new TypeError(...);
Variable* temp = scope_->NewTemporary(
ast_value_factory()->empty_string());
Assignment* assign = factory()->NewAssignment(
@@ -2904,14 +2745,14 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
NewThrowTypeError(MessageTemplate::kDerivedConstructorReturn,
ast_value_factory()->empty_string(), pos);
- // %_IsSpecObject(temp)
+ // %_IsJSReceiver(temp)
ZoneList<Expression*>* is_spec_object_args =
new (zone()) ZoneList<Expression*>(1, zone());
is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
- Runtime::kInlineIsSpecObject, is_spec_object_args, pos);
+ Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
- // %_IsSpecObject(temp) ? temp : throw_expression
+ // %_IsJSReceiver(temp) ? temp : throw_expression
Expression* is_object_conditional = factory()->NewConditional(
is_spec_object_call, factory()->NewVariableProxy(temp),
throw_expression, pos);
@@ -2926,6 +2767,8 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
is_undefined, ThisExpression(scope_, factory(), pos),
is_object_conditional, pos);
}
+
+ return_value->MarkTail();
}
ExpectSemicolon(CHECK_OK);
@@ -3186,7 +3029,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
{
BlockState block_state(&scope_, catch_scope);
- // TODO(adamk): Make a version of ParseScopedBlock that takes a scope and
+ // TODO(adamk): Make a version of ParseBlock that takes a scope and
// a block.
catch_block =
factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
@@ -3201,15 +3044,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
descriptor.parser = this;
- descriptor.declaration_scope = scope_;
descriptor.scope = scope_;
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
- descriptor.is_const = false;
descriptor.needs_init = true;
descriptor.declaration_pos = pattern->position();
descriptor.initialization_pos = pattern->position();
- descriptor.init_op = Token::INIT_LET;
DeclarationParsingResult::Declaration decl(
pattern, pattern->position(),
@@ -3322,7 +3162,7 @@ WhileStatement* Parser::ParseWhileStatement(
}
-// !%_IsSpecObject(result = iterator.next()) &&
+// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
Expression* Parser::BuildIteratorNextResult(Expression* iterator,
Variable* result, int pos) {
@@ -3338,12 +3178,12 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
Expression* left =
factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
- // %_IsSpecObject(...)
+ // %_IsJSReceiver(...)
ZoneList<Expression*>* is_spec_object_args =
new (zone()) ZoneList<Expression*>(1, zone());
is_spec_object_args->Add(left, zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
- Runtime::kInlineIsSpecObject, is_spec_object_args, pos);
+ Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
// %ThrowIteratorResultNotAnObject(result)
Expression* result_proxy_again = factory()->NewVariableProxy(result);
@@ -3361,9 +3201,10 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
void Parser::InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each,
- Expression* subject,
- Statement* body) {
+ Expression* each, Expression* subject,
+ Statement* body,
+ bool is_destructuring) {
+ DCHECK(!is_destructuring || allow_harmony_destructuring_assignment());
ForOfStatement* for_of = stmt->AsForOfStatement();
if (for_of != NULL) {
@@ -3378,17 +3219,22 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* assign_each;
// iterator = subject[Symbol.iterator]()
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
assign_iterator = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(subject, factory()), subject->position());
+ GetIterator(subject, factory(), subject->position() - 2),
+ subject->position());
- // !%_IsSpecObject(result = iterator.next()) &&
+ // !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
{
// result = iterator.next()
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
- next_result =
- BuildIteratorNextResult(iterator_proxy, result, subject->position());
+ // Hackily disambiguate o from o.next and o [Symbol.iterator]().
+ // TODO(verwaest): Come up with a better solution.
+ next_result = BuildIteratorNextResult(iterator_proxy, result,
+ subject->position() - 1);
}
// result.done
@@ -3409,6 +3255,10 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
result_proxy, value_literal, RelocInfo::kNoPosition);
assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
RelocInfo::kNoPosition);
+ if (is_destructuring) {
+ assign_each = PatternRewriter::RewriteDestructuringAssignment(
+ this, assign_each->AsAssignment(), scope_);
+ }
}
for_of->Initialize(each, subject, body,
@@ -3417,6 +3267,23 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
result_done,
assign_each);
} else {
+ if (is_destructuring) {
+ Variable* temp =
+ scope_->NewTemporary(ast_value_factory()->empty_string());
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
+ Expression* assign_each = PatternRewriter::RewriteDestructuringAssignment(
+ this, factory()->NewAssignment(Token::ASSIGN, each, temp_proxy,
+ RelocInfo::kNoPosition),
+ scope_);
+ auto block =
+ factory()->NewBlock(nullptr, 2, false, RelocInfo::kNoPosition);
+ block->statements()->Add(factory()->NewExpressionStatement(
+ assign_each, RelocInfo::kNoPosition),
+ zone());
+ block->statements()->Add(body, zone());
+ body = block;
+ each = factory()->NewVariableProxy(temp);
+ }
stmt->Initialize(each, subject, body);
}
}
@@ -3536,9 +3403,8 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
inner_vars.Add(declaration->proxy()->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
- Assignment* assignment =
- factory()->NewAssignment(is_const ? Token::INIT_CONST : Token::INIT_LET,
- proxy, temp_proxy, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
DCHECK(init->position() != RelocInfo::kNoPosition);
@@ -3710,9 +3576,12 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
*ok = false;
return nullptr;
}
+ DeclarationParsingResult::Declaration& decl =
+ parsing_result.declarations[0];
if (parsing_result.first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
- IsLexicalVariableMode(parsing_result.descriptor.mode))) {
+ IsLexicalVariableMode(parsing_result.descriptor.mode) ||
+ !decl.pattern->IsVariableProxy())) {
if (mode == ForEachStatement::ITERATE) {
ReportMessageAt(parsing_result.first_initializer_loc,
MessageTemplate::kForOfLoopInitializer);
@@ -3725,23 +3594,22 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
return nullptr;
}
- DCHECK(parsing_result.declarations.length() == 1);
Block* init_block = nullptr;
// special case for legacy for (var/const x =.... in)
if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
- parsing_result.declarations[0].initializer != nullptr) {
+ decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+ const AstRawString* name =
+ decl.pattern->AsVariableProxy()->raw_name();
VariableProxy* single_var = scope_->NewUnresolved(
- factory(), parsing_result.SingleName(), Variable::NORMAL,
- each_beg_pos, each_end_pos);
+ factory(), name, Variable::NORMAL, each_beg_pos, each_end_pos);
init_block = factory()->NewBlock(
nullptr, 2, true, parsing_result.descriptor.declaration_pos);
init_block->statements()->Add(
factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::ASSIGN, single_var,
- parsing_result.declarations[0].initializer,
- RelocInfo::kNoPosition),
+ factory()->NewAssignment(Token::ASSIGN, single_var,
+ decl.initializer,
+ RelocInfo::kNoPosition),
RelocInfo::kNoPosition),
zone());
}
@@ -3784,9 +3652,6 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
auto each_initialization_block =
factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
{
- DCHECK(parsing_result.declarations.length() == 1);
- DeclarationParsingResult::Declaration decl =
- parsing_result.declarations[0];
auto descriptor = parsing_result.descriptor;
descriptor.declaration_pos = RelocInfo::kNoPosition;
descriptor.initialization_pos = RelocInfo::kNoPosition;
@@ -3803,7 +3668,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
body_block->statements()->Add(body, zone());
VariableProxy* temp_proxy =
factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
- InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
+ false);
scope_ = for_scope;
body_scope->set_end_position(scanner()->location().end_pos);
body_scope = body_scope->FinalizeBlockScope();
@@ -3854,7 +3720,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
} else {
int lhs_beg_pos = peek_position();
- Expression* expression = ParseExpression(false, CHECK_OK);
+ ExpressionClassifier classifier;
+ Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
ForEachStatement::VisitMode mode;
is_let_identifier_expression =
@@ -3862,11 +3729,25 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
expression->AsVariableProxy()->raw_name() ==
ast_value_factory()->let_string();
- if (CheckInOrOf(&mode, ok)) {
- if (!*ok) return nullptr;
- expression = this->CheckAndRewriteReferenceExpression(
- expression, lhs_beg_pos, lhs_end_pos,
- MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
+ bool is_for_each = CheckInOrOf(&mode, ok);
+ if (!*ok) return nullptr;
+ bool is_destructuring =
+ is_for_each && allow_harmony_destructuring_assignment() &&
+ (expression->IsArrayLiteral() || expression->IsObjectLiteral());
+
+ if (is_destructuring) {
+ ValidateAssignmentPattern(&classifier, CHECK_OK);
+ } else {
+ expression =
+ ParserTraits::RewriteNonPattern(expression, &classifier, CHECK_OK);
+ }
+
+ if (is_for_each) {
+ if (!is_destructuring) {
+ expression = this->CheckAndRewriteReferenceExpression(
+ expression, lhs_beg_pos, lhs_end_pos,
+ MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
+ }
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
@@ -3887,7 +3768,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
Statement* body = ParseSubStatement(NULL, CHECK_OK);
block->statements()->Add(body, zone());
- InitializeForEachStatement(loop, expression, enumerable, block);
+ InitializeForEachStatement(loop, expression, enumerable, block,
+ is_destructuring);
scope_ = saved_scope;
body_scope->set_end_position(scanner()->location().end_pos);
body_scope = body_scope->FinalizeBlockScope();
@@ -4092,9 +3974,6 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
if (is_rest) {
expr = expr->AsSpread()->expression();
parameters->has_rest = true;
- parameters->rest_array_literal_index =
- parser_->function_state_->NextMaterializedLiteralIndex();
- ++parameters->materialized_literals_count;
}
if (parameters->is_simple) {
parameters->is_simple = !is_rest && expr->IsVariableProxy();
@@ -4120,7 +3999,11 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
parser_->scope_, parameters->scope);
}
- AddFormalParameter(parameters, expr, initializer, is_rest);
+ // TODO(adamk): params_loc.end_pos is not the correct initializer position,
+ // but it should be conservative enough to trigger hole checks for variables
+ // referenced in the initializer (if any).
+ AddFormalParameter(parameters, expr, initializer, params_loc.end_pos,
+ is_rest);
}
@@ -4132,12 +4015,13 @@ DoExpression* Parser::ParseDoExpression(bool* ok) {
Expect(Token::DO, CHECK_OK);
Variable* result =
scope_->NewTemporary(ast_value_factory()->dot_result_string());
- Block* block = ParseScopedBlock(nullptr, CHECK_OK);
+ Block* block = ParseBlock(nullptr, false, CHECK_OK);
DoExpression* expr = factory()->NewDoExpression(block, result, pos);
if (!Rewriter::Rewrite(this, expr, ast_value_factory())) {
*ok = false;
return nullptr;
}
+ block->set_scope(block->scope()->FinalizeBlockScope());
return expr;
}
@@ -4172,10 +4056,7 @@ void ParserTraits::ReindexLiterals(const ParserFormalParameters& parameters) {
for (const auto p : parameters.params) {
if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
- }
-
- if (parameters.has_rest) {
- parameters.rest_array_literal_index = reindexer.NextIndex();
+ if (p.initializer != nullptr) reindexer.Reindex(p.initializer);
}
DCHECK(reindexer.count() <=
@@ -4243,7 +4124,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// nested function, and hoisting works normally relative to that.
Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
- Scope* scope = function_type == FunctionLiteral::DECLARATION &&
+ Scope* scope = function_type == FunctionLiteral::kDeclaration &&
is_sloppy(language_mode) &&
!allow_harmony_sloppy_function() &&
(original_scope_ == original_declaration_scope ||
@@ -4377,7 +4258,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// - The function literal shouldn't be hinted to eagerly compile.
bool use_temp_zone =
FLAG_lazy && !allow_natives() && extension_ == NULL && allow_lazy() &&
- function_type == FunctionLiteral::DECLARATION &&
+ function_type == FunctionLiteral::kDeclaration &&
eager_compile_hint != FunctionLiteral::kShouldEagerCompile;
// Open a new BodyScope, which sets our AstNodeFactory to allocate in the
// new temporary zone if the preconditions are satisfied, and ensures that
@@ -4432,9 +4313,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
}
if (is_strict(language_mode) || allow_harmony_sloppy() ||
- allow_harmony_destructuring()) {
+ allow_harmony_destructuring_bind()) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
+
+ if (body) {
+ // If body can be inspected, rewrite queued destructuring assignments
+ ParserTraits::RewriteDestructuringAssignments();
+ }
}
bool has_duplicate_parameters =
@@ -4444,14 +4330,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
: FunctionLiteral::kNoDuplicateParameters;
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- function_name, ast_value_factory(), scope, body,
- materialized_literal_count, expected_property_count, arity,
- duplicate_parameters, function_type, FunctionLiteral::kIsFunction,
+ function_name, scope, body, materialized_literal_count,
+ expected_property_count, arity, duplicate_parameters, function_type,
eager_compile_hint, kind, pos);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
+ if (scope->has_rest_parameter()) {
+ function_literal->set_dont_optimize_reason(kRestParameter);
+ }
+
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
@@ -4563,6 +4452,36 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
}
+class InitializerRewriter : public AstExpressionVisitor {
+ public:
+ InitializerRewriter(uintptr_t stack_limit, Expression* root, Parser* parser,
+ Scope* scope)
+ : AstExpressionVisitor(stack_limit, root),
+ parser_(parser),
+ scope_(scope) {}
+
+ private:
+ void VisitExpression(Expression* expr) {
+ RewritableAssignmentExpression* to_rewrite =
+ expr->AsRewritableAssignmentExpression();
+ if (to_rewrite == nullptr || to_rewrite->is_rewritten()) return;
+
+ Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
+ scope_);
+ }
+
+ private:
+ Parser* parser_;
+ Scope* scope_;
+};
+
+
+void Parser::RewriteParameterInitializer(Expression* expr, Scope* scope) {
+ InitializerRewriter rewriter(stack_limit_, expr, this, scope);
+ rewriter.Run();
+}
+
+
Block* Parser::BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok) {
DCHECK(!parameters.is_simple);
@@ -4571,23 +4490,32 @@ Block* Parser::BuildParameterInitializationBlock(
factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
for (int i = 0; i < parameters.params.length(); ++i) {
auto parameter = parameters.params[i];
+ if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.parser = this;
- descriptor.declaration_scope = scope_;
descriptor.scope = scope_;
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
- descriptor.is_const = false;
descriptor.needs_init = true;
descriptor.declaration_pos = parameter.pattern->position();
+ // The position that will be used by the AssignmentExpression
+ // which copies from the temp parameter to the pattern.
+ //
+ // TODO(adamk): Should this be RelocInfo::kNoPosition, since
+ // it's just copying from a temp var to the real param var?
descriptor.initialization_pos = parameter.pattern->position();
- descriptor.init_op = Token::INIT_LET;
+ // The initializer position which will end up in,
+ // Variable::initializer_position(), used for hole check elimination.
+ int initializer_position = parameter.pattern->position();
Expression* initial_value =
factory()->NewVariableProxy(parameters.scope->parameter(i));
if (parameter.initializer != nullptr) {
// IS_UNDEFINED($param) ? initializer : $param
- DCHECK(!parameter.is_rest);
+
+ // Ensure initializer is rewritten
+ RewriteParameterInitializer(parameter.initializer, scope_);
+
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
factory()->NewVariableProxy(parameters.scope->parameter(i)),
@@ -4597,83 +4525,7 @@ Block* Parser::BuildParameterInitializationBlock(
condition, parameter.initializer, initial_value,
RelocInfo::kNoPosition);
descriptor.initialization_pos = parameter.initializer->position();
- } else if (parameter.is_rest) {
- // $rest = [];
- // for (var $argument_index = $rest_index;
- // $argument_index < %_ArgumentsLength();
- // ++$argument_index) {
- // %AppendElement($rest, %_Arguments($argument_index));
- // }
- // let <param> = $rest;
- DCHECK(parameter.pattern->IsVariableProxy());
- DCHECK_EQ(i, parameters.params.length() - 1);
-
- int pos = parameter.pattern->position();
- Variable* temp_var = parameters.scope->parameter(i);
- auto empty_values = new (zone()) ZoneList<Expression*>(0, zone());
- auto empty_array = factory()->NewArrayLiteral(
- empty_values, parameters.rest_array_literal_index,
- is_strong(language_mode()), RelocInfo::kNoPosition);
-
- auto init_array = factory()->NewAssignment(
- Token::INIT_VAR, factory()->NewVariableProxy(temp_var), empty_array,
- RelocInfo::kNoPosition);
-
- auto loop = factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
-
- auto argument_index =
- parameters.scope->NewTemporary(ast_value_factory()->empty_string());
- auto init = factory()->NewExpressionStatement(
- factory()->NewAssignment(
- Token::INIT_VAR, factory()->NewVariableProxy(argument_index),
- factory()->NewSmiLiteral(i, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- auto empty_arguments = new (zone()) ZoneList<Expression*>(0, zone());
-
- // $arguments_index < arguments.length
- auto cond = factory()->NewCompareOperation(
- Token::LT, factory()->NewVariableProxy(argument_index),
- factory()->NewCallRuntime(Runtime::kInlineArgumentsLength,
- empty_arguments, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- // ++argument_index
- auto next = factory()->NewExpressionStatement(
- factory()->NewCountOperation(
- Token::INC, true, factory()->NewVariableProxy(argument_index),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- // %_Arguments($arguments_index)
- auto arguments_args = new (zone()) ZoneList<Expression*>(1, zone());
- arguments_args->Add(factory()->NewVariableProxy(argument_index), zone());
-
- // %AppendElement($rest, %_Arguments($arguments_index))
- auto append_element_args = new (zone()) ZoneList<Expression*>(2, zone());
-
- append_element_args->Add(factory()->NewVariableProxy(temp_var), zone());
- append_element_args->Add(
- factory()->NewCallRuntime(Runtime::kInlineArguments, arguments_args,
- RelocInfo::kNoPosition),
- zone());
-
- auto body = factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
-
- loop->Initialize(init, cond, next, body);
-
- init_block->statements()->Add(
- factory()->NewExpressionStatement(init_array, RelocInfo::kNoPosition),
- zone());
-
- init_block->statements()->Add(loop, zone());
-
- descriptor.initialization_pos = pos;
+ initializer_position = parameter.initializer_end_position;
}
Scope* param_scope = scope_;
@@ -4692,7 +4544,7 @@ Block* Parser::BuildParameterInitializationBlock(
{
BlockState block_state(&scope_, param_scope);
DeclarationParsingResult::Declaration decl(
- parameter.pattern, parameter.pattern->position(), initial_value);
+ parameter.pattern, initializer_position, initial_value);
PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
&decl, nullptr, CHECK_OK);
}
@@ -4719,7 +4571,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
static const int kFunctionNameAssignmentIndex = 0;
- if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
+ if (function_type == FunctionLiteral::kNamedExpression) {
DCHECK(function_name != NULL);
// If we have a named function expression, we add a local variable
// declaration to the body of the function with the name of the
@@ -4754,7 +4606,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
VariableProxy* init_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
- Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
+ Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Yield* yield = factory()->NewYield(
@@ -4806,16 +4658,13 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
result->Add(inner_block, zone());
}
- if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
+ if (function_type == FunctionLiteral::kNamedExpression) {
// Now that we know the language mode, we can create the const assignment
// in the previously reserved spot.
// NOTE: We create a proxy and resolve it here so that in the
// future we can change the AST to only refer to VariableProxies
// instead of Variables and Proxies as is the case now.
- const bool use_strict_const = is_strict(scope_->language_mode());
- Token::Value fvar_init_op =
- use_strict_const ? Token::INIT_CONST : Token::INIT_CONST_LEGACY;
- VariableMode fvar_mode = use_strict_const ? CONST : CONST_LEGACY;
+ VariableMode fvar_mode = is_strict(language_mode()) ? CONST : CONST_LEGACY;
Variable* fvar = new (zone())
Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
kCreatedInitialized, kNotAssigned);
@@ -4827,7 +4676,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
result->Set(kFunctionNameAssignmentIndex,
factory()->NewExpressionStatement(
- factory()->NewAssignment(fvar_init_op, fproxy,
+ factory()->NewAssignment(Token::INIT, fproxy,
factory()->NewThisFunction(pos),
RelocInfo::kNoPosition),
RelocInfo::kNoPosition));
@@ -4854,11 +4703,12 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
SET_ALLOW(natives);
SET_ALLOW(harmony_sloppy);
SET_ALLOW(harmony_sloppy_let);
- SET_ALLOW(harmony_rest_parameters);
SET_ALLOW(harmony_default_parameters);
- SET_ALLOW(harmony_destructuring);
+ SET_ALLOW(harmony_destructuring_bind);
+ SET_ALLOW(harmony_destructuring_assignment);
SET_ALLOW(strong_mode);
SET_ALLOW(harmony_do_expressions);
+ SET_ALLOW(harmony_function_name);
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4913,7 +4763,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
block_scope->set_start_position(scanner()->location().end_pos);
ExpressionClassifier classifier;
extends = ParseLeftHandSideExpression(&classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ extends = ParserTraits::RewriteNonPattern(extends, &classifier, CHECK_OK);
} else {
block_scope->set_start_position(scanner()->location().end_pos);
}
@@ -4935,10 +4785,12 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
bool is_computed_name = false; // Classes do not care about computed
// property names here.
ExpressionClassifier classifier;
+ const AstRawString* name = nullptr;
ObjectLiteral::Property* property = ParsePropertyDefinition(
&checker, in_class, has_extends, is_static, &is_computed_name,
- &has_seen_constructor, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ &has_seen_constructor, &classifier, &name, CHECK_OK);
+ property = ParserTraits::RewriteNonPatternObjectLiteralProperty(
+ property, &classifier, CHECK_OK);
if (has_seen_constructor && constructor == NULL) {
constructor = GetPropertyValue(property)->AsFunctionLiteral();
@@ -4948,6 +4800,10 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
}
if (fni_ != NULL) fni_->Infer();
+
+ if (allow_harmony_function_name()) {
+ SetFunctionNameFromPropertyName(property, name);
+ }
}
Expect(Token::RBRACE, CHECK_OK);
@@ -4985,7 +4841,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
ExpressionClassifier classifier;
ZoneList<Expression*>* args =
ParseArguments(&spread_pos, &classifier, CHECK_OK);
- ValidateExpression(&classifier, CHECK_OK);
+ args = RewriteNonPatternArguments(args, &classifier, CHECK_OK);
DCHECK(!spread_pos.IsValid());
@@ -5201,917 +5057,8 @@ void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
// ----------------------------------------------------------------------------
-// Regular expressions
-
-
-RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
- bool multiline, bool unicode, Isolate* isolate,
- Zone* zone)
- : isolate_(isolate),
- zone_(zone),
- error_(error),
- captures_(NULL),
- in_(in),
- current_(kEndMarker),
- next_pos_(0),
- capture_count_(0),
- has_more_(true),
- multiline_(multiline),
- unicode_(unicode),
- simple_(false),
- contains_anchor_(false),
- is_scanned_for_captures_(false),
- failed_(false) {
- Advance();
-}
-
-
-uc32 RegExpParser::Next() {
- if (has_next()) {
- return in()->Get(next_pos_);
- } else {
- return kEndMarker;
- }
-}
-
-
-void RegExpParser::Advance() {
- if (next_pos_ < in()->length()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
- ReportError(CStrVector(Isolate::kStackOverflowMessage));
- } else if (zone()->excess_allocation()) {
- ReportError(CStrVector("Regular expression too large"));
- } else {
- current_ = in()->Get(next_pos_);
- next_pos_++;
- }
- } else {
- current_ = kEndMarker;
- // Advance so that position() points to 1-after-the-last-character. This is
- // important so that Reset() to this position works correctly.
- next_pos_ = in()->length() + 1;
- has_more_ = false;
- }
-}
-
-
-void RegExpParser::Reset(int pos) {
- next_pos_ = pos;
- has_more_ = (pos < in()->length());
- Advance();
-}
-
-
-void RegExpParser::Advance(int dist) {
- next_pos_ += dist - 1;
- Advance();
-}
-
-
-bool RegExpParser::simple() {
- return simple_;
-}
-
-
-bool RegExpParser::IsSyntaxCharacter(uc32 c) {
- return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
- c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
- c == '{' || c == '}' || c == '|';
-}
-
-
-RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
- failed_ = true;
- *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
- // Zip to the end to make sure the no more input is read.
- current_ = kEndMarker;
- next_pos_ = in()->length();
- return NULL;
-}
-
-
-// Pattern ::
-// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
- RegExpTree* result = ParseDisjunction(CHECK_FAILED);
- DCHECK(!has_more());
- // If the result of parsing is a literal string atom, and it has the
- // same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
- simple_ = true;
- }
- return result;
-}
-
-
-// Disjunction ::
-// Alternative
-// Alternative | Disjunction
-// Alternative ::
-// [empty]
-// Term Alternative
-// Term ::
-// Assertion
-// Atom
-// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
- // Used to store current state while parsing subexpressions.
- RegExpParserState initial_state(NULL, INITIAL, 0, zone());
- RegExpParserState* stored_state = &initial_state;
- // Cache the builder in a local variable for quick access.
- RegExpBuilder* builder = initial_state.builder();
- while (true) {
- switch (current()) {
- case kEndMarker:
- if (stored_state->IsSubexpression()) {
- // Inside a parenthesized group when hitting end of input.
- ReportError(CStrVector("Unterminated group") CHECK_FAILED);
- }
- DCHECK_EQ(INITIAL, stored_state->group_type());
- // Parsing completed successfully.
- return builder->ToRegExp();
- case ')': {
- if (!stored_state->IsSubexpression()) {
- ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
- }
- DCHECK_NE(INITIAL, stored_state->group_type());
-
- Advance();
- // End disjunction parsing and convert builder content to new single
- // regexp atom.
- RegExpTree* body = builder->ToRegExp();
-
- int end_capture_index = captures_started();
-
- int capture_index = stored_state->capture_index();
- SubexpressionType group_type = stored_state->group_type();
-
- // Restore previous state.
- stored_state = stored_state->previous_state();
- builder = stored_state->builder();
-
- // Build result of subexpression.
- if (group_type == CAPTURE) {
- RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
- captures_->at(capture_index - 1) = capture;
- body = capture;
- } else if (group_type != GROUPING) {
- DCHECK(group_type == POSITIVE_LOOKAHEAD ||
- group_type == NEGATIVE_LOOKAHEAD);
- bool is_positive = (group_type == POSITIVE_LOOKAHEAD);
- body = new(zone()) RegExpLookahead(body,
- is_positive,
- end_capture_index - capture_index,
- capture_index);
- }
- builder->AddAtom(body);
- // For compatability with JSC and ES3, we allow quantifiers after
- // lookaheads, and break in all cases.
- break;
- }
- case '|': {
- Advance();
- builder->NewAlternative();
- continue;
- }
- case '*':
- case '+':
- case '?':
- return ReportError(CStrVector("Nothing to repeat"));
- case '^': {
- Advance();
- if (multiline_) {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
- } else {
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
- set_contains_anchor();
- }
- continue;
- }
- case '$': {
- Advance();
- RegExpAssertion::AssertionType assertion_type =
- multiline_ ? RegExpAssertion::END_OF_LINE :
- RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(new(zone()) RegExpAssertion(assertion_type));
- continue;
- }
- case '.': {
- Advance();
- // everything except \x0a, \x0d, \u2028 and \u2029
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape('.', ranges, zone());
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '(': {
- SubexpressionType subexpr_type = CAPTURE;
- Advance();
- if (current() == '?') {
- switch (Next()) {
- case ':':
- subexpr_type = GROUPING;
- break;
- case '=':
- subexpr_type = POSITIVE_LOOKAHEAD;
- break;
- case '!':
- subexpr_type = NEGATIVE_LOOKAHEAD;
- break;
- default:
- ReportError(CStrVector("Invalid group") CHECK_FAILED);
- break;
- }
- Advance(2);
- } else {
- if (captures_ == NULL) {
- captures_ = new(zone()) ZoneList<RegExpCapture*>(2, zone());
- }
- if (captures_started() >= kMaxCaptures) {
- ReportError(CStrVector("Too many captures") CHECK_FAILED);
- }
- captures_->Add(NULL, zone());
- }
- // Store current state and begin new disjunction parsing.
- stored_state = new(zone()) RegExpParserState(stored_state, subexpr_type,
- captures_started(), zone());
- builder = stored_state->builder();
- continue;
- }
- case '[': {
- RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
- builder->AddAtom(atom);
- break;
- }
- // Atom ::
- // \ AtomEscape
- case '\\':
- switch (Next()) {
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- case 'b':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
- continue;
- case 'B':
- Advance(2);
- builder->AddAssertion(
- new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
- continue;
- // AtomEscape ::
- // CharacterClassEscape
- //
- // CharacterClassEscape :: one of
- // d D s S w W
- case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
- uc32 c = Next();
- Advance(2);
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::AddClassEscape(c, ranges, zone());
- RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
- break;
- }
- case '1': case '2': case '3': case '4': case '5': case '6':
- case '7': case '8': case '9': {
- int index = 0;
- if (ParseBackReferenceIndex(&index)) {
- RegExpCapture* capture = NULL;
- if (captures_ != NULL && index <= captures_->length()) {
- capture = captures_->at(index - 1);
- }
- if (capture == NULL) {
- builder->AddEmpty();
- break;
- }
- RegExpTree* atom = new(zone()) RegExpBackReference(capture);
- builder->AddAtom(atom);
- break;
- }
- uc32 first_digit = Next();
- if (first_digit == '8' || first_digit == '9') {
- // If the 'u' flag is present, only syntax characters can be escaped,
- // no other identity escapes are allowed. If the 'u' flag is not
- // present, all identity escapes are allowed.
- if (!FLAG_harmony_unicode_regexps || !unicode_) {
- builder->AddCharacter(first_digit);
- Advance(2);
- } else {
- return ReportError(CStrVector("Invalid escape"));
- }
- break;
- }
- }
- // FALLTHROUGH
- case '0': {
- Advance();
- uc32 octal = ParseOctalLiteral();
- builder->AddCharacter(octal);
- break;
- }
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance(2);
- builder->AddCharacter('\f');
- break;
- case 'n':
- Advance(2);
- builder->AddCharacter('\n');
- break;
- case 'r':
- Advance(2);
- builder->AddCharacter('\r');
- break;
- case 't':
- Advance(2);
- builder->AddCharacter('\t');
- break;
- case 'v':
- Advance(2);
- builder->AddCharacter('\v');
- break;
- case 'c': {
- Advance();
- uc32 controlLetter = Next();
- // Special case if it is an ASCII letter.
- // Convert lower case letters to uppercase.
- uc32 letter = controlLetter & ~('a' ^ 'A');
- if (letter < 'A' || 'Z' < letter) {
- // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
- // This is outside the specification. We match JSC in
- // reading the backslash as a literal character instead
- // of as starting an escape.
- builder->AddCharacter('\\');
- } else {
- Advance(2);
- builder->AddCharacter(controlLetter & 0x1f);
- }
- break;
- }
- case 'x': {
- Advance(2);
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- builder->AddCharacter(value);
- } else if (!FLAG_harmony_unicode_regexps || !unicode_) {
- builder->AddCharacter('x');
- } else {
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- return ReportError(CStrVector("Invalid escape"));
- }
- break;
- }
- case 'u': {
- Advance(2);
- uc32 value;
- if (ParseUnicodeEscape(&value)) {
- builder->AddCharacter(value);
- } else if (!FLAG_harmony_unicode_regexps || !unicode_) {
- builder->AddCharacter('u');
- } else {
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- return ReportError(CStrVector("Invalid unicode escape"));
- }
- break;
- }
- default:
- Advance();
- // If the 'u' flag is present, only syntax characters can be escaped, no
- // other identity escapes are allowed. If the 'u' flag is not present,
- // all identity escapes are allowed.
- if (!FLAG_harmony_unicode_regexps || !unicode_ ||
- IsSyntaxCharacter(current())) {
- builder->AddCharacter(current());
- Advance();
- } else {
- return ReportError(CStrVector("Invalid escape"));
- }
- break;
- }
- break;
- case '{': {
- int dummy;
- if (ParseIntervalQuantifier(&dummy, &dummy)) {
- ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
- }
- // fallthrough
- }
- default:
- builder->AddCharacter(current());
- Advance();
- break;
- } // end switch(current())
-
- int min;
- int max;
- switch (current()) {
- // QuantifierPrefix ::
- // *
- // +
- // ?
- // {
- case '*':
- min = 0;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '+':
- min = 1;
- max = RegExpTree::kInfinity;
- Advance();
- break;
- case '?':
- min = 0;
- max = 1;
- Advance();
- break;
- case '{':
- if (ParseIntervalQuantifier(&min, &max)) {
- if (max < min) {
- ReportError(CStrVector("numbers out of order in {} quantifier.")
- CHECK_FAILED);
- }
- break;
- } else {
- continue;
- }
- default:
- continue;
- }
- RegExpQuantifier::QuantifierType quantifier_type = RegExpQuantifier::GREEDY;
- if (current() == '?') {
- quantifier_type = RegExpQuantifier::NON_GREEDY;
- Advance();
- } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
- // FLAG_regexp_possessive_quantifier is a debug-only flag.
- quantifier_type = RegExpQuantifier::POSSESSIVE;
- Advance();
- }
- builder->AddQuantifierToAtom(min, max, quantifier_type);
- }
-}
-
-
-#ifdef DEBUG
-// Currently only used in an DCHECK.
-static bool IsSpecialClassEscape(uc32 c) {
- switch (c) {
- case 'd': case 'D':
- case 's': case 'S':
- case 'w': case 'W':
- return true;
- default:
- return false;
- }
-}
-#endif
-
-
-// In order to know whether an escape is a backreference or not we have to scan
-// the entire regexp and find the number of capturing parentheses. However we
-// don't want to scan the regexp twice unless it is necessary. This mini-parser
-// is called when needed. It can see the difference between capturing and
-// noncapturing parentheses and can skip character classes and backslash-escaped
-// characters.
-void RegExpParser::ScanForCaptures() {
- // Start with captures started previous to current position
- int capture_count = captures_started();
- // Add count of captures after this position.
- int n;
- while ((n = current()) != kEndMarker) {
- Advance();
- switch (n) {
- case '\\':
- Advance();
- break;
- case '[': {
- int c;
- while ((c = current()) != kEndMarker) {
- Advance();
- if (c == '\\') {
- Advance();
- } else {
- if (c == ']') break;
- }
- }
- break;
- }
- case '(':
- if (current() != '?') capture_count++;
- break;
- }
- }
- capture_count_ = capture_count;
- is_scanned_for_captures_ = true;
-}
-
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
- DCHECK_EQ('\\', current());
- DCHECK('1' <= Next() && Next() <= '9');
- // Try to parse a decimal literal that is no greater than the total number
- // of left capturing parentheses in the input.
- int start = position();
- int value = Next() - '0';
- Advance(2);
- while (true) {
- uc32 c = current();
- if (IsDecimalDigit(c)) {
- value = 10 * value + (c - '0');
- if (value > kMaxCaptures) {
- Reset(start);
- return false;
- }
- Advance();
- } else {
- break;
- }
- }
- if (value > captures_started()) {
- if (!is_scanned_for_captures_) {
- int saved_position = position();
- ScanForCaptures();
- Reset(saved_position);
- }
- if (value > capture_count_) {
- Reset(start);
- return false;
- }
- }
- *index_out = value;
- return true;
-}
-
-
-// QuantifierPrefix ::
-// { DecimalDigits }
-// { DecimalDigits , }
-// { DecimalDigits , DecimalDigits }
-//
-// Returns true if parsing succeeds, and set the min_out and max_out
-// values. Values are truncated to RegExpTree::kInfinity if they overflow.
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
- DCHECK_EQ(current(), '{');
- int start = position();
- Advance();
- int min = 0;
- if (!IsDecimalDigit(current())) {
- Reset(start);
- return false;
- }
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (min > (RegExpTree::kInfinity - next) / 10) {
- // Overflow. Skip past remaining decimal digits and return -1.
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- min = RegExpTree::kInfinity;
- break;
- }
- min = 10 * min + next;
- Advance();
- }
- int max = 0;
- if (current() == '}') {
- max = min;
- Advance();
- } else if (current() == ',') {
- Advance();
- if (current() == '}') {
- max = RegExpTree::kInfinity;
- Advance();
- } else {
- while (IsDecimalDigit(current())) {
- int next = current() - '0';
- if (max > (RegExpTree::kInfinity - next) / 10) {
- do {
- Advance();
- } while (IsDecimalDigit(current()));
- max = RegExpTree::kInfinity;
- break;
- }
- max = 10 * max + next;
- Advance();
- }
- if (current() != '}') {
- Reset(start);
- return false;
- }
- Advance();
- }
- } else {
- Reset(start);
- return false;
- }
- *min_out = min;
- *max_out = max;
- return true;
-}
-
-
-uc32 RegExpParser::ParseOctalLiteral() {
- DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
- // For compatibility with some other browsers (not all), we parse
- // up to three octal digits with a value below 256.
- uc32 value = current() - '0';
- Advance();
- if ('0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- if (value < 32 && '0' <= current() && current() <= '7') {
- value = value * 8 + current() - '0';
- Advance();
- }
- }
- return value;
-}
-
-
-bool RegExpParser::ParseHexEscape(int length, uc32* value) {
- int start = position();
- uc32 val = 0;
- for (int i = 0; i < length; ++i) {
- uc32 c = current();
- int d = HexValue(c);
- if (d < 0) {
- Reset(start);
- return false;
- }
- val = val * 16 + d;
- Advance();
- }
- *value = val;
- return true;
-}
-
-
-bool RegExpParser::ParseUnicodeEscape(uc32* value) {
- // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
- // allowed). In the latter case, the number of hex digits between { } is
- // arbitrary. \ and u have already been read.
- if (current() == '{' && FLAG_harmony_unicode_regexps && unicode_) {
- int start = position();
- Advance();
- if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
- if (current() == '}') {
- Advance();
- return true;
- }
- }
- Reset(start);
- return false;
- }
- // \u but no {, or \u{...} escapes not allowed.
- return ParseHexEscape(4, value);
-}
-
-
-bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
- uc32 x = 0;
- int d = HexValue(current());
- if (d < 0) {
- return false;
- }
- while (d >= 0) {
- x = x * 16 + d;
- if (x > max_value) {
- return false;
- }
- Advance();
- d = HexValue(current());
- }
- *value = x;
- return true;
-}
-
-
-uc32 RegExpParser::ParseClassCharacterEscape() {
- DCHECK(current() == '\\');
- DCHECK(has_next() && !IsSpecialClassEscape(Next()));
- Advance();
- switch (current()) {
- case 'b':
- Advance();
- return '\b';
- // ControlEscape :: one of
- // f n r t v
- case 'f':
- Advance();
- return '\f';
- case 'n':
- Advance();
- return '\n';
- case 'r':
- Advance();
- return '\r';
- case 't':
- Advance();
- return '\t';
- case 'v':
- Advance();
- return '\v';
- case 'c': {
- uc32 controlLetter = Next();
- uc32 letter = controlLetter & ~('A' ^ 'a');
- // For compatibility with JSC, inside a character class
- // we also accept digits and underscore as control characters.
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_' ||
- (letter >= 'A' && letter <= 'Z')) {
- Advance(2);
- // Control letters mapped to ASCII control characters in the range
- // 0x00-0x1f.
- return controlLetter & 0x1f;
- }
- // We match JSC in reading the backslash as a literal
- // character instead of as starting an escape.
- return '\\';
- }
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7':
- // For compatibility, we interpret a decimal escape that isn't
- // a back reference (and therefore either \0 or not valid according
- // to the specification) as a 1..3 digit octal character code.
- return ParseOctalLiteral();
- case 'x': {
- Advance();
- uc32 value;
- if (ParseHexEscape(2, &value)) {
- return value;
- }
- if (!FLAG_harmony_unicode_regexps || !unicode_) {
- // If \x is not followed by a two-digit hexadecimal, treat it
- // as an identity escape.
- return 'x';
- }
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- ReportError(CStrVector("Invalid escape"));
- return 0;
- }
- case 'u': {
- Advance();
- uc32 value;
- if (ParseUnicodeEscape(&value)) {
- return value;
- }
- if (!FLAG_harmony_unicode_regexps || !unicode_) {
- return 'u';
- }
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- ReportError(CStrVector("Invalid unicode escape"));
- return 0;
- }
- default: {
- uc32 result = current();
- // If the 'u' flag is present, only syntax characters can be escaped, no
- // other identity escapes are allowed. If the 'u' flag is not present, all
- // identity escapes are allowed.
- if (!FLAG_harmony_unicode_regexps || !unicode_ ||
- IsSyntaxCharacter(result)) {
- Advance();
- return result;
- }
- ReportError(CStrVector("Invalid escape"));
- return 0;
- }
- }
- return 0;
-}
-
-
-CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
- DCHECK_EQ(0, *char_class);
- uc32 first = current();
- if (first == '\\') {
- switch (Next()) {
- case 'w': case 'W': case 'd': case 'D': case 's': case 'S': {
- *char_class = Next();
- Advance(2);
- return CharacterRange::Singleton(0); // Return dummy value.
- }
- case kEndMarker:
- return ReportError(CStrVector("\\ at end of pattern"));
- default:
- uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
- return CharacterRange::Singleton(c);
- }
- } else {
- Advance();
- return CharacterRange::Singleton(first);
- }
-}
-
-
-static const uc16 kNoCharClass = 0;
-
-// Adds range or pre-defined character class to character ranges.
-// If char_class is not kInvalidClass, it's interpreted as a class
-// escape (i.e., 's' means whitespace, from '\s').
-static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
- uc16 char_class,
- CharacterRange range,
- Zone* zone) {
- if (char_class != kNoCharClass) {
- CharacterRange::AddClassEscape(char_class, ranges, zone);
- } else {
- ranges->Add(range, zone);
- }
-}
-
-
-RegExpTree* RegExpParser::ParseCharacterClass() {
- static const char* kUnterminated = "Unterminated character class";
- static const char* kRangeOutOfOrder = "Range out of order in character class";
-
- DCHECK_EQ(current(), '[');
- Advance();
- bool is_negated = false;
- if (current() == '^') {
- is_negated = true;
- Advance();
- }
- ZoneList<CharacterRange>* ranges =
- new(zone()) ZoneList<CharacterRange>(2, zone());
- while (has_more() && current() != ']') {
- uc16 char_class = kNoCharClass;
- CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
- if (current() == '-') {
- Advance();
- if (current() == kEndMarker) {
- // If we reach the end we break out of the loop and let the
- // following code report an error.
- break;
- } else if (current() == ']') {
- AddRangeOrEscape(ranges, char_class, first, zone());
- ranges->Add(CharacterRange::Singleton('-'), zone());
- break;
- }
- uc16 char_class_2 = kNoCharClass;
- CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
- if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
- // Either end is an escaped character class. Treat the '-' verbatim.
- AddRangeOrEscape(ranges, char_class, first, zone());
- ranges->Add(CharacterRange::Singleton('-'), zone());
- AddRangeOrEscape(ranges, char_class_2, next, zone());
- continue;
- }
- if (first.from() > next.to()) {
- return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
- }
- ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
- } else {
- AddRangeOrEscape(ranges, char_class, first, zone());
- }
- }
- if (!has_more()) {
- return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
- }
- Advance();
- if (ranges->length() == 0) {
- ranges->Add(CharacterRange::Everything(), zone());
- is_negated = !is_negated;
- }
- return new(zone()) RegExpCharacterClass(ranges, is_negated);
-}
-
-
-// ----------------------------------------------------------------------------
// The Parser interface.
-bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, bool multiline,
- bool unicode, RegExpCompileData* result) {
- DCHECK(result != NULL);
- RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
- RegExpTree* tree = parser.ParsePattern();
- if (parser.failed()) {
- DCHECK(tree == NULL);
- DCHECK(!result->error.is_null());
- } else {
- DCHECK(tree != NULL);
- DCHECK(result->error.is_null());
- result->tree = tree;
- int capture_count = parser.captures_started();
- result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
- result->contains_anchor = parser.contains_anchor();
- result->capture_count = capture_count;
- }
- return !parser.failed();
-}
-
bool Parser::ParseStatic(ParseInfo* info) {
Parser parser(info);
@@ -6267,7 +5214,6 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
args->Add(factory()->NewSmiLiteral(hash_obj->value(), pos), zone());
- this->CheckPossibleEvalCall(tag, scope_);
Expression* call_site = factory()->NewCallRuntime(
Context::GET_TEMPLATE_CALL_SITE_INDEX, args, start);
@@ -6381,12 +5327,13 @@ Expression* Parser::SpreadCall(Expression* function,
int pos) {
if (function->IsSuperCallReference()) {
// Super calls
- // %reflect_construct(%GetPrototype(<this-function>), args, new.target))
+ // $super_constructor = %_GetSuperConstructor(<this-function>)
+ // %reflect_construct($super_constructor, args, new.target)
ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
- Expression* get_prototype =
- factory()->NewCallRuntime(Runtime::kGetPrototype, tmp, pos);
- args->InsertAt(0, get_prototype, zone());
+ Expression* super_constructor = factory()->NewCallRuntime(
+ Runtime::kInlineGetSuperConstructor, tmp, pos);
+ args->InsertAt(0, super_constructor, zone());
args->Add(function->AsSuperCallReference()->new_target_var(), zone());
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args,
pos);
@@ -6451,5 +5398,151 @@ void Parser::RaiseLanguageMode(LanguageMode mode) {
static_cast<LanguageMode>(scope_->language_mode() | mode));
}
+
+void ParserTraits::RewriteDestructuringAssignments() {
+ parser_->RewriteDestructuringAssignments();
+}
+
+
+Expression* ParserTraits::RewriteNonPattern(
+ Expression* expr, const ExpressionClassifier* classifier, bool* ok) {
+ return parser_->RewriteNonPattern(expr, classifier, ok);
+}
+
+
+ZoneList<Expression*>* ParserTraits::RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok) {
+ return parser_->RewriteNonPatternArguments(args, classifier, ok);
+}
+
+
+ObjectLiteralProperty* ParserTraits::RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok) {
+ return parser_->RewriteNonPatternObjectLiteralProperty(property, classifier,
+ ok);
+}
+
+
+Expression* Parser::RewriteNonPattern(Expression* expr,
+ const ExpressionClassifier* classifier,
+ bool* ok) {
+ // For the time being, this does no rewriting at all.
+ ValidateExpression(classifier, ok);
+ return expr;
+}
+
+
+ZoneList<Expression*>* Parser::RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok) {
+ // For the time being, this does no rewriting at all.
+ ValidateExpression(classifier, ok);
+ return args;
+}
+
+
+ObjectLiteralProperty* Parser::RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok) {
+ if (property != nullptr) {
+ Expression* key = RewriteNonPattern(property->key(), classifier, ok);
+ property->set_key(key);
+ Expression* value = RewriteNonPattern(property->value(), classifier, ok);
+ property->set_value(value);
+ }
+ return property;
+}
+
+
+void Parser::RewriteDestructuringAssignments() {
+ FunctionState* func = function_state_;
+ if (!allow_harmony_destructuring_assignment()) return;
+ const List<DestructuringAssignment>& assignments =
+ func->destructuring_assignments_to_rewrite();
+ for (int i = assignments.length() - 1; i >= 0; --i) {
+ // Rewrite list in reverse, so that nested assignment patterns are rewritten
+ // correctly.
+ DestructuringAssignment pair = assignments.at(i);
+ RewritableAssignmentExpression* to_rewrite =
+ pair.assignment->AsRewritableAssignmentExpression();
+ Scope* scope = pair.scope;
+ DCHECK_NOT_NULL(to_rewrite);
+ if (!to_rewrite->is_rewritten()) {
+ PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope);
+ }
+ }
+}
+
+
+void ParserTraits::QueueDestructuringAssignmentForRewriting(Expression* expr) {
+ DCHECK(expr->IsRewritableAssignmentExpression());
+ parser_->function_state_->AddDestructuringAssignment(
+ Parser::DestructuringAssignment(expr, parser_->scope_));
+}
+
+
+void ParserTraits::SetFunctionNameFromPropertyName(
+ ObjectLiteralProperty* property, const AstRawString* name) {
+ Expression* value = property->value();
+ if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
+
+ // TODO(adamk): Support computed names.
+ if (property->is_computed_name()) return;
+ DCHECK_NOT_NULL(name);
+
+ // Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
+ // of an object literal.
+ if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
+
+ if (value->IsFunctionLiteral()) {
+ auto function = value->AsFunctionLiteral();
+ if (function->is_anonymous()) {
+ if (property->kind() == ObjectLiteralProperty::GETTER) {
+ function->set_raw_name(parser_->ast_value_factory()->NewConsString(
+ parser_->ast_value_factory()->get_space_string(), name));
+ } else if (property->kind() == ObjectLiteralProperty::SETTER) {
+ function->set_raw_name(parser_->ast_value_factory()->NewConsString(
+ parser_->ast_value_factory()->set_space_string(), name));
+ } else {
+ function->set_raw_name(name);
+ DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
+ }
+ }
+ } else {
+ DCHECK(value->IsClassLiteral());
+ DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
+ auto class_literal = value->AsClassLiteral();
+ if (class_literal->raw_name() == nullptr) {
+ class_literal->set_raw_name(name);
+ }
+ }
+}
+
+
+void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
+ Expression* identifier) {
+ if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
+ if (!identifier->IsVariableProxy()) return;
+
+ auto name = identifier->AsVariableProxy()->raw_name();
+ DCHECK_NOT_NULL(name);
+
+ if (value->IsFunctionLiteral()) {
+ auto function = value->AsFunctionLiteral();
+ if (function->is_anonymous()) {
+ function->set_raw_name(name);
+ }
+ } else {
+ DCHECK(value->IsClassLiteral());
+ auto class_literal = value->AsClassLiteral();
+ if (class_literal->raw_name() == nullptr) {
+ class_literal->set_raw_name(name);
+ }
+ }
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parsing/parser.h
index b674a9d2e2..7d50221334 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSER_H_
-#define V8_PARSER_H_
+#ifndef V8_PARSING_PARSER_H_
+#define V8_PARSING_PARSER_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h" // TODO(titzer): remove this include dependency
+#include "src/parsing/parser-base.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparser.h"
#include "src/pending-compilation-error-handler.h"
-#include "src/preparse-data.h"
-#include "src/preparse-data-format.h"
-#include "src/preparser.h"
-#include "src/scopes.h"
namespace v8 {
@@ -151,6 +152,10 @@ class ParseInfo {
context_ = Handle<Context>(*context_);
}
+#ifdef DEBUG
+ bool script_is_native() { return script_->type() == Script::TYPE_NATIVE; }
+#endif // DEBUG
+
private:
// Various configuration flags for parsing.
enum Flag {
@@ -284,252 +289,6 @@ class ParseData {
};
// ----------------------------------------------------------------------------
-// REGEXP PARSING
-
-// A BufferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, that is optimized for the case of adding and removing
-// a single element. The last element added is stored outside the backing list,
-// and if no more than one element is ever added, the ZoneList isn't even
-// allocated.
-// Elements must not be NULL pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(NULL), last_(NULL) {}
-
- // Adds element at end of list. This element is buffered and can
- // be read using last() or removed using RemoveLast until a new Add or until
- // RemoveLast or GetList has been called.
- void Add(T* value, Zone* zone) {
- if (last_ != NULL) {
- if (list_ == NULL) {
- list_ = new(zone) ZoneList<T*>(initial_size, zone);
- }
- list_->Add(last_, zone);
- }
- last_ = value;
- }
-
- T* last() {
- DCHECK(last_ != NULL);
- return last_;
- }
-
- T* RemoveLast() {
- DCHECK(last_ != NULL);
- T* result = last_;
- if ((list_ != NULL) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = NULL;
- return result;
- }
-
- T* Get(int i) {
- DCHECK((0 <= i) && (i < length()));
- if (list_ == NULL) {
- DCHECK_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- DCHECK(last_ != NULL);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = NULL;
- last_ = NULL;
- }
-
- int length() {
- int length = (list_ == NULL) ? 0 : list_->length();
- return length + ((last_ == NULL) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == NULL) {
- list_ = new(zone) ZoneList<T*>(initial_size, zone);
- }
- if (last_ != NULL) {
- list_->Add(last_, zone);
- last_ = NULL;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
-
-
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder: public ZoneObject {
- public:
- explicit RegExpBuilder(Zone* zone);
- void AddCharacter(uc16 character);
- // "Adds" an empty expression. Does nothing except consume a
- // following quantifier
- void AddEmpty();
- void AddAtom(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- void AddQuantifierToAtom(
- int min, int max, RegExpQuantifier::QuantifierType type);
- RegExpTree* ToRegExp();
-
- private:
- void FlushCharacters();
- void FlushText();
- void FlushTerms();
- Zone* zone() const { return zone_; }
-
- Zone* zone_;
- bool pending_empty_;
- ZoneList<uc16>* characters_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum {ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM} last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
-
-
-class RegExpParser BASE_EMBEDDED {
- public:
- RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
- bool unicode, Isolate* isolate, Zone* zone);
-
- static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
- bool multiline, bool unicode,
- RegExpCompileData* result);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
- RegExpTree* ParseCharacterClass();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
- // must not be 'b' or 'B' since they are usually handle specially.
- uc32 ParseClassCharacterEscape();
-
- // Checks whether the following is a length-digit hexadecimal number,
- // and sets the value if it is.
- bool ParseHexEscape(int length, uc32* value);
- bool ParseUnicodeEscape(uc32* value);
- bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
-
- uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful it
- // stores the result in the output parameter and returns true. If
- // it fails it will push back the characters read so the same characters
- // can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- CharacterRange ParseClassAtom(uc16* char_class);
- RegExpTree* ReportError(Vector<const char> message);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_ == NULL ? 0 : captures_->length(); }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
-
- static bool IsSyntaxCharacter(uc32 c);
-
- static const int kMaxCaptures = 1 << 16;
- static const uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAHEAD,
- NEGATIVE_LOOKAHEAD,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- int disjunction_capture_index,
- Zone* zone)
- : previous_state_(previous_state),
- builder_(new(zone) RegExpBuilder(zone)),
- group_type_(group_type),
- disjunction_capture_index_(disjunction_capture_index) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != NULL; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() { return group_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() { return disjunction_capture_index_; }
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- SubexpressionType group_type_;
- // Stored disjunction's capture index (if any).
- int disjunction_capture_index_;
- };
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- uc32 Next();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- Isolate* isolate_;
- Zone* zone_;
- Handle<String>* error_;
- ZoneList<RegExpCapture*>* captures_;
- FlatStringReader* in_;
- uc32 current_;
- int next_pos_;
- // The capture count is only valid after we have scanned for captures.
- int capture_count_;
- bool has_more_;
- bool multiline_;
- bool unicode_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool failed_;
-};
-
-// ----------------------------------------------------------------------------
// JAVASCRIPT PARSING
class Parser;
@@ -539,12 +298,17 @@ class SingletonLogger;
struct ParserFormalParameters : FormalParametersBase {
struct Parameter {
Parameter(const AstRawString* name, Expression* pattern,
- Expression* initializer, bool is_rest)
- : name(name), pattern(pattern), initializer(initializer),
+ Expression* initializer, int initializer_end_position,
+ bool is_rest)
+ : name(name),
+ pattern(pattern),
+ initializer(initializer),
+ initializer_end_position(initializer_end_position),
is_rest(is_rest) {}
const AstRawString* name;
Expression* pattern;
Expression* initializer;
+ int initializer_end_position;
bool is_rest;
bool is_simple() const {
return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
@@ -652,11 +416,6 @@ class ParserTraits {
static void CheckAssigningFunctionLiteralToProperty(Expression* left,
Expression* right);
- // Keep track of eval() calls since they disable all local variable
- // optimizations. This checks if expression is an eval call, and if yes,
- // forwards the information to scope.
- void CheckPossibleEvalCall(Expression* expression, Scope* scope);
-
// Determine if the expression is a variable proxy and mark it as being used
// in an assignment or with a increment/decrement operator.
static Expression* MarkExpressionAsAssigned(Expression* expression);
@@ -763,7 +522,8 @@ class ParserTraits {
Scope* scope, AstNodeFactory* factory);
Expression* ExpressionFromString(int pos, Scanner* scanner,
AstNodeFactory* factory);
- Expression* GetIterator(Expression* iterable, AstNodeFactory* factory);
+ Expression* GetIterator(Expression* iterable, AstNodeFactory* factory,
+ int pos);
ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
}
@@ -781,9 +541,10 @@ class ParserTraits {
V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
FunctionKind kind = kNormalFunction);
- V8_INLINE void AddFormalParameter(
- ParserFormalParameters* parameters, Expression* pattern,
- Expression* initializer, bool is_rest);
+ V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
+ Expression* pattern,
+ Expression* initializer,
+ int initializer_end_position, bool is_rest);
V8_INLINE void DeclareFormalParameter(
Scope* scope, const ParserFormalParameters::Parameter& parameter,
ExpressionClassifier* classifier);
@@ -876,6 +637,28 @@ class ParserTraits {
ZoneList<v8::internal::Expression*>* args,
int pos);
+ // Rewrite all DestructuringAssignments in the current FunctionState.
+ V8_INLINE void RewriteDestructuringAssignments();
+
+ V8_INLINE void QueueDestructuringAssignmentForRewriting(
+ Expression* assignment);
+
+ void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
+ const AstRawString* name);
+
+ void SetFunctionNameFromIdentifierRef(Expression* value,
+ Expression* identifier);
+
+ // Rewrite expressions that are not used as patterns
+ V8_INLINE Expression* RewriteNonPattern(
+ Expression* expr, const ExpressionClassifier* classifier, bool* ok);
+ V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok);
+ V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok);
+
private:
Parser* parser_;
};
@@ -937,10 +720,6 @@ class Parser : public ParserBase<ParserTraits> {
bool produce_cached_parse_data() const {
return compile_options_ == ScriptCompiler::kProduceParserCache;
}
- Scope* DeclarationScope(VariableMode mode) {
- return IsLexicalVariableMode(mode)
- ? scope_ : scope_->DeclarationScope();
- }
// All ParseXXX functions take as the last argument an *ok parameter
// which is set to false if parsing failed; it is unchanged otherwise.
@@ -969,6 +748,8 @@ class Parser : public ParserBase<ParserTraits> {
bool* ok);
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
+ Block* ParseBlock(ZoneList<const AstRawString*>* labels,
+ bool finalize_block_scope, bool* ok);
Block* ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
bool* ok);
@@ -977,15 +758,12 @@ class Parser : public ParserBase<ParserTraits> {
struct DeclarationDescriptor {
enum Kind { NORMAL, PARAMETER };
Parser* parser;
- Scope* declaration_scope;
Scope* scope;
Scope* hoist_scope;
VariableMode mode;
- bool is_const;
bool needs_init;
int declaration_pos;
int initialization_pos;
- Token::Value init_op;
Kind declaration_kind;
};
@@ -1009,7 +787,6 @@ class Parser : public ParserBase<ParserTraits> {
Block* BuildInitializationBlock(ZoneList<const AstRawString*>* names,
bool* ok);
- const AstRawString* SingleName() const;
DeclarationDescriptor descriptor;
List<Declaration> declarations;
@@ -1024,6 +801,13 @@ class Parser : public ParserBase<ParserTraits> {
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok);
+ static void RewriteDestructuringAssignment(
+ Parser* parser, RewritableAssignmentExpression* expr, Scope* Scope);
+
+ static Expression* RewriteDestructuringAssignment(Parser* parser,
+ Assignment* assignment,
+ Scope* scope);
+
void set_initializer_position(int pos) { initializer_position_ = pos; }
private:
@@ -1035,27 +819,56 @@ class Parser : public ParserBase<ParserTraits> {
#undef DECLARE_VISIT
void Visit(AstNode* node) override;
+ enum PatternContext {
+ BINDING,
+ INITIALIZER,
+ ASSIGNMENT,
+ ASSIGNMENT_INITIALIZER
+ };
+
+ PatternContext context() const { return context_; }
+ void set_context(PatternContext context) { context_ = context; }
+
void RecurseIntoSubpattern(AstNode* pattern, Expression* value) {
Expression* old_value = current_value_;
current_value_ = value;
+ recursion_level_++;
pattern->Accept(this);
+ recursion_level_--;
current_value_ = old_value;
}
+ void VisitObjectLiteral(ObjectLiteral* node, Variable** temp_var);
+ void VisitArrayLiteral(ArrayLiteral* node, Variable** temp_var);
+
+ bool IsBindingContext() const { return IsBindingContext(context_); }
+ bool IsInitializerContext() const { return context_ != ASSIGNMENT; }
+ bool IsAssignmentContext() const { return IsAssignmentContext(context_); }
+ bool IsAssignmentContext(PatternContext c) const;
+ bool IsBindingContext(PatternContext c) const;
+ bool IsSubPattern() const { return recursion_level_ > 1; }
+ PatternContext SetAssignmentContextIfNeeded(Expression* node);
+ PatternContext SetInitializerContextIfNeeded(Expression* node);
+
Variable* CreateTempVar(Expression* value = nullptr);
- AstNodeFactory* factory() const { return descriptor_->parser->factory(); }
+ AstNodeFactory* factory() const { return parser_->factory(); }
AstValueFactory* ast_value_factory() const {
- return descriptor_->parser->ast_value_factory();
+ return parser_->ast_value_factory();
}
- Zone* zone() const { return descriptor_->parser->zone(); }
+ Zone* zone() const { return parser_->zone(); }
+ Scope* scope() const { return scope_; }
+ Scope* scope_;
+ Parser* parser_;
+ PatternContext context_;
Expression* pattern_;
int initializer_position_;
Block* block_;
const DeclarationDescriptor* descriptor_;
ZoneList<const AstRawString*>* names_;
Expression* current_value_;
+ int recursion_level_;
bool* ok_;
};
@@ -1086,20 +899,16 @@ class Parser : public ParserBase<ParserTraits> {
TryStatement* ParseTryStatement(bool* ok);
DebuggerStatement* ParseDebuggerStatement(bool* ok);
- // Support for hamony block scoped bindings.
- Block* ParseScopedBlock(ZoneList<const AstRawString*>* labels, bool* ok);
-
- // !%_IsSpecObject(result = iterator.next()) &&
+ // !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
int pos);
// Initialize the components of a for-in / for-of statement.
- void InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each,
- Expression* subject,
- Statement* body);
+ void InitializeForEachStatement(ForEachStatement* stmt, Expression* each,
+ Expression* subject, Statement* body,
+ bool is_destructuring);
Statement* DesugarLexicalBindingsInForStatement(
Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
@@ -1202,6 +1011,20 @@ class Parser : public ParserBase<ParserTraits> {
void SetLanguageMode(Scope* scope, LanguageMode mode);
void RaiseLanguageMode(LanguageMode mode);
+ V8_INLINE void RewriteDestructuringAssignments();
+
+ V8_INLINE Expression* RewriteNonPattern(
+ Expression* expr, const ExpressionClassifier* classifier, bool* ok);
+ V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
+ ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
+ bool* ok);
+ V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
+ ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
+ bool* ok);
+
+ friend class InitializerRewriter;
+ void RewriteParameterInitializer(Expression* expr, Scope* scope);
+
Scanner scanner_;
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
@@ -1330,19 +1153,18 @@ Expression* ParserTraits::SpreadCallNew(
}
-void ParserTraits::AddFormalParameter(
- ParserFormalParameters* parameters,
- Expression* pattern, Expression* initializer, bool is_rest) {
- bool is_simple =
- !is_rest && pattern->IsVariableProxy() && initializer == nullptr;
- DCHECK(parser_->allow_harmony_destructuring() ||
- parser_->allow_harmony_rest_parameters() ||
- parser_->allow_harmony_default_parameters() || is_simple);
+void ParserTraits::AddFormalParameter(ParserFormalParameters* parameters,
+ Expression* pattern,
+ Expression* initializer,
+ int initializer_end_position,
+ bool is_rest) {
+ bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
const AstRawString* name = is_simple
? pattern->AsVariableProxy()->raw_name()
: parser_->ast_value_factory()->empty_string();
parameters->params.Add(
- ParserFormalParameters::Parameter(name, pattern, initializer, is_rest),
+ ParserFormalParameters::Parameter(name, pattern, initializer,
+ initializer_end_position, is_rest),
parameters->scope->zone());
}
@@ -1352,8 +1174,10 @@ void ParserTraits::DeclareFormalParameter(
ExpressionClassifier* classifier) {
bool is_duplicate = false;
bool is_simple = classifier->is_simple_parameter_list();
- auto name = parameter.name;
- auto mode = is_simple ? VAR : TEMPORARY;
+ auto name = is_simple || parameter.is_rest
+ ? parameter.name
+ : parser_->ast_value_factory()->empty_string();
+ auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
if (!is_simple) scope->SetHasNonSimpleParameters();
bool is_optional = parameter.initializer != nullptr;
Variable* var = scope->DeclareParameter(
@@ -1393,4 +1217,4 @@ DoExpression* ParserTraits::ParseDoExpression(bool* ok) {
} // namespace internal
} // namespace v8
-#endif // V8_PARSER_H_
+#endif // V8_PARSING_PARSER_H_
diff --git a/deps/v8/src/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index e96aef8ba2..6e20282785 100644
--- a/deps/v8/src/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -2,35 +2,128 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/messages.h"
-#include "src/parameter-initializer-rewriter.h"
-#include "src/parser.h"
+#include "src/parsing/parameter-initializer-rewriter.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
-
void Parser::PatternRewriter::DeclareAndInitializeVariables(
Block* block, const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok) {
PatternRewriter rewriter;
+ rewriter.scope_ = declaration_descriptor->scope;
+ rewriter.parser_ = declaration_descriptor->parser;
+ rewriter.context_ = BINDING;
rewriter.pattern_ = declaration->pattern;
rewriter.initializer_position_ = declaration->initializer_position;
rewriter.block_ = block;
rewriter.descriptor_ = declaration_descriptor;
rewriter.names_ = names;
rewriter.ok_ = ok;
+ rewriter.recursion_level_ = 0;
rewriter.RecurseIntoSubpattern(rewriter.pattern_, declaration->initializer);
}
+void Parser::PatternRewriter::RewriteDestructuringAssignment(
+ Parser* parser, RewritableAssignmentExpression* to_rewrite, Scope* scope) {
+ PatternRewriter rewriter;
+
+ DCHECK(!to_rewrite->is_rewritten());
+
+ bool ok = true;
+ rewriter.scope_ = scope;
+ rewriter.parser_ = parser;
+ rewriter.context_ = ASSIGNMENT;
+ rewriter.pattern_ = to_rewrite;
+ rewriter.block_ = nullptr;
+ rewriter.descriptor_ = nullptr;
+ rewriter.names_ = nullptr;
+ rewriter.ok_ = &ok;
+ rewriter.recursion_level_ = 0;
+
+ rewriter.RecurseIntoSubpattern(rewriter.pattern_, nullptr);
+ DCHECK(ok);
+}
+
+
+Expression* Parser::PatternRewriter::RewriteDestructuringAssignment(
+ Parser* parser, Assignment* assignment, Scope* scope) {
+ DCHECK_NOT_NULL(assignment);
+ DCHECK_EQ(Token::ASSIGN, assignment->op());
+ auto to_rewrite =
+ parser->factory()->NewRewritableAssignmentExpression(assignment);
+ RewriteDestructuringAssignment(parser, to_rewrite, scope);
+ return to_rewrite->expression();
+}
+
+
+bool Parser::PatternRewriter::IsAssignmentContext(PatternContext c) const {
+ return c == ASSIGNMENT || c == ASSIGNMENT_INITIALIZER;
+}
+
+
+bool Parser::PatternRewriter::IsBindingContext(PatternContext c) const {
+ return c == BINDING || c == INITIALIZER;
+}
+
+
+Parser::PatternRewriter::PatternContext
+Parser::PatternRewriter::SetAssignmentContextIfNeeded(Expression* node) {
+ PatternContext old_context = context();
+ if (node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN) {
+ set_context(ASSIGNMENT);
+ }
+ return old_context;
+}
+
+
+Parser::PatternRewriter::PatternContext
+Parser::PatternRewriter::SetInitializerContextIfNeeded(Expression* node) {
+ // Set appropriate initializer context for BindingElement and
+ // AssignmentElement nodes
+ PatternContext old_context = context();
+ bool is_destructuring_assignment =
+ node->IsRewritableAssignmentExpression() &&
+ !node->AsRewritableAssignmentExpression()->is_rewritten();
+ bool is_assignment =
+ node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN;
+ if (is_destructuring_assignment || is_assignment) {
+ switch (old_context) {
+ case BINDING:
+ set_context(INITIALIZER);
+ break;
+ case ASSIGNMENT:
+ set_context(ASSIGNMENT_INITIALIZER);
+ break;
+ default:
+ break;
+ }
+ }
+ return old_context;
+}
+
+
void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
Expression* value = current_value_;
+
+ if (IsAssignmentContext()) {
+ // In an assignment context, simply perform the assignment
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, pattern, value, pattern->position());
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, pattern->position()),
+ zone());
+ return;
+ }
+
descriptor_->scope->RemoveUnresolved(pattern);
// Declare variable.
@@ -48,15 +141,14 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// For let/const declarations in harmony mode, we can also immediately
// pre-resolve the proxy because it resides in the same scope as the
// declaration.
- Parser* parser = descriptor_->parser;
const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy = parser->NewUnresolved(name, descriptor_->mode);
+ VariableProxy* proxy = parser_->NewUnresolved(name, descriptor_->mode);
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, descriptor_->mode, descriptor_->scope,
descriptor_->declaration_pos);
- Variable* var = parser->Declare(declaration, descriptor_->declaration_kind,
- descriptor_->mode != VAR, ok_,
- descriptor_->hoist_scope);
+ Variable* var =
+ parser_->Declare(declaration, descriptor_->declaration_kind,
+ descriptor_->mode != VAR, ok_, descriptor_->hoist_scope);
if (!*ok_) return;
DCHECK_NOT_NULL(var);
DCHECK(!proxy->is_resolved() || proxy->var() == var);
@@ -64,9 +156,11 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK(initializer_position_ != RelocInfo::kNoPosition);
- if (descriptor_->declaration_scope->num_var_or_const() >
- kMaxNumFunctionLocals) {
- parser->ReportMessage(MessageTemplate::kTooManyVariables);
+ Scope* declaration_scope = IsLexicalVariableMode(descriptor_->mode)
+ ? descriptor_->scope
+ : descriptor_->scope->DeclarationScope();
+ if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
+ parser_->ReportMessage(MessageTemplate::kTooManyVariables);
*ok_ = false;
return;
}
@@ -100,8 +194,8 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// The "variable" c initialized to x is the same as the declared
// one - there is no re-lookup (see the last parameter of the
// Declare() call above).
- Scope* initialization_scope = descriptor_->is_const
- ? descriptor_->declaration_scope
+ Scope* initialization_scope = IsImmutableVariableMode(descriptor_->mode)
+ ? declaration_scope
: descriptor_->scope;
@@ -135,7 +229,7 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
zone());
CallRuntime* initialize;
- if (descriptor_->is_const) {
+ if (IsImmutableVariableMode(descriptor_->mode)) {
arguments->Add(value, zone());
value = NULL; // zap the value to avoid the unnecessary assignment
@@ -188,11 +282,12 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
DCHECK_NOT_NULL(proxy);
DCHECK_NOT_NULL(proxy->var());
DCHECK_NOT_NULL(value);
- Assignment* assignment = factory()->NewAssignment(
- descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
+ // Add break location for destructured sub-pattern.
+ int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
+ factory()->NewExpressionStatement(assignment, pos), zone());
value = NULL;
}
@@ -204,18 +299,18 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// if they are inside a 'with' statement - they may change a 'with' object
// property).
VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
- Assignment* assignment = factory()->NewAssignment(
- descriptor_->init_op, proxy, value, descriptor_->initialization_pos);
+ // Add break location for destructured sub-pattern.
+ int pos = IsSubPattern() ? pattern->position() : RelocInfo::kNoPosition;
+ Assignment* assignment =
+ factory()->NewAssignment(Token::INIT, proxy, value, pos);
block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
+ factory()->NewExpressionStatement(assignment, pos), zone());
}
}
Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
- auto temp = descriptor_->parser->scope_->NewTemporary(
- ast_value_factory()->empty_string());
+ auto temp = scope()->NewTemporary(ast_value_factory()->empty_string());
if (value != nullptr) {
auto assignment = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), value,
@@ -229,29 +324,98 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
}
-void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern) {
- auto temp = CreateTempVar(current_value_);
+void Parser::PatternRewriter::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* node) {
+ if (!IsAssignmentContext()) {
+ // Mark the assignment as rewritten to prevent redundant rewriting, and
+ // perform BindingPattern rewriting
+ DCHECK(!node->is_rewritten());
+ node->Rewrite(node->expression());
+ return node->expression()->Accept(this);
+ }
+
+ if (node->is_rewritten()) return;
+ DCHECK(IsAssignmentContext());
+ Assignment* assign = node->expression()->AsAssignment();
+ DCHECK_NOT_NULL(assign);
+ DCHECK_EQ(Token::ASSIGN, assign->op());
+
+ auto initializer = assign->value();
+ auto value = initializer;
+
+ if (IsInitializerContext()) {
+ // let {<pattern> = <init>} = <value>
+ // becomes
+ // temp = <value>;
+ // <pattern> = temp === undefined ? <init> : temp;
+ auto temp_var = CreateTempVar(current_value_);
+ Expression* is_undefined = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(temp_var),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ value = factory()->NewConditional(is_undefined, initializer,
+ factory()->NewVariableProxy(temp_var),
+ RelocInfo::kNoPosition);
+ }
+
+ PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
+ int pos = assign->position();
+ Block* old_block = block_;
+ block_ = factory()->NewBlock(nullptr, 8, false, pos);
+ Variable* temp = nullptr;
+ Expression* pattern = assign->target();
+ Expression* old_value = current_value_;
+ current_value_ = value;
+ if (pattern->IsObjectLiteral()) {
+ VisitObjectLiteral(pattern->AsObjectLiteral(), &temp);
+ } else {
+ DCHECK(pattern->IsArrayLiteral());
+ VisitArrayLiteral(pattern->AsArrayLiteral(), &temp);
+ }
+ DCHECK_NOT_NULL(temp);
+ current_value_ = old_value;
+ Expression* expr = factory()->NewDoExpression(block_, temp, pos);
+ node->Rewrite(expr);
+ block_ = old_block;
+ if (block_) {
+ block_->statements()->Add(factory()->NewExpressionStatement(expr, pos),
+ zone());
+ }
+ return set_context(old_context);
+}
- block_->statements()->Add(descriptor_->parser->BuildAssertIsCoercible(temp),
- zone());
+
+void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
+ Variable** temp_var) {
+ auto temp = *temp_var = CreateTempVar(current_value_);
+
+ block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
for (ObjectLiteralProperty* property : *pattern->properties()) {
+ PatternContext context = SetInitializerContextIfNeeded(property->value());
RecurseIntoSubpattern(
property->value(),
factory()->NewProperty(factory()->NewVariableProxy(temp),
property->key(), RelocInfo::kNoPosition));
+ set_context(context);
}
}
-void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
- auto temp = CreateTempVar(current_value_);
+void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* node) {
+ Variable* temp_var = nullptr;
+ VisitObjectLiteral(node, &temp_var);
+}
- block_->statements()->Add(descriptor_->parser->BuildAssertIsCoercible(temp),
- zone());
- auto iterator = CreateTempVar(descriptor_->parser->GetIterator(
- factory()->NewVariableProxy(temp), factory()));
+void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
+ Variable** temp_var) {
+ auto temp = *temp_var = CreateTempVar(current_value_);
+
+ block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
+
+ auto iterator = CreateTempVar(parser_->GetIterator(
+ factory()->NewVariableProxy(temp), factory(), RelocInfo::kNoPosition));
auto done = CreateTempVar(
factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
auto result = CreateTempVar();
@@ -264,19 +428,19 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
break;
}
+ PatternContext context = SetInitializerContextIfNeeded(value);
// if (!done) {
// result = IteratorNext(iterator);
// v = (done = result.done) ? undefined : result.value;
// }
auto next_block =
factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
- next_block->statements()->Add(
- factory()->NewExpressionStatement(
- descriptor_->parser->BuildIteratorNextResult(
- factory()->NewVariableProxy(iterator), result,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
+ next_block->statements()->Add(factory()->NewExpressionStatement(
+ parser_->BuildIteratorNextResult(
+ factory()->NewVariableProxy(iterator),
+ result, RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
auto assign_to_done = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(done),
@@ -313,6 +477,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
}
+ set_context(context);
}
if (spread != nullptr) {
@@ -323,7 +488,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
empty_exprs,
// Reuse pattern's literal index - it is unused since there is no
// actual literal allocated.
- node->literal_index(), is_strong(descriptor_->parser->language_mode()),
+ node->literal_index(), is_strong(scope()->language_mode()),
RelocInfo::kNoPosition));
auto arguments = new (zone()) ZoneList<Expression*>(2, zone());
@@ -349,29 +514,58 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
}
+void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
+ Variable* temp_var = nullptr;
+ VisitArrayLiteral(node, &temp_var);
+}
+
+
void Parser::PatternRewriter::VisitAssignment(Assignment* node) {
// let {<pattern> = <init>} = <value>
// becomes
// temp = <value>;
// <pattern> = temp === undefined ? <init> : temp;
- DCHECK(node->op() == Token::ASSIGN);
+ DCHECK_EQ(Token::ASSIGN, node->op());
+
+ auto initializer = node->value();
+ auto value = initializer;
auto temp = CreateTempVar(current_value_);
- Expression* is_undefined = factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(temp),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
- Expression* initializer = node->value();
- if (descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
- descriptor_->scope->is_arrow_scope()) {
- // TODO(adamk): Only call this if necessary.
- RewriteParameterInitializerScope(
- descriptor_->parser->stack_limit(), initializer,
- descriptor_->scope->outer_scope(), descriptor_->scope);
+
+ if (IsInitializerContext()) {
+ Expression* is_undefined = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(temp),
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ value = factory()->NewConditional(is_undefined, initializer,
+ factory()->NewVariableProxy(temp),
+ RelocInfo::kNoPosition);
}
- Expression* value = factory()->NewConditional(
- is_undefined, initializer, factory()->NewVariableProxy(temp),
- RelocInfo::kNoPosition);
+
+ if (IsBindingContext() &&
+ descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
+ scope()->is_arrow_scope()) {
+ RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
+ scope()->outer_scope(), scope());
+ }
+
+ PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
RecurseIntoSubpattern(node->target(), value);
+ set_context(old_context);
+}
+
+
+// =============== AssignmentPattern only ==================
+
+void Parser::PatternRewriter::VisitProperty(v8::internal::Property* node) {
+ DCHECK(IsAssignmentContext());
+ auto value = current_value_;
+
+ Assignment* assignment =
+ factory()->NewAssignment(Token::ASSIGN, node, value, node->position());
+
+ block_->statements()->Add(
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ zone());
}
@@ -412,7 +606,6 @@ NOT_A_PATTERN(IfStatement)
NOT_A_PATTERN(ImportDeclaration)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
-NOT_A_PATTERN(Property)
NOT_A_PATTERN(RegExpLiteral)
NOT_A_PATTERN(ReturnStatement)
NOT_A_PATTERN(SloppyBlockFunctionStatement)
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/parsing/preparse-data-format.h
index c68a684562..f7d9f68cce 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/parsing/preparse-data-format.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PREPARSE_DATA_FORMAT_H_
-#define V8_PREPARSE_DATA_FORMAT_H_
+#ifndef V8_PARSING_PREPARSE_DATA_FORMAT_H_
+#define V8_PARSING_PREPARSE_DATA_FORMAT_H_
namespace v8 {
namespace internal {
@@ -38,4 +38,4 @@ struct PreparseDataConstants {
} // namespace internal
} // namespace v8.
-#endif // V8_PREPARSE_DATA_FORMAT_H_
+#endif // V8_PARSING_PREPARSE_DATA_FORMAT_H_
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index ffbfbab633..d02cd63d66 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -5,9 +5,9 @@
#include "src/base/logging.h"
#include "src/globals.h"
#include "src/hashmap.h"
-#include "src/parser.h"
-#include "src/preparse-data.h"
-#include "src/preparse-data-format.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparse-data-format.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 711ff3b895..dbe1022d1e 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PREPARSE_DATA_H_
-#define V8_PREPARSE_DATA_H_
+#ifndef V8_PARSING_PREPARSE_DATA_H_
+#define V8_PARSING_PREPARSE_DATA_H_
#include "src/allocation.h"
#include "src/hashmap.h"
#include "src/messages.h"
-#include "src/preparse-data-format.h"
+#include "src/parsing/preparse-data-format.h"
namespace v8 {
namespace internal {
@@ -209,4 +209,4 @@ class CompleteParserRecorder : public ParserRecorder {
} // namespace internal
} // namespace v8.
-#endif // V8_PREPARSE_DATA_H_
+#endif // V8_PARSING_PREPARSE_DATA_H_
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 4b86d78597..64511acc39 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -11,9 +11,10 @@
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/preparse-data.h"
-#include "src/preparse-data-format.h"
-#include "src/preparser.h"
+#include "src/parsing/parser-base.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparser.h"
#include "src/unicode.h"
#include "src/utils.h"
@@ -450,8 +451,8 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- pos, FunctionLiteral::DECLARATION,
- FunctionLiteral::NORMAL_ARITY, language_mode(),
+ pos, FunctionLiteral::kDeclaration,
+ FunctionLiteral::kNormalArity, language_mode(),
CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -477,19 +478,12 @@ PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
PreParser::Statement PreParser::ParseBlock(bool* ok) {
// Block ::
- // '{' Statement* '}'
+ // '{' StatementList '}'
- // Note that a Block does not introduce a new execution scope!
- // (ECMA-262, 3rd, 12.2)
- //
Expect(Token::LBRACE, CHECK_OK);
Statement final = Statement::Default();
while (peek() != Token::RBRACE) {
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
- final = ParseStatementListItem(CHECK_OK);
- } else {
- final = ParseStatement(CHECK_OK);
- }
+ final = ParseStatementListItem(CHECK_OK);
}
Expect(Token::RBRACE, ok);
return final;
@@ -502,8 +496,8 @@ PreParser::Statement PreParser::ParseVariableStatement(
// VariableStatement ::
// VariableDeclarations ';'
- Statement result = ParseVariableDeclarations(var_context, nullptr, nullptr,
- nullptr, CHECK_OK);
+ Statement result = ParseVariableDeclarations(
+ var_context, nullptr, nullptr, nullptr, nullptr, nullptr, CHECK_OK);
ExpectSemicolon(CHECK_OK);
return result;
}
@@ -515,9 +509,9 @@ PreParser::Statement PreParser::ParseVariableStatement(
// to initialize it properly. This mechanism is also used for the parsing
// of 'for-in' loops.
PreParser::Statement PreParser::ParseVariableDeclarations(
- VariableDeclarationContext var_context, int* num_decl,
- Scanner::Location* first_initializer_loc, Scanner::Location* bindings_loc,
- bool* ok) {
+ VariableDeclarationContext var_context, int* num_decl, bool* is_lexical,
+ bool* is_binding_pattern, Scanner::Location* first_initializer_loc,
+ Scanner::Location* bindings_loc, bool* ok) {
// VariableDeclarations ::
// ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
//
@@ -533,6 +527,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// BindingPattern '=' AssignmentExpression
bool require_initializer = false;
bool lexical = false;
+ bool is_pattern = false;
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
@@ -589,14 +584,15 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
ValidateLetPattern(&pattern_classifier, CHECK_OK);
}
- if (!allow_harmony_destructuring() && !pattern.IsIdentifier()) {
+ if (!allow_harmony_destructuring_bind() && !pattern.IsIdentifier()) {
ReportUnexpectedToken(next);
*ok = false;
return Statement::Default();
}
}
- bool is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
+ is_pattern = (pattern.IsObjectLiteral() || pattern.IsArrayLiteral()) &&
+ !pattern.is_parenthesized();
bool is_for_iteration_variable =
var_context == kForStatement &&
@@ -630,7 +626,9 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
Scanner::Location(bindings_start, scanner()->location().end_pos);
}
- if (num_decl != NULL) *num_decl = nvars;
+ if (num_decl != nullptr) *num_decl = nvars;
+ if (is_lexical != nullptr) *is_lexical = lexical;
+ if (is_binding_pattern != nullptr) *is_binding_pattern = is_pattern;
return Statement::Default();
}
@@ -919,11 +917,13 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
(peek() == Token::LET && IsNextLetKeyword())) {
int decl_count;
+ bool is_lexical;
+ bool is_binding_pattern;
Scanner::Location first_initializer_loc = Scanner::Location::invalid();
Scanner::Location bindings_loc = Scanner::Location::invalid();
- ParseVariableDeclarations(kForStatement, &decl_count,
- &first_initializer_loc, &bindings_loc,
- CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
+ &is_binding_pattern, &first_initializer_loc,
+ &bindings_loc, CHECK_OK);
bool accept_IN = decl_count >= 1;
if (accept_IN && CheckInOrOf(&mode, ok)) {
if (!*ok) return Statement::Default();
@@ -937,7 +937,8 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
return Statement::Default();
}
if (first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) || mode == ForEachStatement::ITERATE)) {
+ (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
+ is_lexical || is_binding_pattern)) {
if (mode == ForEachStatement::ITERATE) {
ReportMessageAt(first_initializer_loc,
MessageTemplate::kForOfLoopInitializer);
@@ -956,15 +957,29 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
} else {
int lhs_beg_pos = peek_position();
- Expression lhs = ParseExpression(false, CHECK_OK);
+ ExpressionClassifier classifier;
+ Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
is_let_identifier_expression =
lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
- if (CheckInOrOf(&mode, ok)) {
- if (!*ok) return Statement::Default();
- lhs = CheckAndRewriteReferenceExpression(
- lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
- kSyntaxError, CHECK_OK);
+ bool is_for_each = CheckInOrOf(&mode, ok);
+ if (!*ok) return Statement::Default();
+ bool is_destructuring = is_for_each &&
+ allow_harmony_destructuring_assignment() &&
+ (lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
+
+ if (is_destructuring) {
+ ValidateAssignmentPattern(&classifier, CHECK_OK);
+ } else {
+ ValidateExpression(&classifier, CHECK_OK);
+ }
+
+ if (is_for_each) {
+ if (!is_destructuring) {
+ lhs = CheckAndRewriteReferenceExpression(
+ lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+ kSyntaxError, CHECK_OK);
+ }
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
ParseSubStatement(CHECK_OK);
@@ -1219,10 +1234,11 @@ PreParserExpression PreParser::ParseClassLiteral(
const bool is_static = false;
bool is_computed_name = false; // Classes do not care about computed
// property names here.
+ Identifier name;
ExpressionClassifier classifier;
ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
&is_computed_name, &has_seen_constructor,
- &classifier, CHECK_OK);
+ &classifier, &name, CHECK_OK);
ValidateExpression(&classifier, CHECK_OK);
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
new file mode 100644
index 0000000000..59100f1ae9
--- /dev/null
+++ b/deps/v8/src/parsing/preparser.h
@@ -0,0 +1,1175 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSER_H
+#define V8_PARSING_PREPARSER_H
+
+#include "src/ast/scopes.h"
+#include "src/bailout-reason.h"
+#include "src/hashmap.h"
+#include "src/messages.h"
+#include "src/parsing/expression-classifier.h"
+#include "src/parsing/func-name-inferrer.h"
+#include "src/parsing/parser-base.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+
+class PreParserIdentifier {
+ public:
+ PreParserIdentifier() : type_(kUnknownIdentifier) {}
+ static PreParserIdentifier Default() {
+ return PreParserIdentifier(kUnknownIdentifier);
+ }
+ static PreParserIdentifier Eval() {
+ return PreParserIdentifier(kEvalIdentifier);
+ }
+ static PreParserIdentifier Arguments() {
+ return PreParserIdentifier(kArgumentsIdentifier);
+ }
+ static PreParserIdentifier Undefined() {
+ return PreParserIdentifier(kUndefinedIdentifier);
+ }
+ static PreParserIdentifier FutureReserved() {
+ return PreParserIdentifier(kFutureReservedIdentifier);
+ }
+ static PreParserIdentifier FutureStrictReserved() {
+ return PreParserIdentifier(kFutureStrictReservedIdentifier);
+ }
+ static PreParserIdentifier Let() {
+ return PreParserIdentifier(kLetIdentifier);
+ }
+ static PreParserIdentifier Static() {
+ return PreParserIdentifier(kStaticIdentifier);
+ }
+ static PreParserIdentifier Yield() {
+ return PreParserIdentifier(kYieldIdentifier);
+ }
+ static PreParserIdentifier Prototype() {
+ return PreParserIdentifier(kPrototypeIdentifier);
+ }
+ static PreParserIdentifier Constructor() {
+ return PreParserIdentifier(kConstructorIdentifier);
+ }
+ bool IsEval() const { return type_ == kEvalIdentifier; }
+ bool IsArguments() const { return type_ == kArgumentsIdentifier; }
+ bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
+ bool IsUndefined() const { return type_ == kUndefinedIdentifier; }
+ bool IsLet() const { return type_ == kLetIdentifier; }
+ bool IsStatic() const { return type_ == kStaticIdentifier; }
+ bool IsYield() const { return type_ == kYieldIdentifier; }
+ bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
+ bool IsConstructor() const { return type_ == kConstructorIdentifier; }
+ bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
+ bool IsFutureStrictReserved() const {
+ return type_ == kFutureStrictReservedIdentifier ||
+ type_ == kLetIdentifier || type_ == kStaticIdentifier ||
+ type_ == kYieldIdentifier;
+ }
+
+ // Allow identifier->name()[->length()] to work. The preparser
+ // does not need the actual positions/lengths of the identifiers.
+ const PreParserIdentifier* operator->() const { return this; }
+ const PreParserIdentifier raw_name() const { return *this; }
+
+ int position() const { return 0; }
+ int length() const { return 0; }
+
+ private:
+ enum Type {
+ kUnknownIdentifier,
+ kFutureReservedIdentifier,
+ kFutureStrictReservedIdentifier,
+ kLetIdentifier,
+ kStaticIdentifier,
+ kYieldIdentifier,
+ kEvalIdentifier,
+ kArgumentsIdentifier,
+ kUndefinedIdentifier,
+ kPrototypeIdentifier,
+ kConstructorIdentifier
+ };
+
+ explicit PreParserIdentifier(Type type) : type_(type) {}
+ Type type_;
+
+ friend class PreParserExpression;
+};
+
+
+class PreParserExpression {
+ public:
+ static PreParserExpression Default() {
+ return PreParserExpression(TypeField::encode(kExpression));
+ }
+
+ static PreParserExpression Spread(PreParserExpression expression) {
+ return PreParserExpression(TypeField::encode(kSpreadExpression));
+ }
+
+ static PreParserExpression FromIdentifier(PreParserIdentifier id) {
+ return PreParserExpression(TypeField::encode(kIdentifierExpression) |
+ IdentifierTypeField::encode(id.type_));
+ }
+
+ static PreParserExpression BinaryOperation(PreParserExpression left,
+ Token::Value op,
+ PreParserExpression right) {
+ return PreParserExpression(TypeField::encode(kBinaryOperationExpression));
+ }
+
+ static PreParserExpression Assignment() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kAssignment));
+ }
+
+ static PreParserExpression ObjectLiteral() {
+ return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
+ }
+
+ static PreParserExpression ArrayLiteral() {
+ return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
+ }
+
+ static PreParserExpression StringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression));
+ }
+
+ static PreParserExpression UseStrictStringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+ IsUseStrictField::encode(true));
+ }
+
+ static PreParserExpression UseStrongStringLiteral() {
+ return PreParserExpression(TypeField::encode(kStringLiteralExpression) |
+ IsUseStrongField::encode(true));
+ }
+
+ static PreParserExpression This() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kThisExpression));
+ }
+
+ static PreParserExpression ThisProperty() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kThisPropertyExpression));
+ }
+
+ static PreParserExpression Property() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kPropertyExpression));
+ }
+
+ static PreParserExpression Call() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kCallExpression));
+ }
+
+ static PreParserExpression SuperCallReference() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kSuperCallReference));
+ }
+
+ static PreParserExpression NoTemplateTag() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kNoTemplateTagExpression));
+ }
+
+ bool IsIdentifier() const {
+ return TypeField::decode(code_) == kIdentifierExpression;
+ }
+
+ PreParserIdentifier AsIdentifier() const {
+ DCHECK(IsIdentifier());
+ return PreParserIdentifier(IdentifierTypeField::decode(code_));
+ }
+
+ bool IsAssignment() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kAssignment;
+ }
+
+ bool IsObjectLiteral() const {
+ return TypeField::decode(code_) == kObjectLiteralExpression;
+ }
+
+ bool IsArrayLiteral() const {
+ return TypeField::decode(code_) == kArrayLiteralExpression;
+ }
+
+ bool IsStringLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression;
+ }
+
+ bool IsUseStrictLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression &&
+ IsUseStrictField::decode(code_);
+ }
+
+ bool IsUseStrongLiteral() const {
+ return TypeField::decode(code_) == kStringLiteralExpression &&
+ IsUseStrongField::decode(code_);
+ }
+
+ bool IsThis() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kThisExpression;
+ }
+
+ bool IsThisProperty() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kThisPropertyExpression;
+ }
+
+ bool IsProperty() const {
+ return TypeField::decode(code_) == kExpression &&
+ (ExpressionTypeField::decode(code_) == kPropertyExpression ||
+ ExpressionTypeField::decode(code_) == kThisPropertyExpression);
+ }
+
+ bool IsCall() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kCallExpression;
+ }
+
+ bool IsSuperCallReference() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kSuperCallReference;
+ }
+
+ bool IsValidReferenceExpression() const {
+ return IsIdentifier() || IsProperty();
+ }
+
+ // At the moment PreParser doesn't track these expression types.
+ bool IsFunctionLiteral() const { return false; }
+ bool IsCallNew() const { return false; }
+
+ bool IsNoTemplateTag() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
+ }
+
+ bool IsSpreadExpression() const {
+ return TypeField::decode(code_) == kSpreadExpression;
+ }
+
+ PreParserExpression AsFunctionLiteral() { return *this; }
+
+ bool IsBinaryOperation() const {
+ return TypeField::decode(code_) == kBinaryOperationExpression;
+ }
+
+ // Dummy implementation for making expression->somefunc() work in both Parser
+ // and PreParser.
+ PreParserExpression* operator->() { return this; }
+
+ // More dummy implementations of things PreParser doesn't need to track:
+ void set_index(int index) {} // For YieldExpressions
+ void set_should_eager_compile() {}
+
+ int position() const { return RelocInfo::kNoPosition; }
+ void set_function_token_position(int position) {}
+
+ // Parenthesized expressions in the form `( Expression )`.
+ void set_is_parenthesized() {
+ code_ = ParenthesizedField::update(code_, true);
+ }
+ bool is_parenthesized() const { return ParenthesizedField::decode(code_); }
+
+ private:
+ enum Type {
+ kExpression,
+ kIdentifierExpression,
+ kStringLiteralExpression,
+ kBinaryOperationExpression,
+ kSpreadExpression,
+ kObjectLiteralExpression,
+ kArrayLiteralExpression
+ };
+
+ enum ExpressionType {
+ kThisExpression,
+ kThisPropertyExpression,
+ kPropertyExpression,
+ kCallExpression,
+ kSuperCallReference,
+ kNoTemplateTagExpression,
+ kAssignment
+ };
+
+ explicit PreParserExpression(uint32_t expression_code)
+ : code_(expression_code) {}
+
+ // The first three bits are for the Type.
+ typedef BitField<Type, 0, 3> TypeField;
+
+ // The high order bit applies only to nodes which would inherit from the
+ // Expression ASTNode --- This is by necessity, due to the fact that
+ // Expression nodes may be represented as multiple Types, not exclusively
+ // through kExpression.
+ // TODO(caitp, adamk): clean up PreParserExpression bitfields.
+ typedef BitField<bool, 31, 1> ParenthesizedField;
+
+ // The rest of the bits are interpreted depending on the value
+ // of the Type field, so they can share the storage.
+ typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
+ typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
+ typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
+ typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
+ IdentifierTypeField;
+ typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
+
+ uint32_t code_;
+};
+
+
+// The pre-parser doesn't need to build lists of expressions, identifiers, or
+// the like.
+template <typename T>
+class PreParserList {
+ public:
+ // These functions make list->Add(some_expression) work (and do nothing).
+ PreParserList() : length_(0) {}
+ PreParserList* operator->() { return this; }
+ void Add(T, void*) { ++length_; }
+ int length() const { return length_; }
+ private:
+ int length_;
+};
+
+
+typedef PreParserList<PreParserExpression> PreParserExpressionList;
+
+
+class PreParserStatement {
+ public:
+ static PreParserStatement Default() {
+ return PreParserStatement(kUnknownStatement);
+ }
+
+ static PreParserStatement Jump() {
+ return PreParserStatement(kJumpStatement);
+ }
+
+ static PreParserStatement FunctionDeclaration() {
+ return PreParserStatement(kFunctionDeclaration);
+ }
+
+ // Creates expression statement from expression.
+ // Preserves being an unparenthesized string literal, possibly
+ // "use strict".
+ static PreParserStatement ExpressionStatement(
+ PreParserExpression expression) {
+ if (expression.IsUseStrictLiteral()) {
+ return PreParserStatement(kUseStrictExpressionStatement);
+ }
+ if (expression.IsUseStrongLiteral()) {
+ return PreParserStatement(kUseStrongExpressionStatement);
+ }
+ if (expression.IsStringLiteral()) {
+ return PreParserStatement(kStringLiteralExpressionStatement);
+ }
+ return Default();
+ }
+
+ bool IsStringLiteral() {
+ return code_ == kStringLiteralExpressionStatement;
+ }
+
+ bool IsUseStrictLiteral() {
+ return code_ == kUseStrictExpressionStatement;
+ }
+
+ bool IsUseStrongLiteral() { return code_ == kUseStrongExpressionStatement; }
+
+ bool IsFunctionDeclaration() {
+ return code_ == kFunctionDeclaration;
+ }
+
+ bool IsJumpStatement() {
+ return code_ == kJumpStatement;
+ }
+
+ private:
+ enum Type {
+ kUnknownStatement,
+ kJumpStatement,
+ kStringLiteralExpressionStatement,
+ kUseStrictExpressionStatement,
+ kUseStrongExpressionStatement,
+ kFunctionDeclaration
+ };
+
+ explicit PreParserStatement(Type code) : code_(code) {}
+ Type code_;
+};
+
+
+typedef PreParserList<PreParserStatement> PreParserStatementList;
+
+
+class PreParserFactory {
+ public:
+ explicit PreParserFactory(void* unused_value_factory) {}
+ PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewNumberLiteral(double number,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
+ int js_flags, int literal_index,
+ bool is_strong, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int literal_index,
+ bool is_strong,
+ int pos) {
+ return PreParserExpression::ArrayLiteral();
+ }
+ PreParserExpression NewArrayLiteral(PreParserExpressionList values,
+ int first_spread_index, int literal_index,
+ bool is_strong, int pos) {
+ return PreParserExpression::ArrayLiteral();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value,
+ ObjectLiteralProperty::Kind kind,
+ bool is_static,
+ bool is_computed_name) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
+ PreParserExpression value,
+ bool is_static,
+ bool is_computed_name) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
+ int literal_index,
+ int boilerplate_properties,
+ bool has_function,
+ bool is_strong,
+ int pos) {
+ return PreParserExpression::ObjectLiteral();
+ }
+ PreParserExpression NewVariableProxy(void* variable) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewProperty(PreParserExpression obj,
+ PreParserExpression key,
+ int pos) {
+ if (obj.IsThis()) {
+ return PreParserExpression::ThisProperty();
+ }
+ return PreParserExpression::Property();
+ }
+ PreParserExpression NewUnaryOperation(Token::Value op,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewBinaryOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::BinaryOperation(left, op, right);
+ }
+ PreParserExpression NewCompareOperation(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewRewritableAssignmentExpression(
+ PreParserExpression expression) {
+ return expression;
+ }
+ PreParserExpression NewAssignment(Token::Value op,
+ PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return PreParserExpression::Assignment();
+ }
+ PreParserExpression NewYield(PreParserExpression generator_object,
+ PreParserExpression expression,
+ Yield::Kind yield_kind,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewConditional(PreParserExpression condition,
+ PreParserExpression then_expression,
+ PreParserExpression else_expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCountOperation(Token::Value op,
+ bool is_prefix,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCall(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Call();
+ }
+ PreParserExpression NewCallNew(PreParserExpression expression,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewCallRuntime(const AstRawString* name,
+ const Runtime::Function* function,
+ PreParserExpressionList arguments,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserStatement NewReturnStatement(PreParserExpression expression,
+ int pos) {
+ return PreParserStatement::Default();
+ }
+ PreParserExpression NewFunctionLiteral(
+ PreParserIdentifier name, Scope* scope, PreParserStatementList body,
+ int materialized_literal_count, int expected_property_count,
+ int parameter_count,
+ FunctionLiteral::ParameterFlag has_duplicate_parameters,
+ FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
+ int position) {
+ return PreParserExpression::Default();
+ }
+
+ PreParserExpression NewSpread(PreParserExpression expression, int pos) {
+ return PreParserExpression::Spread(expression);
+ }
+
+ PreParserExpression NewEmptyParentheses(int pos) {
+ return PreParserExpression::Default();
+ }
+
+ // Return the object itself as AstVisitor and implement the needed
+ // dummy method right in this class.
+ PreParserFactory* visitor() { return this; }
+ int* ast_properties() {
+ static int dummy = 42;
+ return &dummy;
+ }
+};
+
+
+struct PreParserFormalParameters : FormalParametersBase {
+ explicit PreParserFormalParameters(Scope* scope)
+ : FormalParametersBase(scope) {}
+ int arity = 0;
+
+ int Arity() const { return arity; }
+ PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
+};
+
+
+class PreParser;
+
+class PreParserTraits {
+ public:
+ struct Type {
+ // TODO(marja): To be removed. The Traits object should contain all the data
+ // it needs.
+ typedef PreParser* Parser;
+
+ // PreParser doesn't need to store generator variables.
+ typedef void GeneratorVariable;
+
+ typedef int AstProperties;
+
+ // Return types for traversing functions.
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserExpression YieldExpression;
+ typedef PreParserExpression FunctionLiteral;
+ typedef PreParserExpression ClassLiteral;
+ typedef PreParserExpression ObjectLiteralProperty;
+ typedef PreParserExpression Literal;
+ typedef PreParserExpressionList ExpressionList;
+ typedef PreParserExpressionList PropertyList;
+ typedef PreParserIdentifier FormalParameter;
+ typedef PreParserFormalParameters FormalParameters;
+ typedef PreParserStatementList StatementList;
+
+ // For constructing objects returned by the traversing functions.
+ typedef PreParserFactory Factory;
+ };
+
+ explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+
+ // Helper functions for recursive descent.
+ static bool IsEval(PreParserIdentifier identifier) {
+ return identifier.IsEval();
+ }
+
+ static bool IsArguments(PreParserIdentifier identifier) {
+ return identifier.IsArguments();
+ }
+
+ static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ return identifier.IsEvalOrArguments();
+ }
+
+ static bool IsUndefined(PreParserIdentifier identifier) {
+ return identifier.IsUndefined();
+ }
+
+ static bool IsPrototype(PreParserIdentifier identifier) {
+ return identifier.IsPrototype();
+ }
+
+ static bool IsConstructor(PreParserIdentifier identifier) {
+ return identifier.IsConstructor();
+ }
+
+ // Returns true if the expression is of type "this.foo".
+ static bool IsThisProperty(PreParserExpression expression) {
+ return expression.IsThisProperty();
+ }
+
+ static bool IsIdentifier(PreParserExpression expression) {
+ return expression.IsIdentifier();
+ }
+
+ static PreParserIdentifier AsIdentifier(PreParserExpression expression) {
+ return expression.AsIdentifier();
+ }
+
+ static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
+ return identifier.IsFutureStrictReserved();
+ }
+
+ static bool IsBoilerplateProperty(PreParserExpression property) {
+ // PreParser doesn't count boilerplate properties.
+ return false;
+ }
+
+ static bool IsArrayIndex(PreParserIdentifier string, uint32_t* index) {
+ return false;
+ }
+
+ static PreParserExpression GetPropertyValue(PreParserExpression property) {
+ return PreParserExpression::Default();
+ }
+
+ // Functions for encapsulating the differences between parsing and preparsing;
+ // operations interleaved with the recursive descent.
+ static void PushLiteralName(FuncNameInferrer* fni, PreParserIdentifier id) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void PushPropertyName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void InferFunctionName(FuncNameInferrer* fni,
+ PreParserExpression expression) {
+ // PreParser should not use FuncNameInferrer.
+ UNREACHABLE();
+ }
+
+ static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
+ Scope* scope, PreParserExpression property, bool* has_function) {}
+
+ static void CheckAssigningFunctionLiteralToProperty(
+ PreParserExpression left, PreParserExpression right) {}
+
+ static PreParserExpression MarkExpressionAsAssigned(
+ PreParserExpression expression) {
+ // TODO(marja): To be able to produce the same errors, the preparser needs
+ // to start tracking which expressions are variables and which are assigned.
+ return expression;
+ }
+
+ bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
+ PreParserExpression y,
+ Token::Value op,
+ int pos,
+ PreParserFactory* factory) {
+ return false;
+ }
+
+ PreParserExpression BuildUnaryExpression(PreParserExpression expression,
+ Token::Value op, int pos,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
+ Handle<Object> arg, int pos) {
+ return PreParserExpression::Default();
+ }
+ PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
+ Handle<Object> arg, int pos) {
+ return PreParserExpression::Default();
+ }
+
+ // Reporting errors.
+ void ReportMessageAt(Scanner::Location location,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError);
+ void ReportMessageAt(int start_pos, int end_pos,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError);
+
+ // "null" return type creators.
+ static PreParserIdentifier EmptyIdentifier() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserIdentifier EmptyIdentifierString() {
+ return PreParserIdentifier::Default();
+ }
+ static PreParserExpression EmptyExpression() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyObjectLiteralProperty() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpression EmptyFunctionLiteral() {
+ return PreParserExpression::Default();
+ }
+ static PreParserExpressionList NullExpressionList() {
+ return PreParserExpressionList();
+ }
+
+ // Odd-ball literal creators.
+ static PreParserExpression GetLiteralTheHole(int position,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ // Producing data during the recursive descent.
+ PreParserIdentifier GetSymbol(Scanner* scanner);
+ PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
+
+ static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
+ return PreParserIdentifier::Default();
+ }
+
+ static PreParserExpression ThisExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::This();
+ }
+
+ static PreParserExpression SuperPropertyReference(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression SuperCallReference(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::SuperCallReference();
+ }
+
+ static PreParserExpression NewTargetExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
+ int pos, int end_pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ PreParserFactory* factory) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpression ExpressionFromIdentifier(
+ PreParserIdentifier name, int start_position, int end_position,
+ Scope* scope, PreParserFactory* factory) {
+ return PreParserExpression::FromIdentifier(name);
+ }
+
+ PreParserExpression ExpressionFromString(int pos,
+ Scanner* scanner,
+ PreParserFactory* factory = NULL);
+
+ PreParserExpression GetIterator(PreParserExpression iterable,
+ PreParserFactory* factory, int pos) {
+ return PreParserExpression::Default();
+ }
+
+ static PreParserExpressionList NewExpressionList(int size, Zone* zone) {
+ return PreParserExpressionList();
+ }
+
+ static PreParserStatementList NewStatementList(int size, Zone* zone) {
+ return PreParserStatementList();
+ }
+
+ static PreParserExpressionList NewPropertyList(int size, Zone* zone) {
+ return PreParserExpressionList();
+ }
+
+ static void AddParameterInitializationBlock(
+ const PreParserFormalParameters& parameters,
+ PreParserStatementList list, bool* ok) {}
+
+ V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
+ int* expected_property_count, bool* ok) {
+ UNREACHABLE();
+ }
+
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
+
+ V8_INLINE void ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters,
+ PreParserExpression expression, const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok);
+
+ void ReindexLiterals(const PreParserFormalParameters& paramaters) {}
+
+ struct TemplateLiteralState {};
+
+ TemplateLiteralState OpenTemplateLiteral(int pos) {
+ return TemplateLiteralState();
+ }
+ void AddTemplateSpan(TemplateLiteralState*, bool) {}
+ void AddTemplateExpression(TemplateLiteralState*, PreParserExpression) {}
+ PreParserExpression CloseTemplateLiteral(TemplateLiteralState*, int,
+ PreParserExpression tag) {
+ if (IsTaggedTemplate(tag)) {
+ // Emulate generation of array literals for tag callsite
+ // 1st is array of cooked strings, second is array of raw strings
+ MaterializeTemplateCallsiteLiterals();
+ }
+ return EmptyExpression();
+ }
+ inline void MaterializeTemplateCallsiteLiterals();
+ PreParserExpression NoTemplateTag() {
+ return PreParserExpression::NoTemplateTag();
+ }
+ static bool IsTaggedTemplate(const PreParserExpression tag) {
+ return !tag.IsNoTemplateTag();
+ }
+
+ void AddFormalParameter(PreParserFormalParameters* parameters,
+ PreParserExpression pattern,
+ PreParserExpression initializer,
+ int initializer_end_position, bool is_rest) {
+ ++parameters->arity;
+ }
+ void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
+ ExpressionClassifier* classifier) {
+ if (!classifier->is_simple_parameter_list()) {
+ scope->SetHasNonSimpleParameters();
+ }
+ }
+
+ void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
+
+ // Temporary glue; these functions will move to ParserBase.
+ PreParserExpression ParseV8Intrinsic(bool* ok);
+ V8_INLINE PreParserExpression ParseDoExpression(bool* ok);
+ PreParserExpression ParseFunctionLiteral(
+ PreParserIdentifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_position, FunctionLiteral::FunctionType type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
+
+ PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok);
+
+ PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
+ return list;
+ }
+
+ inline void MaterializeUnspreadArgumentsLiterals(int count);
+
+ inline PreParserExpression SpreadCall(PreParserExpression function,
+ PreParserExpressionList args, int pos);
+
+ inline PreParserExpression SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos);
+
+ inline void RewriteDestructuringAssignments() {}
+
+ inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
+
+ void SetFunctionNameFromPropertyName(PreParserExpression,
+ PreParserIdentifier) {}
+ void SetFunctionNameFromIdentifierRef(PreParserExpression,
+ PreParserExpression) {}
+
+ inline PreParserExpression RewriteNonPattern(
+ PreParserExpression expr, const ExpressionClassifier* classifier,
+ bool* ok);
+ inline PreParserExpression RewriteNonPatternArguments(
+ PreParserExpression args, const ExpressionClassifier* classifier,
+ bool* ok);
+ inline PreParserExpression RewriteNonPatternObjectLiteralProperty(
+ PreParserExpression property, const ExpressionClassifier* classifier,
+ bool* ok);
+
+ private:
+ PreParser* pre_parser_;
+};
+
+
+// Preparsing checks a JavaScript program and emits preparse-data that helps
+// a later parsing to be faster.
+// See preparse-data-format.h for the data format.
+
+// The PreParser checks that the syntax follows the grammar for JavaScript,
+// and collects some information about the program along the way.
+// The grammar check is only performed in order to understand the program
+// sufficiently to deduce some information about it, that can be used
+// to speed up later parsing. Finding errors is not the goal of pre-parsing,
+// rather it is to speed up properly written and correct programs.
+// That means that contextual checks (like a label being declared where
+// it is used) are generally omitted.
+class PreParser : public ParserBase<PreParserTraits> {
+ public:
+ typedef PreParserIdentifier Identifier;
+ typedef PreParserExpression Expression;
+ typedef PreParserStatement Statement;
+
+ enum PreParseResult {
+ kPreParseStackOverflow,
+ kPreParseSuccess
+ };
+
+ PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
+ ParserRecorder* log, uintptr_t stack_limit)
+ : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
+ ast_value_factory, log, this) {}
+
+ // Pre-parse the program from the character stream; returns true on
+ // success (even if parsing failed, the pre-parse data successfully
+ // captured the syntax error), and false if a stack-overflow happened
+ // during parsing.
+ PreParseResult PreParseProgram(int* materialized_literals = 0) {
+ Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
+ PreParserFactory factory(NULL);
+ FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
+ &factory);
+ bool ok = true;
+ int start_position = scanner()->peek_location().beg_pos;
+ ParseStatementList(Token::EOS, &ok);
+ if (stack_overflow()) return kPreParseStackOverflow;
+ if (!ok) {
+ ReportUnexpectedToken(scanner()->current_token());
+ } else if (is_strict(scope_->language_mode())) {
+ CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
+ &ok);
+ }
+ if (materialized_literals) {
+ *materialized_literals = function_state_->materialized_literal_count();
+ }
+ return kPreParseSuccess;
+ }
+
+ // Parses a single function literal, from the opening parentheses before
+ // parameters to the closing brace after the body.
+ // Returns a FunctionEntry describing the body of the function in enough
+ // detail that it can be lazily compiled.
+ // The scanner is expected to have matched the "function" or "function*"
+ // keyword and parameters, and have consumed the initial '{'.
+ // At return, unless an error occurred, the scanner is positioned before the
+ // the final '}'.
+ PreParseResult PreParseLazyFunction(
+ LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
+ ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
+
+ private:
+ friend class PreParserTraits;
+
+ static const int kLazyParseTrialLimit = 200;
+
+ // These types form an algebra over syntactic categories that is just
+ // rich enough to let us recognize and propagate the constructs that
+ // are either being counted in the preparser data, or is important
+ // to throw the correct syntax error exceptions.
+
+ // All ParseXXX functions take as the last argument an *ok parameter
+ // which is set to false if parsing failed; it is unchanged otherwise.
+ // By making the 'exception handling' explicit, we are forced to check
+ // for failure at the call sites.
+ Statement ParseStatementListItem(bool* ok);
+ void ParseStatementList(int end_token, bool* ok,
+ Scanner::BookmarkScope* bookmark = nullptr);
+ Statement ParseStatement(bool* ok);
+ Statement ParseSubStatement(bool* ok);
+ Statement ParseFunctionDeclaration(bool* ok);
+ Statement ParseClassDeclaration(bool* ok);
+ Statement ParseBlock(bool* ok);
+ Statement ParseVariableStatement(VariableDeclarationContext var_context,
+ bool* ok);
+ Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
+ int* num_decl, bool* is_lexical,
+ bool* is_binding_pattern,
+ Scanner::Location* first_initializer_loc,
+ Scanner::Location* bindings_loc,
+ bool* ok);
+ Statement ParseExpressionOrLabelledStatement(bool* ok);
+ Statement ParseIfStatement(bool* ok);
+ Statement ParseContinueStatement(bool* ok);
+ Statement ParseBreakStatement(bool* ok);
+ Statement ParseReturnStatement(bool* ok);
+ Statement ParseWithStatement(bool* ok);
+ Statement ParseSwitchStatement(bool* ok);
+ Statement ParseDoWhileStatement(bool* ok);
+ Statement ParseWhileStatement(bool* ok);
+ Statement ParseForStatement(bool* ok);
+ Statement ParseThrowStatement(bool* ok);
+ Statement ParseTryStatement(bool* ok);
+ Statement ParseDebuggerStatement(bool* ok);
+ Expression ParseConditionalExpression(bool accept_IN, bool* ok);
+ Expression ParseObjectLiteral(bool* ok);
+ Expression ParseV8Intrinsic(bool* ok);
+ Expression ParseDoExpression(bool* ok);
+
+ V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
+ int* expected_property_count, bool* ok);
+ V8_INLINE PreParserStatementList ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok);
+
+ Expression ParseFunctionLiteral(
+ Identifier name, Scanner::Location function_name_location,
+ FunctionNameValidity function_name_validity, FunctionKind kind,
+ int function_token_pos, FunctionLiteral::FunctionType function_type,
+ FunctionLiteral::ArityRestriction arity_restriction,
+ LanguageMode language_mode, bool* ok);
+ void ParseLazyFunctionLiteralBody(bool* ok,
+ Scanner::BookmarkScope* bookmark = nullptr);
+
+ PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok);
+};
+
+
+void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+}
+
+
+void PreParserTraits::MaterializeUnspreadArgumentsLiterals(int count) {
+ for (int i = 0; i < count; ++i) {
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+ }
+}
+
+
+PreParserExpression PreParserTraits::SpreadCall(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos) {
+ return pre_parser_->factory()->NewCall(function, args, pos);
+}
+
+PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos) {
+ return pre_parser_->factory()->NewCallNew(function, args, pos);
+}
+
+
+void PreParserTraits::ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters,
+ PreParserExpression params, const Scanner::Location& params_loc,
+ Scanner::Location* duplicate_loc, bool* ok) {
+ // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
+ // lists that are too long.
+}
+
+
+PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
+ return pre_parser_->ParseDoExpression(ok);
+}
+
+
+PreParserExpression PreParserTraits::RewriteNonPattern(
+ PreParserExpression expr, const ExpressionClassifier* classifier,
+ bool* ok) {
+ pre_parser_->ValidateExpression(classifier, ok);
+ return expr;
+}
+
+
+PreParserExpression PreParserTraits::RewriteNonPatternArguments(
+ PreParserExpression args, const ExpressionClassifier* classifier,
+ bool* ok) {
+ pre_parser_->ValidateExpression(classifier, ok);
+ return args;
+}
+
+
+PreParserExpression PreParserTraits::RewriteNonPatternObjectLiteralProperty(
+ PreParserExpression property, const ExpressionClassifier* classifier,
+ bool* ok) {
+ pre_parser_->ValidateExpression(classifier, ok);
+ return property;
+}
+
+
+PreParserStatementList PreParser::ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
+ ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+
+ ParseStatementList(Token::RBRACE, ok);
+ if (!*ok) return PreParserStatementList();
+
+ Expect(Token::RBRACE, ok);
+ return PreParserStatementList();
+}
+
+
+PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
+ PreParserIdentifier function_name, int pos,
+ const PreParserFormalParameters& parameters, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type, bool* ok) {
+ return pre_parser_->ParseEagerFunctionBody(function_name, pos, parameters,
+ kind, function_type, ok);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PREPARSER_H
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 1f19739331..4da60aca18 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/rewriter.h"
+#include "src/parsing/rewriter.h"
-#include "src/ast.h"
-#include "src/parser.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/rewriter.h b/deps/v8/src/parsing/rewriter.h
index fdb36d1d3e..477644a756 100644
--- a/deps/v8/src/rewriter.h
+++ b/deps/v8/src/parsing/rewriter.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REWRITER_H_
-#define V8_REWRITER_H_
+#ifndef V8_PARSING_REWRITER_H_
+#define V8_PARSING_REWRITER_H_
namespace v8 {
namespace internal {
@@ -33,4 +33,4 @@ class Rewriter {
} // namespace internal
} // namespace v8
-#endif // V8_REWRITER_H_
+#endif // V8_PARSING_REWRITER_H_
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index a58f392c0c..91ed54f7be 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/scanner-character-streams.h"
+#include "src/parsing/scanner-character-streams.h"
#include "include/v8.h"
#include "src/globals.h"
@@ -193,7 +193,8 @@ Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
size_t Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, size_t length,
const byte* src, size_t* src_pos,
size_t src_length) {
- static const unibrow::uchar kMaxUtf16Character = 0xffff;
+ static const unibrow::uchar kMaxUtf16Character =
+ unibrow::Utf16::kMaxNonSurrogateCharCode;
size_t i = 0;
// Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
// one character early (in the normal case), because we need to have at least
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 8a0ae23926..603db93d02 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SCANNER_CHARACTER_STREAMS_H_
-#define V8_SCANNER_CHARACTER_STREAMS_H_
+#ifndef V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
+#define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
#include "src/handles.h"
-#include "src/scanner.h"
+#include "src/parsing/scanner.h"
#include "src/vector.h"
namespace v8 {
@@ -186,4 +186,4 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
} // namespace internal
} // namespace v8
-#endif // V8_SCANNER_CHARACTER_STREAMS_H_
+#endif // V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 04712e9f32..19fab9355e 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -4,17 +4,17 @@
// Features shared by parsing and pre-parsing scanners.
-#include "src/scanner.h"
+#include "src/parsing/scanner.h"
#include <stdint.h>
#include <cmath>
-#include "src/ast-value-factory.h"
+#include "src/ast/ast-value-factory.h"
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
#include "src/list-inl.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -356,7 +356,7 @@ Token::Value Scanner::SkipSourceURLComment() {
void Scanner::TryToParseSourceURLComment() {
- // Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
+ // Magic comments are of the form: //[#]\s<name>=\s*<value>\s*.* and this
// function will just return if it cannot parse a magic comment.
if (c0_ < 0 || !unicode_cache_->IsWhiteSpace(c0_)) return;
Advance();
@@ -574,7 +574,7 @@ void Scanner::Scan() {
Advance();
if (c0_ == '/') {
Advance();
- if (c0_ == '@' || c0_ == '#') {
+ if (c0_ == '#') {
Advance();
token = SkipSourceURLComment();
} else {
@@ -1177,7 +1177,7 @@ uc32 Scanner::ScanUnicodeEscape() {
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
- int input_length) {
+ int input_length, bool escaped) {
DCHECK(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -1189,26 +1189,30 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
#define KEYWORD_GROUP_CASE(ch) \
break; \
case ch:
-#define KEYWORD(keyword, token) \
- { \
- /* 'keyword' is a char array, so sizeof(keyword) is */ \
- /* strlen(keyword) plus 1 for the NUL char. */ \
- const int keyword_length = sizeof(keyword) - 1; \
- STATIC_ASSERT(keyword_length >= kMinLength); \
- STATIC_ASSERT(keyword_length <= kMaxLength); \
- if (input_length == keyword_length && \
- input[1] == keyword[1] && \
- (keyword_length <= 2 || input[2] == keyword[2]) && \
- (keyword_length <= 3 || input[3] == keyword[3]) && \
- (keyword_length <= 4 || input[4] == keyword[4]) && \
- (keyword_length <= 5 || input[5] == keyword[5]) && \
- (keyword_length <= 6 || input[6] == keyword[6]) && \
- (keyword_length <= 7 || input[7] == keyword[7]) && \
- (keyword_length <= 8 || input[8] == keyword[8]) && \
- (keyword_length <= 9 || input[9] == keyword[9])) { \
- return token; \
- } \
- }
+#define KEYWORD(keyword, token) \
+ { \
+ /* 'keyword' is a char array, so sizeof(keyword) is */ \
+ /* strlen(keyword) plus 1 for the NUL char. */ \
+ const int keyword_length = sizeof(keyword) - 1; \
+ STATIC_ASSERT(keyword_length >= kMinLength); \
+ STATIC_ASSERT(keyword_length <= kMaxLength); \
+ if (input_length == keyword_length && input[1] == keyword[1] && \
+ (keyword_length <= 2 || input[2] == keyword[2]) && \
+ (keyword_length <= 3 || input[3] == keyword[3]) && \
+ (keyword_length <= 4 || input[4] == keyword[4]) && \
+ (keyword_length <= 5 || input[5] == keyword[5]) && \
+ (keyword_length <= 6 || input[6] == keyword[6]) && \
+ (keyword_length <= 7 || input[7] == keyword[7]) && \
+ (keyword_length <= 8 || input[8] == keyword[8]) && \
+ (keyword_length <= 9 || input[9] == keyword[9])) { \
+ if (escaped) { \
+ return token == Token::FUTURE_STRICT_RESERVED_WORD \
+ ? Token::ESCAPED_STRICT_RESERVED_WORD \
+ : Token::ESCAPED_KEYWORD; \
+ } \
+ return token; \
+ } \
+ }
KEYWORDS(KEYWORD_GROUP_CASE, KEYWORD)
}
return Token::IDENTIFIER;
@@ -1224,7 +1228,7 @@ bool Scanner::IdentifierIsFutureStrictReserved(
return true;
}
return Token::FUTURE_STRICT_RESERVED_WORD ==
- KeywordOrIdentifierToken(string->raw_data(), string->length());
+ KeywordOrIdentifierToken(string->raw_data(), string->length(), false);
}
@@ -1257,7 +1261,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
// Only a-z+: could be a keyword or identifier.
literal.Complete();
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length());
+ return KeywordOrIdentifierToken(chars.start(), chars.length(), false);
}
HandleLeadSurrogate();
@@ -1284,7 +1288,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
return Token::ILLEGAL;
}
AddLiteralChar(c);
- return ScanIdentifierSuffix(&literal);
+ return ScanIdentifierSuffix(&literal, true);
} else {
uc32 first_char = c0_;
Advance();
@@ -1300,24 +1304,26 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
continue;
}
// Fallthrough if no longer able to complete keyword.
- return ScanIdentifierSuffix(&literal);
+ return ScanIdentifierSuffix(&literal, false);
}
literal.Complete();
if (next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length());
+ return KeywordOrIdentifierToken(chars.start(), chars.length(), false);
}
return Token::IDENTIFIER;
}
-Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal,
+ bool escaped) {
// Scan the rest of the identifier characters.
while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ == '\\') {
uc32 c = ScanIdentifierUnicodeEscape();
+ escaped = true;
// Only allow legal identifier part characters.
if (c < 0 ||
c == '\\' ||
@@ -1332,6 +1338,10 @@ Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
}
literal->Complete();
+ if (escaped && next_.literal_chars->is_one_byte()) {
+ Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
+ return KeywordOrIdentifierToken(chars.start(), chars.length(), true);
+ }
return Token::IDENTIFIER;
}
@@ -1383,20 +1393,41 @@ bool Scanner::ScanRegExpPattern(bool seen_equal) {
}
-bool Scanner::ScanRegExpFlags() {
+Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
// Scan regular expression flags.
LiteralScope literal(this);
+ int flags = 0;
while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
- if (c0_ != '\\') {
- AddLiteralCharAdvance();
- } else {
- return false;
+ RegExp::Flags flag = RegExp::kNone;
+ switch (c0_) {
+ case 'g':
+ flag = RegExp::kGlobal;
+ break;
+ case 'i':
+ flag = RegExp::kIgnoreCase;
+ break;
+ case 'm':
+ flag = RegExp::kMultiline;
+ break;
+ case 'u':
+ if (!FLAG_harmony_unicode_regexps) return Nothing<RegExp::Flags>();
+ flag = RegExp::kUnicode;
+ break;
+ case 'y':
+ if (!FLAG_harmony_regexps) return Nothing<RegExp::Flags>();
+ flag = RegExp::kSticky;
+ break;
+ default:
+ return Nothing<RegExp::Flags>();
}
+ if (flags & flag) return Nothing<RegExp::Flags>();
+ AddLiteralCharAdvance();
+ flags |= flag;
}
literal.Complete();
next_.location.end_pos = source_pos();
- return true;
+ return Just(RegExp::Flags(flags));
}
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/parsing/scanner.h
index 6d0d4dc8ed..1d0aba0611 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -4,8 +4,8 @@
// Features shared by parsing and pre-parsing scanners.
-#ifndef V8_SCANNER_H_
-#define V8_SCANNER_H_
+#ifndef V8_PARSING_SCANNER_H_
+#define V8_PARSING_SCANNER_H_
#include "src/allocation.h"
#include "src/base/logging.h"
@@ -13,7 +13,7 @@
#include "src/globals.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/token.h"
+#include "src/parsing/token.h"
#include "src/unicode.h"
#include "src/unicode-decoder.h"
#include "src/utils.h"
@@ -28,17 +28,6 @@ class ParserRecorder;
class UnicodeCache;
-// Returns the value (0 .. 15) of a hexadecimal character c.
-// If c is not a legal hexadecimal character, returns a value < 0.
-inline int HexValue(uc32 c) {
- c -= '0';
- if (static_cast<unsigned>(c) <= 9) return c;
- c = (c | 0x20) - ('a' - '0'); // detect 0x11..0x16 and 0x31..0x36.
- if (static_cast<unsigned>(c) <= 5) return c + 10;
- return -1;
-}
-
-
// ---------------------------------------------------------------------
// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
// A code unit is a 16 bit value representing either a 16 bit code point
@@ -373,13 +362,10 @@ class Scanner {
Location peek_location() const { return next_.location; }
bool literal_contains_escapes() const {
- Location location = current_.location;
- int source_length = (location.end_pos - location.beg_pos);
- if (current_.token == Token::STRING) {
- // Subtract delimiters.
- source_length -= 2;
- }
- return current_.literal_chars->length() != source_length;
+ return LiteralContainsEscapes(current_);
+ }
+ bool next_literal_contains_escapes() const {
+ return LiteralContainsEscapes(next_);
}
bool is_literal_contextual_keyword(Vector<const char> keyword) {
DCHECK_NOT_NULL(current_.literal_chars);
@@ -448,9 +434,8 @@ class Scanner {
// Scans the input as a regular expression pattern, previous
// character(s) must be /(=). Returns true if a pattern is scanned.
bool ScanRegExpPattern(bool seen_equal);
- // Returns true if regexp flags are scanned (always since flags can
- // be empty).
- bool ScanRegExpFlags();
+ // Scans the input as regular expression flags. Returns the flags on success.
+ Maybe<RegExp::Flags> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateStart();
@@ -665,7 +650,7 @@ class Scanner {
void ScanDecimalDigits();
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
- Token::Value ScanIdentifierSuffix(LiteralScope* literal);
+ Token::Value ScanIdentifierSuffix(LiteralScope* literal, bool escaped);
Token::Value ScanString();
@@ -689,6 +674,16 @@ class Scanner {
return static_cast<int>(source_->pos()) - kCharacterLookaheadBufferSize;
}
+ static bool LiteralContainsEscapes(const TokenDesc& token) {
+ Location location = token.location;
+ int source_length = (location.end_pos - location.beg_pos);
+ if (token.token == Token::STRING) {
+ // Subtract delimiters.
+ source_length -= 2;
+ }
+ return token.literal_chars->length() != source_length;
+ }
+
UnicodeCache* unicode_cache_;
// Buffers collecting literal strings, numbers, etc.
@@ -762,4 +757,4 @@ class Scanner {
} // namespace internal
} // namespace v8
-#endif // V8_SCANNER_H_
+#endif // V8_PARSING_SCANNER_H_
diff --git a/deps/v8/src/token.cc b/deps/v8/src/parsing/token.cc
index 73e883f4bd..7edfefa821 100644
--- a/deps/v8/src/token.cc
+++ b/deps/v8/src/parsing/token.cc
@@ -3,7 +3,8 @@
// found in the LICENSE file.
#include <stdint.h>
-#include "src/token.h"
+
+#include "src/parsing/token.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/token.h b/deps/v8/src/parsing/token.h
index 2443e84238..fee1f7e85a 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TOKEN_H_
-#define V8_TOKEN_H_
+#ifndef V8_PARSING_TOKEN_H_
+#define V8_PARSING_TOKEN_H_
#include "src/base/logging.h"
#include "src/globals.h"
@@ -50,10 +50,7 @@ namespace internal {
/* IsAssignmentOp() and Assignment::is_compound() relies on */ \
/* this block of enum values being contiguous and sorted in the */ \
/* same order! */ \
- T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
- T(INIT_LET, "=init_let", 2) /* AST-use only. */ \
- T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
- T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */ \
+ T(INIT, "=init", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
T(ASSIGN_BIT_OR, "|=", 2) \
T(ASSIGN_BIT_XOR, "^=", 2) \
@@ -163,6 +160,8 @@ namespace internal {
\
/* Illegal token - not able to scan. */ \
T(ILLEGAL, "ILLEGAL", 0) \
+ T(ESCAPED_KEYWORD, NULL, 0) \
+ T(ESCAPED_STRICT_RESERVED_WORD, NULL, 0) \
\
/* Scanner-internal use only. */ \
T(WHITESPACE, NULL, 0) \
@@ -200,6 +199,7 @@ class Token {
switch (tok) {
case IDENTIFIER:
return true;
+ case ESCAPED_STRICT_RESERVED_WORD:
case FUTURE_STRICT_RESERVED_WORD:
case LET:
case STATIC:
@@ -214,7 +214,7 @@ class Token {
}
static bool IsAssignmentOp(Value tok) {
- return INIT_VAR <= tok && tok <= ASSIGN_MOD;
+ return INIT <= tok && tok <= ASSIGN_MOD;
}
static bool IsBinaryOp(Value op) {
@@ -321,4 +321,4 @@ class Token {
} // namespace internal
} // namespace v8
-#endif // V8_TOKEN_H_
+#endif // V8_PARSING_TOKEN_H_
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 4f6a35d66e..b384d3f4f9 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -60,7 +60,7 @@ void RelocInfo::apply(intptr_t delta) {
// mov sequence
DCHECK(IsInternalReferenceEncoded(rmode_));
Address target = Assembler::target_address_at(pc_, host_);
- Assembler::set_target_address_at(pc_, host_, target + delta,
+ Assembler::set_target_address_at(isolate_, pc_, host_, target + delta,
SKIP_ICACHE_FLUSH);
}
}
@@ -136,7 +136,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -196,8 +197,9 @@ void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(
- pc_, host_, reinterpret_cast<Address>(target), icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
@@ -286,7 +288,7 @@ Code* RelocInfo::code_age_stub() {
void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + kCodeAgingTargetDelta, host_,
+ Assembler::set_target_address_at(isolate_, pc_ + kCodeAgingTargetDelta, host_,
stub->instruction_start(),
icache_flush_mode);
}
@@ -300,7 +302,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -319,9 +321,10 @@ void RelocInfo::WipeOut() {
} else if (IsInternalReferenceEncoded(rmode_)) {
// mov sequence
// Currently used only by deserializer, no need to flush.
- Assembler::set_target_address_at(pc_, host_, NULL, SKIP_ICACHE_FLUSH);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL,
+ SKIP_ICACHE_FLUSH);
} else {
- Assembler::set_target_address_at(pc_, host_, NULL);
+ Assembler::set_target_address_at(isolate_, pc_, host_, NULL);
}
}
@@ -637,16 +640,16 @@ Address Assembler::target_constant_pool_address_at(
// has already deserialized the mov instructions etc.
// There is a FIXED_SEQUENCE assumption here
void Assembler::deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
Code* code = NULL;
- set_target_address_at(pc, code, target, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate, pc, code, target, SKIP_ICACHE_FLUSH);
} else {
Memory::Address_at(pc) = target;
}
@@ -654,8 +657,8 @@ void Assembler::deserialization_set_target_internal_reference_at(
// This code assumes the FIXED_SEQUENCE of lis/ori
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
if (FLAG_enable_embedded_constant_pool && constant_pool) {
ConstantPoolEntry::Access access;
@@ -698,7 +701,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, 5 * kInstrSize);
+ Assembler::FlushICache(isolate, p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -713,7 +716,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, 2 * kInstrSize);
+ Assembler::FlushICache(isolate, p, 2 * kInstrSize);
}
#endif
return;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index ac03ce6949..147fb59aae 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -453,7 +453,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_mov32(dst, offset);
break;
@@ -464,7 +464,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Register dst = Register::from_code((operands >> 21) & 0x1f);
Register base = Register::from_code((operands >> 16) & 0x1f);
int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
CodePatcher::DONT_FLUSH);
patcher.masm()->bitwise_add32(dst, base, offset);
break;
@@ -472,7 +472,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
case kUnboundMovLabelAddrOpcode: {
// Load the address of the label in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
kMovInstructionsNoConstantPool,
CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
@@ -480,7 +480,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
break;
}
case kUnboundJumpTableEntryOpcode: {
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
// Keep internal references relative until EmitRelocations.
patcher.masm()->dp(target_pos);
@@ -1844,7 +1844,10 @@ void Assembler::mtxer(Register src) {
}
-void Assembler::mcrfs(int bf, int bfa) {
+void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bf = cr.code();
+ int bfa = bit / CRWIDTH;
emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}
@@ -2163,6 +2166,18 @@ void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
@@ -2181,6 +2196,18 @@ void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc) {
+ emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc) {
@@ -2195,6 +2222,20 @@ void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
}
+void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bt = bit;
+ emit(EXT4 | MTFSB0 | bt * B21 | rc);
+}
+
+
+void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
+ DCHECK(static_cast<int>(bit) < 32);
+ int bt = bit;
+ emit(EXT4 | MTFSB1 | bt * B21 | rc);
+}
+
+
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
@@ -2299,6 +2340,7 @@ void Assembler::GrowBuffer(int needed) {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
@@ -2377,7 +2419,7 @@ void Assembler::EmitRelocations() {
RelocInfo::Mode rmode = it->rmode();
Address pc = buffer_ + it->position();
Code* code = NULL;
- RelocInfo rinfo(pc, rmode, it->data(), code);
+ RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);
// Fix up internal references now that they are guaranteed to be bound.
if (RelocInfo::IsInternalReference(rmode)) {
@@ -2387,7 +2429,8 @@ void Assembler::EmitRelocations() {
} else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
// mov sequence
intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
- set_target_address_at(pc, code, buffer_ + pos, SKIP_ICACHE_FLUSH);
+ set_target_address_at(isolate(), pc, code, buffer_ + pos,
+ SKIP_ICACHE_FLUSH);
}
reloc_info_writer.Write(&rinfo);
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 36843c17ab..e84d695251 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -56,8 +56,11 @@
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
(!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
-#define ABI_TOC_ADDRESSABILITY_VIA_IP \
- (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#define ABI_CALL_VIA_IP 1
+#else
+#define ABI_CALL_VIA_IP 0
+#endif
#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
#define ABI_TOC_REGISTER Register::kCode_r2
@@ -457,17 +460,18 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
- Address pc, Code* code, Address target,
+ Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -481,11 +485,12 @@ class Assembler : public AssemblerBase {
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Size of an instruction.
@@ -982,7 +987,7 @@ class Assembler : public AssemblerBase {
void mtlr(Register src);
void mtctr(Register src);
void mtxer(Register src);
- void mcrfs(int bf, int bfa);
+ void mcrfs(CRegister cr, FPSCRBit bit);
void mfcr(Register dst);
#if V8_TARGET_ARCH_PPC64
void mffprd(Register dst, DoubleRegister src);
@@ -1050,17 +1055,27 @@ class Assembler : public AssemblerBase {
RCBit rc = LeaveRC);
void fcfid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fcfids(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fctidz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void fctidu(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
+ void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
+ RCBit rc = LeaveRC);
void fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
void fneg(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
+ void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
@@ -1164,7 +1179,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index 9b3a3fb9ad..0476cd27e1 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -21,9 +21,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- r3 : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- r4 : called function
+ // -- r4 : target
+ // -- r6 : new.target
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
@@ -35,37 +34,29 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- __ push(r4);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ switch (extra_args) {
+ case BuiltinExtraArguments::kTarget:
+ __ Push(r4);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kNewTarget:
+ __ Push(r6);
+ ++num_extra_args;
+ break;
+ case BuiltinExtraArguments::kTargetAndNewTarget:
+ __ Push(r4, r6);
+ num_extra_args += 2;
+ break;
+ case BuiltinExtraArguments::kNone:
+ break;
}
// JumpToExternalReference expects r3 to contain the number of arguments
- // including the receiver and the extra arguments. But r3 is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r5);
-#endif
- __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(ne, r3, r5, r3);
- } else {
- Label skip;
- __ beq(&skip);
- __ mr(r3, r5);
- __ bind(&skip);
- }
+ // including the receiver and the extra arguments.
__ addi(r3, r3, Operand(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
@@ -75,31 +66,15 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
- // Load the native context.
-
- __ LoadP(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result,
- FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- // Load the InternalArray function from the native context.
- __ LoadP(result,
- MemOperand(result, Context::SlotOffset(
- Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+ // Load the InternalArray function from the current native context.
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the native context.
-
- __ LoadP(result,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(result,
- FieldMemOperand(result, JSGlobalObject::kNativeContextOffset));
- // Load the Array function from the native context.
- __ LoadP(
- result,
- MemOperand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ // Load the Array function from the current native context.
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
@@ -161,6 +136,110 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0.
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ Ret(1);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- r6 : new target
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r5 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r5, MemOperand(sp, r5));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadSmiLiteral(r5, Smi::FromInt(0));
+ __ Drop(1);
+ __ bind(&done);
+ }
+
+ // 3. Make sure r5 is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(r5, &done_convert);
+ __ CompareObjectType(r5, r7, r7, HEAP_NUMBER_TYPE);
+ __ beq(&done_convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6);
+ __ mr(r3, r5);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r5, r3);
+ __ Pop(r4, r6);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r4, r6);
+ __ bne(&new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r5, r4, r6); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r5);
+ }
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -212,7 +291,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ bind(&symbol_descriptive_string);
{
__ Push(r3);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -222,13 +301,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
- // -- r6 : original constructor
+ // -- r6 : new target
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r5 and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into r5 and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -245,7 +327,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done);
}
- // 2. Make sure r5 is a string.
+ // 3. Make sure r5 is a string.
{
Label convert, done_convert;
__ JumpIfSmi(r5, &convert);
@@ -264,69 +346,43 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- r5 : the first argument
- // -- r4 : constructor function
- // -- r6 : original constructor
- // -- lr : return address
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
-
- // Fall back to runtime if the original constructor and function differ.
- __ cmp(r4, r6);
- __ bne(&rt_call);
-
- __ Allocate(JSValue::kSize, r3, r6, r7, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in r3.
- __ LoadGlobalFunctionInitialMap(r4, r6, r7);
- __ StoreP(r6, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(r4, r6);
+ __ bne(&new_object);
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ LoadSmiLiteral(r6, Smi::FromInt(JSValue::kSize));
- __ Push(r4, r5, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(r4, r5);
- }
- __ b(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r5, r4, r6); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(r4, r5);
- }
- __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r5, r4, r6); // first argument, constructor, new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(r5);
}
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ __ Ret();
}
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r4 : target function (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -----------------------------------
+
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
// Push function as parameter to the runtime call.
- __ Push(r4, r4);
+ __ Push(r4, r6, r4);
__ CallRuntime(function_id, 1);
- // Restore reciever.
- __ Pop(r4);
+ // Restore target function and new target.
+ __ Pop(r4, r6);
}
@@ -364,12 +420,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r5 : allocation site or undefined
- // -- r6 : original constructor
+ // -- r6 : new target
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -382,187 +439,175 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r5, r7);
- __ SmiTag(r3);
- __ Push(r5, r3, r4, r6);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(isolate);
- __ mov(r5, Operand(debug_step_in_fp));
- __ LoadP(r5, MemOperand(r5));
- __ cmpi(r5, Operand::Zero());
- __ bne(&rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
- __ bne(&rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r6: original constructor
- __ LoadP(r5,
- FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r5, &rt_call);
- __ CompareObjectType(r5, r8, r7, MAP_TYPE);
- __ bne(&rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
- __ cmp(r4, r8);
- __ bne(&rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r4: constructor function
- // r5: initial map
- __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
- __ beq(&rt_call);
-
- if (!is_api_function) {
- Label allocate;
- MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwz(r7, bit_field3);
- __ DecodeField<Map::Counter>(r11, r7);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&allocate);
- // Decrease generous allocation count.
- __ Add(r7, r7, -(1 << Map::Counter::kShift), r0);
- __ stw(r7, bit_field3);
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ bne(&allocate);
-
- __ Push(r4, r5, r5); // r5 = initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ Pop(r4, r5);
-
- __ bind(&allocate);
- }
-
- // Now allocate the JSObject on the heap.
- // r4: constructor function
- // r5: initial map
- Label rt_call_reload_new_target;
- __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-
- __ Allocate(r6, r7, r8, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r4: constructor function
- // r5: initial map
- // r6: object size
- // r7: JSObject (not tagged)
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ mr(r8, r7);
- __ StoreP(r5, MemOperand(r8, JSObject::kMapOffset));
- __ StoreP(r9, MemOperand(r8, JSObject::kPropertiesOffset));
- __ StoreP(r9, MemOperand(r8, JSObject::kElementsOffset));
- __ addi(r8, r8, Operand(JSObject::kElementsOffset + kPointerSize));
-
- __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2));
- __ add(r9, r7, r9); // End of object.
-
- // Fill all the in-object properties with the appropriate filler.
- // r4: constructor function
- // r5: initial map
- // r6: object size
- // r7: JSObject (not tagged)
- // r8: First in-object property of JSObject (not tagged)
- // r9: End of object
- DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
- __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ lbz(
- r3,
- FieldMemOperand(
- r5, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ lbz(r5, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- __ sub(r3, r3, r5);
- if (FLAG_debug_code) {
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ add(r0, r8, r0);
- // r0: offset of first field after pre-allocated fields
- __ cmp(r0, r9);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- {
- Label done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&done);
- __ InitializeNFieldsWithFiller(r8, r3, r10);
- __ bind(&done);
+ if (!create_implicit_receiver) {
+ __ SmiTag(r7, r3, SetRC);
+ __ Push(r5, r7);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ } else {
+ __ SmiTag(r3);
+ __ Push(r5, r3);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
+ __ bne(&rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // r6: new target
+ __ LoadP(r5,
+ FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r5, &rt_call);
+ __ CompareObjectType(r5, r8, r7, MAP_TYPE);
+ __ bne(&rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r4, r8);
+ __ bne(&rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
+ __ beq(&rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+
+ __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
+
+ // Allocated the JSObject, now initialize the fields. Map is set to
+ // initial map and properties and elements are set to empty fixed array.
+ // r4: constructor function
+ // r5: initial map
+ // r6: new target
+ // r7: JSObject (not HeapObject tagged - the actual address).
+ // r10: start of next object
+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
+ __ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
+ __ StoreP(r9, MemOperand(r7, JSObject::kElementsOffset));
+ __ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ addi(r7, r7, Operand(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // r7: JSObject (tagged)
+ // r8: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
+ // Check if slack tracking is enabled.
+ __ lwz(r3, bit_field3);
+ __ DecodeField<Map::ConstructionCounter>(r11, r3);
+ // r11: slack tracking counter
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ blt(&no_inobject_slack_tracking);
+ // Decrease generous allocation count.
+ __ Add(r3, r3, -(1 << Map::ConstructionCounter::kShift), r0);
+ __ stw(r3, bit_field3);
+
+ // Allocate object with a slack.
+ __ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ sub(r3, r10, r3);
+ // r3: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(r8, r3);
+ __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(r8, r3, r9);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r8, r10, r9);
+
+ // r11: slack tracking counter value before decreasing.
+ __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
+ __ bne(&allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(r4, r6, r7, r5);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r4, r6, r7);
+
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r6: new target
+ // r7: JSObject
+ __ b(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- // To allow for truncation.
- __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
- }
+ __ InitializeFieldsWithFiller(r8, r10, r9);
- __ InitializeFieldsWithFiller(r8, r9, r10);
+ // Continue with JSObject being successfully allocated
+ // r4: constructor function
+ // r6: new target
+ // r7: JSObject
+ __ b(&allocated);
+ }
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ addi(r7, r7, Operand(kHeapObjectTag));
+ // Allocate the new receiver object using the runtime call.
+ // r4: constructor function
+ // r6: new target
+ __ bind(&rt_call);
+
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(r4, r6, r4, r6);
+ __ CallRuntime(Runtime::kNewObject);
+ __ mr(r7, r3);
+ __ Pop(r4, r6);
- // Continue with JSObject being successfully allocated
+ // Receiver for constructor call allocated.
+ // r4: constructor function
+ // r6: new target
// r7: JSObject
- __ b(&allocated);
-
- // Reload the original constructor and fall-through.
- __ bind(&rt_call_reload_new_target);
- __ LoadP(r6, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Allocate the new receiver object using the runtime call.
- // r4: constructor function
- // r6: original constructor
- __ bind(&rt_call);
- __ Push(r4, r6); // constructor function, original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mr(r7, r3);
+ __ bind(&allocated);
- // Receiver for constructor call allocated.
- // r7: JSObject
- __ bind(&allocated);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ LoadP(r3, MemOperand(sp));
+ __ SmiUntag(r3, SetRC);
- // Restore the parameters.
- __ Pop(r4, ip);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ LoadP(r6, MemOperand(sp));
-
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ Push(ip, r7, r7);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r7, r7);
+ }
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
+ // r3: number of arguments
// r4: constructor function
// r5: address of last argument (caller sp)
- // r6: number of arguments (smi-tagged)
+ // r6: new target
+ // cr0: condition indicating whether r3 is zero
// sp[0]: receiver
// sp[1]: receiver
- // sp[2]: new.target
- // sp[3]: number of arguments (smi-tagged)
+ // sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
- __ SmiUntag(r3, r6, SetRC);
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ sub(sp, sp, ip);
@@ -577,57 +622,60 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Call the function.
// r3: number of arguments
// r4: constructor function
+ // r6: new target
if (is_api_function) {
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r3: result
// sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
+ // sp[1]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r3: result
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r3, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r3, r4, r6, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ LoadP(r3, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r3: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: new.target (original constructor)
- // sp[2]: number of arguments (smi-tagged)
- __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r3: result
+ // sp[0]: receiver
+ // sp[1]: number of arguments (smi-tagged)
+ __ JumpIfSmi(r3, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r3, r4, r6, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r3, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r3: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: number of arguments (smi-tagged)
+ __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+ } else {
+ __ LoadP(r4, MemOperand(sp));
+ }
// Leave construct frame.
}
@@ -635,104 +683,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+ }
__ blr();
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- r4 : constructor function
- // -- r5 : allocation site or undefined
- // -- r6 : original constructor
- // -- lr : return address
- // -- sp[...]: constructor arguments
- // -----------------------------------
-
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
-
- __ AssertUndefinedOrAllocationSite(r5, r7);
-
- // Smi-tagged arguments count.
- __ mr(r7, r3);
- __ SmiTag(r7, SetRC);
-
- // receiver is the hole.
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-
- // allocation site, smi arguments count, new.target, receiver
- __ Push(r5, r7, r6, ip);
-
- // Set up pointer to last argument.
- __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- // r3: number of arguments
- // r4: constructor function
- // r5: address of last argument (caller sp)
- // r7: number of arguments (smi-tagged)
- // cr0: compare against zero of arguments
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- Label loop, no_args;
- __ beq(&no_args, cr0);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ mtctr(r3);
- __ bind(&loop);
- __ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r5, ip));
- __ push(r0);
- __ bdnz(&loop);
- __ bind(&no_args);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ mov(r5, Operand(debug_step_in_fp));
- __ LoadP(r5, MemOperand(r5));
- __ and_(r0, r5, r5, SetRC);
- __ beq(&skip_step_in, cr0);
-
- __ Push(r3, r4, r4);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(r3, r4);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- // r3: number of arguments
- // r4: constructor function
- ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Restore context from the frame.
- // r3: result
- // sp[0]: number of arguments (smi-tagged)
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- // Get arguments count, skipping over new.target.
- __ LoadP(r4, MemOperand(sp, kPointerSize));
- // Leave construct frame.
- }
-
- __ SmiToPtrArrayOffset(r4, r4);
- __ add(sp, sp, r4);
- __ addi(sp, sp, Operand(kPointerSize));
- __ blr();
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -761,7 +737,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bgt(&okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -863,6 +839,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o r4: the JS function object being called.
+// o r6: the new target
// o cp: our context
// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
@@ -880,6 +857,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r4);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ push(r6);
+
+ // Push zero for bytecode array offset.
+ __ li(r3, Operand::Zero());
+ __ push(r3);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
@@ -908,7 +890,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
__ cmpl(r6, r0);
__ bge(&ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -938,7 +920,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(sp, r0);
__ bge(&ok);
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -946,9 +928,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ subi(
- kInterpreterRegisterFileRegister, fp,
- Operand(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ addi(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -965,6 +946,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
+ __ bkpt(0); // Does not return here.
}
@@ -1025,13 +1007,14 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (not including receiver)
- // -- r6 : original constructor
+ // -- r6 : new target
// -- r4 : constructor to call
// -- r5 : address of the first argument
// -----------------------------------
// Push a slot for the receiver to be constructed.
- __ push(r3);
+ __ li(r0, Operand::Zero());
+ __ push(r0);
// Push the arguments (skip if none).
Label skip;
@@ -1041,40 +1024,105 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ bind(&skip);
// Call the constructor with r3, r4, and r6 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Save accumulator register and pass the deoptimization type to
+ // the runtime system.
+ __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
+ __ Push(kInterpreterAccumulatorRegister, r4);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts).
+ __ Drop(1);
+
+ // Initialize register file register and dispatch table register.
+ __ addi(kInterpreterRegisterFileRegister, fp,
+ Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ LoadP(kContextRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ LoadP(r4,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ LoadP(kInterpreterBytecodeOffsetRegister,
+ MemOperand(
+ kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
+ __ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push function as parameter to the runtime call.
- __ Push(r4, r4);
- // Whether to compile in a background thread.
- __ LoadRoot(
- r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ push(r0);
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(r4);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -1093,15 +1141,16 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
+ // r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
- __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 2);
- __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
__ Jump(ip);
@@ -1134,16 +1183,17 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// the runtime:
// r3 - contains return address (beginning of patch sequence)
// r4 - isolate
+ // r6 - new target
// lr - return address
FrameScope scope(masm, StackFrame::MANUAL);
__ mflr(r0);
- __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPush(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ PrepareCallCFunction(2, 0, r5);
__ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
__ CallCFunction(
ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
2);
- __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+ __ MultiPop(r0.bit() | r3.bit() | r4.bit() | r6.bit() | fp.bit());
__ mtlr(r0);
__ mr(ip, r3);
@@ -1177,7 +1227,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
@@ -1203,7 +1253,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ push(r3);
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
}
// Get the full codegen state from the stack and untag it -> r9.
@@ -1243,6 +1293,111 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// Clobbers registers {r7, r8, r9, r10}.
+void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Label* receiver_check_failed) {
+ Register signature = r7;
+ Register map = r8;
+ Register constructor = r9;
+ Register scratch = r10;
+
+ // If there is no signature, return the holder.
+ __ LoadP(signature, FieldMemOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ Label receiver_check_passed;
+ __ JumpIfRoot(signature, Heap::kUndefinedValueRootIndex,
+ &receiver_check_passed);
+
+ // Walk the prototype chain.
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, scratch, scratch);
+ __ cmpi(scratch, Operand(JS_FUNCTION_TYPE));
+ Label next_prototype;
+ __ bne(&next_prototype);
+ Register type = constructor;
+ __ LoadP(type,
+ FieldMemOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(type,
+ FieldMemOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(signature, type);
+ __ beq(&receiver_check_passed);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype);
+ __ CompareObjectType(type, scratch, scratch, FUNCTION_TEMPLATE_INFO_TYPE);
+ __ bne(&next_prototype);
+
+ // Otherwise load the parent function template and iterate.
+ __ LoadP(type,
+ FieldMemOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ b(&function_template_loop);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
+ __ DecodeField<Map::IsHiddenPrototype>(scratch, SetRC);
+ __ beq(receiver_check_failed, cr0);
+ // Iterate.
+ __ b(&prototype_loop_start);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments excluding receiver
+ // -- r4 : callee
+ // -- lr : return address
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
+
+
+ // Load the FunctionTemplateInfo.
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ ShiftLeftImm(r11, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r5, MemOperand(sp, r11));
+ CompatibleReceiverCheck(masm, r5, r6, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ LoadP(r7, FieldMemOperand(r6, FunctionTemplateInfo::kCallCodeOffset));
+ __ LoadP(r7, FieldMemOperand(r7, CallHandlerInfo::kFastHandlerOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+
+ // Compatible receiver check failed: throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ // Drop the arguments (including the receiver);
+ __ addi(r11, r11, Operand(kPointerSize));
+ __ add(sp, sp, r11);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1250,7 +1405,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r3);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
// If the code object is null, just return to the unoptimized code.
@@ -1298,7 +1453,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ bge(&ok);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
@@ -1309,7 +1464,127 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : receiver
+ // -----------------------------------
+
+ // 1. Pop receiver into r3 and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ Pop(r3);
+ __ JumpIfSmi(r3, &receiver_not_date);
+ __ CompareObjectType(r3, r4, r5, JS_DATE_TYPE);
+ __ bne(&receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ LoadP(r3, FieldMemOperand(r3, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(r4, Operand(ExternalReference::date_cache_stamp(masm->isolate())));
+ __ LoadP(r4, MemOperand(r4));
+ __ LoadP(ip, FieldMemOperand(r3, JSDate::kCacheStampOffset));
+ __ cmp(r4, ip);
+ __ bne(&stamp_mismatch);
+ __ LoadP(r3, FieldMemOperand(
+ r3, JSDate::kValueOffset + field_index * kPointerSize));
+ __ Ret();
+ __ bind(&stamp_mismatch);
+ }
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, r4);
+ __ LoadSmiLiteral(r4, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ Ret();
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ __ TailCallRuntime(Runtime::kThrowNotDateError);
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : argArray
+ // -- sp[4] : thisArg
+ // -- sp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into r4, argArray into r3 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r6;
+ Register scratch = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ mr(scratch, r3);
+ __ LoadP(r4, MemOperand(new_sp, 0)); // receiver
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 1 * -kPointerSize)); // thisArg
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argArray
+ __ bind(&skip);
+ __ mr(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- r3 : argArray
+ // -- r4 : receiver
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(r4, &receiver_not_callable);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&receiver_not_callable, cr0);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(r3, Heap::kNullValueRootIndex, &no_arguments);
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &no_arguments);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ li(r3, Operand::Zero());
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r3: actual number of arguments
{
@@ -1354,185 +1629,144 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ LoadP(key, MemOperand(fp, indexOffset));
- __ b(&entry);
- __ bind(&loop);
- __ LoadP(receiver, MemOperand(fp, argumentsOffset));
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- __ LoadP(vector, MemOperand(fp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
-
- // Push the nth argument.
- __ push(r3);
-
- // Update the index on the stack and in register key.
- __ LoadP(key, MemOperand(fp, indexOffset));
- __ AddSmiLiteral(key, key, Smi::FromInt(1), r0);
- __ StoreP(key, MemOperand(fp, indexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ LoadP(r0, MemOperand(fp, limitOffset));
- __ cmp(key, r0);
- __ bne(&loop);
-
- // On exit, the pushed arguments count is in r3, untagged
- __ SmiUntag(r3, key);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : argumentsList
+ // -- sp[4] : thisArgument
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- const int kFunctionOffset = kReceiverOffset + kPointerSize;
- const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(r4);
-
- __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ LoadP(r4, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ Push(r3, r4);
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
-
- Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r6;
+ Register scratch = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mr(scratch, r4);
+ __ mr(r3, r4);
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ beq(&skip);
+ __ LoadP(scratch, MemOperand(new_sp, 2 * -kPointerSize)); // thisArgument
+ __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 3 * -kPointerSize)); // argumentsList
+ __ bind(&skip);
+ __ mr(sp, new_sp);
+ __ StoreP(scratch, MemOperand(sp, 0));
+ }
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ li(r4, Operand::Zero());
- __ LoadP(r5, MemOperand(fp, kReceiverOffset));
- __ Push(r3, r4, r5); // limit, initial index and receiver.
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r4 : target
+ // -- sp[0] : thisArgument
+ // -----------------------------------
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(r4, &target_not_callable);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&target_not_callable, cr0);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Tear down the internal frame and remove function, receiver and args.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ addi(sp, sp, Operand(kStackSize * kPointerSize));
- __ blr();
}
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argc
+ // -- sp[0] : new.target (optional)
+ // -- sp[4] : argumentsList
+ // -- sp[8] : target
+ // -- sp[12] : receiver
+ // -----------------------------------
+ // 1. Load target into r4 (if present), argumentsList into r3 (if present),
+ // new.target into r6 (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
- const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4,
- FieldMemOperand(r4, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(r4);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bne(&validate_arguments);
- __ LoadP(r3, MemOperand(fp, kFunctionOffset));
- __ StoreP(r3, MemOperand(fp, kNewTargetOffset));
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target
- __ push(r3);
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
-
- // Push current limit and index.
- const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
- const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ li(r4, Operand::Zero());
- __ Push(r3, r4); // limit and initial index.
- // Push the constructor function as callee
- __ LoadP(r3, MemOperand(fp, kFunctionOffset));
- __ push(r3);
-
- // Copy all arguments from the array to the stack.
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ LoadP(r7, MemOperand(fp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label skip;
+ Register arg_size = r5;
+ Register new_sp = r7;
+ __ ShiftLeftImm(arg_size, r3, Operand(kPointerSizeLog2));
+ __ add(new_sp, sp, arg_size);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ mr(r3, r4);
+ __ mr(r6, r4);
+ __ StoreP(r4, MemOperand(new_sp, 0)); // receiver (undefined)
+ __ cmpi(arg_size, Operand(kPointerSize));
+ __ blt(&skip);
+ __ LoadP(r4, MemOperand(new_sp, 1 * -kPointerSize)); // target
+ __ mr(r6, r4); // new.target defaults to target
+ __ beq(&skip);
+ __ LoadP(r3, MemOperand(new_sp, 2 * -kPointerSize)); // argumentsList
+ __ cmpi(arg_size, Operand(2 * kPointerSize));
+ __ beq(&skip);
+ __ LoadP(r6, MemOperand(new_sp, 3 * -kPointerSize)); // new.target
+ __ bind(&skip);
+ __ mr(sp, new_sp);
}
- __ addi(sp, sp, Operand(kStackSize * kPointerSize));
- __ blr();
-}
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r6 : new.target
+ // -- r4 : target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(r4, &target_not_constructor);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsConstructor, r0);
+ __ beq(&target_not_constructor, cr0);
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(r6, &new_target_not_constructor);
+ __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsConstructor, r0);
+ __ beq(&new_target_not_constructor, cr0);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ StoreP(r4, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ StoreP(r6, MemOperand(sp, 0));
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1542,6 +1776,7 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
// -- r3 : actual number of arguments
// -- r4 : function (passed through to callee)
// -- r5 : expected number of arguments
+ // -- r6 : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1588,6 +1823,131 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argumentsList
+ // -- r4 : target
+ // -- r6 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(r3, &create_runtime);
+
+ // Load the map of argumentsList into r5.
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+
+ // Load native context into r7.
+ __ LoadP(r7, NativeContextMemOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ LoadP(ip, ContextMemOperand(r7, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r5);
+ __ beq(&create_arguments);
+ __ LoadP(ip, ContextMemOperand(r7, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ cmp(ip, r5);
+ __ beq(&create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CompareInstanceType(r5, ip, JS_ARRAY_TYPE);
+ __ beq(&create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r6, r3);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(r4, r6);
+ __ LoadP(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ SmiUntag(r5);
+ }
+ __ b(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ LoadP(r5, FieldMemOperand(
+ r3, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
+ __ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
+ __ cmp(r5, ip);
+ __ bne(&create_runtime);
+ __ SmiUntag(r5);
+ __ mr(r3, r7);
+ __ b(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r5);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmpi(r5, Operand(FAST_ELEMENTS));
+ __ bgt(&create_runtime);
+ __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ beq(&create_runtime);
+ __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ SmiUntag(r5);
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(ip, Heap::kRealStackLimitRootIndex);
+ // Make ip the space we have left. The stack might already be overflowed
+ // here which will cause ip to become negative.
+ __ sub(ip, sp, ip);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+ __ cmp(ip, r0); // Signed comparison.
+ __ bgt(&done);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- r4 : target
+ // -- r3 : args (a FixedArray built from argumentsList)
+ // -- r5 : len (number of elements to push from args)
+ // -- r6 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label loop, no_args;
+ __ cmpi(r5, Operand::Zero());
+ __ beq(&no_args);
+ __ addi(r3, r3,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ __ mtctr(r5);
+ __ bind(&loop);
+ __ LoadPU(r0, MemOperand(r3, kPointerSize));
+ __ push(r0);
+ __ bdnz(&loop);
+ __ bind(&no_args);
+ __ mr(r3, r5);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET, eq);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1679,17 +2039,128 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
#if !V8_TARGET_ARCH_PPC64
__ SmiUntag(r5);
#endif
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
ParameterCount actual(r3);
ParameterCount expected(r5);
- __ InvokeCode(r6, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+ __ InvokeFunctionCode(r4, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : target (checked to be a JSBoundFunction)
+ // -- r6 : new.target (only in case of [[Construct]])
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into r5 and length of that into r7.
+ Label no_bound_arguments;
+ __ LoadP(r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset));
+ __ LoadP(r7, FieldMemOperand(r5, FixedArray::kLengthOffset));
+ __ SmiUntag(r7, SetRC);
+ __ beq(&no_bound_arguments, cr0);
+ {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : target (checked to be a JSBoundFunction)
+ // -- r5 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- r6 : new.target (only in case of [[Construct]])
+ // -- r7 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ mr(r9, sp); // preserve previous stack pointer
+ __ ShiftLeftImm(r10, r7, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, r10);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
+ __ bgt(&done); // Signed comparison.
+ // Restore the stack pointer.
+ __ mr(sp, r9);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ }
+ __ bind(&done);
+ }
+
+ // Relocate arguments down the stack.
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r9 : the previous stack pointer
+ // -- r10: the size of the [[BoundArguments]]
+ {
+ Label skip, loop;
+ __ li(r8, Operand::Zero());
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&skip);
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ LoadPX(r0, MemOperand(r9, r8));
+ __ StorePX(r0, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ bdnz(&loop);
+ __ bind(&skip);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r5, r5, r10);
+ __ mtctr(r7);
+ __ bind(&loop);
+ __ LoadPU(r0, MemOperand(r5, -kPointerSize));
+ __ StorePX(r0, MemOperand(sp, r8));
+ __ addi(r8, r8, Operand(kPointerSize));
+ __ bdnz(&loop);
+ __ add(r3, r3, r7);
+ }
}
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(r4);
+
+ // Patch the receiver to [[BoundThis]].
+ __ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(ip, MemOperand(sp, r0));
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ LoadP(r4,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
+ masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
}
@@ -1706,14 +2177,20 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
__ Jump(masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET, eq);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(r4);
- __ b(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ Push(r4);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ addi(r3, r3, Operand(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1726,7 +2203,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1736,7 +2213,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1746,10 +2223,9 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (checked to be a JSFunction)
- // -- r6 : the original constructor (checked to be a JSFunction)
+ // -- r6 : the new target (checked to be a constructor)
// -----------------------------------
__ AssertFunction(r4);
- __ AssertFunction(r6);
// Calling convention for function specific ConstructStubs require
// r5 to contain either an AllocationSite or undefined.
@@ -1765,17 +2241,51 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSBoundFunction)
+ // -- r6 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertBoundFunction(r4);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ Label skip;
+ __ cmp(r4, r6);
+ __ bne(&skip);
+ __ LoadP(r6,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip);
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ LoadP(r4,
+ FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ip, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ LoadP(ip, MemOperand(ip));
+ __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
- // -- r4 : the constructor to call (checked to be a JSFunctionProxy)
- // -- r6 : the original constructor (either the same as the constructor or
+ // -- r4 : the constructor to call (checked to be a JSProxy)
+ // -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ Push(r4, r6);
+ // Include the pushed new_target, constructor and the receiver.
+ __ addi(r3, r3, Operand(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1784,23 +2294,32 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the constructor to call (can be any Object)
- // -- r6 : the original constructor (either the same as the constructor or
+ // -- r6 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(r4, &non_constructor);
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Construct]] internal method.
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::kIsConstructor, r0);
__ beq(&non_constructor, cr0);
- // Dispatch based on instance type.
- __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET, eq);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ cmpi(r8, Operand(JS_PROXY_TYPE));
__ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
eq);
@@ -1810,7 +2329,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1818,11 +2337,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1831,11 +2347,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r3 : actual number of arguments
// -- r4 : function (passed through to callee)
// -- r5 : expected number of arguments
+ // -- r6 : new target (passed through to callee)
// -----------------------------------
- Label stack_overflow;
- ArgumentAdaptorStackCheck(masm, &stack_overflow);
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
@@ -1847,31 +2362,34 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: actual >= expected
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
- // Calculate copy start address into r3 and copy end address into r6.
+ // Calculate copy start address into r3 and copy end address into r7.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
- __ sub(r6, r3, r6);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, r3, r7);
// Copy the arguments (including the receiver) to the new stack frame.
// r3: copy start address
// r4: function
// r5: expected number of arguments
- // r6: copy end address
+ // r6: new target (passed through to callee)
+ // r7: copy end address
// ip: code entry to call
Label copy;
__ bind(&copy);
__ LoadP(r0, MemOperand(r3, 0));
__ push(r0);
- __ cmp(r3, r6); // Compare before moving to next argument.
+ __ cmp(r3, r7); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kPointerSize));
__ bne(&copy);
@@ -1902,16 +2420,18 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into r0 and copy end address is fp.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ SmiToPtrArrayOffset(r3, r3);
__ add(r3, r3, fp);
@@ -1920,6 +2440,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r3: copy start address
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
Label copy;
__ bind(&copy);
@@ -1933,18 +2454,19 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill the remaining expected arguments with undefined.
// r4: function
// r5: expected number of arguments
+ // r6: new target (passed through to callee)
// ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
- __ sub(r6, fp, r6);
+ __ ShiftLeftImm(r7, r5, Operand(kPointerSizeLog2));
+ __ sub(r7, fp, r7);
// Adjust for frame.
- __ subi(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ subi(r7, r7, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(r0);
- __ cmp(sp, r6);
+ __ cmp(sp, r7);
__ bne(&fill);
}
@@ -1953,6 +2475,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mr(r3, r5);
// r3 : expected number of arguments
// r4 : function (passed through to callee)
+ // r6 : new target (passed through to callee)
__ CallJSEntry(ip);
// Store offset of return address for deoptimizer.
@@ -1972,8 +2495,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bkpt(0);
}
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 92501a4a23..26fbe98cf9 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -260,7 +260,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
- __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
__ bge(slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
@@ -281,7 +281,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ beq(&heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
__ bge(slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpi(r7, Operand(SYMBOL_TYPE));
@@ -456,11 +456,11 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
// Get the type of the first operand into r5 and compare it with
- // FIRST_SPEC_OBJECT_TYPE.
- __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
+ // FIRST_JS_RECEIVER_TYPE.
+ __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
__ blt(&first_non_object);
// Return non-zero (r3 is not zero)
@@ -473,7 +473,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
__ cmpi(r5, Operand(ODDBALL_TYPE));
__ beq(&return_not_equal);
- __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
__ bge(&return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -536,9 +536,9 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ Ret();
__ bind(&object_test);
- __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
__ blt(not_both_strings);
- __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
__ blt(not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -708,8 +708,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
if (cc == eq) {
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
int ncr; // NaN compare result
if (cc == lt || cc == le) {
@@ -723,9 +722,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -948,7 +946,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -1093,16 +1091,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Call C built-in.
__ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+ Register target = r15;
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// Native AIX/PPC64 Linux use a function descriptor.
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
__ LoadP(ip, MemOperand(r15, 0)); // Instruction address
- Register target = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ target = ip;
+#elif ABI_CALL_VIA_IP
__ Move(ip, r15);
- Register target = ip;
-#else
- Register target = r15;
+ target = ip;
#endif
// To let the GC traverse the return address of the exit frames, we need to
@@ -1424,15 +1421,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
__ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
__ bne(&slow_case, cr0);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ LoadP(shared_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(scratch, FieldMemOperand(shared_info,
- SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(scratch, SharedFunctionInfo::kBoundBit, r0);
- __ bne(&slow_case, cr0);
-
// Get the "prototype" (or initial map) of the {function}.
__ LoadP(function_prototype,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1457,29 +1445,47 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
+ Register const object_instance_type = function_map;
+ Register const map_bit_field = function_map;
Register const null = scratch;
- Label done, loop;
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ Register const result = r3;
+
+ Label done, loop, fast_runtime_fallback;
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
__ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ LoadP(object_prototype,
- FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
+ __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
+ __ bne(&fast_runtime_fallback, cr0);
+ // Check if the current object is a Proxy.
+ __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+ __ beq(&fast_runtime_fallback);
+
+ __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ beq(&done);
- __ cmp(object_prototype, null);
- __ LoadP(object_map,
- FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ cmp(object, null);
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
__ bne(&loop);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime
+ __ bind(&fast_runtime_fallback);
+ __ Push(object, function_prototype);
+ // Invalidate the instanceof cache.
+ __ LoadSmiLiteral(scratch, Smi::FromInt(0));
+ __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ Push(object, function);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -1584,7 +1590,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r4);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -1612,7 +1618,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1621,8 +1627,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r5 : number of parameters (tagged)
// r6 : parameters pointer
// Registers used over whole function:
- // r8 : arguments count (tagged)
- // r9 : mapped parameter count (tagged)
+ // r8 : arguments count (tagged)
+ // r9 : mapped parameter count (tagged)
DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
@@ -1693,7 +1699,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r7, r11, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
@@ -1703,9 +1709,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
- __ LoadP(r7,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
+ __ LoadP(r7, NativeContextMemOperand());
__ cmpi(r9, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadP(r11, MemOperand(r7, kNormalOffset));
@@ -1856,7 +1860,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r8 = argument count (tagged)
__ bind(&runtime);
__ Push(r4, r6, r8);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -1875,7 +1879,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1920,12 +1924,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
- __ LoadP(r7,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ LoadP(r7, FieldMemOperand(r7, JSGlobalObject::kNativeContextOffset));
- __ LoadP(
- r7,
- MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
__ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
@@ -1972,7 +1971,30 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+ // r7 : rest parameter index (tagged)
+
+ Label runtime;
+ __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r0, r5);
+ __ add(r6, r8, r0);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ Push(r5, r6, r7);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -1981,7 +2003,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2276,7 +2298,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ beq(&runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure and exception return null.
@@ -2365,7 +2387,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2408,35 +2430,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r3 : number of arguments to the construct function
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
- // r7 : original constructor (for IsSuperConstructorCall)
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
- if (is_super) {
- __ Push(r6, r5, r4, r3, r7);
- } else {
- __ Push(r6, r5, r4, r3);
- }
+ __ Push(r6, r5, r4, r3);
__ CallStub(stub);
- if (is_super) {
- __ Pop(r6, r5, r4, r3, r7);
- } else {
- __ Pop(r6, r5, r4, r3);
- }
+ __ Pop(r6, r5, r4, r3);
__ SmiUntag(r3);
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -2444,7 +2456,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
- // r7 : original constructor (for IsSuperConstructorCall)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
@@ -2486,7 +2497,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bne(&miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
__ b(&done);
@@ -2510,7 +2521,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&not_array_function);
@@ -2518,13 +2529,13 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ b(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -2534,7 +2545,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : original constructor (for IsSuperConstructorCall)
Label non_function;
// Check that the function is not a smi.
@@ -2543,35 +2553,29 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
__ bne(&non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
-
- __ SmiToPtrArrayOffset(r8, r6);
- __ add(r8, r5, r8);
- // Put the AllocationSite from the feedback vector into r5, or undefined.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
- __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ isel(eq, r5, r5, r8);
- } else {
- Label feedback_register_initialized;
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ GenerateRecordCallTarget(masm);
- __ AssertUndefinedOrAllocationSite(r5, r8);
- }
-
- // Pass function as original constructor.
- if (IsSuperConstructorCall()) {
- __ mr(r6, r7);
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
+ // Put the AllocationSite from the feedback vector into r5, or undefined.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+ __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
} else {
- __ mr(r6, r4);
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
}
+ __ AssertUndefinedOrAllocationSite(r5, r8);
+
+ // Pass function as new target.
+ __ mr(r6, r4);
+
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -2590,7 +2594,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r6 - slot id
// r5 - vector
// r7 - allocation site (loaded from vector[slot])
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(miss);
@@ -2615,11 +2619,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r4 - function
// r6 - slot id (Smi)
// r5 - vector
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2656,9 +2656,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
__ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
- __ bind(&call);
+ __ bind(&call_function);
__ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2692,14 +2693,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
- // We have to update statistics for runtime profiling.
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
- __ LoadP(r7, FieldMemOperand(r5, generic_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
- __ b(&call);
+
+ __ bind(&call);
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2712,14 +2710,16 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ cmp(r4, r7);
__ beq(&miss);
- // Update stats.
- __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
- __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
- __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+ // Make sure the function belongs to the same native context.
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
+ __ LoadP(ip, NativeContextMemOperand());
+ __ cmp(r7, ip);
+ __ bne(&miss);
// Initialize the call counter.
__ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
@@ -2737,7 +2737,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(r4);
}
- __ b(&call);
+ __ b(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2755,7 +2755,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r5, r6);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
@@ -2814,11 +2814,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_, index_);
}
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2845,7 +2845,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
call_helper.BeforeCall(masm);
__ SmiTag(index_);
__ Push(object_, index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -2885,7 +2885,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r3);
call_helper.AfterCall(masm);
__ b(&exit_);
@@ -3139,7 +3139,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// r3: original string
@@ -3179,7 +3179,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ blr();
__ bind(&slow_string);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -3190,7 +3190,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3212,7 +3212,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ bind(&not_smi);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3242,7 +3242,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3397,7 +3397,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// tagged as a small integer.
__ bind(&runtime);
__ Push(r4, r3);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3439,7 +3439,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
@@ -3723,9 +3723,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&runtime);
__ Push(left, right);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3733,16 +3733,17 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ and_(r5, r4, r3);
__ JumpIfSmi(r5, &miss);
- __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
- __ bne(&miss);
- __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE);
- __ bne(&miss);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
+ __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(&miss);
DCHECK(GetCondition() == eq);
__ sub(r3, r3, r4);
@@ -3753,7 +3754,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ and_(r5, r4, r3);
@@ -3770,7 +3771,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(r3, r3, r4);
__ Ret();
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
@@ -3778,7 +3779,7 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ LoadSmiLiteral(r5, Smi::FromInt(LESS));
}
__ Push(r4, r3, r5);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3794,7 +3795,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));
__ push(r0);
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -3825,7 +3826,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
__ LoadP(ip, MemOperand(target, 0)); // Instruction address
#else
// ip needs to be set for DirectCEentryStub::Generate, and also
- // for ABI_TOC_ADDRESSABILITY_VIA_IP.
+ // for ABI_CALL_VIA_IP.
__ Move(ip, target);
#endif
@@ -4242,11 +4243,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- regs_.address(), // Scratch.
- &need_incremental_pop_scratch);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ regs_.address(), // Scratch.
+ &need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
@@ -4266,75 +4267,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : element value to store
- // -- r6 : element index as smi
- // -- sp[0] : array literal index in function as smi
- // -- sp[4] : array literal
- // clobbers r3, r5, r7
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ LoadP(r7, MemOperand(sp, 0 * kPointerSize));
- __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset));
-
- __ CheckFastElements(r5, r8, &double_elements);
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(r3, &smi_element);
- __ CheckFastSmiElements(r5, r8, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- // call.
- __ Push(r4, r6, r3);
- __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset));
- __ Push(r8, r7);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r8, r9);
-#if V8_TARGET_ARCH_PPC64
- // add due to offset alignment requirements of StorePU
- __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ StoreP(r3, MemOperand(r9));
-#else
- __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag));
-#endif
- // Update the write barrier for the array store.
- __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r8, r9);
- __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
- __ Ret();
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements);
- __ Ret();
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4865,7 +4797,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Function descriptor
__ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
__ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+#elif ABI_CALL_VIA_IP
// ip set above, so nothing to do.
#endif
@@ -5074,7 +5006,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r3 : argc (only if argument_count() == ANY)
// -- r4 : constructor
// -- r5 : AllocationSite or undefined
- // -- r6 : original constructor
+ // -- r6 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -5095,6 +5027,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r5, r7);
}
+ // Enter the context of the Array function.
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
Label subclassing;
__ cmp(r6, r4);
__ bne(&subclassing);
@@ -5114,25 +5049,25 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- __ push(r4);
- __ push(r6);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ addi(r3, r3, Operand(2));
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r0));
+ __ addi(r3, r3, Operand(3));
break;
case NONE:
- __ li(r3, Operand(2));
+ __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
+ __ li(r3, Operand(3));
break;
case ONE:
- __ li(r3, Operand(3));
+ __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ li(r3, Operand(4));
break;
}
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ Push(r6, r5);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -5216,14 +5151,14 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); ++i) {
- __ LoadP(result, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = result;
}
// Load the PropertyCell value at the specified slot.
__ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
__ add(result, context, r0);
- __ LoadP(result, ContextOperand(result));
+ __ LoadP(result, ContextMemOperand(result));
__ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// If the result is not the_hole, return. Otherwise, handle in the runtime.
@@ -5233,7 +5168,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Fallback to runtime.
__ SmiTag(slot);
__ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5259,14 +5194,14 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
// Go up the context chain to the script context.
for (int i = 0; i < depth(); i++) {
- __ LoadP(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
context = context_temp;
}
// Load the PropertyCell at the specified slot.
__ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
__ add(cell, context, r0);
- __ LoadP(cell, ContextOperand(cell));
+ __ LoadP(cell, ContextMemOperand(cell));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
__ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
@@ -5361,8 +5296,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(slot, value);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5498,7 +5432,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index ef4bdce5d1..d394171d89 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -127,8 +127,8 @@ class RecordWriteStub : public PlatformCodeStub {
}
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL, stub->instruction_start(),
- stub->instruction_size());
+ MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
+ stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index b313d11bb3..2bf8b4ee83 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -18,23 +18,23 @@ namespace internal {
#if defined(USE_SIMULATOR)
-byte* fast_exp_ppc_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())
+byte* fast_exp_ppc_machine_code = nullptr;
+double fast_exp_simulator(double x, Isolate* isolate) {
+ return Simulator::current(isolate)
->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
}
#endif
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
{
DoubleRegister input = d1;
@@ -62,11 +62,11 @@ UnaryMathFunction CreateExpFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_ppc_machine_code = buffer;
return &fast_exp_simulator;
@@ -74,16 +74,17 @@ UnaryMathFunction CreateExpFunction() {
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
- return &std::sqrt;
+ return nullptr;
#else
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// Called from C
__ function_descriptor();
@@ -99,9 +100,9 @@ UnaryMathFunction CreateSqrtFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
@@ -607,15 +608,17 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// Since patcher is a large object, allocate it dynamically when needed,
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(new CodePatcher(
- young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ base::SmartPointer<CodePatcher> patcher(
+ new CodePatcher(isolate, young_sequence_.start(),
+ young_sequence_.length() / Assembler::kInstrSize,
+ CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
patcher->masm()->PushFixedFrame(r4);
patcher->masm()->addi(fp, sp,
@@ -664,7 +667,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ CodePatcher patcher(isolate, sequence,
+ young_length / Assembler::kInstrSize);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// Don't use Call -- we need to preserve ip and lr.
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
index 7f19beea7d..c3cd9b39a0 100644
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ b/deps/v8/src/ppc/codegen-ppc.h
@@ -5,7 +5,7 @@
#ifndef V8_PPC_CODEGEN_PPC_H_
#define V8_PPC_CODEGEN_PPC_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 87a82719be..4c404ae911 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -275,24 +275,29 @@ enum OpcodeExt4 {
FMADD = 29 << 1, // Floating Multiply-Add
// Bits 10-1
- FCMPU = 0 << 1, // Floating Compare Unordered
- FRSP = 12 << 1, // Floating-Point Rounding
- FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
- FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
- FNEG = 40 << 1, // Floating Negate
- MCRFS = 64 << 1, // Move to Condition Register from FPSCR
- FMR = 72 << 1, // Floating Move Register
- MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
- FABS = 264 << 1, // Floating Absolute Value
- FRIN = 392 << 1, // Floating Round to Integer Nearest
- FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
- FRIP = 456 << 1, // Floating Round to Integer Plus
- FRIM = 488 << 1, // Floating Round to Integer Minus
- MFFS = 583 << 1, // move from FPSCR x-form
- MTFSF = 711 << 1, // move to FPSCR fields XFL-form
- FCFID = 846 << 1, // Floating convert from integer doubleword
- FCTID = 814 << 1, // Floating convert from integer doubleword
- FCTIDZ = 815 << 1 // Floating convert from integer doubleword
+ FCMPU = 0 << 1, // Floating Compare Unordered
+ FRSP = 12 << 1, // Floating-Point Rounding
+ FCTIW = 14 << 1, // Floating Convert to Integer Word X-form
+ FCTIWZ = 15 << 1, // Floating Convert to Integer Word with Round to Zero
+ MTFSB1 = 38 << 1, // Move to FPSCR Bit 1
+ FNEG = 40 << 1, // Floating Negate
+ MCRFS = 64 << 1, // Move to Condition Register from FPSCR
+ MTFSB0 = 70 << 1, // Move to FPSCR Bit 0
+ FMR = 72 << 1, // Floating Move Register
+ MTFSFI = 134 << 1, // Move to FPSCR Field Immediate
+ FABS = 264 << 1, // Floating Absolute Value
+ FRIN = 392 << 1, // Floating Round to Integer Nearest
+ FRIZ = 424 << 1, // Floating Round to Integer Toward Zero
+ FRIP = 456 << 1, // Floating Round to Integer Plus
+ FRIM = 488 << 1, // Floating Round to Integer Minus
+ MFFS = 583 << 1, // move from FPSCR x-form
+ MTFSF = 711 << 1, // move to FPSCR fields XFL-form
+ FCTID = 814 << 1, // Floating convert to integer doubleword
+ FCTIDZ = 815 << 1, // ^^^ with round toward zero
+ FCFID = 846 << 1, // Floating convert from integer doubleword
+ FCTIDU = 942 << 1, // Floating convert to integer doubleword unsigned
+ FCTIDUZ = 943 << 1, // ^^^ with round toward zero
+ FCFIDU = 974 << 1 // Floating convert from integer doubleword unsigned
};
enum OpcodeExt5 {
@@ -399,6 +404,13 @@ enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
#define CRWIDTH 4
+// These are the documented bit positions biased down by 32
+enum FPSCRBit {
+ VXSOFT = 21, // 53: Software-Defined Condition
+ VXSQRT = 22, // 54: Invalid Square Root
+ VXCVI = 23 // 55: Invalid Integer Convert
+};
+
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 831ccf6cdc..4232342b93 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -45,14 +45,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->bkpt(0);
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->bkpt(0);
}
}
@@ -75,7 +76,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
- CodePatcher patcher(call_address, call_size_in_words);
+ CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 83fbc7e29c..d9450f8a42 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -889,6 +889,10 @@ void Decoder::DecodeExt3(Instruction* instr) {
Format(instr, "fcfids'. 'Dt, 'Db");
break;
}
+ case FCFIDU: {
+ Format(instr, "fcfidus'.'Dt, 'Db");
+ break;
+ }
default: {
Unknown(instr); // not used by V8
}
@@ -945,6 +949,10 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fcfid'. 'Dt, 'Db");
break;
}
+ case FCFIDU: {
+ Format(instr, "fcfidu'. 'Dt, 'Db");
+ break;
+ }
case FCTID: {
Format(instr, "fctid 'Dt, 'Db");
break;
@@ -953,6 +961,14 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fctidz 'Dt, 'Db");
break;
}
+ case FCTIDU: {
+ Format(instr, "fctidu 'Dt, 'Db");
+ break;
+ }
+ case FCTIDUZ: {
+ Format(instr, "fctiduz 'Dt, 'Db");
+ break;
+ }
case FCTIW: {
Format(instr, "fctiw'. 'Dt, 'Db");
break;
@@ -1001,6 +1017,18 @@ void Decoder::DecodeExt4(Instruction* instr) {
Format(instr, "fneg'. 'Dt, 'Db");
break;
}
+ case MCRFS: {
+ Format(instr, "mcrfs ?,?");
+ break;
+ }
+ case MTFSB0: {
+ Format(instr, "mtfsb0'. ?");
+ break;
+ }
+ case MTFSB1: {
+ Format(instr, "mtfsb1'. ?");
+ break;
+ }
default: {
Unknown(instr); // not used by V8
}
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index b54845d4b3..b649f71ea3 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
+const Register RestParamAccessDescriptor::parameter_count() { return r5; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return r6; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return r7; }
+
+
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -125,6 +130,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r6, r5, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r6, r5, r4};
@@ -187,7 +199,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
// r4 : the function to call
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
- // r7 : original constructor (for IsSuperConstructorCall)
+ // r7 : new target (for IsSuperConstructorCall)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {r3, r4, r7, r5};
@@ -204,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : allocation site or undefined
+ Register registers[] = {r4, r6, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ Register registers[] = {r4, r6, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
@@ -341,6 +374,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r4, // JSFunction
+ r6, // the new target
r3, // actual number of arguments
r5, // expected number of arguments
};
@@ -373,27 +407,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // math rounding function
- r6, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // math rounding function
- r6, // vector slot id
- r7, // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -409,7 +422,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r3, // argument count (not including receiver)
- r6, // original constructor
+ r6, // new target
r4, // constructor to call
r5 // address of the first argument
};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index e543ba853b..9cd35ab01c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -20,11 +20,12 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -49,8 +50,7 @@ void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip, cr);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY ||
- rmode == RelocInfo::CONSTRUCT_CALL);
+ DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
mov(ip, Operand(target, rmode));
mtctr(ip);
@@ -671,6 +671,20 @@ void MacroAssembler::ConvertInt64ToDouble(Register src,
}
+void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfidus(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src,
+ DoubleRegister double_dst) {
+ MovInt64ToDouble(double_dst, src);
+ fcfidu(double_dst, double_dst);
+}
+
+
void MacroAssembler::ConvertInt64ToFloat(Register src,
DoubleRegister double_dst) {
MovInt64ToDouble(double_dst, src);
@@ -701,6 +715,22 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
dst, double_dst);
}
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
+ if (rounding_mode == kRoundToZero) {
+ fctiduz(double_dst, double_input);
+ } else {
+ SetRoundingMode(rounding_mode);
+ fctidu(double_dst, double_input);
+ ResetRoundingMode();
+ }
+
+ MovDoubleToInt64(dst, double_dst);
+}
+#endif
+
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
@@ -723,20 +753,26 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
}
-void MacroAssembler::StubPrologue(int prologue_offset) {
+void MacroAssembler::StubPrologue(Register base, int prologue_offset) {
LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
PushFixedFrame(r11);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_embedded_constant_pool) {
- // ip contains prologue address
- LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ if (!base.is(no_reg)) {
+ // base contains prologue address
+ LoadConstantPoolPointerRegister(base, -prologue_offset);
+ } else {
+ LoadConstantPoolPointerRegister();
+ }
set_constant_pool_available(true);
}
}
-void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
+void MacroAssembler::Prologue(bool code_pre_aging, Register base,
+ int prologue_offset) {
+ DCHECK(!base.is(no_reg));
{
PredictableCodeSizeScope predictible_code_size_scope(
this, kNoCodeAgeSequenceLength);
@@ -766,8 +802,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
}
if (FLAG_enable_embedded_constant_pool) {
- // ip contains prologue address
- LoadConstantPoolPointerRegister(ip, -prologue_offset);
+ // base contains prologue address
+ LoadConstantPoolPointerRegister(base, -prologue_offset);
set_constant_pool_available(true);
}
}
@@ -987,9 +1023,7 @@ void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_reg, Label* done,
+ const ParameterCount& actual, Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -1010,8 +1044,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// ARM has some sanity checks as per below, considering add them for PPC
// DCHECK(actual.is_immediate() || actual.reg().is(r3));
// DCHECK(expected.is_immediate() || expected.reg().is(r5));
- // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
- // || code_reg.is(r6));
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
@@ -1043,11 +1075,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
if (!definitely_matches) {
- if (!code_constant.is_null()) {
- mov(r6, Operand(code_constant));
- addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- }
-
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
@@ -1064,17 +1091,78 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ mov(r7, Operand(step_in_enabled));
+ lbz(r7, MemOperand(r7));
+ cmpi(r7, Operand::Zero());
+ beq(&skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun, fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(r4));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
- &definitely_mismatches, flag, call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = ip;
+ LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
CallJSEntry(code);
@@ -1091,7 +1179,8 @@ void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
}
-void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
+ const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
@@ -1101,20 +1190,19 @@ void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
DCHECK(fun.is(r4));
Register expected_reg = r5;
- Register code_reg = ip;
+ Register temp_reg = r7;
- LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
LoadWordArith(expected_reg,
FieldMemOperand(
- code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+ temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
#if !defined(V8_TARGET_ARCH_PPC64)
SmiUntag(expected_reg);
#endif
- LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper);
}
@@ -1132,11 +1220,7 @@ void MacroAssembler::InvokeFunction(Register function,
// Get the function and setup the context.
LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // We call indirectly through the code field in the function to
- // allow recompilation to take effect without changing any of the
- // call sites.
- LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- InvokeCode(ip, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper);
}
@@ -1223,11 +1307,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
#endif
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- LoadP(scratch, FieldMemOperand(scratch, offset));
- LoadP(scratch,
- FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1417,11 +1497,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
return;
}
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -1440,26 +1516,26 @@ void MacroAssembler::Allocate(int object_size, Register result,
DCHECK((limit - top) == kPointerSize);
// Set up allocation top address register.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ Register top_address = scratch1;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top), r0);
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1469,15 +1545,15 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
@@ -1485,17 +1561,17 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if (is_int16(object_size)) {
cmpi(r0, Operand(object_size));
blt(gc_required);
- addi(scratch2, result, Operand(object_size));
+ addi(result_end, result, Operand(object_size));
} else {
- Cmpi(r0, Operand(object_size), scratch2);
+ Cmpi(r0, Operand(object_size), result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1505,28 +1581,24 @@ void MacroAssembler::Allocate(int object_size, Register result,
void MacroAssembler::Allocate(Register object_size, Register result,
- Register scratch1, Register scratch2,
+ Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
li(result, Operand(0x7091));
- li(scratch1, Operand(0x7191));
- li(scratch2, Operand(0x7291));
+ li(scratch, Operand(0x7191));
+ li(result_end, Operand(0x7291));
}
b(gc_required);
return;
}
- // Assert that the register arguments are different and that none of
- // them are ip. ip is used explicitly in the code generated below.
- DCHECK(!result.is(scratch1));
- DCHECK(!result.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
- DCHECK(!object_size.is(ip));
- DCHECK(!result.is(ip));
- DCHECK(!scratch1.is(ip));
- DCHECK(!scratch2.is(ip));
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
ExternalReference allocation_top =
@@ -1537,27 +1609,26 @@ void MacroAssembler::Allocate(Register object_size, Register result,
intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
DCHECK((limit - top) == kPointerSize);
- // Set up allocation top address.
- Register topaddr = scratch1;
- mov(topaddr, Operand(allocation_top));
-
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
// This code stores a temporary value in ip. This is OK, as the code below
// does not need ip for implicit literal generation.
+ Register alloc_limit = ip;
+ mov(top_address, Operand(allocation_top));
+
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into ip.
- LoadP(result, MemOperand(topaddr));
- LoadP(ip, MemOperand(topaddr, kPointerSize));
+    // Load allocation top into result and allocation limit into alloc_limit.
+ LoadP(result, MemOperand(top_address));
+ LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
- // Assert that result actually contains top on entry. ip is used
- // immediately below so this use of ip does not cause difference with
- // respect to register content between debug and release mode.
- LoadP(ip, MemOperand(topaddr));
- cmp(result, ip);
+ // Assert that result actually contains top on entry.
+ LoadP(alloc_limit, MemOperand(top_address));
+ cmp(result, alloc_limit);
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit into ip. Result already contains allocation top.
- LoadP(ip, MemOperand(topaddr, limit - top));
+ // Load allocation limit. Result already contains allocation top.
+ LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1567,15 +1638,15 @@ void MacroAssembler::Allocate(Register object_size, Register result,
STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
#else
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
- andi(scratch2, result, Operand(kDoubleAlignmentMask));
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
beq(&aligned, cr0);
if ((flags & PRETENURE) != 0) {
- cmpl(result, ip);
+ cmpl(result, alloc_limit);
bge(gc_required);
}
- mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
- stw(scratch2, MemOperand(result));
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
addi(result, result, Operand(kDoubleSize / 2));
bind(&aligned);
#endif
@@ -1584,24 +1655,24 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
- sub(r0, ip, result);
+ sub(r0, alloc_limit, result);
if ((flags & SIZE_IN_WORDS) != 0) {
- ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
- cmp(r0, scratch2);
+ ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
+ cmp(r0, result_end);
blt(gc_required);
- add(scratch2, result, scratch2);
+ add(result_end, result, result_end);
} else {
cmp(r0, object_size);
blt(gc_required);
- add(scratch2, result, object_size);
+ add(result_end, result, object_size);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
- andi(r0, scratch2, Operand(kObjectAlignmentMask));
+ andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
- StoreP(scratch2, MemOperand(topaddr));
+ StoreP(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
@@ -1770,6 +1841,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register value_reg, Register key_reg, Register elements_reg,
Register scratch1, DoubleRegister double_scratch, Label* fail,
int elements_offset) {
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
@@ -2246,22 +2318,13 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- mov(r3, Operand(num_arguments));
- JumpToExternalReference(ext);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
- result_size);
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ mov(r3, Operand(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -2277,35 +2340,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(ip, native_context_index);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(ip));
- CallJSEntry(ip);
- call_wrapper.AfterCall();
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpToJSEntry(ip);
- }
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- LoadP(target,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(target, FieldMemOperand(target, JSGlobalObject::kNativeContextOffset));
- // Load the JavaScript builtin function from the builtins object.
- LoadP(target, ContextOperand(target, native_context_index), r0);
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(r4));
- GetBuiltinFunction(r4, native_context_index);
- // Load the code entry point from the builtins object.
- LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ // Fake a parameter count to avoid emitting code to do the check.
+ ParameterCount expected(0);
+ LoadNativeContextSlot(native_context_index, r4);
+ InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
}
@@ -2427,44 +2465,27 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- LoadP(dst, GlobalObjectOperand());
- LoadP(dst, FieldMemOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind, ElementsKind transitioned_kind,
Register map_in_out, Register scratch, Label* no_map_match) {
- // Load the global or builtins object from the current context.
- LoadP(scratch,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(scratch,
- FieldMemOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- LoadP(scratch,
- MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
- size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(ip, FieldMemOperand(scratch, offset));
+ LoadP(scratch, NativeContextMemOperand());
+ LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
bne(no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
- LoadP(map_in_out, FieldMemOperand(scratch, offset));
+ LoadP(map_in_out,
+ ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- LoadP(function,
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- LoadP(function,
- FieldMemOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ LoadP(dst, NativeContextMemOperand());
+ LoadP(dst, ContextMemOperand(dst, index));
}
@@ -2623,6 +2644,19 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotABoundFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2733,29 +2767,25 @@ void MacroAssembler::AllocateHeapNumberWithValue(
}
-// Copies a fixed number of fields of heap objects from src to dst.
-void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
- int field_count) {
- // At least one bit set in the first 15 registers.
- DCHECK((temps & ((1 << 15) - 1)) != 0);
- DCHECK((temps & dst.bit()) == 0);
- DCHECK((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch1,
+ Register scratch2, Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch1));
+ DCHECK(!result.is(scratch2));
+ DCHECK(!result.is(value));
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
- }
- DCHECK(!tmp.is(no_reg));
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
- for (int i = 0; i < field_count; i++) {
- LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
- StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
- }
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
+ StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0);
+ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0);
+ StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0);
+ StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -2866,25 +2896,25 @@ void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
}
-void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
+void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {
Label loop;
mtctr(count);
bind(&loop);
- StoreP(filler, MemOperand(start_offset));
- addi(start_offset, start_offset, Operand(kPointerSize));
+ StoreP(filler, MemOperand(current_address));
+ addi(current_address, current_address, Operand(kPointerSize));
bdnz(&loop);
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label done;
- sub(r0, end_offset, start_offset, LeaveOE, SetRC);
+ sub(r0, end_address, current_address, LeaveOE, SetRC);
beq(&done, cr0);
ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
- InitializeNFieldsWithFiller(start_offset, r0, filler);
+ InitializeNFieldsWithFiller(current_address, r0, filler);
bind(&done);
}
@@ -3060,17 +3090,16 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
+ Register dest = function;
#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
// AIX uses a function descriptor. When calling C code be aware
// of this descriptor and pick up values from it
LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
LoadP(ip, MemOperand(function, 0));
- Register dest = ip;
-#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+ dest = ip;
+#elif ABI_CALL_VIA_IP
Move(ip, function);
- Register dest = ip;
-#else
- Register dest = function;
+ dest = ip;
#endif
Call(dest);
@@ -3172,8 +3201,8 @@ void MacroAssembler::CheckPageFlag(
void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
Register scratch1, Label* on_black) {
- HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -3206,27 +3235,6 @@ void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
- Label* not_data_object) {
- Label is_data_object;
- LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- beq(&is_data_object);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
- andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
- bne(not_data_object, cr0);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg) {
DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
@@ -3243,117 +3251,23 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
}
-void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
- Register mask_scratch,
- Register load_scratch,
- Label* value_is_white_and_not_data) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Register load_scratch,
+ Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(r0, mask_scratch, load_scratch, SetRC);
- bne(&done, cr0);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- // LSL may overflow, making the check conservative.
- slwi(r0, mask_scratch, Operand(1));
- and_(r0, load_scratch, r0, SetRC);
- beq(&ok, cr0);
- stop("Impossible marking bit pattern");
- bind(&ok);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = load_scratch; // Holds map while checking type.
- Register length = load_scratch; // Holds length of object after testing type.
- Label is_data_object, maybe_string_object, is_string_object, is_encoded;
-#if V8_TARGET_ARCH_PPC64
- Label length_computed;
-#endif
-
-
- // Check for heap-number
- LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- bne(&maybe_string_object);
- li(length, Operand(HeapNumber::kSize));
- b(&is_data_object);
- bind(&maybe_string_object);
-
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = load_scratch;
- lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
- andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
- bne(value_is_white_and_not_data, cr0);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- andi(r0, instance_type, Operand(kExternalStringTag));
- beq(&is_string_object, cr0);
- li(length, Operand(ExternalString::kSize));
- b(&is_data_object);
- bind(&is_string_object);
-
- // Sequential string, either Latin1 or UC16.
- // For Latin1 (char-size of 1) we untag the smi to get the length.
- // For UC16 (char-size of 2):
- // - (32-bit) we just leave the smi tag in place, thereby getting
- // the length multiplied by 2.
- // - (64-bit) we compute the offset in the 2-byte array
- DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
- LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
- andi(r0, instance_type, Operand(kStringEncodingMask));
- beq(&is_encoded, cr0);
- SmiUntag(ip);
-#if V8_TARGET_ARCH_PPC64
- b(&length_computed);
-#endif
- bind(&is_encoded);
-#if V8_TARGET_ARCH_PPC64
- SmiToShortArrayOffset(ip, ip);
- bind(&length_computed);
-#else
- DCHECK(kSmiShift == 1);
-#endif
- addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
- li(r0, Operand(~kObjectAlignmentMask));
- and_(length, length, r0);
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
- orx(ip, ip, mask_scratch);
- stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
- mov(ip, Operand(~Page::kPageAlignmentMask));
- and_(bitmap_scratch, bitmap_scratch, ip);
- lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- add(ip, ip, length);
- stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
- bind(&done);
+ beq(value_is_white, cr0);
}
@@ -4324,10 +4238,12 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
- Register reg5, Register reg6, Register reg7, Register reg8) {
+ Register reg5, Register reg6, Register reg7, Register reg8,
+ Register reg9, Register reg10) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
- reg7.is_valid() + reg8.is_valid();
+ reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
+ reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
@@ -4338,6 +4254,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
+ if (reg9.is_valid()) regs |= reg9.bit();
+ if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
@@ -4345,11 +4263,11 @@ bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
#endif
-CodePatcher::CodePatcher(byte* address, int instructions,
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
- masm_(NULL, address, size_ + Assembler::kGap),
+ masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
@@ -4361,7 +4279,7 @@ CodePatcher::CodePatcher(byte* address, int instructions,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that the code was patched as expected.
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index d4660d9207..78de89aa5c 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -24,6 +24,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
@@ -65,7 +66,8 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
Register reg4 = no_reg, Register reg5 = no_reg,
Register reg6 = no_reg, Register reg7 = no_reg,
- Register reg8 = no_reg);
+ Register reg8 = no_reg, Register reg9 = no_reg,
+ Register reg10 = no_reg);
#endif
// These exist to provide portability between 32 and 64bit
@@ -109,11 +111,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Returns the size of a call in instructions. Note, the value returned is
@@ -217,18 +216,10 @@ class MacroAssembler : public Assembler {
void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
- Register scratch3, Label* object_is_white_and_not_data);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value, Register scratch,
- Label* not_data_object);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Register scratch3, Label* value_is_white);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -387,8 +378,10 @@ class MacroAssembler : public Assembler {
const Register int_scratch);
#if V8_TARGET_ARCH_PPC64
- void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
+ void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif
// Converts the double_input to an integer. Note that, upon return,
@@ -400,9 +393,18 @@ class MacroAssembler : public Assembler {
const Register dst, const DoubleRegister double_dst,
FPRoundingMode rounding_mode = kRoundToZero);
+#if V8_TARGET_ARCH_PPC64
+ // Converts the double_input to an unsigned integer. Note that, upon return,
+ // the contents of double_dst will also hold the fixed point representation.
+ void ConvertDoubleToUnsignedInt64(
+ const DoubleRegister double_input, const Register dst,
+ const DoubleRegister double_dst,
+ FPRoundingMode rounding_mode = kRoundToZero);
+#endif
+
// Generates function and stub prologue code.
- void StubPrologue(int prologue_offset = 0);
- void Prologue(bool code_pre_aging, int prologue_offset = 0);
+ void StubPrologue(Register base = no_reg, int prologue_offset = 0);
+ void Prologue(bool code_pre_aging, Register base, int prologue_offset = 0);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
@@ -421,8 +423,15 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -434,7 +443,7 @@ class MacroAssembler : public Assembler {
Register scratch,
Label* no_map_match);
- void LoadGlobalFunction(int index, Register function);
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -542,14 +551,20 @@ class MacroAssembler : public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code, const ParameterCount& expected,
- const ParameterCount& actual, InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function, const ParameterCount& actual,
- InvokeFlag flag, const CallWrapper& call_wrapper);
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@@ -641,8 +656,8 @@ class MacroAssembler : public Assembler {
void Allocate(int object_size, Register result, Register scratch1,
Register scratch2, Label* gc_required, AllocationFlags flags);
- void Allocate(Register object_size, Register result, Register scratch1,
- Register scratch2, Label* gc_required, AllocationFlags flags);
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
@@ -675,8 +690,11 @@ class MacroAssembler : public Assembler {
Register heap_number_map,
Label* gc_required);
- // Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch1, Register scratch2,
+ Label* gc_required);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -684,17 +702,17 @@ class MacroAssembler : public Assembler {
void CopyBytes(Register src, Register dst, Register length, Register scratch);
// Initialize fields with filler values. |count| fields starting at
- // |start_offset| are overwritten with the value in |filler|. At the end the
- // loop, |start_offset| points at the next uninitialized field. |count| is
- // assumed to be non-zero.
- void InitializeNFieldsWithFiller(Register start_offset, Register count,
+ // |current_address| are overwritten with the value in |filler|. At the end
+ // the loop, |current_address| points at the next uninitialized field.
+ // |count| is assumed to be non-zero.
+ void InitializeNFieldsWithFiller(Register current_address, Register count,
Register filler);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -919,29 +937,29 @@ class MacroAssembler : public Assembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments, int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
@@ -990,13 +1008,6 @@ class MacroAssembler : public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the code object for the given builtin in the target register and
- // setup the function in r1.
- void GetBuiltinEntry(Register target, int native_context_index);
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1317,6 +1328,10 @@ class MacroAssembler : public Assembler {
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1483,8 +1498,7 @@ class MacroAssembler : public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual, Handle<Code> code_constant,
- Register code_reg, Label* done,
+ const ParameterCount& actual, Label* done,
bool* definitely_mismatches, InvokeFlag flag,
const CallWrapper& call_wrapper);
@@ -1531,7 +1545,8 @@ class CodePatcher {
public:
enum FlushICache { FLUSH, DONT_FLUSH };
- CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
+ CodePatcher(Isolate* isolate, byte* address, int instructions,
+ FlushICache flush_cache = FLUSH);
~CodePatcher();
// Macro assembler to emit code.
@@ -1555,13 +1570,13 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
-inline MemOperand ContextOperand(Register context, int index = 0) {
+inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-inline MemOperand GlobalObjectOperand() {
- return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+inline MemOperand NativeContextMemOperand() {
+ return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index fa088a2c30..0efa6605d5 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -445,7 +445,7 @@ void PPCDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
intptr_t value = *cur;
- Heap* current_heap = v8::internal::Isolate::Current()->heap();
+ Heap* current_heap = sim_->isolate_->heap();
if (((value & 1) == 0) || current_heap->Contains(obj)) {
PrintF(" (");
if ((value & 1) == 0) {
@@ -844,12 +844,12 @@ Simulator::~Simulator() { free(stack_); }
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, ExternalReference::Type type)
+ Redirection(Isolate* isolate, void* external_function,
+ ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
type_(type),
next_(NULL) {
- Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->FlushICache(
isolate->simulator_i_cache(),
@@ -864,9 +864,8 @@ class Redirection {
void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function,
+ static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
- Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
@@ -874,7 +873,7 @@ class Redirection {
return current;
}
}
- return new Redirection(external_function, type);
+ return new Redirection(isolate, external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -919,9 +918,10 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
}
-void* Simulator::RedirectExternalReference(void* external_function,
+void* Simulator::RedirectExternalReference(Isolate* isolate,
+ void* external_function,
ExternalReference::Type type) {
- Redirection* redirection = Redirection::Get(external_function, type);
+ Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -2702,9 +2702,17 @@ void Simulator::ExecuteExt3(Instruction* instr) {
// fcfids
int frt = instr->RTValue();
int frb = instr->RBValue();
- double t_val = get_double_from_d_register(frb);
- int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
- double frt_val = static_cast<float>(*frb_val_p);
+ int64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<float>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCFIDU: {
+ // fcfidus
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ uint64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<float>(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -2746,10 +2754,11 @@ void Simulator::ExecuteExt4(Instruction* instr) {
return;
}
case FSQRT: {
+ lazily_initialize_fast_sqrt(isolate_);
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- double frt_val = fast_sqrt(frb_val);
+ double frt_val = fast_sqrt(frb_val, isolate_);
set_d_register_from_double(frt, frt_val);
return;
}
@@ -2886,64 +2895,107 @@ void Simulator::ExecuteExt4(Instruction* instr) {
case FCFID: {
int frt = instr->RTValue();
int frb = instr->RBValue();
- double t_val = get_double_from_d_register(frb);
- int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
- double frt_val = static_cast<double>(*frb_val_p);
+ int64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<double>(frb_val);
set_d_register_from_double(frt, frt_val);
return;
}
- case FCTID: {
+ case FCFIDU: {
+ int frt = instr->RTValue();
+ int frb = instr->RBValue();
+ uint64_t frb_val = get_d_register(frb);
+ double frt_val = static_cast<double>(frb_val);
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case FCTID:
+ case FCTIDZ: {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
+ int mode = (opcode == FCTIDZ) ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
int64_t frt_val;
int64_t one = 1; // work-around gcc
- int64_t kMinLongLong = (one << 63);
- int64_t kMaxLongLong = kMinLongLong - 1;
+ int64_t kMinVal = (one << 63);
+ int64_t kMaxVal = kMinVal - 1;
+ bool invalid_convert = false;
- if (frb_val > kMaxLongLong) {
- frt_val = kMaxLongLong;
- } else if (frb_val < kMinLongLong) {
- frt_val = kMinLongLong;
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
} else {
- switch (fp_condition_reg_ & kFPRoundingModeMask) {
+ switch (mode) {
case kRoundToZero:
- frt_val = (int64_t)frb_val;
+ frb_val = std::trunc(frb_val);
break;
case kRoundToPlusInf:
- frt_val = (int64_t)std::ceil(frb_val);
+ frb_val = std::ceil(frb_val);
break;
case kRoundToMinusInf:
- frt_val = (int64_t)std::floor(frb_val);
+ frb_val = std::floor(frb_val);
break;
default:
- frt_val = (int64_t)frb_val;
UNIMPLEMENTED(); // Not used by V8.
break;
}
+ if (frb_val < static_cast<double>(kMinVal)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
+ } else if (frb_val >= static_cast<double>(kMaxVal)) {
+ frt_val = kMaxVal;
+ invalid_convert = true;
+ } else {
+ frt_val = (int64_t)frb_val;
+ }
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
+ if (invalid_convert) SetFPSCR(VXCVI);
return;
}
- case FCTIDZ: {
+ case FCTIDU:
+ case FCTIDUZ: {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
- int64_t frt_val;
- int64_t one = 1; // work-around gcc
- int64_t kMinLongLong = (one << 63);
- int64_t kMaxLongLong = kMinLongLong - 1;
-
- if (frb_val > kMaxLongLong) {
- frt_val = kMaxLongLong;
- } else if (frb_val < kMinLongLong) {
- frt_val = kMinLongLong;
+ int mode = (opcode == FCTIDUZ)
+ ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
+ uint64_t frt_val;
+ uint64_t kMinVal = 0;
+ uint64_t kMaxVal = kMinVal - 1;
+ bool invalid_convert = false;
+
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
} else {
- frt_val = (int64_t)frb_val;
+ switch (mode) {
+ case kRoundToZero:
+ frb_val = std::trunc(frb_val);
+ break;
+ case kRoundToPlusInf:
+ frb_val = std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frb_val = std::floor(frb_val);
+ break;
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ if (frb_val < static_cast<double>(kMinVal)) {
+ frt_val = kMinVal;
+ invalid_convert = true;
+ } else if (frb_val >= static_cast<double>(kMaxVal)) {
+ frt_val = kMaxVal;
+ invalid_convert = true;
+ } else {
+ frt_val = (uint64_t)frb_val;
+ }
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
+ if (invalid_convert) SetFPSCR(VXCVI);
return;
}
case FCTIW:
@@ -2951,44 +3003,47 @@ void Simulator::ExecuteExt4(Instruction* instr) {
int frt = instr->RTValue();
int frb = instr->RBValue();
double frb_val = get_double_from_d_register(frb);
+ int mode = (opcode == FCTIWZ) ? kRoundToZero
+ : (fp_condition_reg_ & kFPRoundingModeMask);
int64_t frt_val;
- if (frb_val > kMaxInt) {
- frt_val = kMaxInt;
- } else if (frb_val < kMinInt) {
- frt_val = kMinInt;
- } else {
- if (opcode == FCTIWZ) {
- frt_val = (int64_t)frb_val;
- } else {
- switch (fp_condition_reg_ & kFPRoundingModeMask) {
- case kRoundToZero:
- frt_val = (int64_t)frb_val;
- break;
- case kRoundToPlusInf:
- frt_val = (int64_t)std::ceil(frb_val);
- break;
- case kRoundToMinusInf:
- frt_val = (int64_t)std::floor(frb_val);
- break;
- case kRoundToNearest:
- frt_val = (int64_t)lround(frb_val);
-
- // Round to even if exactly halfway. (lround rounds up)
- if (std::fabs(static_cast<double>(frt_val) - frb_val) == 0.5 &&
- (frt_val % 2)) {
- frt_val += ((frt_val > 0) ? -1 : 1);
- }
+ int64_t kMinVal = kMinInt;
+ int64_t kMaxVal = kMaxInt;
- break;
- default:
- DCHECK(false);
- frt_val = (int64_t)frb_val;
- break;
+ if (std::isnan(frb_val)) {
+ frt_val = kMinVal;
+ } else {
+ switch (mode) {
+ case kRoundToZero:
+ frb_val = std::trunc(frb_val);
+ break;
+ case kRoundToPlusInf:
+ frb_val = std::ceil(frb_val);
+ break;
+ case kRoundToMinusInf:
+ frb_val = std::floor(frb_val);
+ break;
+ case kRoundToNearest: {
+ double orig = frb_val;
+ frb_val = lround(frb_val);
+ // Round to even if exactly halfway. (lround rounds up)
+ if (std::fabs(frb_val - orig) == 0.5 && ((int64_t)frb_val % 2)) {
+ frb_val += ((frb_val > 0) ? -1.0 : 1.0);
+ }
+ break;
}
+ default:
+ UNIMPLEMENTED(); // Not used by V8.
+ break;
+ }
+ if (frb_val < kMinVal) {
+ frt_val = kMinVal;
+ } else if (frb_val > kMaxVal) {
+ frt_val = kMaxVal;
+ } else {
+ frt_val = (int64_t)frb_val;
}
}
- double* p = reinterpret_cast<double*>(&frt_val);
- set_d_register_from_double(frt, *p);
+ set_d_register(frt, frt_val);
return;
}
case FNEG: {
@@ -3041,6 +3096,43 @@ void Simulator::ExecuteExt4(Instruction* instr) {
set_d_register(frt, lval);
return;
}
+ case MCRFS: {
+ int bf = instr->Bits(25, 23);
+ int bfa = instr->Bits(20, 18);
+ int cr_shift = (7 - bf) * CRWIDTH;
+ int fp_shift = (7 - bfa) * CRWIDTH;
+ int field_val = (fp_condition_reg_ >> fp_shift) & 0xf;
+ condition_reg_ &= ~(0x0f << cr_shift);
+ condition_reg_ |= (field_val << cr_shift);
+ // Clear copied exception bits
+ switch (bfa) {
+ case 5:
+ ClearFPSCR(VXSOFT);
+ ClearFPSCR(VXSQRT);
+ ClearFPSCR(VXCVI);
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return;
+ }
+ case MTFSB0: {
+ int bt = instr->Bits(25, 21);
+ ClearFPSCR(bt);
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ }
+ return;
+ }
+ case MTFSB1: {
+ int bt = instr->Bits(25, 21);
+ SetFPSCR(bt);
+ if (instr->Bit(0)) { // RC bit set
+ UNIMPLEMENTED();
+ }
+ return;
+ }
case FABS: {
int frt = instr->RTValue();
int frb = instr->RBValue();
@@ -3769,6 +3861,9 @@ void Simulator::CallInternal(byte* entry) {
set_pc(reinterpret_cast<intptr_t>(entry));
#endif
+ // Put target address in ip (for JS prologue).
+ set_register(r12, get_pc());
+
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
// the LR the simulation stops when returning to this call point.
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index bdf50ba474..a3b03dc506 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -22,7 +22,7 @@ namespace v8 {
namespace internal {
// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
@@ -33,8 +33,9 @@ typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
// should act as a function matching the type ppc_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
// The stack limit beyond which we will throw stack overflow errors in
@@ -48,11 +49,15 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() {}
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ USE(isolate);
+ }
};
} // namespace internal
} // namespace v8
@@ -318,6 +323,9 @@ class Simulator {
#endif
void ExecuteGeneric(Instruction* instr);
+ void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
+ void ClearFPSCR(int bit) { fp_condition_reg_ &= ~(1 << (31 - bit)); }
+
// Executes one instruction.
void ExecuteInstruction(Instruction* instr);
@@ -329,7 +337,8 @@ class Simulator {
// Runtime call support.
static void* RedirectExternalReference(
- void* external_function, v8::internal::ExternalReference::Type type);
+ Isolate* isolate, void* external_function,
+ v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, intptr_t* z);
@@ -391,16 +400,17 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
- FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(Simulator::current(isolate)->Call( \
+ FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
(intptr_t)p3, (intptr_t)p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, \
- (intptr_t)p3, (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, \
- (intptr_t)p7, (intptr_t)NULL, (intptr_t)p8)
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
+ Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
+ (intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
+ (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
+ (intptr_t)NULL, (intptr_t)p8)
// The simulator has its own stack. Thus it has a different stack limit from
@@ -414,13 +424,14 @@ class SimulatorStack : public v8::internal::AllStatic {
return Simulator::current(isolate)->StackLimit(c_limit);
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- Simulator* sim = Simulator::current(Isolate::Current());
+ static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
+ uintptr_t try_catch_address) {
+ Simulator* sim = Simulator::current(isolate);
return sim->PushAddress(try_catch_address);
}
- static inline void UnregisterCTryCatch() {
- Simulator::current(Isolate::Current())->PopAddress();
+ static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
+ Simulator::current(isolate)->PopAddress();
}
};
} // namespace internal
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index ea82c5589f..45e4ccf136 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -65,7 +65,7 @@ TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == NULL) return NULL;
TickSampleEventRecord* evt =
- new(address) TickSampleEventRecord(last_code_event_id_);
+ new (address) TickSampleEventRecord(last_code_event_id_.Value());
return &evt->sample;
}
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 5e61697339..bbddc873b1 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -7,6 +7,7 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
+#include "src/locked-queue-inl.h"
#include "src/log-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
@@ -35,14 +36,14 @@ ProfilerEventsProcessor::~ProfilerEventsProcessor() {}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
- event.generic.order = ++last_code_event_id_;
+ event.generic.order = last_code_event_id_.Increment(1);
events_buffer_.Enqueue(event);
}
void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
int fp_to_sp_delta) {
- TickSampleEventRecord record(last_code_event_id_);
+ TickSampleEventRecord record(last_code_event_id_.Value());
RegisterState regs;
Address fp = isolate->c_entry_fp(isolate->thread_local_top());
regs.sp = fp - fp_to_sp_delta;
@@ -54,7 +55,7 @@ void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
- TickSampleEventRecord record(last_code_event_id_);
+ TickSampleEventRecord record(last_code_event_id_.Value());
RegisterState regs;
StackFrameIterator it(isolate);
if (!it.done()) {
@@ -96,9 +97,9 @@ bool ProfilerEventsProcessor::ProcessCodeEvent() {
ProfilerEventsProcessor::SampleProcessingResult
ProfilerEventsProcessor::ProcessOneSample() {
- if (!ticks_from_vm_buffer_.IsEmpty()
- && ticks_from_vm_buffer_.Peek()->order ==
- last_processed_code_event_id_) {
+ TickSampleEventRecord record1;
+ if (ticks_from_vm_buffer_.Peek(&record1) &&
+ (record1.order == last_processed_code_event_id_)) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 2326bb7652..e5ef0ac7c4 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -6,12 +6,13 @@
#define V8_PROFILER_CPU_PROFILER_H_
#include "src/allocation.h"
+#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
+#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/sampler.h"
-#include "src/profiler/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -169,14 +170,14 @@ class ProfilerEventsProcessor : public base::Thread {
base::Atomic32 running_;
// Sampling period in microseconds.
const base::TimeDelta period_;
- UnboundQueue<CodeEventsContainer> events_buffer_;
+ LockedQueue<CodeEventsContainer> events_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
kTickSampleBufferSize / sizeof(TickSampleEventRecord);
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
- UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- unsigned last_code_event_id_;
+ LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
+ AtomicNumber<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
};
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index d6fcbbdaca..9a04e83af4 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -66,6 +66,8 @@ class HeapProfiler {
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
void ClearHeapObjectMap();
+ Isolate* isolate() const { return heap()->isolate(); }
+
private:
Heap* heap() const;
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index fb1e891c94..169ab569e8 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -16,6 +16,11 @@ HeapEntry* HeapGraphEdge::from() const {
}
+Isolate* HeapGraphEdge::isolate() const {
+ return snapshot()->profiler()->isolate();
+}
+
+
HeapSnapshot* HeapGraphEdge::snapshot() const {
return to_entry_->snapshot();
}
@@ -43,6 +48,8 @@ HeapGraphEdge** HeapEntry::children_arr() {
}
+Isolate* HeapEntry::isolate() const { return snapshot_->profiler()->isolate(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 2268db223f..69ed5e6f29 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -7,6 +7,7 @@
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
+#include "src/objects-body-descriptors.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -804,9 +805,10 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
- const char* name = shared->bound() ? "native_bind" :
- names_->GetName(String::cast(shared->name()));
+ const char* name = names_->GetName(String::cast(shared->name()));
return AddEntry(object, HeapEntry::kClosure, name);
+ } else if (object->IsJSBoundFunction()) {
+ return AddEntry(object, HeapEntry::kClosure, "native_bind");
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
@@ -987,14 +989,14 @@ int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
class IndexedReferencesExtractor : public ObjectVisitor {
public:
- IndexedReferencesExtractor(V8HeapExplorer* generator,
- HeapObject* parent_obj,
+ IndexedReferencesExtractor(V8HeapExplorer* generator, HeapObject* parent_obj,
int parent)
: generator_(generator),
parent_obj_(parent_obj),
+ parent_start_(HeapObject::RawField(parent_obj_, 0)),
+ parent_end_(HeapObject::RawField(parent_obj_, parent_obj_->Size())),
parent_(parent),
- next_index_(0) {
- }
+ next_index_(0) {}
void VisitCodeEntry(Address entry_address) override {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
generator_->SetInternalReference(parent_obj_, parent_, "code", code);
@@ -1002,40 +1004,24 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
+ intptr_t index =
+ static_cast<intptr_t>(p - HeapObject::RawField(parent_obj_, 0));
++next_index_;
- if (CheckVisitedAndUnmark(p)) continue;
+ // |p| could be outside of the object, e.g., while visiting RelocInfo of
+ // code objects.
+ if (p >= parent_start_ && p < parent_end_ && generator_->marks_[index]) {
+ generator_->marks_[index] = false;
+ continue;
+ }
generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p);
}
}
- static void MarkVisitedField(HeapObject* obj, int offset) {
- if (offset < 0) return;
- Address field = obj->address() + offset;
- DCHECK(Memory::Object_at(field)->IsHeapObject());
- intptr_t p = reinterpret_cast<intptr_t>(Memory::Object_at(field));
- DCHECK(!IsMarked(p));
- intptr_t p_tagged = p | kTag;
- Memory::Object_at(field) = reinterpret_cast<Object*>(p_tagged);
- }
private:
- bool CheckVisitedAndUnmark(Object** field) {
- intptr_t p = reinterpret_cast<intptr_t>(*field);
- if (IsMarked(p)) {
- intptr_t p_untagged = (p & ~kTaggingMask) | kHeapObjectTag;
- *field = reinterpret_cast<Object*>(p_untagged);
- DCHECK((*field)->IsHeapObject());
- return true;
- }
- return false;
- }
-
- static const intptr_t kTaggingMask = 3;
- static const intptr_t kTag = 3;
-
- static bool IsMarked(intptr_t p) { return (p & kTaggingMask) == kTag; }
-
V8HeapExplorer* generator_;
HeapObject* parent_obj_;
+ Object** parent_start_;
+ Object** parent_end_;
int parent_;
int next_index_;
};
@@ -1113,13 +1099,29 @@ void V8HeapExplorer::ExtractJSGlobalProxyReferences(
void V8HeapExplorer::ExtractJSObjectReferences(
int entry, JSObject* js_obj) {
HeapObject* obj = js_obj;
- ExtractClosureReferences(js_obj, entry);
ExtractPropertyReferences(js_obj, entry);
ExtractElementReferences(js_obj, entry);
ExtractInternalReferences(js_obj, entry);
PrototypeIterator iter(heap_->isolate(), js_obj);
SetPropertyReference(obj, entry, heap_->proto_string(), iter.GetCurrent());
- if (obj->IsJSFunction()) {
+ if (obj->IsJSBoundFunction()) {
+ JSBoundFunction* js_fun = JSBoundFunction::cast(obj);
+ TagObject(js_fun->bound_arguments(), "(bound arguments)");
+ SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
+ JSBoundFunction::kBoundArgumentsOffset);
+ TagObject(js_fun->creation_context(), "(creation context)");
+ SetInternalReference(js_fun, entry, "creation_context",
+ js_fun->creation_context(),
+ JSBoundFunction::kCreationContextOffset);
+ SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
+ SetNativeBindReference(js_obj, entry, "bound_function",
+ js_fun->bound_target_function());
+ FixedArray* bindings = js_fun->bound_arguments();
+ for (int i = 0; i < bindings->length(); i++) {
+ const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
+ SetNativeBindReference(js_obj, entry, reference_name, bindings->get(i));
+ }
+ } else if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
if (!proto_or_map->IsTheHole()) {
@@ -1139,13 +1141,8 @@ void V8HeapExplorer::ExtractJSObjectReferences(
}
}
SharedFunctionInfo* shared_info = js_fun->shared();
- // JSFunction has either bindings or literals and never both.
- bool bound = shared_info->bound();
- TagObject(js_fun->literals_or_bindings(),
- bound ? "(function bindings)" : "(function literals)");
- SetInternalReference(js_fun, entry,
- bound ? "bindings" : "literals",
- js_fun->literals_or_bindings(),
+ TagObject(js_fun->literals(), "(function literals)");
+ SetInternalReference(js_fun, entry, "literals", js_fun->literals(),
JSFunction::kLiteralsOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(js_fun, entry,
@@ -1262,11 +1259,10 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
}
EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
- EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, JSGlobalObject, global);
+ EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, HeapObject, extension);
+ EXTRACT_CONTEXT_FIELD(NATIVE_CONTEXT_INDEX, Context, native_context);
if (context->IsNativeContext()) {
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->embedder_data(), "(context data)");
NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD)
EXTRACT_CONTEXT_FIELD(OPTIMIZED_FUNCTIONS_LIST, unused,
@@ -1542,7 +1538,7 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
// Do not visit weak_next as it is not visited by the StaticVisitor,
// and we're not very interested in weak_next field here.
STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
- AllocationSite::BodyDescriptor::kEndOffset);
+ AllocationSite::BodyDescriptor::kEndOffset);
}
@@ -1591,24 +1587,6 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
}
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
- if (!js_obj->IsJSFunction()) return;
-
- JSFunction* func = JSFunction::cast(js_obj);
- if (func->shared()->bound()) {
- BindingsArray* bindings = func->function_bindings();
- SetNativeBindReference(js_obj, entry, "bound_this", bindings->bound_this());
- SetNativeBindReference(js_obj, entry, "bound_function",
- bindings->bound_function());
- for (int i = 0; i < bindings->bindings_count(); i++) {
- const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
- SetNativeBindReference(js_obj, entry, reference_name,
- bindings->binding(i));
- }
- }
-}
-
-
void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
@@ -1739,14 +1717,11 @@ void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
String* V8HeapExplorer::GetConstructorName(JSObject* object) {
- Heap* heap = object->GetHeap();
- if (object->IsJSFunction()) return heap->closure_string();
- String* constructor_name = object->constructor_name();
- if (constructor_name == heap->Object_string()) {
- // TODO(verwaest): Try to get object.constructor.name in this case.
- // This requires handlification of the V8HeapExplorer.
- }
- return object->constructor_name();
+ Isolate* isolate = object->GetIsolate();
+ if (object->IsJSFunction()) return isolate->heap()->closure_string();
+ DisallowHeapAllocation no_gc;
+ HandleScope scope(isolate);
+ return *JSReceiver::GetConstructorName(handle(object, isolate));
}
@@ -1870,6 +1845,14 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
obj = iterator.next(), progress_->ProgressStep()) {
if (interrupted) continue;
+ size_t max_pointer = obj->Size() / kPointerSize;
+ if (max_pointer > marks_.size()) {
+ // Clear the current bits.
+ std::vector<bool>().swap(marks_);
+ // Reallocate to right size.
+ marks_.resize(max_pointer, false);
+ }
+
HeapEntry* heap_entry = GetEntry(obj);
int entry = heap_entry->index();
if ((this->*extractor)(entry, obj)) {
@@ -1914,11 +1897,19 @@ void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
parent_entry,
names_->GetName(reference_name),
child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
}
+void V8HeapExplorer::MarkVisitedField(HeapObject* obj, int offset) {
+ if (offset < 0) return;
+ int index = offset / kPointerSize;
+ DCHECK(!marks_[index]);
+ marks_[index] = true;
+}
+
+
void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
int parent_entry,
const char* reference_name,
@@ -1963,7 +1954,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
reference_name,
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -1981,7 +1972,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
names_->GetName(index),
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -2014,7 +2005,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
reference_name,
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -2032,7 +2023,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
names_->GetFormatted("%d", index),
child_entry);
}
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
@@ -2073,7 +2064,7 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
parent_entry,
name,
child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(parent_obj, field_offset);
}
}
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 3d6693b0d9..857f2401bf 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -50,6 +50,8 @@ class HeapGraphEdge BASE_EMBEDDED {
INLINE(HeapEntry* from() const);
HeapEntry* to() const { return to_entry_; }
+ INLINE(Isolate* isolate() const);
+
private:
INLINE(HeapSnapshot* snapshot() const);
int from_index() const { return FromIndexField::decode(bit_field_); }
@@ -115,6 +117,7 @@ class HeapEntry BASE_EMBEDDED {
}
Vector<HeapGraphEdge*> children() {
return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
+ INLINE(Isolate* isolate() const);
void SetIndexedReference(
HeapGraphEdge::Type type, int index, HeapEntry* entry);
@@ -351,6 +354,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
HeapObject* object);
+ void MarkVisitedField(HeapObject* obj, int offset);
+
HeapEntry* AddEntry(HeapObject* object);
HeapEntry* AddEntry(HeapObject* object,
HeapEntry::Type type,
@@ -385,7 +390,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractFixedArrayReferences(int entry, FixedArray* array);
- void ExtractClosureReferences(JSObject* js_obj, int entry);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
void ExtractAccessorPairProperty(JSObject* js_obj, int entry, Name* key,
Object* callback_obj, int field_offset = -1);
@@ -465,6 +469,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet weak_containers_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
+ std::vector<bool> marks_;
+
friend class IndexedReferencesExtractor;
friend class RootsReferencesExtractor;
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 04d7e39c16..85edce2663 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -42,6 +42,10 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
inline unsigned ProfileNode::function_id() const {
return tree_->GetFunctionId(this);
}
+
+
+inline Isolate* ProfileNode::isolate() const { return tree_->isolate(); }
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 21fa5ca4a4..890f341e89 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -4,12 +4,12 @@
#include "src/profiler/profile-generator.h"
+#include "src/ast/scopeinfo.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/sampler.h"
-#include "src/scopeinfo.h"
#include "src/splay-tree-inl.h"
#include "src/unicode.h"
@@ -249,10 +249,11 @@ class DeleteNodesCallback {
};
-ProfileTree::ProfileTree()
+ProfileTree::ProfileTree(Isolate* isolate)
: root_entry_(Logger::FUNCTION_TAG, "(root)"),
next_node_id_(1),
root_(new ProfileNode(this, &root_entry_)),
+ isolate_(isolate),
next_function_id_(1),
function_ids_(ProfileNode::CodeEntriesMatch) {}
@@ -347,11 +348,11 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
-CpuProfile::CpuProfile(const char* title, bool record_samples)
+CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
: title_(title),
record_samples_(record_samples),
- start_time_(base::TimeTicks::HighResolutionNow()) {
-}
+ start_time_(base::TimeTicks::HighResolutionNow()),
+ top_down_(isolate) {}
void CpuProfile::AddPath(base::TimeTicks timestamp,
@@ -440,8 +441,8 @@ void CodeMap::Print() {
CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
: function_and_resource_names_(heap),
- current_profiles_semaphore_(1) {
-}
+ isolate_(heap->isolate()),
+ current_profiles_semaphore_(1) {}
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
@@ -476,7 +477,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.Add(new CpuProfile(title, record_samples));
+ current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -618,11 +619,12 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
src_line_not_found = false;
*entry++ = pc_entry;
- if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
- pc_entry->builtin_id() == Builtins::kFunctionApply) {
- // When current function is FunctionCall or FunctionApply builtin the
- // top frame is either frame of the calling JS function or internal
- // frame. In the latter case we know the caller for sure but in the
+ if (pc_entry->builtin_id() == Builtins::kFunctionPrototypeApply ||
+ pc_entry->builtin_id() == Builtins::kFunctionPrototypeCall) {
+ // When current function is either the Function.prototype.apply or the
+ // Function.prototype.call builtin the top frame is either frame of
+ // the calling JS function or internal frame.
+ // In the latter case we know the caller for sure but in the
// former case we don't so we simply replace the frame with
// 'unresolved' entry.
if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 079413a8cd..47a73f191a 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -156,6 +156,7 @@ class ProfileNode {
const std::vector<CpuProfileDeoptInfo>& deopt_infos() const {
return deopt_infos_;
}
+ Isolate* isolate() const;
void Print(int indent);
@@ -186,7 +187,7 @@ class ProfileNode {
class ProfileTree {
public:
- ProfileTree();
+ explicit ProfileTree(Isolate* isolate);
~ProfileTree();
ProfileNode* AddPathFromEnd(
@@ -200,6 +201,8 @@ class ProfileTree {
root_->Print(0);
}
+ Isolate* isolate() const { return isolate_; }
+
private:
template <typename Callback>
void TraverseDepthFirst(Callback* callback);
@@ -207,6 +210,7 @@ class ProfileTree {
CodeEntry root_entry_;
unsigned next_node_id_;
ProfileNode* root_;
+ Isolate* isolate_;
unsigned next_function_id_;
HashMap function_ids_;
@@ -217,7 +221,7 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(const char* title, bool record_samples);
+ CpuProfile(Isolate* isolate, const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
@@ -339,6 +343,8 @@ class CpuProfilesCollection {
List<CodeEntry*> code_entries_;
List<CpuProfile*> finished_profiles_;
+ Isolate* isolate_;
+
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
base::Semaphore current_profiles_semaphore_;
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 8c8dfa4f4a..243a9faac3 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -20,11 +20,11 @@ bool GetPropertyIfPresent(Handle<Object> obj, Handle<String> name,
Handle<Object>* value) {
LookupIterator it(obj, name);
// 4. Let hasEnumerable be HasProperty(Obj, "enumerable").
- Maybe<PropertyAttributes> maybe_attr = JSReceiver::GetPropertyAttributes(&it);
+ Maybe<bool> has_property = JSReceiver::HasProperty(&it);
// 5. ReturnIfAbrupt(hasEnumerable).
- if (!maybe_attr.IsJust()) return false;
+ if (has_property.IsNothing()) return false;
// 6. If hasEnumerable is true, then
- if (maybe_attr.FromJust() != ABSENT) {
+ if (has_property.FromJust() == true) {
// 6a. Let enum be ToBoolean(Get(Obj, "enumerable")).
// 6b. ReturnIfAbrupt(enum).
if (!JSObject::GetProperty(&it).ToHandle(value)) return false;
@@ -103,7 +103,7 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
static void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
Handle<String> name, Handle<Object> value) {
- LookupIterator it(object, name);
+ LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
CHECK(result.IsJust() && result.FromJust());
}
@@ -148,7 +148,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
PropertyDescriptor* desc) {
// 1. ReturnIfAbrupt(Obj).
// 2. If Type(Obj) is not Object, throw a TypeError exception.
- if (!obj->IsSpecObject()) {
+ if (!obj->IsJSReceiver()) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kPropertyDescObject, obj));
return false;
@@ -160,109 +160,141 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
return true;
}
- // TODO(jkummerow): Implement JSProxy support.
- // Specifically, instead of taking the attributes != ABSENT shortcut, we
- // have to implement proper HasProperty for proxies.
- if (!obj->IsJSProxy()) {
- { // enumerable?
- Handle<Object> enumerable;
- // 4 through 6b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->enumerable_string(),
- &enumerable)) {
- return false;
- }
- // 6c. Set the [[Enumerable]] field of desc to enum.
- if (!enumerable.is_null()) {
- desc->set_enumerable(enumerable->BooleanValue());
- }
- }
- { // configurable?
- Handle<Object> configurable;
- // 7 through 9b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->configurable_string(),
- &configurable)) {
- return false;
- }
- // 9c. Set the [[Configurable]] field of desc to conf.
- if (!configurable.is_null()) {
- desc->set_configurable(configurable->BooleanValue());
- }
- }
- { // value?
- Handle<Object> value;
- // 10 through 12b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->value_string(),
- &value))
- return false;
- // 12c. Set the [[Value]] field of desc to value.
- if (!value.is_null()) desc->set_value(value);
- }
- { // writable?
- Handle<Object> writable;
- // 13 through 15b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->writable_string(),
- &writable)) {
- return false;
- }
- // 15c. Set the [[Writable]] field of desc to writable.
- if (!writable.is_null()) desc->set_writable(writable->BooleanValue());
+ // enumerable?
+ Handle<Object> enumerable;
+ // 4 through 6b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->enumerable_string(),
+ &enumerable)) {
+ return false;
+ }
+ // 6c. Set the [[Enumerable]] field of desc to enum.
+ if (!enumerable.is_null()) {
+ desc->set_enumerable(enumerable->BooleanValue());
+ }
+
+ // configurable?
+ Handle<Object> configurable;
+ // 7 through 9b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->configurable_string(),
+ &configurable)) {
+ return false;
+ }
+ // 9c. Set the [[Configurable]] field of desc to conf.
+ if (!configurable.is_null()) {
+ desc->set_configurable(configurable->BooleanValue());
+ }
+
+ // value?
+ Handle<Object> value;
+ // 10 through 12b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->value_string(), &value)) {
+ return false;
+ }
+ // 12c. Set the [[Value]] field of desc to value.
+ if (!value.is_null()) desc->set_value(value);
+
+ // writable?
+ Handle<Object> writable;
+ // 13 through 15b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->writable_string(),
+ &writable)) {
+ return false;
+ }
+ // 15c. Set the [[Writable]] field of desc to writable.
+ if (!writable.is_null()) desc->set_writable(writable->BooleanValue());
+
+ // getter?
+ Handle<Object> getter;
+ // 16 through 18b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->get_string(), &getter)) {
+ return false;
+ }
+ if (!getter.is_null()) {
+ // 18c. If IsCallable(getter) is false and getter is not undefined,
+ // throw a TypeError exception.
+ if (!getter->IsCallable() && !getter->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectGetterCallable, getter));
+ return false;
}
- { // getter?
- Handle<Object> getter;
- // 16 through 18b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->get_string(), &getter))
- return false;
- if (!getter.is_null()) {
- // 18c. If IsCallable(getter) is false and getter is not undefined,
- // throw a TypeError exception.
- if (!getter->IsCallable() && !getter->IsUndefined()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kObjectGetterCallable, getter));
- return false;
- }
- // 18d. Set the [[Get]] field of desc to getter.
- desc->set_get(getter);
- }
- { // setter?
- Handle<Object> setter;
- // 19 through 21b.
- if (!GetPropertyIfPresent(obj, isolate->factory()->set_string(),
- &setter))
- return false;
- if (!setter.is_null()) {
- // 21c. If IsCallable(setter) is false and setter is not undefined,
- // throw a TypeError exception.
- if (!setter->IsCallable() && !setter->IsUndefined()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kObjectSetterCallable, setter));
- return false;
- }
- // 21d. Set the [[Set]] field of desc to setter.
- desc->set_set(setter);
- }
- }
- // 22. If either desc.[[Get]] or desc.[[Set]] is present, then
- // 22a. If either desc.[[Value]] or desc.[[Writable]] is present,
- // throw a TypeError exception.
- if ((desc->has_get() || desc->has_set()) &&
- (desc->has_value() || desc->has_writable())) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kValueAndAccessor, obj));
- return false;
- }
+ // 18d. Set the [[Get]] field of desc to getter.
+ desc->set_get(getter);
+ }
+ // setter?
+ Handle<Object> setter;
+ // 19 through 21b.
+ if (!GetPropertyIfPresent(obj, isolate->factory()->set_string(), &setter)) {
+ return false;
+ }
+ if (!setter.is_null()) {
+ // 21c. If IsCallable(setter) is false and setter is not undefined,
+ // throw a TypeError exception.
+ if (!setter->IsCallable() && !setter->IsUndefined()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kObjectSetterCallable, setter));
+ return false;
}
- } else {
- DCHECK(obj->IsJSProxy());
- // Having an UNIMPLEMENTED() here would upset ClusterFuzz, because
- // --harmony-proxies makes it possible to reach this branch.
- isolate->Throw(
- *isolate->factory()->NewTypeError(MessageTemplate::kUnsupported));
+ // 21d. Set the [[Set]] field of desc to setter.
+ desc->set_set(setter);
+ }
+
+ // 22. If either desc.[[Get]] or desc.[[Set]] is present, then
+ // 22a. If either desc.[[Value]] or desc.[[Writable]] is present,
+ // throw a TypeError exception.
+ if ((desc->has_get() || desc->has_set()) &&
+ (desc->has_value() || desc->has_writable())) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kValueAndAccessor, obj));
return false;
}
+
// 23. Return desc.
return true;
}
+// ES6 6.2.4.6
+// static
+void PropertyDescriptor::CompletePropertyDescriptor(Isolate* isolate,
+ PropertyDescriptor* desc) {
+ // 1. ReturnIfAbrupt(Desc).
+ // 2. Assert: Desc is a Property Descriptor.
+ // 3. Let like be Record{
+ // [[Value]]: undefined, [[Writable]]: false,
+ // [[Get]]: undefined, [[Set]]: undefined,
+ // [[Enumerable]]: false, [[Configurable]]: false}.
+ // 4. If either IsGenericDescriptor(Desc) or IsDataDescriptor(Desc) is true,
+ // then:
+ if (!IsAccessorDescriptor(desc)) {
+ // 4a. If Desc does not have a [[Value]] field, set Desc.[[Value]] to
+ // like.[[Value]].
+ if (!desc->has_value()) {
+ desc->set_value(isolate->factory()->undefined_value());
+ }
+ // 4b. If Desc does not have a [[Writable]] field, set Desc.[[Writable]]
+ // to like.[[Writable]].
+ if (!desc->has_writable()) desc->set_writable(false);
+ } else {
+ // 5. Else,
+ // 5a. If Desc does not have a [[Get]] field, set Desc.[[Get]] to
+ // like.[[Get]].
+ if (!desc->has_get()) {
+ desc->set_get(isolate->factory()->undefined_value());
+ }
+ // 5b. If Desc does not have a [[Set]] field, set Desc.[[Set]] to
+ // like.[[Set]].
+ if (!desc->has_set()) {
+ desc->set_set(isolate->factory()->undefined_value());
+ }
+ }
+ // 6. If Desc does not have an [[Enumerable]] field, set
+ // Desc.[[Enumerable]] to like.[[Enumerable]].
+ if (!desc->has_enumerable()) desc->set_enumerable(false);
+ // 7. If Desc does not have a [[Configurable]] field, set
+ // Desc.[[Configurable]] to like.[[Configurable]].
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ // 8. Return Desc.
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/property-descriptor.h b/deps/v8/src/property-descriptor.h
index 9425ab10ef..5fbbfa36ec 100644
--- a/deps/v8/src/property-descriptor.h
+++ b/deps/v8/src/property-descriptor.h
@@ -41,6 +41,17 @@ class PropertyDescriptor {
return !IsAccessorDescriptor(desc) && !IsDataDescriptor(desc);
}
+ // ES6 6.2.4.4
+ Handle<Object> ToObject(Isolate* isolate);
+
+ // ES6 6.2.4.5
+ static bool ToPropertyDescriptor(Isolate* isolate, Handle<Object> obj,
+ PropertyDescriptor* desc);
+
+ // ES6 6.2.4.6
+ static void CompletePropertyDescriptor(Isolate* isolate,
+ PropertyDescriptor* desc);
+
bool is_empty() const {
return !has_enumerable() && !has_configurable() && !has_writable() &&
!has_value() && !has_get() && !has_set();
@@ -89,11 +100,6 @@ class PropertyDescriptor {
(has_writable() && !writable() ? READ_ONLY : NONE));
}
- Handle<Object> ToObject(Isolate* isolate);
-
- static bool ToPropertyDescriptor(Isolate* isolate, Handle<Object> obj,
- PropertyDescriptor* desc);
-
private:
bool enumerable_ : 1;
bool has_enumerable_ : 1;
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 7e5c78b8d9..44f32cbc93 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -9,21 +9,21 @@
#include "src/allocation.h"
#include "src/utils.h"
-// Ecma-262 3rd 8.6.1
+namespace v8 {
+namespace internal {
+
+// ES6 6.1.7.1
enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
+ NONE = ::v8::None,
+ READ_ONLY = ::v8::ReadOnly,
+ DONT_ENUM = ::v8::DontEnum,
+ DONT_DELETE = ::v8::DontDelete,
+
+ ALL_ATTRIBUTES_MASK = READ_ONLY | DONT_ENUM | DONT_DELETE,
SEALED = DONT_DELETE,
FROZEN = SEALED | READ_ONLY,
- STRING = 8, // Used to filter symbols and string names
- SYMBOLIC = 16,
- PRIVATE_SYMBOL = 32,
-
- DONT_SHOW = DONT_ENUM | SYMBOLIC | PRIVATE_SYMBOL,
ABSENT = 64, // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
@@ -36,8 +36,24 @@ enum PropertyAttributes {
};
-namespace v8 {
-namespace internal {
+enum PropertyFilter {
+ ALL_PROPERTIES = 0,
+ ONLY_WRITABLE = 1,
+ ONLY_ENUMERABLE = 2,
+ ONLY_CONFIGURABLE = 4,
+ SKIP_STRINGS = 8,
+ SKIP_SYMBOLS = 16,
+ ONLY_ALL_CAN_READ = 32,
+ ENUMERABLE_STRINGS = ONLY_ENUMERABLE | SKIP_SYMBOLS,
+};
+// Enable fast comparisons of PropertyAttributes against PropertyFilters.
+STATIC_ASSERT(ALL_PROPERTIES == static_cast<PropertyFilter>(NONE));
+STATIC_ASSERT(ONLY_WRITABLE == static_cast<PropertyFilter>(READ_ONLY));
+STATIC_ASSERT(ONLY_ENUMERABLE == static_cast<PropertyFilter>(DONT_ENUM));
+STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(DONT_DELETE));
+STATIC_ASSERT(((SKIP_STRINGS | SKIP_SYMBOLS | ONLY_ALL_CAN_READ) &
+ ALL_ATTRIBUTES_MASK) == 0);
+
class Smi;
template<class> class TypeImpl;
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 0727749853..3253791f90 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -28,12 +28,15 @@ class PrototypeIterator {
enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
+ const int kProxyPrototypeLimit = 100 * 1000;
+
PrototypeIterator(Isolate* isolate, Handle<Object> receiver,
WhereToStart where_to_start = START_AT_PROTOTYPE)
: did_jump_to_prototype_chain_(false),
object_(NULL),
handle_(receiver),
- isolate_(isolate) {
+ isolate_(isolate),
+ seen_proxies_(0) {
CHECK(!handle_.is_null());
if (where_to_start == START_AT_PROTOTYPE) {
Advance();
@@ -44,7 +47,8 @@ class PrototypeIterator {
WhereToStart where_to_start = START_AT_PROTOTYPE)
: did_jump_to_prototype_chain_(false),
object_(receiver),
- isolate_(isolate) {
+ isolate_(isolate),
+ seen_proxies_(0) {
if (where_to_start == START_AT_PROTOTYPE) {
Advance();
}
@@ -63,6 +67,17 @@ class PrototypeIterator {
~PrototypeIterator() {}
+ bool HasAccess() const {
+ // We can only perform access check in the handlified version of the
+ // PrototypeIterator.
+ DCHECK(!handle_.is_null());
+ if (handle_->IsAccessCheckNeeded()) {
+ return isolate_->MayAccess(handle(isolate_->context()),
+ Handle<JSObject>::cast(handle_));
+ }
+ return true;
+ }
+
template <typename T = Object>
T* GetCurrent() const {
DCHECK(handle_.is_null());
@@ -72,6 +87,7 @@ class PrototypeIterator {
template <typename T = Object>
static Handle<T> GetCurrent(const PrototypeIterator& iterator) {
DCHECK(!iterator.handle_.is_null());
+ DCHECK(iterator.object_ == NULL);
return Handle<T>::cast(iterator.handle_);
}
@@ -106,6 +122,33 @@ class PrototypeIterator {
}
}
+ // Returns false iff a call to JSProxy::GetPrototype throws.
+ // TODO(neis): This should probably replace Advance().
+ bool AdvanceFollowingProxies() {
+ DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
+ if (!HasAccess()) {
+ // Abort the lookup if we do not have access to the current object.
+ handle_ = isolate_->factory()->null_value();
+ return true;
+ }
+ if (handle_.is_null() || !handle_->IsJSProxy()) {
+ AdvanceIgnoringProxies();
+ return true;
+ }
+ // Due to possible __proto__ recursion limit the number of Proxies
+ // we visit to an arbitrarily chosen large number.
+ seen_proxies_++;
+ if (seen_proxies_ > kProxyPrototypeLimit) {
+ isolate_->Throw(
+ *isolate_->factory()->NewRangeError(MessageTemplate::kStackOverflow));
+ return false;
+ }
+ did_jump_to_prototype_chain_ = true;
+ MaybeHandle<Object> proto =
+ JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
+ return proto.ToHandle(&handle_);
+ }
+
bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const {
if (handle_.is_null()) {
return object_->IsNull() ||
@@ -135,6 +178,7 @@ class PrototypeIterator {
Object* object_;
Handle<Object> handle_;
Isolate* isolate_;
+ int seen_proxies_;
DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
};
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index d296d90e7d..6fafdfb4ad 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -58,7 +58,7 @@ namespace internal {
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
+ * string start - 1). Used to initialize capture registers to a
* non-position.
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
@@ -98,7 +98,8 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -176,29 +177,18 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r0, current_input_offset(), Operand(-char_size()));
__ cmp(r0, r1);
BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
- __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+void RegExpMacroAssemblerARM::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
__ cmp(r0, r1);
BranchOrBacktrack(ne, on_not_at_start);
}
@@ -220,20 +210,27 @@ void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ ldr(r0, register_location(start_reg)); // Index of start of capture
__ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
__ sub(r1, r1, r0, SetCC); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ b(eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r3, r3, r1);
+ __ cmp(current_input_offset(), r3);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -242,9 +239,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// r0 - offset of start of capture
// r1 - length of capture
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r0, Operand(r1));
+ __ add(r0, r0, end_of_input_address());
+ __ add(r2, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r2, r2, r1); // Offset by length when matching backwards.
+ }
+ __ add(r1, r0, r1);
// r0 - Address of start of capture.
// r1 - Address of end of capture
@@ -283,6 +283,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r2, end_of_input_address());
+ if (read_backward) {
+ __ ldr(r0, register_location(start_reg)); // Index of start of capture
+ __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r0);
+ __ sub(current_input_offset(), current_input_offset(), r1);
+ }
} else {
DCHECK(mode_ == UC16);
int argument_count = 4;
@@ -305,7 +311,10 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Save length in callee-save register for use on return.
__ mov(r4, Operand(r1));
// Address of current input position.
- __ add(r1, current_input_offset(), Operand(end_of_input_address()));
+ __ add(r1, current_input_offset(), end_of_input_address());
+ if (read_backward) {
+ __ sub(r1, r1, r4);
+ }
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
@@ -319,17 +328,22 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), Operand(r4));
+
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(current_input_offset(), current_input_offset(), r4);
+ } else {
+ __ add(current_input_offset(), current_input_offset(), r4);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerARM::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerARM::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
@@ -337,17 +351,31 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
__ ldr(r0, register_location(start_reg));
__ ldr(r1, register_location(start_reg + 1));
__ sub(r1, r1, r0, SetCC); // Length to check.
- // Succeed on empty capture (including no capture).
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ b(eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ ldr(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r3, r3, r1);
+ __ cmp(current_input_offset(), r3);
+ BranchOrBacktrack(lt, on_no_match);
+ } else {
+ __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match);
+ }
- // Compute pointers to match string and capture string
- __ add(r0, r0, Operand(end_of_input_address()));
- __ add(r2, end_of_input_address(), Operand(current_input_offset()));
- __ add(r1, r1, Operand(r0));
+ // r0 - offset of start of capture
+ // r1 - length of capture
+ __ add(r0, r0, end_of_input_address());
+ __ add(r2, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r2, r2, r1); // Offset by length when matching backwards.
+ }
+ __ add(r1, r0, r1);
Label loop;
__ bind(&loop);
@@ -366,6 +394,13 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
// Move current character position to position after match.
__ sub(current_input_offset(), r2, end_of_input_address());
+ if (read_backward) {
+ __ ldr(r0, register_location(start_reg)); // Index of start of capture
+ __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r0);
+ __ sub(current_input_offset(), current_input_offset(), r1);
+ }
+
__ bind(&fallthrough);
}
@@ -603,7 +638,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(r0, Operand::Zero());
__ push(r0); // Make room for success counter and initialize it to 0.
- __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+ __ push(r0); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -647,7 +682,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
// Store this value in a local variable, for use when clearing
// position registers.
- __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ str(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -751,7 +786,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r0 to initialize registers with its value in the next run.
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -892,10 +927,13 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -983,7 +1021,7 @@ void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ldr(r0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ str(r0, register_location(reg));
}
@@ -1069,8 +1107,15 @@ MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
- BranchOrBacktrack(ge, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
+ BranchOrBacktrack(ge, on_outside_input);
+ } else {
+ __ ldr(r1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ cmp(r0, r1);
+ BranchOrBacktrack(le, on_outside_input);
+ }
}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index c6cff65635..233a98f761 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -34,9 +34,11 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -119,9 +121,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index d440879e26..9948597ca0 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -113,7 +113,8 @@ RegExpMacroAssemblerARM64::RegExpMacroAssemblerARM64(Isolate* isolate,
Zone* zone, Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -210,23 +211,17 @@ void RegExpMacroAssemblerARM64::CheckCharacterGT(uc16 limit,
void RegExpMacroAssemblerARM64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, &not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
+ __ Add(w10, current_input_offset(), Operand(-char_size()));
+ __ Cmp(w10, string_start_minus_one());
BranchOrBacktrack(eq, on_at_start);
- __ Bind(&not_at_start);
}
-void RegExpMacroAssemblerARM64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the input string?
- CompareAndBranchOrBacktrack(start_offset(), 0, ne, on_not_at_start);
- // If we did, are we still at the start of the input string?
- __ Add(x10, input_end(), Operand(current_input_offset(), SXTW));
- __ Cmp(x10, input_start());
+void RegExpMacroAssemblerARM64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ Add(w10, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ __ Cmp(w10, string_start_minus_one());
BranchOrBacktrack(ne, on_not_at_start);
}
@@ -277,9 +272,9 @@ void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(eq, on_equal);
}
+
void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
Register capture_start_offset = w10;
@@ -297,12 +292,21 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Ldp(w11, capture_start_offset, capture_location(start_reg, x10));
}
__ Sub(capture_length, w11, capture_start_offset); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ CompareAndBranch(capture_length, Operand(0), eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ Add(w12, string_start_minus_one(), capture_length);
+ __ Cmp(current_input_offset(), w12);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -322,6 +326,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Add(current_position_address,
input_end(),
Operand(current_input_offset(), SXTW));
+ if (read_backward) {
+ // Offset by length when matching backwards.
+ __ Sub(current_position_address, current_position_address,
+ Operand(capture_length, SXTW));
+ }
Label loop;
__ Bind(&loop);
@@ -355,6 +364,10 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Bind(&success);
// Compute new value of character position after the matched part.
__ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (read_backward) {
+ __ Sub(current_input_offset().X(), current_input_offset().X(),
+ Operand(capture_length, SXTW));
+ }
if (masm_->emit_debug_code()) {
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
@@ -383,6 +396,9 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Mov(w2, capture_length);
// Address of current input position.
__ Add(x1, input_end(), Operand(current_input_offset(), SXTW));
+ if (read_backward) {
+ __ Sub(x1, x1, Operand(capture_length, SXTW));
+ }
// Isolate.
__ Mov(x3, ExternalReference::isolate_address(isolate()));
@@ -400,16 +416,20 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ PopCPURegList(cached_registers);
BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ Add(current_input_offset(), current_input_offset(), capture_length);
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ Sub(current_input_offset(), current_input_offset(), capture_length);
+ } else {
+ __ Add(current_input_offset(), current_input_offset(), capture_length);
+ }
}
__ Bind(&fallthrough);
}
-void RegExpMacroAssemblerARM64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerARM64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Register capture_start_address = x12;
@@ -426,12 +446,21 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
__ Ldp(w11, w10, capture_location(start_reg, x10));
}
__ Sub(capture_length, w11, w10); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Cbz(capture_length, &fallthrough);
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ CompareAndBranch(capture_length, Operand(0), eq, &fallthrough);
// Check that there are enough characters left in the input.
- __ Cmn(capture_length, current_input_offset());
- BranchOrBacktrack(gt, on_no_match);
+ if (read_backward) {
+ __ Add(w12, string_start_minus_one(), capture_length);
+ __ Cmp(current_input_offset(), w12);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ Cmn(capture_length, current_input_offset());
+ BranchOrBacktrack(gt, on_no_match);
+ }
// Compute pointers to match string and capture string
__ Add(capture_start_address, input_end(), Operand(w10, SXTW));
@@ -441,6 +470,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
__ Add(current_position_address,
input_end(),
Operand(current_input_offset(), SXTW));
+ if (read_backward) {
+ // Offset by length when matching backwards.
+ __ Sub(current_position_address, current_position_address,
+ Operand(capture_length, SXTW));
+ }
Label loop;
__ Bind(&loop);
@@ -459,6 +493,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
// Move current character position to position after match.
__ Sub(current_input_offset().X(), current_position_address, input_end());
+ if (read_backward) {
+ __ Sub(current_input_offset().X(), current_input_offset().X(),
+ Operand(capture_length, SXTW));
+ }
+
if (masm_->emit_debug_code()) {
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
@@ -758,14 +797,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// The non-position value is used as a clearing value for the
// capture registers, it corresponds to the position of the first character
// minus one.
- __ Sub(non_position_value(), current_input_offset(), char_size());
- __ Sub(non_position_value(), non_position_value(),
+ __ Sub(string_start_minus_one(), current_input_offset(), char_size());
+ __ Sub(string_start_minus_one(), string_start_minus_one(),
Operand(start_offset(), LSL, (mode_ == UC16) ? 1 : 0));
// We can store this value twice in an X register for initializing
// on-stack registers later.
- __ Orr(twice_non_position_value(),
- non_position_value().X(),
- Operand(non_position_value().X(), LSL, kWRegSizeInBits));
+ __ Orr(twice_non_position_value(), string_start_minus_one().X(),
+ Operand(string_start_minus_one().X(), LSL, kWRegSizeInBits));
// Initialize code pointer register.
__ Mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -1081,11 +1119,14 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacter(int cp_offset,
int characters) {
// TODO(pielan): Make sure long strings are caught before this, and not
// just asserted in debug mode.
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
// Be sane! (And ensure that an int32_t can be used to index the string)
DCHECK(cp_offset < (1<<30));
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1210,7 +1251,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
// If the first capture register is cached in a hardware register but not
// aligned on a 64-bit one, we need to clear the first one specifically.
if ((reg_from < kNumCachedRegisters) && ((reg_from % 2) != 0)) {
- StoreRegister(reg_from, non_position_value());
+ StoreRegister(reg_from, string_start_minus_one());
num_registers--;
reg_from++;
}
@@ -1224,7 +1265,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
}
if ((num_registers % 2) == 1) {
- StoreRegister(reg_from, non_position_value());
+ StoreRegister(reg_from, string_start_minus_one());
num_registers--;
reg_from++;
}
@@ -1301,10 +1342,14 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(
void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
Label* on_outside_input) {
- CompareAndBranchOrBacktrack(current_input_offset(),
- -cp_offset * char_size(),
- ge,
- on_outside_input);
+ if (cp_offset >= 0) {
+ CompareAndBranchOrBacktrack(current_input_offset(),
+ -cp_offset * char_size(), ge, on_outside_input);
+ } else {
+ __ Add(w12, current_input_offset(), Operand(cp_offset * char_size()));
+ __ Cmp(w12, string_start_minus_one());
+ BranchOrBacktrack(le, on_outside_input);
+ }
}
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 0dc519580d..d71f063d00 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -39,9 +39,11 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -190,7 +192,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
- Register non_position_value() { return w24; }
+ Register string_start_minus_one() { return w24; }
// The top 32 bit of this register is used to store this value
// twice. This is used for clearing more than one register at a time.
Register twice_non_position_value() { return x24; }
diff --git a/deps/v8/src/regexp/bytecodes-irregexp.h b/deps/v8/src/regexp/bytecodes-irregexp.h
index d6110a3cb5..2dbfbc0b82 100644
--- a/deps/v8/src/regexp/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/bytecodes-irregexp.h
@@ -57,15 +57,17 @@ V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_REGS_EQUAL, 39, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
-V(CHECK_REGISTER_LT, 40, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_GE, 41, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_EQ_POS, 42, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_AT_START, 43, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_NOT_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_GREEDY, 45, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 46, 8) /* bc8 offset24 addr32 */ \
-V(SET_CURRENT_POSITION_FROM_END, 47, 4) /* bc8 idx24 */
+V(CHECK_NOT_BACK_REF_BACKWARD, 39, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_NOT_REGS_EQUAL, 41, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
+V(CHECK_REGISTER_LT, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
+V(CHECK_REGISTER_GE, 43, 12) /* bc8 reg_idx24 value32 addr32 */ \
+V(CHECK_REGISTER_EQ_POS, 44, 8) /* bc8 reg_idx24 addr32 */ \
+V(CHECK_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
+V(CHECK_NOT_AT_START, 46, 8) /* bc8 offset24 addr32 */ \
+V(CHECK_GREEDY, 47, 8) /* bc8 pad24 addr32 */ \
+V(ADVANCE_CP_AND_GOTO, 48, 8) /* bc8 offset24 addr32 */ \
+V(SET_CURRENT_POSITION_FROM_END, 49, 4) /* bc8 idx24 */
#define DECLARE_BYTECODES(name, code, length) \
static const int BC_##name = code;
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 9e50a10574..6ef0f5fff6 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -53,7 +53,8 @@ namespace internal {
* - backup of caller ebx
* - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
* - register 0 ebp[-4] (only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -80,7 +81,8 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -156,25 +158,16 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+ __ lea(eax, Operand(edi, -char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+void RegExpMacroAssemblerIA32::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -196,26 +189,28 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
__ sub(ebx, edx); // Length of capture.
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or at a point earlier than
- // the start of the capture.
- BranchOrBacktrack(less, on_no_match);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ add(eax, ebx);
+ __ cmp(edi, eax);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -228,6 +223,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(edx, esi); // Start of capture
__ add(edi, esi); // Start of text to match against capture.
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ add(ebx, edi); // End of text to match against capture.
Label loop;
@@ -278,6 +276,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(esp, Immediate(kPointerSize));
// Compute new value of character position after the matched part.
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
} else {
DCHECK(mode_ == UC16);
// Save registers before calling C function.
@@ -304,6 +307,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Found by adding negative string-end offset of current position (edi)
// to end of string.
__ add(edi, esi);
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
@@ -325,16 +331,20 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ or_(eax, eax);
BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, ebx);
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(edi, ebx);
+ } else {
+ __ add(edi, ebx);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerIA32::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerIA32::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
Label fail;
@@ -343,22 +353,33 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
__ sub(eax, edx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(ebx, Operand(ebp, kStringStartMinusOne));
+ __ add(ebx, eax);
+ __ cmp(edi, ebx);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(ebx, edi);
+ __ add(ebx, eax);
+ BranchOrBacktrack(greater, on_no_match);
+ }
// Save register to make it available below.
__ push(backtrack_stackpointer());
// Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
__ add(edx, esi); // Start of capture.
+ __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
+ if (read_backward) {
+ __ sub(ebx, eax); // Offset by length when matching backwards.
+ }
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
@@ -389,6 +410,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference(
// Move current character position to position after match.
__ mov(edi, ecx);
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
@@ -634,7 +660,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -684,7 +710,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
+ __ mov(Operand(ebp, kStringStartMinusOne), eax);
#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
@@ -767,7 +793,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
if (global()) {
- // Restart matching if the regular expression is flagged as global.
+ // Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ inc(Operand(ebp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
@@ -784,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Immediate(num_saved_registers_ * kPointerSize));
// Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -944,10 +970,13 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1031,7 +1060,7 @@ void RegExpMacroAssemblerIA32::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ mov(register_location(reg), eax);
}
@@ -1100,8 +1129,14 @@ Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
void RegExpMacroAssemblerIA32::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmp(edi, -cp_offset * char_size());
+ BranchOrBacktrack(greater_equal, on_outside_input);
+ } else {
+ __ lea(eax, Operand(edi, cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ BranchOrBacktrack(less_equal, on_outside_input);
+ }
}
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 06b9699d01..1ef87eef38 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -116,9 +118,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index afc31a3d57..ea748e4e55 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -6,7 +6,7 @@
#include "src/regexp/interpreter-irregexp.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -270,7 +270,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
break;
BYTECODE(LOAD_CURRENT_CHAR) {
int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos >= subject.length()) {
+ if (pos >= subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
current_char = subject[pos];
@@ -286,7 +286,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
}
BYTECODE(LOAD_2_CURRENT_CHARS) {
int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 2 > subject.length()) {
+ if (pos + 2 > subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
Char next = subject[pos + 1];
@@ -306,7 +306,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
BYTECODE(LOAD_4_CURRENT_CHARS) {
DCHECK(sizeof(Char) == 1);
int pos = current + (insn >> BYTECODE_SHIFT);
- if (pos + 4 > subject.length()) {
+ if (pos + 4 > subject.length() || pos < 0) {
pc = code_base + Load32Aligned(pc + 4);
} else {
Char next1 = subject[pos + 1];
@@ -497,46 +497,59 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
BYTECODE(CHECK_NOT_BACK_REF) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_LENGTH;
- break;
- }
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- int i;
- for (i = 0; i < len; i++) {
- if (subject[from + i] != subject[current + i]) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- }
+ if (from >= 0 && len > 0) {
+ if (current + len > subject.length() ||
+ CompareChars(&subject[from], &subject[current], len) != 0) {
+ pc = code_base + Load32Aligned(pc + 4);
+ break;
}
- if (i < len) break;
current += len;
}
pc += BC_CHECK_NOT_BACK_REF_LENGTH;
break;
}
+ BYTECODE(CHECK_NOT_BACK_REF_BACKWARD) {
+ int from = registers[insn >> BYTECODE_SHIFT];
+ int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ if (from >= 0 && len > 0) {
+ if (current - len < 0 ||
+ CompareChars(&subject[from], &subject[current - len], len) != 0) {
+ pc = code_base + Load32Aligned(pc + 4);
+ break;
+ }
+ current -= len;
+ }
+ pc += BC_CHECK_NOT_BACK_REF_BACKWARD_LENGTH;
+ break;
+ }
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
- if (from < 0 || len <= 0) {
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- break;
+ if (from >= 0 && len > 0) {
+ if (current + len > subject.length() ||
+ !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
+ from, current, len, subject)) {
+ pc = code_base + Load32Aligned(pc + 4);
+ break;
+ }
+ current += len;
}
- if (current + len > subject.length()) {
- pc = code_base + Load32Aligned(pc + 4);
- break;
- } else {
- if (BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
- from, current, len, subject)) {
- current += len;
- pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
- } else {
+ pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
+ break;
+ }
+ BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
+ int from = registers[insn >> BYTECODE_SHIFT];
+ int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
+ if (from >= 0 && len > 0) {
+ if (current - len < 0 ||
+ !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
+ from, current - len, len, subject)) {
pc = code_base + Load32Aligned(pc + 4);
+ break;
}
+ current -= len;
}
+ pc += BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD_LENGTH;
break;
}
BYTECODE(CHECK_AT_START)
@@ -547,7 +560,7 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
}
break;
BYTECODE(CHECK_NOT_AT_START)
- if (current == 0) {
+ if (current + (insn >> BYTECODE_SHIFT) == 0) {
pc += BC_CHECK_NOT_AT_START_LENGTH;
} else {
pc = code_base + Load32Aligned(pc + 4);
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 225ad73c4e..34d20fe781 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -4,7 +4,7 @@
#include "src/regexp/jsregexp.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
@@ -13,12 +13,12 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/ostreams.h"
-#include "src/parser.h"
#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/regexp/regexp-macro-assembler-tracer.h"
+#include "src/regexp/regexp-parser.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
#include "src/splay-tree-inl.h"
@@ -51,16 +51,6 @@
namespace v8 {
namespace internal {
-MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral(
- Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags) {
- // Call the construct code with 2 arguments.
- Handle<Object> argv[] = { pattern, flags };
- return Execution::New(constructor, arraysize(argv), argv);
-}
-
-
MUST_USE_RESULT
static inline MaybeHandle<Object> ThrowRegExpException(
Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text) {
@@ -156,25 +146,21 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader,
- flags.is_multiline(), flags.is_unicode(),
- &parse_result)) {
+ flags & JSRegExp::kMultiline,
+ flags & JSRegExp::kUnicode, &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return ThrowRegExpException(re, pattern, parse_result.error);
}
bool has_been_compiled = false;
- if (parse_result.simple &&
- !flags.is_ignore_case() &&
- !flags.is_sticky() &&
- !HasFewDifferentCharacters(pattern)) {
+ if (parse_result.simple && !(flags & JSRegExp::kIgnoreCase) &&
+ !(flags & JSRegExp::kSticky) && !HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
AtomCompile(re, pattern, flags, pattern);
has_been_compiled = true;
- } else if (parse_result.tree->IsAtom() &&
- !flags.is_ignore_case() &&
- !flags.is_sticky() &&
- parse_result.capture_count == 0) {
+ } else if (parse_result.tree->IsAtom() && !(flags & JSRegExp::kIgnoreCase) &&
+ !(flags & JSRegExp::kSticky) && parse_result.capture_count == 0) {
RegExpAtom* atom = parse_result.tree->AsAtom();
Vector<const uc16> atom_pattern = atom->data();
Handle<String> atom_string;
@@ -385,17 +371,18 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
pattern = String::Flatten(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags.is_multiline(),
- flags.is_unicode(), &compile_data)) {
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader,
+ flags & JSRegExp::kMultiline,
+ flags & JSRegExp::kUnicode, &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(ThrowRegExpException(re, pattern, compile_data.error));
return false;
}
RegExpEngine::CompilationResult result = RegExpEngine::Compile(
- isolate, &zone, &compile_data, flags.is_ignore_case(), flags.is_global(),
- flags.is_multiline(), flags.is_sticky(), pattern, sample_subject,
- is_one_byte);
+ isolate, &zone, &compile_data, flags & JSRegExp::kIgnoreCase,
+ flags & JSRegExp::kGlobal, flags & JSRegExp::kMultiline,
+ flags & JSRegExp::kSticky, pattern, sample_subject, is_one_byte);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
@@ -1002,6 +989,8 @@ class RegExpCompiler {
inline void set_limiting_recursion(bool value) {
limiting_recursion_ = value;
}
+ bool read_backward() { return read_backward_; }
+ void set_read_backward(bool value) { read_backward_ = value; }
FrequencyCollator* frequency_collator() { return &frequency_collator_; }
int current_expansion_factor() { return current_expansion_factor_; }
@@ -1025,6 +1014,7 @@ class RegExpCompiler {
bool reg_exp_too_big_;
bool limiting_recursion_;
bool optimize_;
+ bool read_backward_;
int current_expansion_factor_;
FrequencyCollator frequency_collator_;
Isolate* isolate_;
@@ -1060,6 +1050,7 @@ RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
reg_exp_too_big_(false),
limiting_recursion_(false),
optimize_(FLAG_regexp_optimization),
+ read_backward_(false),
current_expansion_factor_(1),
frequency_collator_(),
isolate_(isolate),
@@ -1224,7 +1215,8 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
int value = 0;
bool absolute = false;
bool clear = false;
- int store_position = -1;
+ static const int kNoStore = kMinInt;
+ int store_position = kNoStore;
// This is a little tricky because we are scanning the actions in reverse
// historical order (newest first).
for (DeferredAction* action = actions_;
@@ -1245,7 +1237,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// we can set undo_action to IGNORE if we know there is no value to
// restore.
undo_action = RESTORE;
- DCHECK_EQ(store_position, -1);
+ DCHECK_EQ(store_position, kNoStore);
DCHECK(!clear);
break;
}
@@ -1253,14 +1245,14 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
if (!absolute) {
value++;
}
- DCHECK_EQ(store_position, -1);
+ DCHECK_EQ(store_position, kNoStore);
DCHECK(!clear);
undo_action = RESTORE;
break;
case ActionNode::STORE_POSITION: {
Trace::DeferredCapture* pc =
static_cast<Trace::DeferredCapture*>(action);
- if (!clear && store_position == -1) {
+ if (!clear && store_position == kNoStore) {
store_position = pc->cp_offset();
}
@@ -1284,7 +1276,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// Since we're scanning in reverse order, if we've already
// set the position we have to ignore historically earlier
// clearing operations.
- if (store_position == -1) {
+ if (store_position == kNoStore) {
clear = true;
}
undo_action = RESTORE;
@@ -1315,7 +1307,7 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
}
// Perform the chronologically last action (or accumulated increment)
// for the register.
- if (store_position != -1) {
+ if (store_position != kNoStore) {
assembler->WriteCurrentPositionToRegister(reg, store_position);
} else if (clear) {
assembler->ClearRegisters(reg, reg);
@@ -2313,6 +2305,7 @@ void AssertionNode::FillInBMInfo(Isolate* isolate, int offset, int budget,
int BackReferenceNode::EatsAtLeast(int still_to_find,
int budget,
bool not_at_start) {
+ if (read_backward()) return 0;
if (budget <= 0) return 0;
return on_success()->EatsAtLeast(still_to_find,
budget - 1,
@@ -2323,6 +2316,7 @@ int BackReferenceNode::EatsAtLeast(int still_to_find,
int TextNode::EatsAtLeast(int still_to_find,
int budget,
bool not_at_start) {
+ if (read_backward()) return 0;
int answer = Length();
if (answer >= still_to_find) return answer;
if (budget <= 0) return answer;
@@ -2333,9 +2327,8 @@ int TextNode::EatsAtLeast(int still_to_find,
}
-int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
- int budget,
- bool not_at_start) {
+int NegativeLookaroundChoiceNode::EatsAtLeast(int still_to_find, int budget,
+ bool not_at_start) {
if (budget <= 0) return 0;
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
@@ -2344,10 +2337,8 @@ int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
}
-void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
- QuickCheckDetails* details,
- RegExpCompiler* compiler,
- int filled_in,
+void NegativeLookaroundChoiceNode::GetQuickCheckDetails(
+ QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in,
bool not_at_start) {
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
@@ -2517,6 +2508,9 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) {
+ // Do not collect any quick check details if the text node reads backward,
+ // since it reads in the opposite direction than we use for quick checks.
+ if (read_backward()) return;
Isolate* isolate = compiler->macro_assembler()->isolate();
DCHECK(characters_filled_in < details->characters());
int characters = details->characters();
@@ -2526,8 +2520,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
- for (int k = 0; k < elms_->length(); k++) {
- TextElement elm = elms_->at(k);
+ for (int k = 0; k < elements()->length(); k++) {
+ TextElement elm = elements()->at(k);
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int i = 0; i < characters && i < quarks.length(); i++) {
@@ -2678,11 +2672,13 @@ void QuickCheckDetails::Clear() {
void QuickCheckDetails::Advance(int by, bool one_byte) {
- DCHECK(by >= 0);
- if (by >= characters_) {
+ if (by >= characters_ || by < 0) {
+ DCHECK_IMPLIES(by < 0, characters_ == 0);
Clear();
return;
}
+ DCHECK_LE(characters_ - by, 4);
+ DCHECK_LE(characters_, 4);
for (int i = 0; i < characters_ - by; i++) {
positions_[i] = positions_[by + i];
}
@@ -2780,9 +2776,9 @@ RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
if (depth < 0) return this;
DCHECK(!info()->visited);
VisitMarker marker(info());
- int element_count = elms_->length();
+ int element_count = elements()->length();
for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
+ TextElement elm = elements()->at(i);
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
@@ -2898,8 +2894,8 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
}
-RegExpNode* NegativeLookaheadChoiceNode::FilterOneByte(int depth,
- bool ignore_case) {
+RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
+ bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -3146,9 +3142,9 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
return;
}
if (trace->at_start() == Trace::UNKNOWN) {
- assembler->CheckNotAtStart(trace->backtrack());
+ assembler->CheckNotAtStart(trace->cp_offset(), trace->backtrack());
Trace at_start_trace = *trace;
- at_start_trace.set_at_start(true);
+ at_start_trace.set_at_start(Trace::TRUE_VALUE);
on_success()->Emit(compiler, &at_start_trace);
return;
}
@@ -3221,10 +3217,11 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
bool one_byte = compiler->one_byte();
Label* backtrack = trace->backtrack();
QuickCheckDetails* quick_check = trace->quick_check_performed();
- int element_count = elms_->length();
+ int element_count = elements()->length();
+ int backward_offset = read_backward() ? -Length() : 0;
for (int i = preloaded ? 0 : element_count - 1; i >= 0; i--) {
- TextElement elm = elms_->at(i);
- int cp_offset = trace->cp_offset() + elm.cp_offset();
+ TextElement elm = elements()->at(i);
+ int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
Vector<const uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
@@ -3252,13 +3249,10 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
break;
}
if (emit_function != NULL) {
- bool bound_checked = emit_function(isolate,
- compiler,
- quarks[j],
- backtrack,
- cp_offset + j,
- *checked_up_to < cp_offset + j,
- preloaded);
+ bool bounds_check = *checked_up_to < cp_offset + j || read_backward();
+ bool bound_checked =
+ emit_function(isolate, compiler, quarks[j], backtrack,
+ cp_offset + j, bounds_check, preloaded);
if (bound_checked) UpdateBoundsCheck(cp_offset + j, checked_up_to);
}
}
@@ -3268,8 +3262,9 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
if (first_element_checked && i == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
RegExpCharacterClass* cc = elm.char_class();
+ bool bounds_check = *checked_up_to < cp_offset || read_backward();
EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
- *checked_up_to < cp_offset, preloaded, zone());
+ bounds_check, preloaded, zone());
UpdateBoundsCheck(cp_offset, checked_up_to);
}
}
@@ -3278,7 +3273,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
int TextNode::Length() {
- TextElement elm = elms_->last();
+ TextElement elm = elements()->last();
DCHECK(elm.cp_offset() >= 0);
return elm.cp_offset() + elm.length();
}
@@ -3347,8 +3342,11 @@ void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
}
Trace successor_trace(*trace);
- successor_trace.set_at_start(false);
- successor_trace.AdvanceCurrentPositionInTrace(Length(), compiler);
+ // If we advance backward, we may end up at the start.
+ successor_trace.AdvanceCurrentPositionInTrace(
+ read_backward() ? -Length() : Length(), compiler);
+ successor_trace.set_at_start(read_backward() ? Trace::UNKNOWN
+ : Trace::FALSE_VALUE);
RecursionCheck rc(compiler);
on_success()->Emit(compiler, &successor_trace);
}
@@ -3360,7 +3358,6 @@ void Trace::InvalidateCurrentCharacter() {
void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
- DCHECK(by > 0);
// We don't have an instruction for shifting the current character register
// down or for using a shifted value for anything so lets just forget that
// we preloaded any characters into it.
@@ -3379,9 +3376,9 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
- int element_count = elms_->length();
+ int element_count = elements()->length();
for (int i = 0; i < element_count; i++) {
- TextElement elm = elms_->at(i);
+ TextElement elm = elements()->at(i);
if (elm.text_type() == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.char_class();
// None of the standard character classes is different in the case
@@ -3397,16 +3394,14 @@ void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
}
-int TextNode::GreedyLoopTextLength() {
- TextElement elm = elms_->at(elms_->length() - 1);
- return elm.cp_offset() + elm.length();
-}
+int TextNode::GreedyLoopTextLength() { return Length(); }
RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) {
- if (elms_->length() != 1) return NULL;
- TextElement elm = elms_->at(0);
+ if (read_backward()) return NULL;
+ if (elements()->length() != 1) return NULL;
+ TextElement elm = elements()->at(0);
if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
@@ -3450,7 +3445,7 @@ int ChoiceNode::GreedyLoopTextLengthForAlternative(
SeqRegExpNode* seq_node = static_cast<SeqRegExpNode*>(node);
node = seq_node->on_success();
}
- return length;
+ return read_backward() ? -length : length;
}
@@ -3881,7 +3876,7 @@ void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
GreedyLoopState::GreedyLoopState(bool not_at_start) {
counter_backtrack_trace_.set_backtrack(&label_);
- if (not_at_start) counter_backtrack_trace_.set_at_start(false);
+ if (not_at_start) counter_backtrack_trace_.set_at_start(Trace::FALSE_VALUE);
}
@@ -4008,7 +4003,7 @@ Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
macro_assembler->PushCurrentPosition();
Label greedy_match_failed;
Trace greedy_match_trace;
- if (not_at_start()) greedy_match_trace.set_at_start(false);
+ if (not_at_start()) greedy_match_trace.set_at_start(Trace::FALSE_VALUE);
greedy_match_trace.set_backtrack(&greedy_match_failed);
Label loop_label;
macro_assembler->Bind(&loop_label);
@@ -4354,11 +4349,14 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
DCHECK_EQ(start_reg_ + 1, end_reg_);
if (compiler->ignore_case()) {
- assembler->CheckNotBackReferenceIgnoreCase(start_reg_,
+ assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
trace->backtrack());
} else {
- assembler->CheckNotBackReference(start_reg_, trace->backtrack());
+ assembler->CheckNotBackReference(start_reg_, read_backward(),
+ trace->backtrack());
}
+ // We are going to advance backward, so we may end up at the start.
+ if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
on_success()->Emit(compiler, trace);
}
@@ -4719,13 +4717,15 @@ RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
ZoneList<TextElement>* elms =
new(compiler->zone()) ZoneList<TextElement>(1, compiler->zone());
elms->Add(TextElement::Atom(this), compiler->zone());
- return new(compiler->zone()) TextNode(elms, on_success);
+ return new (compiler->zone())
+ TextNode(elms, compiler->read_backward(), on_success);
}
RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new(compiler->zone()) TextNode(elements(), on_success);
+ return new (compiler->zone())
+ TextNode(elements(), compiler->read_backward(), on_success);
}
@@ -4822,7 +4822,8 @@ bool RegExpCharacterClass::is_standard(Zone* zone) {
RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new(compiler->zone()) TextNode(this, on_success);
+ return new (compiler->zone())
+ TextNode(this, compiler->read_backward(), on_success);
}
@@ -5204,7 +5205,9 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
GuardedAlternative(body->ToNode(compiler, answer)));
}
answer = alternation;
- if (not_at_start) alternation->set_not_at_start();
+ if (not_at_start && !compiler->read_backward()) {
+ alternation->set_not_at_start();
+ }
}
return answer;
}
@@ -5216,9 +5219,9 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
int reg_ctr = needs_counter
? compiler->AllocateRegister()
: RegExpCompiler::kNoRegister;
- LoopChoiceNode* center = new(zone) LoopChoiceNode(body->min_match() == 0,
- zone);
- if (not_at_start) center->set_not_at_start();
+ LoopChoiceNode* center = new (zone)
+ LoopChoiceNode(body->min_match() == 0, compiler->read_backward(), zone);
+ if (not_at_start && !compiler->read_backward()) center->set_not_at_start();
RegExpNode* loop_return = needs_counter
? static_cast<RegExpNode*>(ActionNode::IncrementRegister(reg_ctr, center))
: static_cast<RegExpNode*>(center);
@@ -5294,14 +5297,13 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
ZoneList<CharacterRange>* newline_ranges =
new(zone) ZoneList<CharacterRange>(3, zone);
CharacterRange::AddClassEscape('n', newline_ranges, zone);
- RegExpCharacterClass* newline_atom = new(zone) RegExpCharacterClass('n');
- TextNode* newline_matcher = new(zone) TextNode(
- newline_atom,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- 0, // No captures inside.
- -1, // Ignored if no captures.
- on_success));
+ RegExpCharacterClass* newline_atom = new (zone) RegExpCharacterClass('n');
+ TextNode* newline_matcher = new (zone) TextNode(
+ newline_atom, false, ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register,
+ 0, // No captures inside.
+ -1, // Ignored if no captures.
+ on_success));
// Create an end-of-input matcher.
RegExpNode* end_of_line = ActionNode::BeginSubmatch(
stack_pointer_register,
@@ -5323,10 +5325,10 @@ RegExpNode* RegExpAssertion::ToNode(RegExpCompiler* compiler,
RegExpNode* RegExpBackReference::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new(compiler->zone())
+ return new (compiler->zone())
BackReferenceNode(RegExpCapture::StartRegister(index()),
RegExpCapture::EndRegister(index()),
- on_success);
+ compiler->read_backward(), on_success);
}
@@ -5336,8 +5338,8 @@ RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
}
-RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
- RegExpNode* on_success) {
+RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
int stack_pointer_register = compiler->AllocateRegister();
int position_register = compiler->AllocateRegister();
@@ -5347,19 +5349,16 @@ RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
int register_start =
register_of_first_capture + capture_from_ * registers_per_capture;
- RegExpNode* success;
+ RegExpNode* result;
+ bool was_reading_backward = compiler->read_backward();
+ compiler->set_read_backward(type() == LOOKBEHIND);
if (is_positive()) {
- RegExpNode* node = ActionNode::BeginSubmatch(
- stack_pointer_register,
- position_register,
- body()->ToNode(
- compiler,
- ActionNode::PositiveSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- on_success)));
- return node;
+ result = ActionNode::BeginSubmatch(
+ stack_pointer_register, position_register,
+ body()->ToNode(compiler,
+ ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register,
+ register_count, register_start, on_success)));
} else {
// We use a ChoiceNode for a negative lookahead because it has most of
// the characteristics we need. It has the body of the lookahead as its
@@ -5374,21 +5373,16 @@ RegExpNode* RegExpLookahead::ToNode(RegExpCompiler* compiler,
Zone* zone = compiler->zone();
GuardedAlternative body_alt(
- body()->ToNode(
- compiler,
- success = new(zone) NegativeSubmatchSuccess(stack_pointer_register,
- position_register,
- register_count,
- register_start,
- zone)));
- ChoiceNode* choice_node =
- new(zone) NegativeLookaheadChoiceNode(body_alt,
- GuardedAlternative(on_success),
- zone);
- return ActionNode::BeginSubmatch(stack_pointer_register,
- position_register,
- choice_node);
- }
+ body()->ToNode(compiler, new (zone) NegativeSubmatchSuccess(
+ stack_pointer_register, position_register,
+ register_count, register_start, zone)));
+ ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
+ body_alt, GuardedAlternative(on_success), zone);
+ result = ActionNode::BeginSubmatch(stack_pointer_register,
+ position_register, choice_node);
+ }
+ compiler->set_read_backward(was_reading_backward);
+ return result;
}
@@ -5402,8 +5396,10 @@ RegExpNode* RegExpCapture::ToNode(RegExpTree* body,
int index,
RegExpCompiler* compiler,
RegExpNode* on_success) {
+ DCHECK_NOT_NULL(body);
int start_reg = RegExpCapture::StartRegister(index);
int end_reg = RegExpCapture::EndRegister(index);
+ if (compiler->read_backward()) std::swap(start_reg, end_reg);
RegExpNode* store_end = ActionNode::StorePosition(end_reg, true, on_success);
RegExpNode* body_node = body->ToNode(compiler, store_end);
return ActionNode::StorePosition(start_reg, true, body_node);
@@ -5414,8 +5410,14 @@ RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
ZoneList<RegExpTree*>* children = nodes();
RegExpNode* current = on_success;
- for (int i = children->length() - 1; i >= 0; i--) {
- current = children->at(i)->ToNode(compiler, current);
+ if (compiler->read_backward()) {
+ for (int i = 0; i < children->length(); i++) {
+ current = children->at(i)->ToNode(compiler, current);
+ }
+ } else {
+ for (int i = children->length() - 1; i >= 0; i--) {
+ current = children->at(i)->ToNode(compiler, current);
+ }
}
return current;
}
@@ -6291,22 +6293,17 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (!is_start_anchored && !is_sticky) {
// Add a .*? at the beginning, outside the body capture, unless
// this expression is anchored at the beginning or sticky.
- RegExpNode* loop_node =
- RegExpQuantifier::ToNode(0,
- RegExpTree::kInfinity,
- false,
- new(zone) RegExpCharacterClass('*'),
- &compiler,
- captured_body,
- data->contains_anchor);
+ RegExpNode* loop_node = RegExpQuantifier::ToNode(
+ 0, RegExpTree::kInfinity, false, new (zone) RegExpCharacterClass('*'),
+ &compiler, captured_body, data->contains_anchor);
if (data->contains_anchor) {
// Unroll loop once, to take care of the case that might start
// at the start of input.
ChoiceNode* first_step_node = new(zone) ChoiceNode(2, zone);
first_step_node->AddAlternative(GuardedAlternative(captured_body));
- first_step_node->AddAlternative(GuardedAlternative(
- new(zone) TextNode(new(zone) RegExpCharacterClass('*'), loop_node)));
+ first_step_node->AddAlternative(GuardedAlternative(new (zone) TextNode(
+ new (zone) RegExpCharacterClass('*'), false, loop_node)));
node = first_step_node;
} else {
node = loop_node;
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 537bdff8e2..0ad4b79c87 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
+#include "src/regexp/regexp-ast.h"
namespace v8 {
namespace internal {
@@ -29,13 +30,6 @@ class RegExpImpl {
#endif
}
- // Creates a regular expression literal in the old space.
- // This function calls the garbage collector if necessary.
- MUST_USE_RESULT static MaybeHandle<Object> CreateRegExpLiteral(
- Handle<JSFunction> constructor,
- Handle<String> pattern,
- Handle<String> flags);
-
// Returns a string representation of a regular expression.
// Implements RegExp.prototype.toString, see ECMA-262 section 15.10.6.4.
// This function calls the garbage collector if necessary.
@@ -233,63 +227,6 @@ enum ElementInSetsRelation {
};
-// Represents code units in the range from from_ to to_, both ends are
-// inclusive.
-class CharacterRange {
- public:
- CharacterRange() : from_(0), to_(0) { }
- // For compatibility with the CHECK_OK macro
- CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT
- CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
- static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
- Zone* zone);
- static Vector<const int> GetWordBounds();
- static inline CharacterRange Singleton(uc16 value) {
- return CharacterRange(value, value);
- }
- static inline CharacterRange Range(uc16 from, uc16 to) {
- DCHECK(from <= to);
- return CharacterRange(from, to);
- }
- static inline CharacterRange Everything() {
- return CharacterRange(0, 0xFFFF);
- }
- bool Contains(uc16 i) { return from_ <= i && i <= to_; }
- uc16 from() const { return from_; }
- void set_from(uc16 value) { from_ = value; }
- uc16 to() const { return to_; }
- void set_to(uc16 value) { to_ = value; }
- bool is_valid() { return from_ <= to_; }
- bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
- bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(Isolate* isolate, Zone* zone,
- ZoneList<CharacterRange>* ranges, bool is_one_byte);
- static void Split(ZoneList<CharacterRange>* base,
- Vector<const int> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone);
- // Whether a range list is in canonical form: Ranges ordered by from value,
- // and ranges non-overlapping and non-adjacent.
- static bool IsCanonical(ZoneList<CharacterRange>* ranges);
- // Convert range list to canonical form. The characters covered by the ranges
- // will still be the same, but no character is in more than one range, and
- // adjacent ranges are merged. The resulting list may be shorter than the
- // original, but cannot be longer.
- static void Canonicalize(ZoneList<CharacterRange>* ranges);
- // Negate the contents of a character range in canonical form.
- static void Negate(ZoneList<CharacterRange>* src,
- ZoneList<CharacterRange>* dst,
- Zone* zone);
- static const int kStartMarker = (1 << 24);
- static const int kPayloadMask = (1 << 24) - 1;
-
- private:
- uc16 from_;
- uc16 to_;
-};
-
-
// A set of unsigned integers that behaves especially well on small
// integers (< 32). May do zone-allocation.
class OutSet: public ZoneObject {
@@ -387,63 +324,6 @@ class DispatchTable : public ZoneObject {
VISIT(Text)
-#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT) \
- VISIT(Disjunction) \
- VISIT(Alternative) \
- VISIT(Assertion) \
- VISIT(CharacterClass) \
- VISIT(Atom) \
- VISIT(Quantifier) \
- VISIT(Capture) \
- VISIT(Lookahead) \
- VISIT(BackReference) \
- VISIT(Empty) \
- VISIT(Text)
-
-
-#define FORWARD_DECLARE(Name) class RegExp##Name;
-FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
-#undef FORWARD_DECLARE
-
-
-class TextElement final BASE_EMBEDDED {
- public:
- enum TextType {
- ATOM,
- CHAR_CLASS
- };
-
- static TextElement Atom(RegExpAtom* atom);
- static TextElement CharClass(RegExpCharacterClass* char_class);
-
- int cp_offset() const { return cp_offset_; }
- void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
- int length() const;
-
- TextType text_type() const { return text_type_; }
-
- RegExpTree* tree() const { return tree_; }
-
- RegExpAtom* atom() const {
- DCHECK(text_type() == ATOM);
- return reinterpret_cast<RegExpAtom*>(tree());
- }
-
- RegExpCharacterClass* char_class() const {
- DCHECK(text_type() == CHAR_CLASS);
- return reinterpret_cast<RegExpCharacterClass*>(tree());
- }
-
- private:
- TextElement(TextType text_type, RegExpTree* tree)
- : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
-
- int cp_offset_;
- TextType text_type_;
- RegExpTree* tree_;
-};
-
-
class Trace;
struct PreloadState;
class GreedyLoopState;
@@ -603,7 +483,7 @@ class RegExpNode: public ZoneObject {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start) = 0;
- static const int kNodeIsTooComplexForGreedyLoops = -1;
+ static const int kNodeIsTooComplexForGreedyLoops = kMinInt;
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
// Only returns the successor for a text node of length 1 that matches any
// character and that has no guards on it.
@@ -695,33 +575,6 @@ class RegExpNode: public ZoneObject {
};
-// A simple closed interval.
-class Interval {
- public:
- Interval() : from_(kNone), to_(kNone) { }
- Interval(int from, int to) : from_(from), to_(to) { }
- Interval Union(Interval that) {
- if (that.from_ == kNone)
- return *this;
- else if (from_ == kNone)
- return that;
- else
- return Interval(Min(from_, that.from_), Max(to_, that.to_));
- }
- bool Contains(int value) {
- return (from_ <= value) && (value <= to_);
- }
- bool is_empty() { return from_ == kNone; }
- int from() const { return from_; }
- int to() const { return to_; }
- static Interval Empty() { return Interval(); }
- static const int kNone = -1;
- private:
- int from_;
- int to_;
-};
-
-
class SeqRegExpNode: public RegExpNode {
public:
explicit SeqRegExpNode(RegExpNode* on_success)
@@ -827,14 +680,14 @@ class ActionNode: public SeqRegExpNode {
class TextNode: public SeqRegExpNode {
public:
- TextNode(ZoneList<TextElement>* elms,
+ TextNode(ZoneList<TextElement>* elms, bool read_backward,
RegExpNode* on_success)
- : SeqRegExpNode(on_success),
- elms_(elms) { }
- TextNode(RegExpCharacterClass* that,
+ : SeqRegExpNode(on_success), elms_(elms), read_backward_(read_backward) {}
+ TextNode(RegExpCharacterClass* that, bool read_backward,
RegExpNode* on_success)
: SeqRegExpNode(on_success),
- elms_(new(zone()) ZoneList<TextElement>(1, zone())) {
+ elms_(new (zone()) ZoneList<TextElement>(1, zone())),
+ read_backward_(read_backward) {
elms_->Add(TextElement::CharClass(that), zone());
}
virtual void Accept(NodeVisitor* visitor);
@@ -845,6 +698,7 @@ class TextNode: public SeqRegExpNode {
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
+ bool read_backward() { return read_backward_; }
void MakeCaseIndependent(Isolate* isolate, bool is_one_byte);
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
@@ -873,6 +727,7 @@ class TextNode: public SeqRegExpNode {
int* checked_up_to);
int Length();
ZoneList<TextElement>* elms_;
+ bool read_backward_;
};
@@ -925,15 +780,16 @@ class AssertionNode: public SeqRegExpNode {
class BackReferenceNode: public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg,
- int end_reg,
+ BackReferenceNode(int start_reg, int end_reg, bool read_backward,
RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
- end_reg_(end_reg) { }
+ end_reg_(end_reg),
+ read_backward_(read_backward) {}
virtual void Accept(NodeVisitor* visitor);
int start_register() { return start_reg_; }
int end_register() { return end_reg_; }
+ bool read_backward() { return read_backward_; }
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
virtual int EatsAtLeast(int still_to_find,
int recursion_depth,
@@ -950,6 +806,7 @@ class BackReferenceNode: public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
+ bool read_backward_;
};
@@ -1074,6 +931,7 @@ class ChoiceNode: public RegExpNode {
return true;
}
virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
+ virtual bool read_backward() { return false; }
protected:
int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1116,11 +974,11 @@ class ChoiceNode: public RegExpNode {
};
-class NegativeLookaheadChoiceNode: public ChoiceNode {
+class NegativeLookaroundChoiceNode : public ChoiceNode {
public:
- explicit NegativeLookaheadChoiceNode(GuardedAlternative this_must_fail,
- GuardedAlternative then_do_this,
- Zone* zone)
+ explicit NegativeLookaroundChoiceNode(GuardedAlternative this_must_fail,
+ GuardedAlternative then_do_this,
+ Zone* zone)
: ChoiceNode(2, zone) {
AddAlternative(this_must_fail);
AddAlternative(then_do_this);
@@ -1150,12 +1008,12 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
class LoopChoiceNode: public ChoiceNode {
public:
- explicit LoopChoiceNode(bool body_can_be_zero_length, Zone* zone)
+ LoopChoiceNode(bool body_can_be_zero_length, bool read_backward, Zone* zone)
: ChoiceNode(2, zone),
loop_node_(NULL),
continue_node_(NULL),
- body_can_be_zero_length_(body_can_be_zero_length)
- { }
+ body_can_be_zero_length_(body_can_be_zero_length),
+ read_backward_(read_backward) {}
void AddLoopAlternative(GuardedAlternative alt);
void AddContinueAlternative(GuardedAlternative alt);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -1169,6 +1027,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
+ virtual bool read_backward() { return read_backward_; }
virtual void Accept(NodeVisitor* visitor);
virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
@@ -1183,6 +1042,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* loop_node_;
RegExpNode* continue_node_;
bool body_can_be_zero_length_;
+ bool read_backward_;
};
@@ -1438,9 +1298,7 @@ class Trace {
at_start_ == UNKNOWN;
}
TriBool at_start() { return at_start_; }
- void set_at_start(bool at_start) {
- at_start_ = at_start ? TRUE_VALUE : FALSE_VALUE;
- }
+ void set_at_start(TriBool at_start) { at_start_ = at_start; }
Label* backtrack() { return backtrack_; }
Label* loop_label() { return loop_label_; }
RegExpNode* stop_node() { return stop_node_; }
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 77f09917c0..9c59328ed1 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -97,7 +97,8 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -181,26 +182,17 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
-
- // If we did, are we still at the start of the input?
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+ __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(a0, current_input_offset(), Operand(-char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
- // If we did, are we still at the start of the input?
- __ lw(a1, MemOperand(frame_pointer(), kInputStart));
- __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
}
@@ -223,20 +215,26 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ lw(a0, register_location(start_reg)); // Index of start of capture.
__ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
__ Subu(a1, a1, a0); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Addu(t5, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ if (read_backward) {
+ __ lw(t0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(t0, t0, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t0));
+ } else {
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ }
if (mode_ == LATIN1) {
Label success;
@@ -247,6 +245,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a1 - length of capture.
__ Addu(a0, a0, Operand(end_of_input_address()));
__ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Subu(a2, a2, Operand(a1));
+ }
__ Addu(a1, a0, Operand(a1));
// a0 - Address of start of capture.
@@ -285,6 +286,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ Subu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ lw(t0, register_location(start_reg)); // Index of start of capture.
+ __ lw(t5, register_location(start_reg + 1)); // Index of end of capture.
+ __ Addu(current_input_offset(), current_input_offset(), Operand(t0));
+ __ Subu(current_input_offset(), current_input_offset(), Operand(t5));
+ }
} else {
DCHECK(mode_ == UC16);
// Put regexp engine registers on stack.
@@ -313,6 +320,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ mov(s3, a1);
// Address of current input position.
__ Addu(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Subu(a1, a1, Operand(s3));
+ }
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
@@ -330,17 +340,21 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
- // On success, increment position by length of capture.
- __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ Subu(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Addu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerMIPS::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
@@ -348,17 +362,35 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
__ lw(a0, register_location(start_reg));
__ lw(a1, register_location(start_reg + 1));
__ Subu(a1, a1, a0); // Length to check.
- // Succeed on empty capture (including no capture).
- __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Addu(t5, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, le, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ lw(t0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(t0, t0, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t0));
+ } else {
+ __ Addu(t5, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
+ }
- // Compute pointers to match string and capture string.
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
__ Addu(a0, a0, Operand(end_of_input_address()));
__ Addu(a2, end_of_input_address(), Operand(current_input_offset()));
- __ Addu(a1, a1, Operand(a0));
+ if (read_backward) {
+ __ Subu(a2, a2, Operand(a1));
+ }
+ __ Addu(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
Label loop;
__ bind(&loop);
@@ -379,6 +411,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
// Move current character position to position after match.
__ Subu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ lw(t0, register_location(start_reg)); // Index of start of capture.
+ __ lw(t5, register_location(start_reg + 1)); // Index of end of capture.
+ __ Addu(current_input_offset(), current_input_offset(), Operand(t0));
+ __ Subu(current_input_offset(), current_input_offset(), Operand(t5));
+ }
__ bind(&fallthrough);
}
@@ -599,7 +637,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
- __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -642,7 +680,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Subu(a0, a0, t5);
// Store this value in a local variable, for use when clearing
// position registers.
- __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ sw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -751,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -905,10 +943,13 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1016,7 +1057,7 @@ void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ sw(a0, register_location(reg));
}
@@ -1129,10 +1170,14 @@ MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
Label* on_outside_input) {
- BranchOrBacktrack(on_outside_input,
- ge,
- current_input_offset(),
- Operand(-cp_offset * char_size()));
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
}
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index da59546a79..902e2208fe 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -120,9 +122,9 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 869cbc4f2e..5153bd018b 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -61,7 +61,7 @@ namespace internal {
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
+ * string start - 1). Used to initialize capture registers to a
* non-position.
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
@@ -91,7 +91,7 @@ namespace internal {
* - fp[-56] start index (character index of start). kStartIndex
* - fp[-64] void* input_string (location of a handle containing the string). kInputString
* - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
- * - fp[-80] Offset of location before start of input (effectively character kInputStartMinusOne
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
* position -1). Used to initialize capture registers to a
* non-position.
* --------- The following output registers are 32-bit values. ---------
@@ -133,7 +133,8 @@ RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -217,26 +218,17 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
-
- // If we did, are we still at the start of the input?
- __ ld(a1, MemOperand(frame_pointer(), kInputStart));
- __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+ __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(a0, current_input_offset(), Operand(-char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
- BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
- // If we did, are we still at the start of the input?
- __ ld(a1, MemOperand(frame_pointer(), kInputStart));
- __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
}
@@ -259,20 +251,26 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ ld(a0, register_location(start_reg)); // Index of start of capture.
__ ld(a1, register_location(start_reg + 1)); // Index of end of capture.
__ Dsubu(a1, a1, a0); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Daddu(t1, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ if (read_backward) {
+ __ ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Daddu(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
if (mode_ == LATIN1) {
Label success;
@@ -283,6 +281,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a1 - length of capture.
__ Daddu(a0, a0, Operand(end_of_input_address()));
__ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Dsubu(a2, a2, Operand(a1));
+ }
__ Daddu(a1, a0, Operand(a1));
// a0 - Address of start of capture.
@@ -321,6 +322,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ Dsubu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Dsubu(current_input_offset(), current_input_offset(), Operand(a2));
+ }
} else {
DCHECK(mode_ == UC16);
// Put regexp engine registers on stack.
@@ -349,6 +356,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ mov(s3, a1);
// Address of current input position.
__ Daddu(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Dsubu(a1, a1, Operand(s3));
+ }
// Isolate.
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
@@ -367,16 +377,20 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
// On success, increment position by length of capture.
- __ Daddu(current_input_offset(), current_input_offset(), Operand(s3));
+ if (read_backward) {
+ __ Dsubu(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(s3));
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerMIPS::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
@@ -384,16 +398,28 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
__ ld(a0, register_location(start_reg));
__ ld(a1, register_location(start_reg + 1));
__ Dsubu(a1, a1, a0); // Length to check.
- // Succeed on empty capture (including no capture).
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
- __ Daddu(t1, a1, current_input_offset());
- // Check that there are enough characters left in the input.
- BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ if (read_backward) {
+ __ ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Daddu(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
// Compute pointers to match string and capture string.
__ Daddu(a0, a0, Operand(end_of_input_address()));
__ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Dsubu(a2, a2, Operand(a1));
+ }
__ Daddu(a1, a1, Operand(a0));
Label loop;
@@ -415,6 +441,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(
// Move current character position to position after match.
__ Dsubu(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Daddu(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Dsubu(current_input_offset(), current_input_offset(), Operand(a2));
+ }
__ bind(&fallthrough);
}
@@ -644,7 +676,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Daddu(frame_pointer(), sp, Operand(8 * kPointerSize));
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
- __ push(a0); // Make room for "position - 1" constant (value irrelevant).
+ __ push(a0); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -687,7 +719,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Dsubu(a0, a0, t1);
// Store this value in a local variable, for use when clearing
// position registers.
- __ sd(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ sd(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -797,7 +829,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
- __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -951,10 +983,13 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works).
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1062,7 +1097,7 @@ void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ sd(a0, register_location(reg));
}
@@ -1175,10 +1210,14 @@ MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
Label* on_outside_input) {
- BranchOrBacktrack(on_outside_input,
- ge,
- current_input_offset(),
- Operand(-cp_offset * char_size()));
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
}
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 265bf773eb..9a8ca179d5 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -125,9 +127,9 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
#elif defined(MIPS_ABI_O32)
// Offsets from frame_pointer() of function parameters and stored registers.
@@ -158,9 +160,9 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
#else
# error "undefined MIPS ABI"
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 03f9741147..f3ddf7bf98 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -60,7 +60,7 @@ namespace internal {
* - fp[-32] void* input_string (location of a handle containing the string).
* - fp[-36] success counter (only for global regexps to count matches).
* - fp[-40] Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a
+ * string start - 1). Used to initialize capture registers to a
* non-position.
* - fp[-44] At start (if 1, we are starting at the start of the
* string, otherwise 0)
@@ -100,7 +100,8 @@ RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -189,30 +190,18 @@ void RegExpMacroAssemblerPPC::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerPPC::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
- __ cmpi(r3, Operand::Zero());
- BranchOrBacktrack(ne, &not_at_start);
-
- // If we did, are we still at the start of the input?
- __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
- __ mr(r0, current_input_offset());
- __ add(r3, end_of_input_address(), r0);
- __ cmp(r4, r3);
+ __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ addi(r3, current_input_offset(), Operand(-char_size()));
+ __ cmp(r3, r4);
BranchOrBacktrack(eq, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerPPC::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
- __ cmpi(r3, Operand::Zero());
- BranchOrBacktrack(ne, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
- __ add(r3, end_of_input_address(), current_input_offset());
+void RegExpMacroAssemblerPPC::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ addi(r3, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
__ cmp(r3, r4);
BranchOrBacktrack(ne, on_not_at_start);
}
@@ -238,20 +227,27 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
- int start_reg, Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
__ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
__ sub(r4, r4, r3, LeaveOE, SetRC); // Length of capture.
- // If length is zero, either the capture is empty or it is not participating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ beq(&fallthrough, cr0);
// Check that there are enough characters left in the input.
- __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
- // __ cmn(r1, Operand(current_input_offset()));
- BranchOrBacktrack(gt, on_no_match, cr0);
+ if (read_backward) {
+ __ LoadP(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r6, r6, r4);
+ __ cmp(current_input_offset(), r6);
+ BranchOrBacktrack(le, on_no_match);
+ } else {
+ __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+ BranchOrBacktrack(gt, on_no_match, cr0);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -262,6 +258,9 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// r4 - length of capture
__ add(r3, r3, end_of_input_address());
__ add(r5, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r5, r5, r4); // Offset by length when matching backwards.
+ }
__ add(r4, r3, r4);
// r3 - Address of start of capture.
@@ -303,6 +302,13 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ bind(&success);
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r5, end_of_input_address());
+ if (read_backward) {
+ __ LoadP(r3, register_location(start_reg)); // Index of start of capture
+ __ LoadP(r4,
+ register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r3);
+ __ sub(current_input_offset(), current_input_offset(), r4);
+ }
} else {
DCHECK(mode_ == UC16);
int argument_count = 4;
@@ -326,6 +332,9 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ mr(r25, r4);
// Address of current input position.
__ add(r4, current_input_offset(), end_of_input_address());
+ if (read_backward) {
+ __ sub(r4, r4, r25);
+ }
// Isolate.
__ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
@@ -339,8 +348,13 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ cmpi(r3, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
- // On success, increment position by length of capture.
- __ add(current_input_offset(), current_input_offset(), r25);
+
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(current_input_offset(), current_input_offset(), r25);
+ } else {
+ __ add(current_input_offset(), current_input_offset(), r25);
+ }
}
__ bind(&fallthrough);
@@ -348,6 +362,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
+ bool read_backward,
Label* on_no_match) {
Label fallthrough;
Label success;
@@ -356,16 +371,30 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
__ LoadP(r3, register_location(start_reg), r0);
__ LoadP(r4, register_location(start_reg + 1), r0);
__ sub(r4, r4, r3, LeaveOE, SetRC); // Length to check.
- // Succeed on empty capture (including no capture).
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ beq(&fallthrough, cr0);
// Check that there are enough characters left in the input.
- __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
- BranchOrBacktrack(gt, on_no_match, cr0);
+ if (read_backward) {
+ __ LoadP(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ add(r6, r6, r4);
+ __ cmp(current_input_offset(), r6);
+ BranchOrBacktrack(lt, on_no_match);
+ } else {
+ __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+ BranchOrBacktrack(gt, on_no_match, cr0);
+ }
- // Compute pointers to match string and capture string
+ // r3 - offset of start of capture
+ // r4 - length of capture
__ add(r3, r3, end_of_input_address());
__ add(r5, end_of_input_address(), current_input_offset());
+ if (read_backward) {
+ __ sub(r5, r5, r4); // Offset by length when matching backwards.
+ }
__ add(r4, r4, r3);
Label loop;
@@ -389,6 +418,13 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
// Move current character position to position after match.
__ sub(current_input_offset(), r5, end_of_input_address());
+ if (read_backward) {
+ __ LoadP(r3, register_location(start_reg)); // Index of start of capture
+ __ LoadP(r4, register_location(start_reg + 1)); // Index of end of capture
+ __ add(current_input_offset(), current_input_offset(), r3);
+ __ sub(current_input_offset(), current_input_offset(), r4);
+ }
+
__ bind(&fallthrough);
}
@@ -639,7 +675,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ addi(frame_pointer(), sp, Operand(8 * kPointerSize));
__ li(r3, Operand::Zero());
__ push(r3); // Make room for success counter and initialize it to 0.
- __ push(r3); // Make room for "position - 1" constant (value is irrelevant)
+ __ push(r3); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -688,7 +724,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ StoreP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ StoreP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -797,7 +833,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ StoreP(r5, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r3 to initialize registers with its value in the next run.
- __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -936,10 +972,13 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1 << 30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1028,7 +1067,7 @@ void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+ __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreP(r3, register_location(reg), r0);
}
@@ -1132,8 +1171,15 @@ MemOperand RegExpMacroAssemblerPPC::register_location(int register_index) {
void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
- BranchOrBacktrack(ge, on_outside_input);
+ if (cp_offset >= 0) {
+ __ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
+ BranchOrBacktrack(ge, on_outside_input);
+ } else {
+ __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ addi(r3, current_input_offset(), Operand(cp_offset * char_size()));
+ __ cmp(r3, r4);
+ BranchOrBacktrack(le, on_outside_input);
+ }
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 04a0e5e416..4d1836fc71 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -34,9 +34,11 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
@@ -112,9 +114,9 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
new file mode 100644
index 0000000000..31c93b114f
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -0,0 +1,337 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ostreams.h"
+#include "src/regexp/regexp-ast.h"
+
+namespace v8 {
+namespace internal {
+
+#define MAKE_ACCEPT(Name) \
+ void* RegExp##Name::Accept(RegExpVisitor* visitor, void* data) { \
+ return visitor->Visit##Name(this, data); \
+ }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ACCEPT)
+#undef MAKE_ACCEPT
+
+#define MAKE_TYPE_CASE(Name) \
+ RegExp##Name* RegExpTree::As##Name() { return NULL; } \
+ bool RegExpTree::Is##Name() { return false; }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
+#undef MAKE_TYPE_CASE
+
+#define MAKE_TYPE_CASE(Name) \
+ RegExp##Name* RegExp##Name::As##Name() { return this; } \
+ bool RegExp##Name::Is##Name() { return true; }
+FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
+#undef MAKE_TYPE_CASE
+
+
+static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
+ Interval result = Interval::Empty();
+ for (int i = 0; i < children->length(); i++)
+ result = result.Union(children->at(i)->CaptureRegisters());
+ return result;
+}
+
+
+Interval RegExpAlternative::CaptureRegisters() {
+ return ListCaptureRegisters(nodes());
+}
+
+
+Interval RegExpDisjunction::CaptureRegisters() {
+ return ListCaptureRegisters(alternatives());
+}
+
+
+Interval RegExpLookaround::CaptureRegisters() {
+ return body()->CaptureRegisters();
+}
+
+
+Interval RegExpCapture::CaptureRegisters() {
+ Interval self(StartRegister(index()), EndRegister(index()));
+ return self.Union(body()->CaptureRegisters());
+}
+
+
+Interval RegExpQuantifier::CaptureRegisters() {
+ return body()->CaptureRegisters();
+}
+
+
+bool RegExpAssertion::IsAnchoredAtStart() {
+ return assertion_type() == RegExpAssertion::START_OF_INPUT;
+}
+
+
+bool RegExpAssertion::IsAnchoredAtEnd() {
+ return assertion_type() == RegExpAssertion::END_OF_INPUT;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtStart() {
+ ZoneList<RegExpTree*>* nodes = this->nodes();
+ for (int i = 0; i < nodes->length(); i++) {
+ RegExpTree* node = nodes->at(i);
+ if (node->IsAnchoredAtStart()) {
+ return true;
+ }
+ if (node->max_match() > 0) {
+ return false;
+ }
+ }
+ return false;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtEnd() {
+ ZoneList<RegExpTree*>* nodes = this->nodes();
+ for (int i = nodes->length() - 1; i >= 0; i--) {
+ RegExpTree* node = nodes->at(i);
+ if (node->IsAnchoredAtEnd()) {
+ return true;
+ }
+ if (node->max_match() > 0) {
+ return false;
+ }
+ }
+ return false;
+}
+
+
+bool RegExpDisjunction::IsAnchoredAtStart() {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ for (int i = 0; i < alternatives->length(); i++) {
+ if (!alternatives->at(i)->IsAnchoredAtStart()) return false;
+ }
+ return true;
+}
+
+
+bool RegExpDisjunction::IsAnchoredAtEnd() {
+ ZoneList<RegExpTree*>* alternatives = this->alternatives();
+ for (int i = 0; i < alternatives->length(); i++) {
+ if (!alternatives->at(i)->IsAnchoredAtEnd()) return false;
+ }
+ return true;
+}
+
+
+bool RegExpLookaround::IsAnchoredAtStart() {
+ return is_positive() && type() == LOOKAHEAD && body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtStart() { return body()->IsAnchoredAtStart(); }
+
+
+bool RegExpCapture::IsAnchoredAtEnd() { return body()->IsAnchoredAtEnd(); }
+
+
+// Convert regular expression trees to a simple sexp representation.
+// This representation should be different from the input grammar
+// in as many cases as possible, to make it more difficult for incorrect
+// parses to look as correct ones which is likely if the input and
+// output formats are alike.
+class RegExpUnparser final : public RegExpVisitor {
+ public:
+ RegExpUnparser(std::ostream& os, Zone* zone) : os_(os), zone_(zone) {}
+ void VisitCharacterRange(CharacterRange that);
+#define MAKE_CASE(Name) void* Visit##Name(RegExp##Name*, void* data) override;
+ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
+#undef MAKE_CASE
+ private:
+ std::ostream& os_;
+ Zone* zone_;
+};
+
+
+void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
+ os_ << "(|";
+ for (int i = 0; i < that->alternatives()->length(); i++) {
+ os_ << " ";
+ that->alternatives()->at(i)->Accept(this, data);
+ }
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
+ os_ << "(:";
+ for (int i = 0; i < that->nodes()->length(); i++) {
+ os_ << " ";
+ that->nodes()->at(i)->Accept(this, data);
+ }
+ os_ << ")";
+ return NULL;
+}
+
+
+void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
+ os_ << AsUC16(that.from());
+ if (!that.IsSingleton()) {
+ os_ << "-" << AsUC16(that.to());
+ }
+}
+
+
+void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
+ void* data) {
+ if (that->is_negated()) os_ << "^";
+ os_ << "[";
+ for (int i = 0; i < that->ranges(zone_)->length(); i++) {
+ if (i > 0) os_ << " ";
+ VisitCharacterRange(that->ranges(zone_)->at(i));
+ }
+ os_ << "]";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
+ switch (that->assertion_type()) {
+ case RegExpAssertion::START_OF_INPUT:
+ os_ << "@^i";
+ break;
+ case RegExpAssertion::END_OF_INPUT:
+ os_ << "@$i";
+ break;
+ case RegExpAssertion::START_OF_LINE:
+ os_ << "@^l";
+ break;
+ case RegExpAssertion::END_OF_LINE:
+ os_ << "@$l";
+ break;
+ case RegExpAssertion::BOUNDARY:
+ os_ << "@b";
+ break;
+ case RegExpAssertion::NON_BOUNDARY:
+ os_ << "@B";
+ break;
+ }
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
+ os_ << "'";
+ Vector<const uc16> chardata = that->data();
+ for (int i = 0; i < chardata.length(); i++) {
+ os_ << AsUC16(chardata[i]);
+ }
+ os_ << "'";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitText(RegExpText* that, void* data) {
+ if (that->elements()->length() == 1) {
+ that->elements()->at(0).tree()->Accept(this, data);
+ } else {
+ os_ << "(!";
+ for (int i = 0; i < that->elements()->length(); i++) {
+ os_ << " ";
+ that->elements()->at(i).tree()->Accept(this, data);
+ }
+ os_ << ")";
+ }
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
+ os_ << "(# " << that->min() << " ";
+ if (that->max() == RegExpTree::kInfinity) {
+ os_ << "- ";
+ } else {
+ os_ << that->max() << " ";
+ }
+ os_ << (that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
+ os_ << "(^ ";
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
+ os_ << "(";
+ os_ << (that->type() == RegExpLookaround::LOOKAHEAD ? "->" : "<-");
+ os_ << (that->is_positive() ? " + " : " - ");
+ that->body()->Accept(this, data);
+ os_ << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
+ void* data) {
+ os_ << "(<- " << that->index() << ")";
+ return NULL;
+}
+
+
+void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
+ os_ << '%';
+ return NULL;
+}
+
+
+std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
+ RegExpUnparser unparser(os, zone);
+ Accept(&unparser, NULL);
+ return os;
+}
+
+
+RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
+ : alternatives_(alternatives) {
+ DCHECK(alternatives->length() > 1);
+ RegExpTree* first_alternative = alternatives->at(0);
+ min_match_ = first_alternative->min_match();
+ max_match_ = first_alternative->max_match();
+ for (int i = 1; i < alternatives->length(); i++) {
+ RegExpTree* alternative = alternatives->at(i);
+ min_match_ = Min(min_match_, alternative->min_match());
+ max_match_ = Max(max_match_, alternative->max_match());
+ }
+}
+
+
+static int IncreaseBy(int previous, int increase) {
+ if (RegExpTree::kInfinity - previous < increase) {
+ return RegExpTree::kInfinity;
+ } else {
+ return previous + increase;
+ }
+}
+
+
+RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
+ : nodes_(nodes) {
+ DCHECK(nodes->length() > 1);
+ min_match_ = 0;
+ max_match_ = 0;
+ for (int i = 0; i < nodes->length(); i++) {
+ RegExpTree* node = nodes->at(i);
+ int node_min_match = node->min_match();
+ min_match_ = IncreaseBy(min_match_, node_min_match);
+ int node_max_match = node->max_match();
+ max_match_ = IncreaseBy(max_match_, node_max_match);
+ }
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
new file mode 100644
index 0000000000..f87778596a
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -0,0 +1,496 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_AST_H_
+#define V8_REGEXP_REGEXP_AST_H_
+
+#include "src/utils.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+#define FOR_EACH_REG_EXP_TREE_TYPE(VISIT) \
+ VISIT(Disjunction) \
+ VISIT(Alternative) \
+ VISIT(Assertion) \
+ VISIT(CharacterClass) \
+ VISIT(Atom) \
+ VISIT(Quantifier) \
+ VISIT(Capture) \
+ VISIT(Lookaround) \
+ VISIT(BackReference) \
+ VISIT(Empty) \
+ VISIT(Text)
+
+
+#define FORWARD_DECLARE(Name) class RegExp##Name;
+FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
+class RegExpCompiler;
+class RegExpNode;
+class RegExpTree;
+
+
+class RegExpVisitor BASE_EMBEDDED {
+ public:
+ virtual ~RegExpVisitor() {}
+#define MAKE_CASE(Name) \
+ virtual void* Visit##Name(RegExp##Name*, void* data) = 0;
+ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
+#undef MAKE_CASE
+};
+
+
+// A simple closed interval.
+class Interval {
+ public:
+ Interval() : from_(kNone), to_(kNone) {}
+ Interval(int from, int to) : from_(from), to_(to) {}
+ Interval Union(Interval that) {
+ if (that.from_ == kNone)
+ return *this;
+ else if (from_ == kNone)
+ return that;
+ else
+ return Interval(Min(from_, that.from_), Max(to_, that.to_));
+ }
+ bool Contains(int value) { return (from_ <= value) && (value <= to_); }
+ bool is_empty() { return from_ == kNone; }
+ int from() const { return from_; }
+ int to() const { return to_; }
+ static Interval Empty() { return Interval(); }
+ static const int kNone = -1;
+
+ private:
+ int from_;
+ int to_;
+};
+
+
+// Represents code units in the range from from_ to to_, both ends are
+// inclusive.
+class CharacterRange {
+ public:
+ CharacterRange() : from_(0), to_(0) {}
+ // For compatibility with the CHECK_OK macro
+ CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT
+ CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) {}
+ static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
+ Zone* zone);
+ static Vector<const int> GetWordBounds();
+ static inline CharacterRange Singleton(uc16 value) {
+ return CharacterRange(value, value);
+ }
+ static inline CharacterRange Range(uc16 from, uc16 to) {
+ DCHECK(from <= to);
+ return CharacterRange(from, to);
+ }
+ static inline CharacterRange Everything() {
+ return CharacterRange(0, 0xFFFF);
+ }
+ bool Contains(uc16 i) { return from_ <= i && i <= to_; }
+ uc16 from() const { return from_; }
+ void set_from(uc16 value) { from_ = value; }
+ uc16 to() const { return to_; }
+ void set_to(uc16 value) { to_ = value; }
+ bool is_valid() { return from_ <= to_; }
+ bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
+ bool IsSingleton() { return (from_ == to_); }
+ void AddCaseEquivalents(Isolate* isolate, Zone* zone,
+ ZoneList<CharacterRange>* ranges, bool is_one_byte);
+ static void Split(ZoneList<CharacterRange>* base, Vector<const int> overlay,
+ ZoneList<CharacterRange>** included,
+ ZoneList<CharacterRange>** excluded, Zone* zone);
+ // Whether a range list is in canonical form: Ranges ordered by from value,
+ // and ranges non-overlapping and non-adjacent.
+ static bool IsCanonical(ZoneList<CharacterRange>* ranges);
+ // Convert range list to canonical form. The characters covered by the ranges
+ // will still be the same, but no character is in more than one range, and
+ // adjacent ranges are merged. The resulting list may be shorter than the
+ // original, but cannot be longer.
+ static void Canonicalize(ZoneList<CharacterRange>* ranges);
+ // Negate the contents of a character range in canonical form.
+ static void Negate(ZoneList<CharacterRange>* src,
+ ZoneList<CharacterRange>* dst, Zone* zone);
+ static const int kStartMarker = (1 << 24);
+ static const int kPayloadMask = (1 << 24) - 1;
+
+ private:
+ uc16 from_;
+ uc16 to_;
+};
+
+
+class CharacterSet final BASE_EMBEDDED {
+ public:
+ explicit CharacterSet(uc16 standard_set_type)
+ : ranges_(NULL), standard_set_type_(standard_set_type) {}
+ explicit CharacterSet(ZoneList<CharacterRange>* ranges)
+ : ranges_(ranges), standard_set_type_(0) {}
+ ZoneList<CharacterRange>* ranges(Zone* zone);
+ uc16 standard_set_type() { return standard_set_type_; }
+ void set_standard_set_type(uc16 special_set_type) {
+ standard_set_type_ = special_set_type;
+ }
+ bool is_standard() { return standard_set_type_ != 0; }
+ void Canonicalize();
+
+ private:
+ ZoneList<CharacterRange>* ranges_;
+ // If non-zero, the value represents a standard set (e.g., all whitespace
+ // characters) without having to expand the ranges.
+ uc16 standard_set_type_;
+};
+
+
+class TextElement final BASE_EMBEDDED {
+ public:
+ enum TextType { ATOM, CHAR_CLASS };
+
+ static TextElement Atom(RegExpAtom* atom);
+ static TextElement CharClass(RegExpCharacterClass* char_class);
+
+ int cp_offset() const { return cp_offset_; }
+ void set_cp_offset(int cp_offset) { cp_offset_ = cp_offset; }
+ int length() const;
+
+ TextType text_type() const { return text_type_; }
+
+ RegExpTree* tree() const { return tree_; }
+
+ RegExpAtom* atom() const {
+ DCHECK(text_type() == ATOM);
+ return reinterpret_cast<RegExpAtom*>(tree());
+ }
+
+ RegExpCharacterClass* char_class() const {
+ DCHECK(text_type() == CHAR_CLASS);
+ return reinterpret_cast<RegExpCharacterClass*>(tree());
+ }
+
+ private:
+ TextElement(TextType text_type, RegExpTree* tree)
+ : cp_offset_(-1), text_type_(text_type), tree_(tree) {}
+
+ int cp_offset_;
+ TextType text_type_;
+ RegExpTree* tree_;
+};
+
+
+class RegExpTree : public ZoneObject {
+ public:
+ static const int kInfinity = kMaxInt;
+ virtual ~RegExpTree() {}
+ virtual void* Accept(RegExpVisitor* visitor, void* data) = 0;
+ virtual RegExpNode* ToNode(RegExpCompiler* compiler,
+ RegExpNode* on_success) = 0;
+ virtual bool IsTextElement() { return false; }
+ virtual bool IsAnchoredAtStart() { return false; }
+ virtual bool IsAnchoredAtEnd() { return false; }
+ virtual int min_match() = 0;
+ virtual int max_match() = 0;
+ // Returns the interval of registers used for captures within this
+ // expression.
+ virtual Interval CaptureRegisters() { return Interval::Empty(); }
+ virtual void AppendToText(RegExpText* text, Zone* zone);
+ std::ostream& Print(std::ostream& os, Zone* zone); // NOLINT
+#define MAKE_ASTYPE(Name) \
+ virtual RegExp##Name* As##Name(); \
+ virtual bool Is##Name();
+ FOR_EACH_REG_EXP_TREE_TYPE(MAKE_ASTYPE)
+#undef MAKE_ASTYPE
+};
+
+
+class RegExpDisjunction final : public RegExpTree {
+ public:
+ explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpDisjunction* AsDisjunction() override;
+ Interval CaptureRegisters() override;
+ bool IsDisjunction() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+ ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
+
+ private:
+ bool SortConsecutiveAtoms(RegExpCompiler* compiler);
+ void RationalizeConsecutiveAtoms(RegExpCompiler* compiler);
+ void FixSingleCharacterDisjunctions(RegExpCompiler* compiler);
+ ZoneList<RegExpTree*>* alternatives_;
+ int min_match_;
+ int max_match_;
+};
+
+
+class RegExpAlternative final : public RegExpTree {
+ public:
+ explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpAlternative* AsAlternative() override;
+ Interval CaptureRegisters() override;
+ bool IsAlternative() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+ ZoneList<RegExpTree*>* nodes() { return nodes_; }
+
+ private:
+ ZoneList<RegExpTree*>* nodes_;
+ int min_match_;
+ int max_match_;
+};
+
+
+class RegExpAssertion final : public RegExpTree {
+ public:
+ enum AssertionType {
+ START_OF_LINE,
+ START_OF_INPUT,
+ END_OF_LINE,
+ END_OF_INPUT,
+ BOUNDARY,
+ NON_BOUNDARY
+ };
+ explicit RegExpAssertion(AssertionType type) : assertion_type_(type) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpAssertion* AsAssertion() override;
+ bool IsAssertion() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ int min_match() override { return 0; }
+ int max_match() override { return 0; }
+ AssertionType assertion_type() { return assertion_type_; }
+
+ private:
+ AssertionType assertion_type_;
+};
+
+
+class RegExpCharacterClass final : public RegExpTree {
+ public:
+ RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
+ : set_(ranges), is_negated_(is_negated) {}
+ explicit RegExpCharacterClass(uc16 type) : set_(type), is_negated_(false) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpCharacterClass* AsCharacterClass() override;
+ bool IsCharacterClass() override;
+ bool IsTextElement() override { return true; }
+ int min_match() override { return 1; }
+ int max_match() override { return 1; }
+ void AppendToText(RegExpText* text, Zone* zone) override;
+ CharacterSet character_set() { return set_; }
+ // TODO(lrn): Remove need for complex version if is_standard that
+ // recognizes a mangled standard set and just do { return set_.is_special(); }
+ bool is_standard(Zone* zone);
+ // Returns a value representing the standard character set if is_standard()
+ // returns true.
+ // Currently used values are:
+ // s : unicode whitespace
+ // S : unicode non-whitespace
+ // w : ASCII word character (digit, letter, underscore)
+ // W : non-ASCII word character
+ // d : ASCII digit
+ // D : non-ASCII digit
+ // . : non-unicode non-newline
+ // * : All characters
+ uc16 standard_type() { return set_.standard_set_type(); }
+ ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
+ bool is_negated() { return is_negated_; }
+
+ private:
+ CharacterSet set_;
+ bool is_negated_;
+};
+
+
+class RegExpAtom final : public RegExpTree {
+ public:
+ explicit RegExpAtom(Vector<const uc16> data) : data_(data) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpAtom* AsAtom() override;
+ bool IsAtom() override;
+ bool IsTextElement() override { return true; }
+ int min_match() override { return data_.length(); }
+ int max_match() override { return data_.length(); }
+ void AppendToText(RegExpText* text, Zone* zone) override;
+ Vector<const uc16> data() { return data_; }
+ int length() { return data_.length(); }
+
+ private:
+ Vector<const uc16> data_;
+};
+
+
+class RegExpText final : public RegExpTree {
+ public:
+ explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpText* AsText() override;
+ bool IsText() override;
+ bool IsTextElement() override { return true; }
+ int min_match() override { return length_; }
+ int max_match() override { return length_; }
+ void AppendToText(RegExpText* text, Zone* zone) override;
+ void AddElement(TextElement elm, Zone* zone) {
+ elements_.Add(elm, zone);
+ length_ += elm.length();
+ }
+ ZoneList<TextElement>* elements() { return &elements_; }
+
+ private:
+ ZoneList<TextElement> elements_;
+ int length_;
+};
+
+
+class RegExpQuantifier final : public RegExpTree {
+ public:
+ enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
+ RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
+ : body_(body),
+ min_(min),
+ max_(max),
+ min_match_(min * body->min_match()),
+ quantifier_type_(type) {
+ if (max > 0 && body->max_match() > kInfinity / max) {
+ max_match_ = kInfinity;
+ } else {
+ max_match_ = max * body->max_match();
+ }
+ }
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ static RegExpNode* ToNode(int min, int max, bool is_greedy, RegExpTree* body,
+ RegExpCompiler* compiler, RegExpNode* on_success,
+ bool not_at_start = false);
+ RegExpQuantifier* AsQuantifier() override;
+ Interval CaptureRegisters() override;
+ bool IsQuantifier() override;
+ int min_match() override { return min_match_; }
+ int max_match() override { return max_match_; }
+ int min() { return min_; }
+ int max() { return max_; }
+ bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
+ bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
+ bool is_greedy() { return quantifier_type_ == GREEDY; }
+ RegExpTree* body() { return body_; }
+
+ private:
+ RegExpTree* body_;
+ int min_;
+ int max_;
+ int min_match_;
+ int max_match_;
+ QuantifierType quantifier_type_;
+};
+
+
+class RegExpCapture final : public RegExpTree {
+ public:
+ explicit RegExpCapture(int index) : body_(NULL), index_(index) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ static RegExpNode* ToNode(RegExpTree* body, int index,
+ RegExpCompiler* compiler, RegExpNode* on_success);
+ RegExpCapture* AsCapture() override;
+ bool IsAnchoredAtStart() override;
+ bool IsAnchoredAtEnd() override;
+ Interval CaptureRegisters() override;
+ bool IsCapture() override;
+ int min_match() override { return body_->min_match(); }
+ int max_match() override { return body_->max_match(); }
+ RegExpTree* body() { return body_; }
+ void set_body(RegExpTree* body) { body_ = body; }
+ int index() { return index_; }
+ static int StartRegister(int index) { return index * 2; }
+ static int EndRegister(int index) { return index * 2 + 1; }
+
+ private:
+ RegExpTree* body_;
+ int index_;
+};
+
+
+class RegExpLookaround final : public RegExpTree {
+ public:
+ enum Type { LOOKAHEAD, LOOKBEHIND };
+
+ RegExpLookaround(RegExpTree* body, bool is_positive, int capture_count,
+ int capture_from, Type type)
+ : body_(body),
+ is_positive_(is_positive),
+ capture_count_(capture_count),
+ capture_from_(capture_from),
+ type_(type) {}
+
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpLookaround* AsLookaround() override;
+ Interval CaptureRegisters() override;
+ bool IsLookaround() override;
+ bool IsAnchoredAtStart() override;
+ int min_match() override { return 0; }
+ int max_match() override { return 0; }
+ RegExpTree* body() { return body_; }
+ bool is_positive() { return is_positive_; }
+ int capture_count() { return capture_count_; }
+ int capture_from() { return capture_from_; }
+ Type type() { return type_; }
+
+ private:
+ RegExpTree* body_;
+ bool is_positive_;
+ int capture_count_;
+ int capture_from_;
+ Type type_;
+};
+
+
+class RegExpBackReference final : public RegExpTree {
+ public:
+ explicit RegExpBackReference(RegExpCapture* capture) : capture_(capture) {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpBackReference* AsBackReference() override;
+ bool IsBackReference() override;
+ int min_match() override { return 0; }
+ // The back reference may be recursive, e.g. /(\2)(\1)/. To avoid infinite
+ // recursion, we give up. Ignorance is bliss.
+ int max_match() override { return kInfinity; }
+ int index() { return capture_->index(); }
+ RegExpCapture* capture() { return capture_; }
+
+ private:
+ RegExpCapture* capture_;
+};
+
+
+class RegExpEmpty final : public RegExpTree {
+ public:
+ RegExpEmpty() {}
+ void* Accept(RegExpVisitor* visitor, void* data) override;
+ RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
+ RegExpEmpty* AsEmpty() override;
+ bool IsEmpty() override;
+ int min_match() override { return 0; }
+ int max_match() override { return 0; }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_AST_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index 6f176cd12c..4d0b1bc0a7 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index ca567c9bda..751ee441c8 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -4,7 +4,7 @@
#include "src/regexp/regexp-macro-assembler-irregexp.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
@@ -273,8 +273,9 @@ void RegExpMacroAssemblerIrregexp::CheckAtStart(Label* on_at_start) {
}
-void RegExpMacroAssemblerIrregexp::CheckNotAtStart(Label* on_not_at_start) {
- Emit(BC_CHECK_NOT_AT_START, 0);
+void RegExpMacroAssemblerIrregexp::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ Emit(BC_CHECK_NOT_AT_START, cp_offset);
EmitOrLink(on_not_at_start);
}
@@ -370,20 +371,23 @@ void RegExpMacroAssemblerIrregexp::CheckBitInTable(
void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
+ bool read_backward,
Label* on_not_equal) {
DCHECK(start_reg >= 0);
DCHECK(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF, start_reg);
+ Emit(read_backward ? BC_CHECK_NOT_BACK_REF_BACKWARD : BC_CHECK_NOT_BACK_REF,
+ start_reg);
EmitOrLink(on_not_equal);
}
void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_not_equal) {
+ int start_reg, bool read_backward, Label* on_not_equal) {
DCHECK(start_reg >= 0);
DCHECK(start_reg <= kMaxRegister);
- Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg);
+ Emit(read_backward ? BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD
+ : BC_CHECK_NOT_BACK_REF_NO_CASE,
+ start_reg);
EmitOrLink(on_not_equal);
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
index bbfe5203d9..f1ace63a74 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -66,7 +66,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckAtStart(Label* on_at_start);
- virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
@@ -82,8 +82,10 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 2abe55588e..5301ead69b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -4,7 +4,7 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
namespace v8 {
namespace internal {
@@ -13,9 +13,9 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
Isolate* isolate, RegExpMacroAssembler* assembler)
: RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) {
unsigned int type = assembler->Implementation();
- DCHECK(type < 6);
- const char* impl_names[] = {"IA32", "ARM", "ARM64",
- "MIPS", "X64", "X87", "Bytecode"};
+ DCHECK(type < 8);
+ const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS",
+ "PPC", "X64", "X87", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
@@ -241,9 +241,11 @@ void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
}
-void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
- PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
- assembler_->CheckNotAtStart(on_not_at_start);
+void RegExpMacroAssemblerTracer::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ PrintF(" CheckNotAtStart(cp_offset=%d, label[%08x]);\n", cp_offset,
+ LabelToInt(on_not_at_start));
+ assembler_->CheckNotAtStart(cp_offset, on_not_at_start);
}
@@ -349,19 +351,29 @@ void RegExpMacroAssemblerTracer::CheckBitInTable(
void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
+ bool read_backward,
Label* on_no_match) {
- PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
- LabelToInt(on_no_match));
- assembler_->CheckNotBackReference(start_reg, on_no_match);
+ PrintF(" CheckNotBackReference(register=%d, %s, label[%08x]);\n", start_reg,
+ read_backward ? "backward" : "forward", LabelToInt(on_no_match));
+ assembler_->CheckNotBackReference(start_reg, read_backward, on_no_match);
}
void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
- start_reg, LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
+ int start_reg, bool read_backward, Label* on_no_match) {
+ PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s, label[%08x]);\n",
+ start_reg, read_backward ? "backward" : "forward",
+ LabelToInt(on_no_match));
+ assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward,
+ on_no_match);
+}
+
+
+void RegExpMacroAssemblerTracer::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ PrintF(" CheckPosition(cp_offset=%d, label[%08x]);\n", cp_offset,
+ LabelToInt(on_outside_input));
+ assembler_->CheckPosition(cp_offset, on_outside_input);
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index f9364195fa..77377aac31 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -30,9 +30,11 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
@@ -49,6 +51,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 9916d5f32f..caf8b51fe5 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -5,7 +5,6 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/assembler.h"
-#include "src/ast.h"
#include "src/isolate-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
@@ -189,16 +188,9 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
Address stack_base = stack_scope.stack()->stack_base();
int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
- input,
- start_offset,
- input_start,
- input_end,
- output,
- output_size,
- stack_base,
- direct_call,
- isolate);
+ int result = CALL_GENERATED_REGEXP_CODE(
+ isolate, code->entry(), input, start_offset, input_start, input_end,
+ output, output_size, stack_base, direct_call, isolate);
DCHECK(result >= RETRY);
if (result == EXCEPTION && !isolate->has_pending_exception()) {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index ea97d5b29b..20599334cd 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -5,7 +5,8 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
-#include "src/ast.h"
+#include "src/assembler.h"
+#include "src/regexp/regexp-ast.h"
namespace v8 {
namespace internal {
@@ -71,9 +72,11 @@ class RegExpMacroAssembler {
virtual void CheckCharacterGT(uc16 limit, Label* on_greater) = 0;
virtual void CheckCharacterLT(uc16 limit, Label* on_less) = 0;
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position) = 0;
- virtual void CheckNotAtStart(Label* on_not_at_start) = 0;
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match) = 0;
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start) = 0;
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match) = 0;
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match) = 0;
// Check the current character for a match with a literal character. If we
// fail to match then goto the on_failure label. End of input always
@@ -102,17 +105,12 @@ class RegExpMacroAssembler {
// Checks whether the given offset from the current position is before
// the end of the string. May overwrite the current character.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input) {
- LoadCurrentCharacter(cp_offset, on_outside_input, true);
- }
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input) = 0;
// Check whether a standard/default character class matches the current
// character. Returns false if the type of special character class does
// not have custom support.
// May clobber the current loaded character.
- virtual bool CheckSpecialCharacterClass(uc16 type,
- Label* on_no_match) {
- return false;
- }
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match) = 0;
virtual void Fail() = 0;
virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
new file mode 100644
index 0000000000..fa8900342c
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -0,0 +1,1180 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-parser.h"
+
+#include "src/char-predicates-inl.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/regexp/jsregexp.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
+ bool multiline, bool unicode, Isolate* isolate,
+ Zone* zone)
+ : isolate_(isolate),
+ zone_(zone),
+ error_(error),
+ captures_(NULL),
+ in_(in),
+ current_(kEndMarker),
+ next_pos_(0),
+ captures_started_(0),
+ capture_count_(0),
+ has_more_(true),
+ multiline_(multiline),
+ unicode_(unicode),
+ simple_(false),
+ contains_anchor_(false),
+ is_scanned_for_captures_(false),
+ failed_(false) {
+ Advance();
+}
+
+
+uc32 RegExpParser::Next() {
+ if (has_next()) {
+ return in()->Get(next_pos_);
+ } else {
+ return kEndMarker;
+ }
+}
+
+
+void RegExpParser::Advance() {
+ if (next_pos_ < in()->length()) {
+ StackLimitCheck check(isolate());
+ if (check.HasOverflowed()) {
+ ReportError(CStrVector(Isolate::kStackOverflowMessage));
+ } else if (zone()->excess_allocation()) {
+ ReportError(CStrVector("Regular expression too large"));
+ } else {
+ current_ = in()->Get(next_pos_);
+ next_pos_++;
+ // Read the whole surrogate pair in case of unicode flag, if possible.
+ if (unicode_ && next_pos_ < in()->length() &&
+ unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(current_))) {
+ uc16 trail = in()->Get(next_pos_);
+ if (unibrow::Utf16::IsTrailSurrogate(trail)) {
+ current_ = unibrow::Utf16::CombineSurrogatePair(
+ static_cast<uc16>(current_), trail);
+ next_pos_++;
+ }
+ }
+ }
+ } else {
+ current_ = kEndMarker;
+ // Advance so that position() points to 1-after-the-last-character. This is
+ // important so that Reset() to this position works correctly.
+ next_pos_ = in()->length() + 1;
+ has_more_ = false;
+ }
+}
+
+
+void RegExpParser::Reset(int pos) {
+ next_pos_ = pos;
+ has_more_ = (pos < in()->length());
+ Advance();
+}
+
+
+void RegExpParser::Advance(int dist) {
+ next_pos_ += dist - 1;
+ Advance();
+}
+
+
+bool RegExpParser::simple() { return simple_; }
+
+
+bool RegExpParser::IsSyntaxCharacter(uc32 c) {
+ return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
+ c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
+ c == '{' || c == '}' || c == '|';
+}
+
+
+RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
+ failed_ = true;
+ *error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
+ // Zip to the end to make sure the no more input is read.
+ current_ = kEndMarker;
+ next_pos_ = in()->length();
+ return NULL;
+}
+
+
+#define CHECK_FAILED /**/); \
+ if (failed_) return NULL; \
+ ((void)0
+
+
+// Pattern ::
+// Disjunction
+RegExpTree* RegExpParser::ParsePattern() {
+ RegExpTree* result = ParseDisjunction(CHECK_FAILED);
+ DCHECK(!has_more());
+ // If the result of parsing is a literal string atom, and it has the
+ // same length as the input, then the atom is identical to the input.
+ if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
+ simple_ = true;
+ }
+ return result;
+}
+
+
+// Disjunction ::
+// Alternative
+// Alternative | Disjunction
+// Alternative ::
+// [empty]
+// Term Alternative
+// Term ::
+// Assertion
+// Atom
+// Atom Quantifier
+RegExpTree* RegExpParser::ParseDisjunction() {
+ // Used to store current state while parsing subexpressions.
+ RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
+ zone());
+ RegExpParserState* state = &initial_state;
+ // Cache the builder in a local variable for quick access.
+ RegExpBuilder* builder = initial_state.builder();
+ while (true) {
+ switch (current()) {
+ case kEndMarker:
+ if (state->IsSubexpression()) {
+ // Inside a parenthesized group when hitting end of input.
+ ReportError(CStrVector("Unterminated group") CHECK_FAILED);
+ }
+ DCHECK_EQ(INITIAL, state->group_type());
+ // Parsing completed successfully.
+ return builder->ToRegExp();
+ case ')': {
+ if (!state->IsSubexpression()) {
+ ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
+ }
+ DCHECK_NE(INITIAL, state->group_type());
+
+ Advance();
+ // End disjunction parsing and convert builder content to new single
+ // regexp atom.
+ RegExpTree* body = builder->ToRegExp();
+
+ int end_capture_index = captures_started();
+
+ int capture_index = state->capture_index();
+ SubexpressionType group_type = state->group_type();
+
+ // Build result of subexpression.
+ if (group_type == CAPTURE) {
+ RegExpCapture* capture = GetCapture(capture_index);
+ capture->set_body(body);
+ body = capture;
+ } else if (group_type != GROUPING) {
+ DCHECK(group_type == POSITIVE_LOOKAROUND ||
+ group_type == NEGATIVE_LOOKAROUND);
+ bool is_positive = (group_type == POSITIVE_LOOKAROUND);
+ body = new (zone()) RegExpLookaround(
+ body, is_positive, end_capture_index - capture_index,
+ capture_index, state->lookaround_type());
+ }
+
+ // Restore previous state.
+ state = state->previous_state();
+ builder = state->builder();
+
+ builder->AddAtom(body);
+ // For compatability with JSC and ES3, we allow quantifiers after
+ // lookaheads, and break in all cases.
+ break;
+ }
+ case '|': {
+ Advance();
+ builder->NewAlternative();
+ continue;
+ }
+ case '*':
+ case '+':
+ case '?':
+ return ReportError(CStrVector("Nothing to repeat"));
+ case '^': {
+ Advance();
+ if (multiline_) {
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
+ } else {
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+ set_contains_anchor();
+ }
+ continue;
+ }
+ case '$': {
+ Advance();
+ RegExpAssertion::AssertionType assertion_type =
+ multiline_ ? RegExpAssertion::END_OF_LINE
+ : RegExpAssertion::END_OF_INPUT;
+ builder->AddAssertion(new (zone()) RegExpAssertion(assertion_type));
+ continue;
+ }
+ case '.': {
+ Advance();
+ // everything except \x0a, \x0d, \u2028 and \u2029
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape('.', ranges, zone());
+ RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
+ builder->AddAtom(atom);
+ break;
+ }
+ case '(': {
+ SubexpressionType subexpr_type = CAPTURE;
+ RegExpLookaround::Type lookaround_type = state->lookaround_type();
+ Advance();
+ if (current() == '?') {
+ switch (Next()) {
+ case ':':
+ subexpr_type = GROUPING;
+ break;
+ case '=':
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ case '!':
+ lookaround_type = RegExpLookaround::LOOKAHEAD;
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ case '<':
+ if (FLAG_harmony_regexp_lookbehind) {
+ Advance();
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ if (Next() == '=') {
+ subexpr_type = POSITIVE_LOOKAROUND;
+ break;
+ } else if (Next() == '!') {
+ subexpr_type = NEGATIVE_LOOKAROUND;
+ break;
+ }
+ }
+ // Fall through.
+ default:
+ ReportError(CStrVector("Invalid group") CHECK_FAILED);
+ break;
+ }
+ Advance(2);
+ } else {
+ if (captures_started_ >= kMaxCaptures) {
+ ReportError(CStrVector("Too many captures") CHECK_FAILED);
+ }
+ captures_started_++;
+ }
+ // Store current state and begin new disjunction parsing.
+ state = new (zone()) RegExpParserState(
+ state, subexpr_type, lookaround_type, captures_started_, zone());
+ builder = state->builder();
+ continue;
+ }
+ case '[': {
+ RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
+ builder->AddAtom(atom);
+ break;
+ }
+ // Atom ::
+ // \ AtomEscape
+ case '\\':
+ switch (Next()) {
+ case kEndMarker:
+ return ReportError(CStrVector("\\ at end of pattern"));
+ case 'b':
+ Advance(2);
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
+ continue;
+ case 'B':
+ Advance(2);
+ builder->AddAssertion(
+ new (zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+ continue;
+ // AtomEscape ::
+ // CharacterClassEscape
+ //
+ // CharacterClassEscape :: one of
+ // d D s S w W
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W': {
+ uc32 c = Next();
+ Advance(2);
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ CharacterRange::AddClassEscape(c, ranges, zone());
+ RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
+ builder->AddAtom(atom);
+ break;
+ }
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ int index = 0;
+ if (ParseBackReferenceIndex(&index)) {
+ if (state->IsInsideCaptureGroup(index)) {
+ // The back reference is inside the capture group it refers to.
+ // Nothing can possibly have been captured yet, so we use empty
+ // instead. This ensures that, when checking a back reference,
+ // the capture registers of the referenced capture are either
+ // both set or both cleared.
+ builder->AddEmpty();
+ } else {
+ RegExpCapture* capture = GetCapture(index);
+ RegExpTree* atom = new (zone()) RegExpBackReference(capture);
+ builder->AddAtom(atom);
+ }
+ break;
+ }
+ uc32 first_digit = Next();
+ if (first_digit == '8' || first_digit == '9') {
+ // If the 'u' flag is present, only syntax characters can be
+ // escaped,
+ // no other identity escapes are allowed. If the 'u' flag is not
+ // present, all identity escapes are allowed.
+ if (!unicode_) {
+ builder->AddCharacter(first_digit);
+ Advance(2);
+ } else {
+ return ReportError(CStrVector("Invalid escape"));
+ }
+ break;
+ }
+ }
+ // FALLTHROUGH
+ case '0': {
+ Advance();
+ uc32 octal = ParseOctalLiteral();
+ builder->AddCharacter(octal);
+ break;
+ }
+ // ControlEscape :: one of
+ // f n r t v
+ case 'f':
+ Advance(2);
+ builder->AddCharacter('\f');
+ break;
+ case 'n':
+ Advance(2);
+ builder->AddCharacter('\n');
+ break;
+ case 'r':
+ Advance(2);
+ builder->AddCharacter('\r');
+ break;
+ case 't':
+ Advance(2);
+ builder->AddCharacter('\t');
+ break;
+ case 'v':
+ Advance(2);
+ builder->AddCharacter('\v');
+ break;
+ case 'c': {
+ Advance();
+ uc32 controlLetter = Next();
+ // Special case if it is an ASCII letter.
+ // Convert lower case letters to uppercase.
+ uc32 letter = controlLetter & ~('a' ^ 'A');
+ if (letter < 'A' || 'Z' < letter) {
+ // controlLetter is not in range 'A'-'Z' or 'a'-'z'.
+ // This is outside the specification. We match JSC in
+ // reading the backslash as a literal character instead
+ // of as starting an escape.
+ builder->AddCharacter('\\');
+ } else {
+ Advance(2);
+ builder->AddCharacter(controlLetter & 0x1f);
+ }
+ break;
+ }
+ case 'x': {
+ Advance(2);
+ uc32 value;
+ if (ParseHexEscape(2, &value)) {
+ builder->AddCharacter(value);
+ } else if (!unicode_) {
+ builder->AddCharacter('x');
+ } else {
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ return ReportError(CStrVector("Invalid escape"));
+ }
+ break;
+ }
+ case 'u': {
+ Advance(2);
+ uc32 value;
+ if (ParseUnicodeEscape(&value)) {
+ builder->AddUnicodeCharacter(value);
+ } else if (!unicode_) {
+ builder->AddCharacter('u');
+ } else {
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ return ReportError(CStrVector("Invalid unicode escape"));
+ }
+ break;
+ }
+ default:
+ Advance();
+ // If the 'u' flag is present, only syntax characters can be
+ // escaped, no
+ // other identity escapes are allowed. If the 'u' flag is not
+ // present,
+ // all identity escapes are allowed.
+ if (!unicode_ || IsSyntaxCharacter(current())) {
+ builder->AddCharacter(current());
+ Advance();
+ } else {
+ return ReportError(CStrVector("Invalid escape"));
+ }
+ break;
+ }
+ break;
+ case '{': {
+ int dummy;
+ if (ParseIntervalQuantifier(&dummy, &dummy)) {
+ ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+ }
+ // fallthrough
+ }
+ default:
+ builder->AddUnicodeCharacter(current());
+ Advance();
+ break;
+ } // end switch(current())
+
+ int min;
+ int max;
+ switch (current()) {
+ // QuantifierPrefix ::
+ // *
+ // +
+ // ?
+ // {
+ case '*':
+ min = 0;
+ max = RegExpTree::kInfinity;
+ Advance();
+ break;
+ case '+':
+ min = 1;
+ max = RegExpTree::kInfinity;
+ Advance();
+ break;
+ case '?':
+ min = 0;
+ max = 1;
+ Advance();
+ break;
+ case '{':
+ if (ParseIntervalQuantifier(&min, &max)) {
+ if (max < min) {
+ ReportError(CStrVector("numbers out of order in {} quantifier.")
+ CHECK_FAILED);
+ }
+ break;
+ } else {
+ continue;
+ }
+ default:
+ continue;
+ }
+ RegExpQuantifier::QuantifierType quantifier_type = RegExpQuantifier::GREEDY;
+ if (current() == '?') {
+ quantifier_type = RegExpQuantifier::NON_GREEDY;
+ Advance();
+ } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
+ // FLAG_regexp_possessive_quantifier is a debug-only flag.
+ quantifier_type = RegExpQuantifier::POSSESSIVE;
+ Advance();
+ }
+ builder->AddQuantifierToAtom(min, max, quantifier_type);
+ }
+}
+
+
+#ifdef DEBUG
+// Currently only used in an DCHECK.
+static bool IsSpecialClassEscape(uc32 c) {
+ switch (c) {
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ return true;
+ default:
+ return false;
+ }
+}
+#endif
+
+
+// In order to know whether an escape is a backreference or not we have to scan
+// the entire regexp and find the number of capturing parentheses. However we
+// don't want to scan the regexp twice unless it is necessary. This mini-parser
+// is called when needed. It can see the difference between capturing and
+// noncapturing parentheses and can skip character classes and backslash-escaped
+// characters.
+void RegExpParser::ScanForCaptures() {
+ // Start with captures started previous to current position
+ int capture_count = captures_started();
+ // Add count of captures after this position.
+ int n;
+ while ((n = current()) != kEndMarker) {
+ Advance();
+ switch (n) {
+ case '\\':
+ Advance();
+ break;
+ case '[': {
+ int c;
+ while ((c = current()) != kEndMarker) {
+ Advance();
+ if (c == '\\') {
+ Advance();
+ } else {
+ if (c == ']') break;
+ }
+ }
+ break;
+ }
+ case '(':
+ if (current() != '?') capture_count++;
+ break;
+ }
+ }
+ capture_count_ = capture_count;
+ is_scanned_for_captures_ = true;
+}
+
+
+bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
+ DCHECK_EQ('\\', current());
+ DCHECK('1' <= Next() && Next() <= '9');
+ // Try to parse a decimal literal that is no greater than the total number
+ // of left capturing parentheses in the input.
+ int start = position();
+ int value = Next() - '0';
+ Advance(2);
+ while (true) {
+ uc32 c = current();
+ if (IsDecimalDigit(c)) {
+ value = 10 * value + (c - '0');
+ if (value > kMaxCaptures) {
+ Reset(start);
+ return false;
+ }
+ Advance();
+ } else {
+ break;
+ }
+ }
+ if (value > captures_started()) {
+ if (!is_scanned_for_captures_) {
+ int saved_position = position();
+ ScanForCaptures();
+ Reset(saved_position);
+ }
+ if (value > capture_count_) {
+ Reset(start);
+ return false;
+ }
+ }
+ *index_out = value;
+ return true;
+}
+
+
+RegExpCapture* RegExpParser::GetCapture(int index) {
+ // The index for the capture groups are one-based. Its index in the list is
+ // zero-based.
+ int know_captures =
+ is_scanned_for_captures_ ? capture_count_ : captures_started_;
+ DCHECK(index <= know_captures);
+ if (captures_ == NULL) {
+ captures_ = new (zone()) ZoneList<RegExpCapture*>(know_captures, zone());
+ }
+ while (captures_->length() < know_captures) {
+ captures_->Add(new (zone()) RegExpCapture(captures_->length() + 1), zone());
+ }
+ return captures_->at(index - 1);
+}
+
+
+bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
+ for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
+ if (s->group_type() != CAPTURE) continue;
+ // Return true if we found the matching capture index.
+ if (index == s->capture_index()) return true;
+ // Abort if index is larger than what has been parsed up till this state.
+ if (index > s->capture_index()) return false;
+ }
+ return false;
+}
+
+
+// QuantifierPrefix ::
+// { DecimalDigits }
+// { DecimalDigits , }
+// { DecimalDigits , DecimalDigits }
+//
+// Returns true if parsing succeeds, and set the min_out and max_out
+// values. Values are truncated to RegExpTree::kInfinity if they overflow.
+bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
+ DCHECK_EQ(current(), '{');
+ int start = position();
+ Advance();
+ int min = 0;
+ if (!IsDecimalDigit(current())) {
+ Reset(start);
+ return false;
+ }
+ while (IsDecimalDigit(current())) {
+ int next = current() - '0';
+ if (min > (RegExpTree::kInfinity - next) / 10) {
+ // Overflow. Skip past remaining decimal digits and return -1.
+ do {
+ Advance();
+ } while (IsDecimalDigit(current()));
+ min = RegExpTree::kInfinity;
+ break;
+ }
+ min = 10 * min + next;
+ Advance();
+ }
+ int max = 0;
+ if (current() == '}') {
+ max = min;
+ Advance();
+ } else if (current() == ',') {
+ Advance();
+ if (current() == '}') {
+ max = RegExpTree::kInfinity;
+ Advance();
+ } else {
+ while (IsDecimalDigit(current())) {
+ int next = current() - '0';
+ if (max > (RegExpTree::kInfinity - next) / 10) {
+ do {
+ Advance();
+ } while (IsDecimalDigit(current()));
+ max = RegExpTree::kInfinity;
+ break;
+ }
+ max = 10 * max + next;
+ Advance();
+ }
+ if (current() != '}') {
+ Reset(start);
+ return false;
+ }
+ Advance();
+ }
+ } else {
+ Reset(start);
+ return false;
+ }
+ *min_out = min;
+ *max_out = max;
+ return true;
+}
+
+
+uc32 RegExpParser::ParseOctalLiteral() {
+ DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
+ // For compatibility with some other browsers (not all), we parse
+ // up to three octal digits with a value below 256.
+ uc32 value = current() - '0';
+ Advance();
+ if ('0' <= current() && current() <= '7') {
+ value = value * 8 + current() - '0';
+ Advance();
+ if (value < 32 && '0' <= current() && current() <= '7') {
+ value = value * 8 + current() - '0';
+ Advance();
+ }
+ }
+ return value;
+}
+
+
+bool RegExpParser::ParseHexEscape(int length, uc32* value) {
+ int start = position();
+ uc32 val = 0;
+ for (int i = 0; i < length; ++i) {
+ uc32 c = current();
+ int d = HexValue(c);
+ if (d < 0) {
+ Reset(start);
+ return false;
+ }
+ val = val * 16 + d;
+ Advance();
+ }
+ *value = val;
+ return true;
+}
+
+
+bool RegExpParser::ParseUnicodeEscape(uc32* value) {
+ // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
+ // allowed). In the latter case, the number of hex digits between { } is
+ // arbitrary. \ and u have already been read.
+ if (current() == '{' && unicode_) {
+ int start = position();
+ Advance();
+ if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
+ if (current() == '}') {
+ Advance();
+ return true;
+ }
+ }
+ Reset(start);
+ return false;
+ }
+ // \u but no {, or \u{...} escapes not allowed.
+ return ParseHexEscape(4, value);
+}
+
+
+bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
+ uc32 x = 0;
+ int d = HexValue(current());
+ if (d < 0) {
+ return false;
+ }
+ while (d >= 0) {
+ x = x * 16 + d;
+ if (x > max_value) {
+ return false;
+ }
+ Advance();
+ d = HexValue(current());
+ }
+ *value = x;
+ return true;
+}
+
+
+uc32 RegExpParser::ParseClassCharacterEscape() {
+ DCHECK(current() == '\\');
+ DCHECK(has_next() && !IsSpecialClassEscape(Next()));
+ Advance();
+ switch (current()) {
+ case 'b':
+ Advance();
+ return '\b';
+ // ControlEscape :: one of
+ // f n r t v
+ case 'f':
+ Advance();
+ return '\f';
+ case 'n':
+ Advance();
+ return '\n';
+ case 'r':
+ Advance();
+ return '\r';
+ case 't':
+ Advance();
+ return '\t';
+ case 'v':
+ Advance();
+ return '\v';
+ case 'c': {
+ uc32 controlLetter = Next();
+ uc32 letter = controlLetter & ~('A' ^ 'a');
+ // For compatibility with JSC, inside a character class
+ // we also accept digits and underscore as control characters.
+ if ((controlLetter >= '0' && controlLetter <= '9') ||
+ controlLetter == '_' || (letter >= 'A' && letter <= 'Z')) {
+ Advance(2);
+ // Control letters mapped to ASCII control characters in the range
+ // 0x00-0x1f.
+ return controlLetter & 0x1f;
+ }
+ // We match JSC in reading the backslash as a literal
+ // character instead of as starting an escape.
+ return '\\';
+ }
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ // For compatibility, we interpret a decimal escape that isn't
+ // a back reference (and therefore either \0 or not valid according
+ // to the specification) as a 1..3 digit octal character code.
+ return ParseOctalLiteral();
+ case 'x': {
+ Advance();
+ uc32 value;
+ if (ParseHexEscape(2, &value)) {
+ return value;
+ }
+ if (!unicode_) {
+ // If \x is not followed by a two-digit hexadecimal, treat it
+ // as an identity escape.
+ return 'x';
+ }
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
+ }
+ case 'u': {
+ Advance();
+ uc32 value;
+ if (ParseUnicodeEscape(&value)) {
+ return value;
+ }
+ if (!unicode_) {
+ return 'u';
+ }
+ // If the 'u' flag is present, invalid escapes are not treated as
+ // identity escapes.
+ ReportError(CStrVector("Invalid unicode escape"));
+ return 0;
+ }
+ default: {
+ uc32 result = current();
+ // If the 'u' flag is present, only syntax characters can be escaped, no
+ // other identity escapes are allowed. If the 'u' flag is not present, all
+ // identity escapes are allowed.
+ if (!unicode_ || IsSyntaxCharacter(result)) {
+ Advance();
+ return result;
+ }
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
+ }
+ }
+ return 0;
+}
+
+
+CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
+ DCHECK_EQ(0, *char_class);
+ uc32 first = current();
+ if (first == '\\') {
+ switch (Next()) {
+ case 'w':
+ case 'W':
+ case 'd':
+ case 'D':
+ case 's':
+ case 'S': {
+ *char_class = Next();
+ Advance(2);
+ return CharacterRange::Singleton(0); // Return dummy value.
+ }
+ case kEndMarker:
+ return ReportError(CStrVector("\\ at end of pattern"));
+ default:
+ uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
+ return CharacterRange::Singleton(c);
+ }
+ } else {
+ Advance();
+ return CharacterRange::Singleton(first);
+ }
+}
+
+
+static const uc16 kNoCharClass = 0;
+
+// Adds range or pre-defined character class to character ranges.
+// If char_class is not kInvalidClass, it's interpreted as a class
+// escape (i.e., 's' means whitespace, from '\s').
+static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
+ uc16 char_class, CharacterRange range,
+ Zone* zone) {
+ if (char_class != kNoCharClass) {
+ CharacterRange::AddClassEscape(char_class, ranges, zone);
+ } else {
+ ranges->Add(range, zone);
+ }
+}
+
+
+RegExpTree* RegExpParser::ParseCharacterClass() {
+ static const char* kUnterminated = "Unterminated character class";
+ static const char* kRangeOutOfOrder = "Range out of order in character class";
+
+ DCHECK_EQ(current(), '[');
+ Advance();
+ bool is_negated = false;
+ if (current() == '^') {
+ is_negated = true;
+ Advance();
+ }
+ ZoneList<CharacterRange>* ranges =
+ new (zone()) ZoneList<CharacterRange>(2, zone());
+ while (has_more() && current() != ']') {
+ uc16 char_class = kNoCharClass;
+ CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
+ if (current() == '-') {
+ Advance();
+ if (current() == kEndMarker) {
+ // If we reach the end we break out of the loop and let the
+ // following code report an error.
+ break;
+ } else if (current() == ']') {
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
+ break;
+ }
+ uc16 char_class_2 = kNoCharClass;
+ CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
+ if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
+ // Either end is an escaped character class. Treat the '-' verbatim.
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ ranges->Add(CharacterRange::Singleton('-'), zone());
+ AddRangeOrEscape(ranges, char_class_2, next, zone());
+ continue;
+ }
+ if (first.from() > next.to()) {
+ return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
+ }
+ ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
+ } else {
+ AddRangeOrEscape(ranges, char_class, first, zone());
+ }
+ }
+ if (!has_more()) {
+ return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
+ }
+ Advance();
+ if (ranges->length() == 0) {
+ ranges->Add(CharacterRange::Everything(), zone());
+ is_negated = !is_negated;
+ }
+ return new (zone()) RegExpCharacterClass(ranges, is_negated);
+}
+
+
+#undef CHECK_FAILED
+
+
+bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
+ FlatStringReader* input, bool multiline,
+ bool unicode, RegExpCompileData* result) {
+ DCHECK(result != NULL);
+ RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
+ RegExpTree* tree = parser.ParsePattern();
+ if (parser.failed()) {
+ DCHECK(tree == NULL);
+ DCHECK(!result->error.is_null());
+ } else {
+ DCHECK(tree != NULL);
+ DCHECK(result->error.is_null());
+ if (FLAG_trace_regexp_parser) {
+ OFStream os(stdout);
+ tree->Print(os, zone);
+ os << "\n";
+ }
+ result->tree = tree;
+ int capture_count = parser.captures_started();
+ result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
+ result->contains_anchor = parser.contains_anchor();
+ result->capture_count = capture_count;
+ }
+ return !parser.failed();
+}
+
+
+RegExpBuilder::RegExpBuilder(Zone* zone)
+ : zone_(zone),
+ pending_empty_(false),
+ characters_(NULL),
+ terms_(),
+ alternatives_()
+#ifdef DEBUG
+ ,
+ last_added_(ADD_NONE)
+#endif
+{
+}
+
+
+void RegExpBuilder::FlushCharacters() {
+ pending_empty_ = false;
+ if (characters_ != NULL) {
+ RegExpTree* atom = new (zone()) RegExpAtom(characters_->ToConstVector());
+ characters_ = NULL;
+ text_.Add(atom, zone());
+ LAST(ADD_ATOM);
+ }
+}
+
+
+void RegExpBuilder::FlushText() {
+ FlushCharacters();
+ int num_text = text_.length();
+ if (num_text == 0) {
+ return;
+ } else if (num_text == 1) {
+ terms_.Add(text_.last(), zone());
+ } else {
+ RegExpText* text = new (zone()) RegExpText(zone());
+ for (int i = 0; i < num_text; i++) text_.Get(i)->AppendToText(text, zone());
+ terms_.Add(text, zone());
+ }
+ text_.Clear();
+}
+
+
+void RegExpBuilder::AddCharacter(uc16 c) {
+ pending_empty_ = false;
+ if (characters_ == NULL) {
+ characters_ = new (zone()) ZoneList<uc16>(4, zone());
+ }
+ characters_->Add(c, zone());
+ LAST(ADD_CHAR);
+}
+
+
+void RegExpBuilder::AddUnicodeCharacter(uc32 c) {
+ if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ ZoneList<uc16> surrogate_pair(2, zone());
+ surrogate_pair.Add(unibrow::Utf16::LeadSurrogate(c), zone());
+ surrogate_pair.Add(unibrow::Utf16::TrailSurrogate(c), zone());
+ RegExpAtom* atom = new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
+ AddAtom(atom);
+ } else {
+ AddCharacter(static_cast<uc16>(c));
+ }
+}
+
+
+void RegExpBuilder::AddEmpty() { pending_empty_ = true; }
+
+
+void RegExpBuilder::AddAtom(RegExpTree* term) {
+ if (term->IsEmpty()) {
+ AddEmpty();
+ return;
+ }
+ if (term->IsTextElement()) {
+ FlushCharacters();
+ text_.Add(term, zone());
+ } else {
+ FlushText();
+ terms_.Add(term, zone());
+ }
+ LAST(ADD_ATOM);
+}
+
+
+void RegExpBuilder::AddAssertion(RegExpTree* assert) {
+ FlushText();
+ terms_.Add(assert, zone());
+ LAST(ADD_ASSERT);
+}
+
+
+void RegExpBuilder::NewAlternative() { FlushTerms(); }
+
+
+void RegExpBuilder::FlushTerms() {
+ FlushText();
+ int num_terms = terms_.length();
+ RegExpTree* alternative;
+ if (num_terms == 0) {
+ alternative = new (zone()) RegExpEmpty();
+ } else if (num_terms == 1) {
+ alternative = terms_.last();
+ } else {
+ alternative = new (zone()) RegExpAlternative(terms_.GetList(zone()));
+ }
+ alternatives_.Add(alternative, zone());
+ terms_.Clear();
+ LAST(ADD_NONE);
+}
+
+
+RegExpTree* RegExpBuilder::ToRegExp() {
+ FlushTerms();
+ int num_alternatives = alternatives_.length();
+ if (num_alternatives == 0) return new (zone()) RegExpEmpty();
+ if (num_alternatives == 1) return alternatives_.last();
+ return new (zone()) RegExpDisjunction(alternatives_.GetList(zone()));
+}
+
+
+void RegExpBuilder::AddQuantifierToAtom(
+ int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
+ if (pending_empty_) {
+ pending_empty_ = false;
+ return;
+ }
+ RegExpTree* atom;
+ if (characters_ != NULL) {
+ DCHECK(last_added_ == ADD_CHAR);
+ // Last atom was character.
+ Vector<const uc16> char_vector = characters_->ToConstVector();
+ int num_chars = char_vector.length();
+ if (num_chars > 1) {
+ Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
+ text_.Add(new (zone()) RegExpAtom(prefix), zone());
+ char_vector = char_vector.SubVector(num_chars - 1, num_chars);
+ }
+ characters_ = NULL;
+ atom = new (zone()) RegExpAtom(char_vector);
+ FlushText();
+ } else if (text_.length() > 0) {
+ DCHECK(last_added_ == ADD_ATOM);
+ atom = text_.RemoveLast();
+ FlushText();
+ } else if (terms_.length() > 0) {
+ DCHECK(last_added_ == ADD_ATOM);
+ atom = terms_.RemoveLast();
+ if (atom->max_match() == 0) {
+ // Guaranteed to only match an empty string.
+ LAST(ADD_TERM);
+ if (min == 0) {
+ return;
+ }
+ terms_.Add(atom, zone());
+ return;
+ }
+ } else {
+ // Only call immediately after adding an atom or character!
+ UNREACHABLE();
+ return;
+ }
+ terms_.Add(new (zone()) RegExpQuantifier(min, max, quantifier_type, atom),
+ zone());
+ LAST(ADD_TERM);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
new file mode 100644
index 0000000000..af9b765fba
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -0,0 +1,277 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_PARSER_H_
+#define V8_REGEXP_REGEXP_PARSER_H_
+
+#include "src/objects.h"
+#include "src/regexp/regexp-ast.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+struct RegExpCompileData;
+
+
+// A BufferedZoneList is an automatically growing list, just like (and backed
+// by) a ZoneList, that is optimized for the case of adding and removing
+// a single element. The last element added is stored outside the backing list,
+// and if no more than one element is ever added, the ZoneList isn't even
+// allocated.
+// Elements must not be NULL pointers.
+template <typename T, int initial_size>
+class BufferedZoneList {
+ public:
+ BufferedZoneList() : list_(NULL), last_(NULL) {}
+
+ // Adds element at end of list. This element is buffered and can
+ // be read using last() or removed using RemoveLast until a new Add or until
+ // RemoveLast or GetList has been called.
+ void Add(T* value, Zone* zone) {
+ if (last_ != NULL) {
+ if (list_ == NULL) {
+ list_ = new (zone) ZoneList<T*>(initial_size, zone);
+ }
+ list_->Add(last_, zone);
+ }
+ last_ = value;
+ }
+
+ T* last() {
+ DCHECK(last_ != NULL);
+ return last_;
+ }
+
+ T* RemoveLast() {
+ DCHECK(last_ != NULL);
+ T* result = last_;
+ if ((list_ != NULL) && (list_->length() > 0))
+ last_ = list_->RemoveLast();
+ else
+ last_ = NULL;
+ return result;
+ }
+
+ T* Get(int i) {
+ DCHECK((0 <= i) && (i < length()));
+ if (list_ == NULL) {
+ DCHECK_EQ(0, i);
+ return last_;
+ } else {
+ if (i == list_->length()) {
+ DCHECK(last_ != NULL);
+ return last_;
+ } else {
+ return list_->at(i);
+ }
+ }
+ }
+
+ void Clear() {
+ list_ = NULL;
+ last_ = NULL;
+ }
+
+ int length() {
+ int length = (list_ == NULL) ? 0 : list_->length();
+ return length + ((last_ == NULL) ? 0 : 1);
+ }
+
+ ZoneList<T*>* GetList(Zone* zone) {
+ if (list_ == NULL) {
+ list_ = new (zone) ZoneList<T*>(initial_size, zone);
+ }
+ if (last_ != NULL) {
+ list_->Add(last_, zone);
+ last_ = NULL;
+ }
+ return list_;
+ }
+
+ private:
+ ZoneList<T*>* list_;
+ T* last_;
+};
+
+
+// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
+class RegExpBuilder : public ZoneObject {
+ public:
+ explicit RegExpBuilder(Zone* zone);
+ void AddCharacter(uc16 character);
+ void AddUnicodeCharacter(uc32 character);
+ // "Adds" an empty expression. Does nothing except consume a
+ // following quantifier
+ void AddEmpty();
+ void AddAtom(RegExpTree* tree);
+ void AddAssertion(RegExpTree* tree);
+ void NewAlternative(); // '|'
+ void AddQuantifierToAtom(int min, int max,
+ RegExpQuantifier::QuantifierType type);
+ RegExpTree* ToRegExp();
+
+ private:
+ void FlushCharacters();
+ void FlushText();
+ void FlushTerms();
+ Zone* zone() const { return zone_; }
+
+ Zone* zone_;
+ bool pending_empty_;
+ ZoneList<uc16>* characters_;
+ BufferedZoneList<RegExpTree, 2> terms_;
+ BufferedZoneList<RegExpTree, 2> text_;
+ BufferedZoneList<RegExpTree, 2> alternatives_;
+#ifdef DEBUG
+ enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
+#define LAST(x) last_added_ = x;
+#else
+#define LAST(x)
+#endif
+};
+
+
+class RegExpParser BASE_EMBEDDED {
+ public:
+ RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
+ bool unicode, Isolate* isolate, Zone* zone);
+
+ static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
+ bool multiline, bool unicode,
+ RegExpCompileData* result);
+
+ RegExpTree* ParsePattern();
+ RegExpTree* ParseDisjunction();
+ RegExpTree* ParseGroup();
+ RegExpTree* ParseCharacterClass();
+
+ // Parses a {...,...} quantifier and stores the range in the given
+ // out parameters.
+ bool ParseIntervalQuantifier(int* min_out, int* max_out);
+
+ // Parses and returns a single escaped character. The character
+ // must not be 'b' or 'B' since they are usually handle specially.
+ uc32 ParseClassCharacterEscape();
+
+ // Checks whether the following is a length-digit hexadecimal number,
+ // and sets the value if it is.
+ bool ParseHexEscape(int length, uc32* value);
+ bool ParseUnicodeEscape(uc32* value);
+ bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
+
+ uc32 ParseOctalLiteral();
+
+ // Tries to parse the input as a back reference. If successful it
+ // stores the result in the output parameter and returns true. If
+ // it fails it will push back the characters read so the same characters
+ // can be reparsed.
+ bool ParseBackReferenceIndex(int* index_out);
+
+ CharacterRange ParseClassAtom(uc16* char_class);
+ RegExpTree* ReportError(Vector<const char> message);
+ void Advance();
+ void Advance(int dist);
+ void Reset(int pos);
+
+ // Reports whether the pattern might be used as a literal search string.
+ // Only use if the result of the parse is a single atom node.
+ bool simple();
+ bool contains_anchor() { return contains_anchor_; }
+ void set_contains_anchor() { contains_anchor_ = true; }
+ int captures_started() { return captures_started_; }
+ int position() { return next_pos_ - 1; }
+ bool failed() { return failed_; }
+
+ static bool IsSyntaxCharacter(uc32 c);
+
+ static const int kMaxCaptures = 1 << 16;
+ static const uc32 kEndMarker = (1 << 21);
+
+ private:
+ enum SubexpressionType {
+ INITIAL,
+ CAPTURE, // All positive values represent captures.
+ POSITIVE_LOOKAROUND,
+ NEGATIVE_LOOKAROUND,
+ GROUPING
+ };
+
+ class RegExpParserState : public ZoneObject {
+ public:
+ RegExpParserState(RegExpParserState* previous_state,
+ SubexpressionType group_type,
+ RegExpLookaround::Type lookaround_type,
+ int disjunction_capture_index, Zone* zone)
+ : previous_state_(previous_state),
+ builder_(new (zone) RegExpBuilder(zone)),
+ group_type_(group_type),
+ lookaround_type_(lookaround_type),
+ disjunction_capture_index_(disjunction_capture_index) {}
+ // Parser state of containing expression, if any.
+ RegExpParserState* previous_state() { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != NULL; }
+ // RegExpBuilder building this regexp's AST.
+ RegExpBuilder* builder() { return builder_; }
+ // Type of regexp being parsed (parenthesized group or entire regexp).
+ SubexpressionType group_type() { return group_type_; }
+ // Lookahead or Lookbehind.
+ RegExpLookaround::Type lookaround_type() { return lookaround_type_; }
+ // Index in captures array of first capture in this sub-expression, if any.
+ // Also the capture index of this sub-expression itself, if group_type
+ // is CAPTURE.
+ int capture_index() { return disjunction_capture_index_; }
+
+ // Check whether the parser is inside a capture group with the given index.
+ bool IsInsideCaptureGroup(int index);
+
+ private:
+ // Linked list implementation of stack of states.
+ RegExpParserState* previous_state_;
+ // Builder for the stored disjunction.
+ RegExpBuilder* builder_;
+ // Stored disjunction type (capture, look-ahead or grouping), if any.
+ SubexpressionType group_type_;
+ // Stored read direction.
+ RegExpLookaround::Type lookaround_type_;
+ // Stored disjunction's capture index (if any).
+ int disjunction_capture_index_;
+ };
+
+ // Return the 1-indexed RegExpCapture object, allocate if necessary.
+ RegExpCapture* GetCapture(int index);
+
+ Isolate* isolate() { return isolate_; }
+ Zone* zone() const { return zone_; }
+
+ uc32 current() { return current_; }
+ bool has_more() { return has_more_; }
+ bool has_next() { return next_pos_ < in()->length(); }
+ uc32 Next();
+ FlatStringReader* in() { return in_; }
+ void ScanForCaptures();
+
+ Isolate* isolate_;
+ Zone* zone_;
+ Handle<String>* error_;
+ ZoneList<RegExpCapture*>* captures_;
+ FlatStringReader* in_;
+ uc32 current_;
+ int next_pos_;
+ int captures_started_;
+ // The capture count is only valid after we have scanned for captures.
+ int capture_count_;
+ bool has_more_;
+ bool multiline_;
+ bool unicode_;
+ bool simple_;
+ bool contains_anchor_;
+ bool is_scanned_for_captures_;
+ bool failed_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_PARSER_H_
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 969edc1b3b..286f159cc8 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -64,7 +64,8 @@ namespace internal {
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - success counter (only useful for global regexp to count matches)
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
* - At start of string (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 rbp[-n] (Only positions must be stored in the first
@@ -94,7 +95,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(isolate, NULL, kRegExpCodeSize),
+ masm_(isolate, NULL, kRegExpCodeSize, CodeObjectRequired::kYes),
no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4, zone),
mode_(mode),
@@ -171,25 +172,16 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ leap(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpp(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rdi, -char_size()));
+ __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ leap(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpp(rax, Operand(rbp, kInputStart));
+void RegExpMacroAssemblerX64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ leap(rax, Operand(rdi, -char_size() + cp_offset * char_size()));
+ __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -211,8 +203,7 @@ void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
@@ -222,23 +213,25 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// rdx = Start offset of capture.
// rbx = Length of capture
- // If length is negative, this code will fail (it's a symptom of a partial or
- // illegal capture where start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp, and we must not generate code that can cause
- // this condition).
-
- // If length is zero, either the capture is empty or it is nonparticipating.
- // In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// -----------------------
// rdx - Start of capture
// rbx - length of capture
// Check that there are sufficient characters left in the input.
- __ movl(rax, rdi);
- __ addl(rax, rbx);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ movl(rax, Operand(rbp, kStringStartMinusOne));
+ __ addl(rax, rbx);
+ __ cmpl(rdi, rax);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ movl(rax, rdi);
+ __ addl(rax, rbx);
+ BranchOrBacktrack(greater, on_no_match);
+ }
if (mode_ == LATIN1) {
Label loop_increment;
@@ -248,6 +241,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ leap(r9, Operand(rsi, rdx, times_1, 0));
__ leap(r11, Operand(rsi, rdi, times_1, 0));
+ if (read_backward) {
+ __ subp(r11, rbx); // Offset by length when matching backwards.
+ }
__ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
@@ -290,6 +286,11 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Compute new value of character position after the matched part.
__ movp(rdi, r11);
__ subq(rdi, rsi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ addq(rdi, register_location(start_reg));
+ __ subq(rdi, register_location(start_reg + 1));
+ }
} else {
DCHECK(mode_ == UC16);
// Save important/volatile registers before calling C function.
@@ -313,6 +314,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ leap(rdx, Operand(rsi, rdi, times_1, 0));
+ if (read_backward) {
+ __ subq(rdx, rbx);
+ }
// Set byte_length.
__ movp(r8, rbx);
// Isolate.
@@ -324,6 +328,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ movp(rsi, rax);
+ if (read_backward) {
+ __ subq(rsi, rbx);
+ }
// Set byte_length.
__ movp(rdx, rbx);
// Isolate.
@@ -349,17 +356,21 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
+ // On success, advance position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
- __ addq(rdi, rbx);
+ if (read_backward) {
+ __ subq(rdi, rbx);
+ } else {
+ __ addq(rdi, rbx);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerX64::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerX64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
// Find length of back-referenced capture.
@@ -367,25 +378,31 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
ReadPositionFromRegister(rax, start_reg + 1); // Offset of end of capture
__ subp(rax, rdx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- // This must not happen (no back-reference can reference a capture that wasn't
- // closed before in the reg-exp).
- __ Check(greater_equal, kInvalidCaptureReferenced);
-
- // Succeed on empty capture (including non-participating capture)
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// -----------------------
// rdx - Start of capture
// rax - length of capture
-
// Check that there are sufficient characters left in the input.
- __ movl(rbx, rdi);
- __ addl(rbx, rax);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ movl(rbx, Operand(rbp, kStringStartMinusOne));
+ __ addl(rbx, rax);
+ __ cmpl(rdi, rbx);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ movl(rbx, rdi);
+ __ addl(rbx, rax);
+ BranchOrBacktrack(greater, on_no_match);
+ }
// Compute pointers to match string and capture string
__ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ if (read_backward) {
+ __ subq(rbx, rax); // Offset by length when matching backwards.
+ }
__ addp(rdx, rsi); // Start of capture.
__ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
@@ -416,6 +433,11 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Set current character position to position after match.
__ movp(rdi, rbx);
__ subq(rdi, rsi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ addq(rdi, register_location(start_reg));
+ __ subq(rdi, register_location(start_reg + 1));
+ }
__ bind(&fallthrough);
}
@@ -682,7 +704,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#endif
__ Push(Immediate(0)); // Number of successful matches in a global regexp.
- __ Push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -732,7 +754,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ movp(Operand(rbp, kInputStartMinusOne), rax);
+ __ movp(Operand(rbp, kStringStartMinusOne), rax);
#if V8_OS_WIN
// Ensure that we have written to each stack page, in order. Skipping a page
@@ -835,7 +857,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
- __ movp(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -1018,10 +1040,13 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1124,7 +1149,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ movp(rax, Operand(rbp, kInputStartMinusOne));
+ __ movp(rax, Operand(rbp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ movp(register_location(reg), rax);
}
@@ -1205,8 +1230,14 @@ Operand RegExpMacroAssemblerX64::register_location(int register_index) {
void RegExpMacroAssemblerX64::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmpl(rdi, Immediate(-cp_offset * char_size()));
- BranchOrBacktrack(greater_equal, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmpl(rdi, Immediate(-cp_offset * char_size()));
+ BranchOrBacktrack(greater_equal, on_outside_input);
+ } else {
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
+ __ cmpp(rax, Operand(rbp, kStringStartMinusOne));
+ BranchOrBacktrack(less_equal, on_outside_input);
+ }
}
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index dbee9e86b5..257804739f 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -34,9 +34,11 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -171,10 +173,10 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index c6968dc197..01d0b249b6 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -53,7 +53,8 @@ namespace internal {
* - backup of caller ebx
* - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
+ * string start - 1). Used to initialize capture registers to a
+ * non-position.
* - register 0 ebp[-4] (only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@@ -80,7 +81,8 @@ RegExpMacroAssemblerX87::RegExpMacroAssemblerX87(Isolate* isolate, Zone* zone,
Mode mode,
int registers_to_save)
: NativeRegExpMacroAssembler(isolate, zone),
- masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize)),
+ masm_(new MacroAssembler(isolate, NULL, kRegExpCodeSize,
+ CodeObjectRequired::kYes)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -156,25 +158,16 @@ void RegExpMacroAssemblerX87::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerX87::CheckAtStart(Label* on_at_start) {
- Label not_at_start;
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, &not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+ __ lea(eax, Operand(edi, -char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(equal, on_at_start);
- __ bind(&not_at_start);
}
-void RegExpMacroAssemblerX87::CheckNotAtStart(Label* on_not_at_start) {
- // Did we start the match at the start of the string at all?
- __ cmp(Operand(ebp, kStartIndex), Immediate(0));
- BranchOrBacktrack(not_equal, on_not_at_start);
- // If we did, are we still at the start of the input?
- __ lea(eax, Operand(esi, edi, times_1, 0));
- __ cmp(eax, Operand(ebp, kInputStart));
+void RegExpMacroAssemblerX87::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ lea(eax, Operand(edi, -char_size() + cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -196,26 +189,28 @@ void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
- int start_reg,
- Label* on_no_match) {
+ int start_reg, bool read_backward, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
__ sub(ebx, edx); // Length of capture.
- // The length of a capture should not be negative. This can only happen
- // if the end of the capture is unrecorded, or at a point earlier than
- // the start of the capture.
- BranchOrBacktrack(less, on_no_match);
-
- // If length is zero, either the capture is empty or it is completely
- // uncaptured. In either case succeed immediately.
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(eax, edi);
- __ add(eax, ebx);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
+ __ add(eax, ebx);
+ __ cmp(edi, eax);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(eax, edi);
+ __ add(eax, ebx);
+ BranchOrBacktrack(greater, on_no_match);
+ }
if (mode_ == LATIN1) {
Label success;
@@ -228,6 +223,9 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
__ add(edx, esi); // Start of capture
__ add(edi, esi); // Start of text to match against capture.
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ add(ebx, edi); // End of text to match against capture.
Label loop;
@@ -278,6 +276,11 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
__ add(esp, Immediate(kPointerSize));
// Compute new value of character position after the matched part.
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
} else {
DCHECK(mode_ == UC16);
// Save registers before calling C function.
@@ -304,6 +307,9 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
// Found by adding negative string-end offset of current position (edi)
// to end of string.
__ add(edi, esi);
+ if (read_backward) {
+ __ sub(edi, ebx); // Offset by length when matching backwards.
+ }
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
@@ -325,16 +331,20 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
// Check if function returned non-zero for success or zero for failure.
__ or_(eax, eax);
BranchOrBacktrack(zero, on_no_match);
- // On success, increment position by length of capture.
- __ add(edi, ebx);
+ // On success, advance position by length of capture.
+ if (read_backward) {
+ __ sub(edi, ebx);
+ } else {
+ __ add(edi, ebx);
+ }
}
__ bind(&fallthrough);
}
-void RegExpMacroAssemblerX87::CheckNotBackReference(
- int start_reg,
- Label* on_no_match) {
+void RegExpMacroAssemblerX87::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
Label fallthrough;
Label success;
Label fail;
@@ -343,22 +353,33 @@ void RegExpMacroAssemblerX87::CheckNotBackReference(
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
__ sub(eax, edx); // Length to check.
- // Fail on partial or illegal capture (start of capture after end of capture).
- BranchOrBacktrack(less, on_no_match);
- // Succeed on empty capture (including no capture)
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
__ j(equal, &fallthrough);
// Check that there are sufficient characters left in the input.
- __ mov(ebx, edi);
- __ add(ebx, eax);
- BranchOrBacktrack(greater, on_no_match);
+ if (read_backward) {
+ __ mov(ebx, Operand(ebp, kStringStartMinusOne));
+ __ add(ebx, eax);
+ __ cmp(edi, ebx);
+ BranchOrBacktrack(less_equal, on_no_match);
+ } else {
+ __ mov(ebx, edi);
+ __ add(ebx, eax);
+ BranchOrBacktrack(greater, on_no_match);
+ }
// Save register to make it available below.
__ push(backtrack_stackpointer());
// Compute pointers to match string and capture string
- __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
__ add(edx, esi); // Start of capture.
+ __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
+ if (read_backward) {
+ __ sub(ebx, eax); // Offset by length when matching backwards.
+ }
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
@@ -389,6 +410,11 @@ void RegExpMacroAssemblerX87::CheckNotBackReference(
// Move current character position to position after match.
__ mov(edi, ecx);
__ sub(edi, esi);
+ if (read_backward) {
+ // Subtract match length if we matched backward.
+ __ add(edi, register_location(start_reg));
+ __ sub(edi, register_location(start_reg + 1));
+ }
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
@@ -634,7 +660,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ push(Immediate(0)); // Make room for "string start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -684,7 +710,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
}
// Store this value in a local variable, for use when clearing
// position registers.
- __ mov(Operand(ebp, kInputStartMinusOne), eax);
+ __ mov(Operand(ebp, kStringStartMinusOne), eax);
#if V8_OS_WIN
// Ensure that we write to each stack page, in order. Skipping a page
@@ -767,7 +793,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
}
if (global()) {
- // Restart matching if the regular expression is flagged as global.
+ // Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ inc(Operand(ebp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
@@ -784,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
Immediate(num_saved_registers_ * kPointerSize));
// Prepare eax to initialize registers with its value in the next run.
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -944,10 +970,13 @@ void RegExpMacroAssemblerX87::LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds,
int characters) {
- DCHECK(cp_offset >= -1); // ^ and \b can look behind one character.
DCHECK(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
if (check_bounds) {
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (cp_offset >= 0) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ } else {
+ CheckPosition(cp_offset, on_end_of_input);
+ }
}
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -1031,7 +1060,7 @@ void RegExpMacroAssemblerX87::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ mov(eax, Operand(ebp, kInputStartMinusOne));
+ __ mov(eax, Operand(ebp, kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ mov(register_location(reg), eax);
}
@@ -1100,8 +1129,14 @@ Operand RegExpMacroAssemblerX87::register_location(int register_index) {
void RegExpMacroAssemblerX87::CheckPosition(int cp_offset,
Label* on_outside_input) {
- __ cmp(edi, -cp_offset * char_size());
- BranchOrBacktrack(greater_equal, on_outside_input);
+ if (cp_offset >= 0) {
+ __ cmp(edi, -cp_offset * char_size());
+ BranchOrBacktrack(greater_equal, on_outside_input);
+ } else {
+ __ lea(eax, Operand(edi, cp_offset * char_size()));
+ __ cmp(eax, Operand(ebp, kStringStartMinusOne));
+ BranchOrBacktrack(less_equal, on_outside_input);
+ }
}
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
index 0deea50357..c95541224f 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
@@ -33,9 +33,11 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
- virtual void CheckNotAtStart(Label* on_not_at_start);
- virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -116,9 +118,9 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
- static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+ static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index f7a8eaba79..6b1655a81b 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -17,6 +17,16 @@ static const int kMaxAllocatableGeneralRegisterCount =
static const int kMaxAllocatableDoubleRegisterCount =
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT)0;
+static const int kAllocatableGeneralCodes[] = {
+#define REGISTER_CODE(R) Register::kCode_##R,
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+
+static const int kAllocatableDoubleCodes[] = {
+#define REGISTER_CODE(R) DoubleRegister::kCode_##R,
+ ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+
static const char* const kGeneralRegisterNames[] = {
#define REGISTER_NAME(R) #R,
GENERAL_REGISTERS(REGISTER_NAME)
@@ -37,71 +47,55 @@ STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public:
explicit ArchDefaultRegisterConfiguration(CompilerSelector compiler)
- : RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
+ : RegisterConfiguration(Register::kNumRegisters,
+ DoubleRegister::kMaxNumRegisters,
#if V8_TARGET_ARCH_IA32
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X87
- kMaxAllocatableGeneralRegisterCount,
- compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
- compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ compiler == TURBOFAN
+ ? 1
+ : kMaxAllocatableDoubleRegisterCount,
+ compiler == TURBOFAN
+ ? 1
+ : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_ARM
- FLAG_enable_embedded_constant_pool
- ? (kMaxAllocatableGeneralRegisterCount - 1)
- : kMaxAllocatableGeneralRegisterCount,
- CpuFeatures::IsSupported(VFP32DREGS)
- ? kMaxAllocatableDoubleRegisterCount
- : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT)0),
- ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT)0,
+ FLAG_enable_embedded_constant_pool
+ ? (kMaxAllocatableGeneralRegisterCount - 1)
+ : kMaxAllocatableGeneralRegisterCount,
+ CpuFeatures::IsSupported(VFP32DREGS)
+ ? kMaxAllocatableDoubleRegisterCount
+ : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
+ REGISTER_COUNT)0),
+ ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
+ REGISTER_COUNT)0,
#elif V8_TARGET_ARCH_ARM64
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS64
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_PPC
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#else
- GetAllocatableGeneralRegisterCount(),
- GetAllocatableDoubleRegisterCount(),
- GetAllocatableAliasedDoubleRegisterCount(),
+#error Unsupported target architecture.
#endif
- GetAllocatableGeneralCodes(), GetAllocatableDoubleCodes(),
- kGeneralRegisterNames, kDoubleRegisterNames) {
- }
-
- const char* general_register_name_table_[Register::kNumRegisters];
- const char* double_register_name_table_[DoubleRegister::kMaxNumRegisters];
-
- private:
- static const int* GetAllocatableGeneralCodes() {
-#define REGISTER_CODE(R) Register::kCode_##R,
- static const int general_codes[] = {
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
-#undef REGISTER_CODE
- return general_codes;
- }
-
- static const int* GetAllocatableDoubleCodes() {
-#define REGISTER_CODE(R) DoubleRegister::kCode_##R,
- static const int double_codes[] = {
- ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
-#undef REGISTER_CODE
- return double_codes;
+ kAllocatableGeneralCodes, kAllocatableDoubleCodes,
+ kGeneralRegisterNames, kDoubleRegisterNames) {
}
};
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index d2edd1b2b7..2d4ee9c1a8 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -5,6 +5,7 @@
#include "src/runtime-profiler.h"
#include "src/assembler.h"
+#include "src/ast/scopeinfo.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -13,7 +14,6 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
-#include "src/scopeinfo.h"
namespace v8 {
namespace internal {
@@ -72,8 +72,10 @@ static void GetICCounts(SharedFunctionInfo* shared,
// Harvest vector-ics as well
TypeFeedbackVector* vector = shared->feedback_vector();
- *ic_with_type_info_count += vector->ic_with_type_info_count();
- *ic_generic_count += vector->ic_generic_count();
+ int with = 0, gen = 0;
+ vector->ComputeCounts(&with, &gen);
+ *ic_with_type_info_count += with;
+ *ic_generic_count += gen;
if (*ic_total_count > 0) {
*type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 67eaa4b632..28e92cbd2b 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -206,7 +206,7 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
}
- KeyAccumulator accumulator(isolate);
+ KeyAccumulator accumulator(isolate, ALL_PROPERTIES);
// No need to separate protoype levels since we only get numbers/element keys
for (PrototypeIterator iter(isolate, array,
PrototypeIterator::START_AT_RECEIVER);
@@ -220,7 +220,7 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
}
accumulator.NextPrototype();
Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- JSObject::CollectOwnElementKeys(current, &accumulator, NONE);
+ JSObject::CollectOwnElementKeys(current, &accumulator, ALL_PROPERTIES);
}
// Erase any keys >= length.
// TODO(adamk): Remove this step when the contract of %GetArrayKeys
@@ -233,15 +233,24 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
}
-static Object* ArrayConstructorCommon(Isolate* isolate,
- Handle<JSFunction> constructor,
- Handle<JSFunction> original_constructor,
- Handle<AllocationSite> site,
- Arguments* caller_args) {
+namespace {
+
+Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target,
+ Handle<AllocationSite> site,
+ Arguments* caller_args) {
Factory* factory = isolate->factory();
+ // If called through new, new.target can be:
+ // - a subclass of constructor,
+ // - a proxy wrapper around constructor, or
+ // - the constructor itself.
+ // If called through Reflect.construct, it's guaranteed to be a constructor by
+ // REFLECT_CONSTRUCT_PREPARE.
+ DCHECK(new_target->IsConstructor());
+
bool holey = false;
- bool can_use_type_feedback = true;
+ bool can_use_type_feedback = !site.is_null();
bool can_inline_array_constructor = true;
if (caller_args->length() == 1) {
Handle<Object> argument_one = caller_args->at<Object>(0);
@@ -263,43 +272,36 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
}
}
- Handle<JSArray> array;
- if (!site.is_null() && can_use_type_feedback) {
- ElementsKind to_kind = site->GetElementsKind();
- if (holey && !IsFastHoleyElementsKind(to_kind)) {
- to_kind = GetHoleyElementsKind(to_kind);
- // Update the allocation site info to reflect the advice alteration.
- site->SetElementsKind(to_kind);
- }
-
- // We should allocate with an initial map that reflects the allocation site
- // advice. Therefore we use AllocateJSObjectFromMap instead of passing
- // the constructor.
- Handle<Map> initial_map(constructor->initial_map(), isolate);
- if (to_kind != initial_map->elements_kind()) {
- initial_map = Map::AsElementsKind(initial_map, to_kind);
- }
-
- // If we don't care to track arrays of to_kind ElementsKind, then
- // don't emit a memento for them.
- Handle<AllocationSite> allocation_site;
- if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
- allocation_site = site;
- }
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, constructor, new_target));
+
+ ElementsKind to_kind = can_use_type_feedback ? site->GetElementsKind()
+ : initial_map->elements_kind();
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ if (!site.is_null()) site->SetElementsKind(to_kind);
+ }
- array = Handle<JSArray>::cast(
- factory->NewJSObjectFromMap(initial_map, NOT_TENURED, allocation_site));
- } else {
- array = Handle<JSArray>::cast(factory->NewJSObject(constructor));
+ // We should allocate with an initial map that reflects the allocation site
+ // advice. Therefore we use AllocateJSObjectFromMap instead of passing
+ // the constructor.
+ if (to_kind != initial_map->elements_kind()) {
+ initial_map = Map::AsElementsKind(initial_map, to_kind);
+ }
- // We might need to transition to holey
- ElementsKind kind = constructor->initial_map()->elements_kind();
- if (holey && !IsFastHoleyElementsKind(kind)) {
- kind = GetHoleyElementsKind(kind);
- JSObject::TransitionElementsKind(array, kind);
- }
+ // If we don't care to track arrays of to_kind ElementsKind, then
+ // don't emit a memento for them.
+ Handle<AllocationSite> allocation_site;
+ if (AllocationSite::GetMode(to_kind) == TRACK_ALLOCATION_SITE) {
+ allocation_site = site;
}
+ Handle<JSArray> array = Handle<JSArray>::cast(
+ factory->NewJSObjectFromMap(initial_map, NOT_TENURED, allocation_site));
+
factory->NewJSArrayStorage(array, 0, 0, DONT_INITIALIZE_ARRAY_ELEMENTS);
ElementsKind old_kind = array->GetElementsKind();
@@ -314,22 +316,28 @@ static Object* ArrayConstructorCommon(Isolate* isolate,
site->SetDoNotInlineCall();
}
- // Set up the prototoype using original function.
- // TODO(dslomov): instead of setting the __proto__,
- // use and cache the correct map.
- if (*original_constructor != *constructor) {
- if (original_constructor->has_instance_prototype()) {
- Handle<Object> prototype =
- handle(original_constructor->instance_prototype(), isolate);
- MAYBE_RETURN(JSObject::SetPrototype(array, prototype, false,
- Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- }
- }
-
return *array;
}
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_NewArray) {
+ HandleScope scope(isolate);
+ DCHECK_LE(3, args.length());
+ int const argc = args.length() - 3;
+ // TODO(bmeurer): Remove this Arguments nonsense.
+ Arguments argv(argc, args.arguments() - 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, argc + 1);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, type_info, argc + 2);
+ // TODO(bmeurer): Use MaybeHandle to pass around the AllocationSite.
+ Handle<AllocationSite> site = type_info->IsAllocationSite()
+ ? Handle<AllocationSite>::cast(type_info)
+ : Handle<AllocationSite>::null();
+ return ArrayConstructorCommon(isolate, constructor, new_target, site, &argv);
+}
+
RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
HandleScope scope(isolate);
@@ -365,25 +373,6 @@ RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
}
-RUNTIME_FUNCTION(Runtime_ArrayConstructorWithSubclassing) {
- HandleScope scope(isolate);
- int args_length = args.length();
- CHECK(args_length >= 2);
-
- // This variables and checks work around -Werror=strict-overflow.
- int pre_last_arg_index = args_length - 2;
- int last_arg_index = args_length - 1;
- CHECK(pre_last_arg_index >= 0);
- CHECK(last_arg_index >= 0);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, pre_last_arg_index);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, last_arg_index);
- Arguments caller_args(args_length - 2, args.arguments());
- return ArrayConstructorCommon(isolate, constructor, original_constructor,
- Handle<AllocationSite>::null(), &caller_args);
-}
-
-
RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
HandleScope scope(isolate);
Arguments empty_args(0, NULL);
@@ -498,5 +487,18 @@ RUNTIME_FUNCTION(Runtime_FastOneByteArrayJoin) {
// to a slow path.
return isolate->heap()->undefined_value();
}
+
+
+RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
+ Handle<Object> constructor;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, constructor,
+ Object::ArraySpeciesConstructor(isolate, original_array));
+ return *constructor;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 636371c134..94d98d4ffa 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -153,126 +153,80 @@ template <typename T>
T FromObject(Handle<Object> number);
template <>
-inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
+inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
return NumberToUint32(*number);
}
template <>
-inline int32_t FromObject<int32_t>(Handle<Object> number) {
+inline int8_t FromObject<int8_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
-template <typename T, typename F>
-inline T ToAtomic(F from) {
- return static_cast<T>(from);
-}
-
-template <typename T, typename F>
-inline T FromAtomic(F from) {
- return static_cast<T>(from);
-}
-
-template <typename T>
-inline Object* ToObject(Isolate* isolate, T t);
-
template <>
-inline Object* ToObject<int8_t>(Isolate* isolate, int8_t t) {
- return Smi::FromInt(t);
+inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
+ return NumberToUint32(*number);
}
template <>
-inline Object* ToObject<uint8_t>(Isolate* isolate, uint8_t t) {
- return Smi::FromInt(t);
+inline int16_t FromObject<int16_t>(Handle<Object> number) {
+ return NumberToInt32(*number);
}
template <>
-inline Object* ToObject<int16_t>(Isolate* isolate, int16_t t) {
- return Smi::FromInt(t);
+inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
+ return NumberToUint32(*number);
}
template <>
-inline Object* ToObject<uint16_t>(Isolate* isolate, uint16_t t) {
- return Smi::FromInt(t);
+inline int32_t FromObject<int32_t>(Handle<Object> number) {
+ return NumberToInt32(*number);
}
-template <>
-inline Object* ToObject<int32_t>(Isolate* isolate, int32_t t) {
- return *isolate->factory()->NewNumber(t);
-}
-template <>
-inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
- return *isolate->factory()->NewNumber(t);
-}
+inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }
-template <typename T>
-struct FromObjectTraits {};
+inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }
-template <>
-struct FromObjectTraits<int8_t> {
- typedef int32_t convert_type;
- typedef int8_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }
-template <>
-struct FromObjectTraits<uint8_t> {
- typedef uint32_t convert_type;
- typedef uint8_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, uint16_t t) {
+ return Smi::FromInt(t);
+}
-template <>
-struct FromObjectTraits<int16_t> {
- typedef int32_t convert_type;
- typedef int16_t atomic_type;
-};
-template <>
-struct FromObjectTraits<uint16_t> {
- typedef uint32_t convert_type;
- typedef uint16_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, int32_t t) {
+ return *isolate->factory()->NewNumber(t);
+}
-template <>
-struct FromObjectTraits<int32_t> {
- typedef int32_t convert_type;
- typedef int32_t atomic_type;
-};
-template <>
-struct FromObjectTraits<uint32_t> {
- typedef uint32_t convert_type;
- typedef uint32_t atomic_type;
-};
+inline Object* ToObject(Isolate* isolate, uint32_t t) {
+ return *isolate->factory()->NewNumber(t);
+}
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
Handle<Object> oldobj, Handle<Object> newobj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type oldval = ToAtomic<atomic_type>(FromObject<convert_type>(oldobj));
- atomic_type newval = ToAtomic<atomic_type>(FromObject<convert_type>(newobj));
- atomic_type result = CompareExchangeSeqCst(
- static_cast<atomic_type*>(buffer) + index, oldval, newval);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T oldval = FromObject<T>(oldobj);
+ T newval = FromObject<T>(newobj);
+ T result =
+ CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- atomic_type result = LoadSeqCst(static_cast<atomic_type*>(buffer) + index);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T result = LoadSeqCst(static_cast<T*>(buffer) + index);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ T value = FromObject<T>(obj);
+ StoreSeqCst(static_cast<T*>(buffer) + index, value);
return *obj;
}
@@ -280,72 +234,54 @@ inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- AddSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- SubSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- AndSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- OrSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- XorSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
- typedef typename FromObjectTraits<T>::atomic_type atomic_type;
- typedef typename FromObjectTraits<T>::convert_type convert_type;
- atomic_type value = ToAtomic<atomic_type>(FromObject<convert_type>(obj));
- atomic_type result =
- ExchangeSeqCst(static_cast<atomic_type*>(buffer) + index, value);
- return ToObject<T>(isolate, FromAtomic<T>(result));
+ T value = FromObject<T>(obj);
+ T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
+ return ToObject(isolate, result);
}
@@ -363,21 +299,19 @@ inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
Handle<Object> oldobj,
Handle<Object> newobj) {
typedef int32_t convert_type;
- typedef uint8_t atomic_type;
- atomic_type oldval = ClampToUint8(FromObject<convert_type>(oldobj));
- atomic_type newval = ClampToUint8(FromObject<convert_type>(newobj));
- atomic_type result = CompareExchangeSeqCst(
- static_cast<atomic_type*>(buffer) + index, oldval, newval);
- return ToObject<uint8_t>(isolate, FromAtomic<uint8_t>(result));
+ uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
+ uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
+ uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index,
+ oldval, newval);
+ return ToObject(isolate, result);
}
inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
typedef int32_t convert_type;
- typedef uint8_t atomic_type;
- atomic_type value = ClampToUint8(FromObject<convert_type>(obj));
- StoreSeqCst(static_cast<atomic_type*>(buffer) + index, value);
+ uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
+ StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
return *obj;
}
@@ -386,16 +320,15 @@ inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
size_t index, Handle<Object> obj) { \
typedef int32_t convert_type; \
- typedef uint8_t atomic_type; \
- atomic_type* p = static_cast<atomic_type*>(buffer) + index; \
+ uint8_t* p = static_cast<uint8_t*>(buffer) + index; \
convert_type operand = FromObject<convert_type>(obj); \
- atomic_type expected; \
- atomic_type result; \
+ uint8_t expected; \
+ uint8_t result; \
do { \
expected = *p; \
result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
} while (CompareExchangeSeqCst(p, expected, result) != expected); \
- return ToObject<uint8_t>(isolate, expected); \
+ return ToObject(isolate, expected); \
}
DO_UINT8_CLAMPED_OP(Add, +)
@@ -410,14 +343,13 @@ DO_UINT8_CLAMPED_OP(Xor, ^)
inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
size_t index, Handle<Object> obj) {
typedef int32_t convert_type;
- typedef uint8_t atomic_type;
- atomic_type* p = static_cast<atomic_type*>(buffer) + index;
- atomic_type result = ClampToUint8(FromObject<convert_type>(obj));
- atomic_type expected;
+ uint8_t* p = static_cast<uint8_t*>(buffer) + index;
+ uint8_t result = ClampToUint8(FromObject<convert_type>(obj));
+ uint8_t expected;
do {
expected = *p;
} while (CompareExchangeSeqCst(p, expected, result) != expected);
- return ToObject<uint8_t>(isolate, expected);
+ return ToObject(isolate, expected);
}
@@ -444,18 +376,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoCompareExchange<ctype>(isolate, buffer, index, oldobj, newobj);
+ return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
+ return DoCompareExchangeUint8Clamped(isolate, source, index, oldobj,
newobj);
default:
@@ -475,18 +408,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoLoad<ctype>(isolate, buffer, index);
+ return DoLoad<ctype>(isolate, source, index);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoLoad<uint8_t>(isolate, buffer, index);
+ return DoLoad<uint8_t>(isolate, source, index);
default:
break;
@@ -506,18 +440,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsStore) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoStore<ctype>(isolate, buffer, index, value);
+ return DoStore<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoStoreUint8Clamped(isolate, buffer, index, value);
+ return DoStoreUint8Clamped(isolate, source, index, value);
default:
break;
@@ -537,18 +472,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoAdd<ctype>(isolate, buffer, index, value);
+ return DoAdd<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoAddUint8Clamped(isolate, buffer, index, value);
+ return DoAddUint8Clamped(isolate, source, index, value);
default:
break;
@@ -568,18 +504,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoSub<ctype>(isolate, buffer, index, value);
+ return DoSub<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoSubUint8Clamped(isolate, buffer, index, value);
+ return DoSubUint8Clamped(isolate, source, index, value);
default:
break;
@@ -599,18 +536,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoAnd<ctype>(isolate, buffer, index, value);
+ return DoAnd<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoAndUint8Clamped(isolate, buffer, index, value);
+ return DoAndUint8Clamped(isolate, source, index, value);
default:
break;
@@ -630,18 +568,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoOr<ctype>(isolate, buffer, index, value);
+ return DoOr<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoOrUint8Clamped(isolate, buffer, index, value);
+ return DoOrUint8Clamped(isolate, source, index, value);
default:
break;
@@ -661,18 +600,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoXor<ctype>(isolate, buffer, index, value);
+ return DoXor<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoXorUint8Clamped(isolate, buffer, index, value);
+ return DoXorUint8Clamped(isolate, source, index, value);
default:
break;
@@ -692,18 +632,19 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- void* buffer = sta->GetBuffer()->backing_store();
+ uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
+ NumberToSize(isolate, sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return DoExchange<ctype>(isolate, buffer, index, value);
+ return DoExchange<ctype>(isolate, source, index, value);
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case kExternalUint8ClampedArray:
- return DoExchangeUint8Clamped(isolate, buffer, index, value);
+ return DoExchangeUint8Clamped(isolate, source, index, value);
default:
break;
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index ca5fecb0ab..ccd15e8b5d 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -36,9 +36,11 @@ RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
+ Handle<Object> name(constructor->shared()->name(), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNonCallable));
+ isolate, NewTypeError(MessageTemplate::kConstructorNonCallable, name));
}
@@ -106,7 +108,7 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
isolate->factory()->prototype_string(),
SLOPPY),
Object);
- if (!prototype_parent->IsNull() && !prototype_parent->IsSpecObject()) {
+ if (!prototype_parent->IsNull() && !prototype_parent->IsJSReceiver()) {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kPrototypeParentNotAnObject,
prototype_parent),
@@ -142,7 +144,11 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
constructor->shared()->set_name(*name_string);
if (!super_class->IsTheHole()) {
- Handle<Code> stub(isolate->builtins()->JSConstructStubForDerived());
+ // Derived classes, just like builtins, don't create implicit receivers in
+ // [[construct]]. Instead they just set up new.target and call into the
+ // constructor. Hence we can reuse the builtins construct stub for derived
+ // classes.
+ Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStub());
constructor->shared()->set_construct_stub(*stub);
}
@@ -228,41 +234,17 @@ RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
if (constructor->map()->is_strong()) {
DCHECK(prototype->map()->is_strong());
- RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::Freeze(prototype));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::Freeze(constructor));
- return *result;
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(prototype, FROZEN,
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(constructor, FROZEN,
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
}
return *constructor;
}
-RUNTIME_FUNCTION(Runtime_ClassGetSourceCode) {
- HandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
-
- Handle<Symbol> start_position_symbol(
- isolate->heap()->class_start_position_symbol());
- Handle<Object> start_position =
- JSReceiver::GetDataProperty(fun, start_position_symbol);
- if (!start_position->IsSmi()) return isolate->heap()->undefined_value();
-
- Handle<Symbol> end_position_symbol(
- isolate->heap()->class_end_position_symbol());
- Handle<Object> end_position =
- JSReceiver::GetDataProperty(fun, end_position_symbol);
- CHECK(end_position->IsSmi());
-
- Handle<String> source(
- String::cast(Script::cast(fun->shared()->script())->source()));
- return *isolate->factory()->NewSubString(
- source, Handle<Smi>::cast(start_position)->value(),
- Handle<Smi>::cast(end_position)->value());
-}
-
-
static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
Handle<Object> receiver,
Handle<JSObject> home_object,
@@ -482,36 +464,11 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
}
-RUNTIME_FUNCTION(Runtime_HandleStepInForDerivedConstructors) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) debug->HandleStepIn(function, true);
- return *isolate->factory()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DefaultConstructorCallSuper) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, super_constructor, 1);
- JavaScriptFrameIterator it(isolate);
-
- // Determine the actual arguments passed to the function.
- int argument_count = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::New(isolate, super_constructor, original_constructor,
- argument_count, arguments.get()));
-
- return *result;
+RUNTIME_FUNCTION(Runtime_GetSuperConstructor) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, active_function, 0);
+ return active_function->map()->prototype();
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 8790da05e3..15a3a14156 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -44,17 +44,13 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
}
-RUNTIME_FUNCTION(Runtime_CompileOptimized) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+namespace {
+Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
+ Compiler::ConcurrencyMode mode) {
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
- Compiler::ConcurrencyMode mode =
- concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Handle<Code> code;
Handle<Code> unoptimized(function->shared()->code());
if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
@@ -80,6 +76,24 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
return function->code();
}
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return CompileOptimized(isolate, function, Compiler::CONCURRENT);
+}
+
+
+RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return CompileOptimized(isolate, function, Compiler::NOT_CONCURRENT);
+}
+
RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
HandleScope scope(isolate);
@@ -137,6 +151,11 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
RUNTIME_ASSERT(frame->function()->IsJSFunction());
DCHECK(frame->function() == *function);
+ // Ensure the context register is updated for materialized objects.
+ JavaScriptFrameIterator top_it(isolate);
+ JavaScriptFrame* top_frame = top_it.frame();
+ isolate->set_context(Context::cast(top_frame->context()));
+
if (type == Deoptimizer::LAZY) {
return isolate->heap()->undefined_value();
}
@@ -352,40 +371,6 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
}
-RUNTIME_FUNCTION(Runtime_CompileString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
-
- // Extract native context.
- Handle<Context> context(isolate->native_context());
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, context)) {
- Handle<Object> error_message =
- context->ErrorMessageForCodeGenerationFromStrings();
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewEvalError(MessageTemplate::kCodeGenFromStrings, error_message));
- }
-
- // Compile source string in the native context.
- ParseRestriction restriction = function_literal_only
- ? ONLY_SINGLE_FUNCTION_LITERAL
- : NO_PARSE_RESTRICTION;
- Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
- Handle<JSFunction> fun;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, fun,
- Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
- restriction, RelocInfo::kNoPosition));
- return *fun;
-}
-
-
static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 614b4a9ede..96292ad1c5 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -7,7 +7,6 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
#include "src/date.h"
-#include "src/dateparser-inl.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -15,52 +14,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_DateMakeDay) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_SMI_ARG_CHECKED(year, 0);
- CONVERT_SMI_ARG_CHECKED(month, 1);
-
- int days = isolate->date_cache()->DaysFromYearMonth(year, month);
- RUNTIME_ASSERT(Smi::IsValid(days));
- return Smi::FromInt(days);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateSetValue) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0);
- CONVERT_DOUBLE_ARG_CHECKED(time, 1);
- CONVERT_SMI_ARG_CHECKED(is_utc, 2);
-
- DateCache* date_cache = isolate->date_cache();
-
- Handle<Object> value;
- bool is_value_nan = false;
- if (std::isnan(time)) {
- value = isolate->factory()->nan_value();
- is_value_nan = true;
- } else if (!is_utc && (time < -DateCache::kMaxTimeBeforeUTCInMs ||
- time > DateCache::kMaxTimeBeforeUTCInMs)) {
- value = isolate->factory()->nan_value();
- is_value_nan = true;
- } else {
- time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
- if (time < -DateCache::kMaxTimeInMs || time > DateCache::kMaxTimeInMs) {
- value = isolate->factory()->nan_value();
- is_value_nan = true;
- } else {
- value = isolate->factory()->NewNumber(DoubleToInteger(time));
- }
- }
- date->SetValue(*value, is_value_nan);
- return *value;
-}
-
-
RUNTIME_FUNCTION(Runtime_IsDate) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
@@ -71,7 +24,7 @@ RUNTIME_FUNCTION(Runtime_IsDate) {
RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kNotDateObject));
}
@@ -79,120 +32,8 @@ RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- if (FLAG_log_timer_events || FLAG_prof_cpp) LOG(isolate, CurrentTimeEvent());
-
- // According to ECMA-262, section 15.9.1, page 117, the precision of
- // the number in a Date object representing a particular instant in
- // time is milliseconds. Therefore, we floor the result of getting
- // the OS time.
- double millis;
- if (FLAG_verify_predictable) {
- millis = 1388534400000.0; // Jan 1 2014 00:00:00 GMT+0000
- millis += Floor(isolate->heap()->synthetic_time());
- } else {
- millis = Floor(base::OS::TimeCurrentMillis());
- }
- return *isolate->factory()->NewNumber(millis);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateParseString) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
-
- RUNTIME_ASSERT(output->HasFastElements());
- JSObject::EnsureCanContainHeapObjectElements(output);
- RUNTIME_ASSERT(output->HasFastObjectElements());
- Handle<FixedArray> output_array(FixedArray::cast(output->elements()));
- RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
-
- Handle<String> str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str,
- Object::ToString(isolate, input));
-
- str = String::Flatten(str);
- DisallowHeapAllocation no_gc;
-
- bool result;
- String::FlatContent str_content = str->GetFlatContent();
- if (str_content.IsOneByte()) {
- result = DateParser::Parse(str_content.ToOneByteVector(), *output_array,
- isolate->unicode_cache());
- } else {
- DCHECK(str_content.IsTwoByte());
- result = DateParser::Parse(str_content.ToUC16Vector(), *output_array,
- isolate->unicode_cache());
- }
-
- if (result) {
- return *output;
- } else {
- return isolate->heap()->null_value();
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateLocalTimezone) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
- x <= DateCache::kMaxTimeBeforeUTCInMs);
- const char* zone =
- isolate->date_cache()->LocalTimezone(static_cast<int64_t>(x));
- Handle<String> result =
- isolate->factory()->NewStringFromUtf8(CStrVector(zone)).ToHandleChecked();
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateToUTC) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
- x <= DateCache::kMaxTimeBeforeUTCInMs);
- int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x));
-
- return *isolate->factory()->NewNumber(static_cast<double>(time));
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
- HandleScope hs(isolate);
- DCHECK(args.length() == 0);
- if (isolate->serializer_enabled()) return isolate->heap()->undefined_value();
- if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
- Handle<FixedArray> date_cache_version =
- isolate->factory()->NewFixedArray(1, TENURED);
- date_cache_version->set(0, Smi::FromInt(0));
- isolate->eternal_handles()->CreateSingleton(
- isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
- }
- Handle<FixedArray> date_cache_version =
- Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
- EternalHandles::DATE_CACHE_VERSION));
- // Return result as a JS array.
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->array_function());
- JSArray::SetContent(Handle<JSArray>::cast(result), date_cache_version);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_DateField) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSDate, date, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- DCHECK_LE(0, index);
- if (index == 0) return date->value();
- return JSDate::GetField(date, Smi::FromInt(index));
+ DCHECK_EQ(0, args.length());
+ return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 27216fb323..d94c75fa0e 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -150,33 +150,29 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<Object> object) {
Factory* factory = isolate->factory();
- if (object->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- if (function->shared()->bound()) {
- RUNTIME_ASSERT_HANDLIFIED(function->function_bindings()->IsFixedArray(),
- JSArray);
-
- Handle<BindingsArray> bindings(function->function_bindings());
-
- Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
- Handle<String> target =
- factory->NewStringFromAsciiChecked("[[TargetFunction]]");
- result->set(0, *target);
- result->set(1, bindings->bound_function());
-
- Handle<String> bound_this =
- factory->NewStringFromAsciiChecked("[[BoundThis]]");
- result->set(2, *bound_this);
- result->set(3, bindings->bound_this());
-
- Handle<String> bound_args =
- factory->NewStringFromAsciiChecked("[[BoundArgs]]");
- result->set(4, *bound_args);
- Handle<JSArray> arguments_array =
- BindingsArray::CreateBoundArguments(bindings);
- result->set(5, *arguments_array);
- return factory->NewJSArrayWithElements(result);
- }
+ if (object->IsJSBoundFunction()) {
+ Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object);
+
+ Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
+ Handle<String> target =
+ factory->NewStringFromAsciiChecked("[[TargetFunction]]");
+ result->set(0, *target);
+ result->set(1, function->bound_target_function());
+
+ Handle<String> bound_this =
+ factory->NewStringFromAsciiChecked("[[BoundThis]]");
+ result->set(2, *bound_this);
+ result->set(3, function->bound_this());
+
+ Handle<String> bound_args =
+ factory->NewStringFromAsciiChecked("[[BoundArgs]]");
+ result->set(4, *bound_args);
+ Handle<FixedArray> bound_arguments =
+ factory->CopyFixedArray(handle(function->bound_arguments(), isolate));
+ Handle<JSArray> arguments_array =
+ factory->NewJSArrayWithElements(bound_arguments);
+ result->set(5, *arguments_array);
+ return factory->NewJSArrayWithElements(result);
} else if (object->IsJSMapIterator()) {
Handle<JSMapIterator> iterator = Handle<JSMapIterator>::cast(object);
return GetIteratorInternalProperties(isolate, iterator);
@@ -842,10 +838,10 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
- bool ignore_nested_scopes = false;
+ ScopeIterator::Option option = ScopeIterator::DEFAULT;
if (args.length() == 4) {
CONVERT_BOOLEAN_ARG_CHECKED(flag, 3);
- ignore_nested_scopes = flag;
+ if (flag) option = ScopeIterator::IGNORE_NESTED_SCOPES;
}
// Get the frame where the debugging is performed.
@@ -855,7 +851,7 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
List<Handle<JSObject> > result(4);
- ScopeIterator it(isolate, &frame_inspector, ignore_nested_scopes);
+ ScopeIterator it(isolate, &frame_inspector, option);
for (; !it.Done(); it.Next()) {
Handle<JSObject> details;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
@@ -873,15 +869,18 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
// Check arguments.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
// Count the visible scopes.
int n = 0;
- for (ScopeIterator it(isolate, fun); !it.Done(); it.Next()) {
- n++;
+ if (function->IsJSFunction()) {
+ for (ScopeIterator it(isolate, Handle<JSFunction>::cast(function));
+ !it.Done(); it.Next()) {
+ n++;
+ }
}
return Smi::FromInt(n);
@@ -1212,39 +1211,18 @@ RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
// of frames to step down.
RUNTIME_FUNCTION(Runtime_PrepareStep) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK(args.length() == 2);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
- if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
+ if (!args[1]->IsNumber()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
- CONVERT_NUMBER_CHECKED(int, wrapped_frame_id, Int32, args[3]);
-
- StackFrame::Id frame_id;
- if (wrapped_frame_id == 0) {
- frame_id = StackFrame::NO_ID;
- } else {
- frame_id = DebugFrameHelper::UnwrapFrameId(wrapped_frame_id);
- }
-
// Get the step action and check validity.
StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
if (step_action != StepIn && step_action != StepNext &&
- step_action != StepOut && step_action != StepInMin &&
- step_action != StepMin && step_action != StepFrame) {
- return isolate->Throw(isolate->heap()->illegal_argument_string());
- }
-
- if (frame_id != StackFrame::NO_ID && step_action != StepNext &&
- step_action != StepMin && step_action != StepOut) {
- return isolate->ThrowIllegalOperation();
- }
-
- // Get the number of steps.
- int step_count = NumberToInt32(args[2]);
- if (step_count < 1) {
+ step_action != StepOut && step_action != StepFrame) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
}
@@ -1252,8 +1230,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
isolate->debug()->ClearStepping();
// Prepare step.
- isolate->debug()->PrepareStep(static_cast<StepAction>(step_action),
- step_count, frame_id);
+ isolate->debug()->PrepareStep(static_cast<StepAction>(step_action));
return isolate->heap()->undefined_value();
}
@@ -1281,7 +1258,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 5);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 5);
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
@@ -1305,7 +1282,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, context_extension, 3);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 3);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1351,6 +1328,17 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
}
+static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate, Object* object,
+ Object* proto) {
+ PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
+ while (true) {
+ iter.AdvanceIgnoringProxies();
+ if (iter.IsAtEnd()) return false;
+ if (iter.IsAtEnd(proto)) return true;
+ }
+}
+
+
// Scan the heap for objects with direct references to an object
// args[0]: the object to find references to
// args[1]: constructor function for instances to exclude (Mirror)
@@ -1380,7 +1368,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
// Check filter if supplied. This is normally used to avoid
// references from mirror objects.
if (!filter->IsUndefined() &&
- obj->HasInPrototypeChain(isolate, *filter)) {
+ HasInPrototypeChainIgnoringProxies(isolate, obj, *filter)) {
continue;
}
if (obj->IsJSGlobalObject()) {
@@ -1449,7 +1437,12 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- return *Object::GetPrototype(isolate, obj);
+ Handle<Object> prototype;
+ // TODO(1543): Come up with a solution for clients to handle potential errors
+ // thrown by an intermediate proxy.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
+ Object::GetPrototype(isolate, obj));
+ return *prototype;
}
@@ -1474,10 +1467,28 @@ RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_CHECKED(Object, f, 0);
+ if (f->IsJSFunction()) {
+ return JSFunction::cast(f)->shared()->inferred_name();
+ }
+ return isolate->heap()->empty_string();
+}
+
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return f->shared()->inferred_name();
+RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+
+ if (function->IsJSBoundFunction()) {
+ return Handle<JSBoundFunction>::cast(function)->name();
+ }
+ Handle<Object> name =
+ JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
+ return *name;
}
@@ -1612,57 +1623,11 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
}
-bool DebugStepInIsActive(Debug* debug) {
- return debug->is_active() && debug->IsStepping() &&
- debug->last_step_action() == StepIn;
-}
-
-
-// Check whether debugger is about to step into the callback that is passed
-// to a built-in function such as Array.forEach. This check is done before
-// %DebugPrepareStepInIfStepping and is not strictly necessary. However, if it
-// returns false, we can skip %DebugPrepareStepInIfStepping, useful in loops.
-RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- if (!DebugStepInIsActive(isolate->debug())) {
- return isolate->heap()->false_value();
- }
- CONVERT_ARG_CHECKED(Object, object, 0);
- RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
- // We do not step into the callback if it's a builtin other than a bound,
- // or not even a function.
- JSFunction* fun;
- if (object->IsJSFunction()) {
- fun = JSFunction::cast(object);
- } else {
- fun = JSGeneratorObject::cast(object)->function();
- }
- return isolate->heap()->ToBoolean(fun->shared()->IsSubjectToDebugging() ||
- fun->shared()->bound());
-}
-
-
-void FloodDebugSubjectWithOneShot(Debug* debug, Handle<JSFunction> function) {
- if (function->shared()->IsSubjectToDebugging() ||
- function->shared()->bound()) {
- // When leaving the function, step out has been activated, but not performed
- // if we do not leave the builtin. To be able to step into the function
- // again, we need to clear the step out at this point.
- debug->ClearStepOut();
- debug->FloodWithOneShotGeneric(function);
- }
-}
-
-
// Set one shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback,
// if we are indeed stepping and the callback is subject to debugging.
RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
DCHECK(args.length() == 1);
- Debug* debug = isolate->debug();
- if (!DebugStepInIsActive(debug)) return isolate->heap()->undefined_value();
-
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
@@ -1674,22 +1639,19 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
Handle<JSGeneratorObject>::cast(object)->function(), isolate);
}
- FloodDebugSubjectWithOneShot(debug, fun);
+ isolate->debug()->PrepareStepIn(fun);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, handler, 2);
isolate->PushPromise(promise, function);
- Debug* debug = isolate->debug();
- if (handler->IsJSFunction() && DebugStepInIsActive(debug)) {
- FloodDebugSubjectWithOneShot(debug, Handle<JSFunction>::cast(handler));
- }
+ // If we are in step-in mode, flood the handler.
+ isolate->debug()->EnableStepIn();
return isolate->heap()->undefined_value();
}
@@ -1726,17 +1688,6 @@ RUNTIME_FUNCTION(Runtime_DebugIsActive) {
}
-RUNTIME_FUNCTION(Runtime_DebugHandleStepIntoAccessor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) debug->HandleStepIn(function, false);
- return *isolate->factory()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
return NULL;
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 16e6149e7c..befd337098 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -7,7 +7,6 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/compiler.h"
-#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -38,74 +37,14 @@ RUNTIME_FUNCTION(Runtime_FunctionSetName) {
}
-RUNTIME_FUNCTION(Runtime_FunctionNameShouldPrintAsAnonymous) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(
- f->shared()->name_should_print_as_anonymous());
-}
-
-
-RUNTIME_FUNCTION(Runtime_CompleteFunctionConstruction) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 2);
- func->shared()->set_name_should_print_as_anonymous(true);
-
- // If new.target is equal to |constructor| then the function |func| created
- // is already correctly setup and nothing else should be done here.
- // But if new.target is not equal to |constructor| then we are have a
- // Function builtin subclassing case and therefore the function |func|
- // has wrong initial map. To fix that we create a new function object with
- // correct initial map.
- if (new_target->IsUndefined() || *constructor == *new_target) {
- return *func;
- }
-
- // Create a new JSFunction object with correct initial map.
- HandleScope handle_scope(isolate);
- Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(new_target);
-
- DCHECK(constructor->has_initial_map());
- Handle<Map> initial_map =
- JSFunction::EnsureDerivedHasInitialMap(original_constructor, constructor);
-
- Handle<SharedFunctionInfo> shared_info(func->shared(), isolate);
- Handle<Context> context(func->context(), isolate);
- Handle<JSFunction> result =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- initial_map, shared_info, context, NOT_TENURED);
- DCHECK_EQ(func->IsConstructor(), result->IsConstructor());
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionIsArrow) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->is_arrow());
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionIsConciseMethod) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->is_concise_method());
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
RUNTIME_ASSERT(f->RemovePrototype());
+ f->shared()->set_construct_stub(
+ *isolate->builtins()->ConstructedNonConstructable());
return isolate->heap()->undefined_value();
}
@@ -113,23 +52,23 @@ RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
- CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
+ if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
+ Handle<Object> script(Handle<JSFunction>::cast(function)->shared()->script(),
+ isolate);
if (!script->IsScript()) return isolate->heap()->undefined_value();
-
return *Script::GetWrapper(Handle<Script>::cast(script));
}
RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
- Handle<SharedFunctionInfo> shared(f->shared());
- return *shared->GetSourceCode();
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
+ return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
}
@@ -203,18 +142,6 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
}
-RUNTIME_FUNCTION(Runtime_FunctionHidesSource) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
-
- SharedFunctionInfo* shared = f->shared();
- bool hide_source = !shared->script()->IsScript() ||
- Script::cast(shared->script())->hide_source();
- return isolate->heap()->ToBoolean(hide_source);
-}
-
-
RUNTIME_FUNCTION(Runtime_SetCode) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -224,7 +151,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
Handle<SharedFunctionInfo> target_shared(target->shared());
Handle<SharedFunctionInfo> source_shared(source->shared());
- RUNTIME_ASSERT(!source_shared->bound());
if (!Compiler::Compile(source, KEEP_EXCEPTION)) {
return isolate->heap()->exception();
@@ -320,197 +246,25 @@ RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
}
-// Find the arguments of the JavaScript function invocation that called
-// into C++ code. Collect these in a newly allocated array of handles (possibly
-// prefixed by a number of empty handles).
-base::SmartArrayPointer<Handle<Object>> Runtime::GetCallerArguments(
- Isolate* isolate, int prefix_argc, int* total_argc) {
- // Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- List<JSFunction*> functions(2);
- frame->GetFunctions(&functions);
- if (functions.length() > 1) {
- int inlined_jsframe_index = functions.length() - 1;
- TranslatedState translated_values(frame);
- translated_values.Prepare(false, frame->fp());
-
- int argument_count = 0;
- TranslatedFrame* translated_frame =
- translated_values.GetArgumentsInfoFromJSFrameIndex(
- inlined_jsframe_index, &argument_count);
- TranslatedFrame::iterator iter = translated_frame->begin();
-
- // Skip the function.
- iter++;
-
- // Skip the receiver.
- iter++;
- argument_count--;
-
- *total_argc = prefix_argc + argument_count;
- base::SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
- bool should_deoptimize = false;
- for (int i = 0; i < argument_count; i++) {
- should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
- Handle<Object> value = iter->GetValue();
- param_data[prefix_argc + i] = value;
- iter++;
- }
-
- if (should_deoptimize) {
- translated_values.StoreMaterializedValuesAndDeopt();
- }
-
- return param_data;
- } else {
- it.AdvanceToArgumentsFrame();
- frame = it.frame();
- int args_count = frame->ComputeParametersCount();
-
- *total_argc = prefix_argc + args_count;
- base::SmartArrayPointer<Handle<Object> > param_data(
- NewArray<Handle<Object> >(*total_argc));
- for (int i = 0; i < args_count; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
- param_data[prefix_argc + i] = val;
- }
- return param_data;
- }
-}
-
-
-RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, bindee, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, this_object, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
-
- // TODO(lrn): Create bound function in C++ code from premade shared info.
- bound_function->shared()->set_bound(true);
- bound_function->shared()->set_optimized_code_map(Smi::FromInt(0));
- bound_function->shared()->set_inferred_name(isolate->heap()->empty_string());
- // Get all arguments of calling function (Function.prototype.bind).
- int argc = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argc);
- // Don't count the this-arg.
- if (argc > 0) {
- RUNTIME_ASSERT(arguments[0].is_identical_to(this_object));
- argc--;
- } else {
- RUNTIME_ASSERT(this_object->IsUndefined());
- }
- // Initialize array of bindings (function, this, and any existing arguments
- // if the function was already bound).
- Handle<BindingsArray> new_bindings;
- int out_index = 0;
- Handle<TypeFeedbackVector> vector(
- bound_function->shared()->feedback_vector());
- if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
- Handle<BindingsArray> old_bindings(
- JSFunction::cast(*bindee)->function_bindings());
- RUNTIME_ASSERT(old_bindings->bindings_count() >= 0);
- bindee = handle(old_bindings->bound_function(), isolate);
- Handle<Object> old_bound_this(old_bindings->bound_this(), isolate);
- new_bindings = BindingsArray::New(isolate, vector, bindee, old_bound_this,
- old_bindings->bindings_count() + argc);
- for (int n = old_bindings->bindings_count(); out_index < n; out_index++) {
- new_bindings->set_binding(out_index, old_bindings->binding(out_index));
- }
- } else {
- new_bindings =
- BindingsArray::New(isolate, vector, bindee, this_object, argc);
- }
- // Copy arguments, skipping the first which is "this_arg".
- for (int j = 0; j < argc; j++, out_index++) {
- new_bindings->set_binding(out_index, *arguments[j + 1]);
- }
- new_bindings->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
- bound_function->set_function_bindings(*new_bindings);
-
- // Update length. Have to remove the prototype first so that map migration
- // is happy about the number of fields.
- RUNTIME_ASSERT(bound_function->RemovePrototype());
-
- // The new function should have the same [[Prototype]] as the bindee.
- Handle<Map> bound_function_map =
- bindee->IsConstructor()
- ? isolate->bound_function_with_constructor_map()
- : isolate->bound_function_without_constructor_map();
- PrototypeIterator iter(isolate, bindee);
- Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
- if (bound_function_map->prototype() != *proto) {
- bound_function_map = Map::TransitionToPrototype(bound_function_map, proto,
- REGULAR_PROTOTYPE);
- }
- JSObject::MigrateToMap(bound_function, bound_function_map);
- DCHECK_EQ(bindee->IsConstructor(), bound_function->IsConstructor());
-
- Handle<String> length_string = isolate->factory()->length_string();
- // These attributes must be kept in sync with how the bootstrapper
- // configures the bound_function_map retrieved above.
- // We use ...IgnoreAttributes() here because of length's read-onliness.
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- bound_function, length_string, new_length, attr));
- return *bound_function;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) {
- HandleScope handles(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
- if (callable->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- if (function->shared()->bound()) {
- RUNTIME_ASSERT(function->function_bindings()->IsBindingsArray());
- Handle<BindingsArray> bindings(function->function_bindings());
- return *BindingsArray::CreateRuntimeBindings(bindings);
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
+RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- // First argument is a function to use as a constructor.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- RUNTIME_ASSERT(function->shared()->bound());
-
- // The argument is a bound function. Extract its bound arguments
- // and callable.
- Handle<BindingsArray> bound_args =
- handle(BindingsArray::cast(function->function_bindings()));
- int bound_argc = bound_args->bindings_count();
- Handle<Object> bound_function(bound_args->bound_function(), isolate);
- DCHECK(!bound_function->IsJSFunction() ||
- !Handle<JSFunction>::cast(bound_function)->shared()->bound());
-
- int total_argc = 0;
- base::SmartArrayPointer<Handle<Object>> param_data =
- Runtime::GetCallerArguments(isolate, bound_argc, &total_argc);
- for (int i = 0; i < bound_argc; i++) {
- param_data[i] = handle(bound_args->binding(i), isolate);
+ DCHECK_LE(2, args.length());
+ int const argc = args.length() - 2;
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ ScopedVector<Handle<Object>> argv(argc);
+ for (int i = 0; i < argc; ++i) {
+ argv[i] = args.at<Object>(2 + i);
}
-
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::New(isolate, bound_function, bound_function,
- total_argc, param_data.get()));
+ isolate, result,
+ Execution::Call(isolate, target, receiver, argc, argv.start()));
return *result;
}
-RUNTIME_FUNCTION(Runtime_Call) {
+RUNTIME_FUNCTION(Runtime_TailCall) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
int const argc = args.length() - 2;
@@ -565,18 +319,6 @@ RUNTIME_FUNCTION(Runtime_Apply) {
}
-RUNTIME_FUNCTION(Runtime_GetOriginalConstructor) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- // Currently we don't inline [[Construct]] calls.
- return frame->IsConstructor() && !frame->HasInlinedFrames()
- ? frame->GetOriginalConstructor()
- : isolate->heap()->undefined_value();
-}
-
-
// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
HandleScope scope(isolate);
@@ -589,20 +331,11 @@ RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
}
-RUNTIME_FUNCTION(Runtime_IsConstructCall) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- return isolate->heap()->ToBoolean(frame->IsConstructor());
-}
-
-
RUNTIME_FUNCTION(Runtime_IsFunction) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSFunction());
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return isolate->heap()->ToBoolean(object->IsFunction());
}
@@ -613,5 +346,16 @@ RUNTIME_FUNCTION(Runtime_ThrowStrongModeTooFewArguments) {
NewTypeError(MessageTemplate::kStrongArity));
}
+
+RUNTIME_FUNCTION(Runtime_FunctionToString) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+ return function->IsJSBoundFunction()
+ ? *JSBoundFunction::ToString(
+ Handle<JSBoundFunction>::cast(function))
+ : *JSFunction::ToString(Handle<JSFunction>::cast(function));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index b2bad77c98..f4ef679bf6 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -30,7 +30,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexWait) {
RUNTIME_ASSERT(timeout == V8_INFINITY || !std::isnan(timeout));
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = index << 2;
+ size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::Wait(isolate, array_buffer, addr, value, timeout);
}
@@ -47,7 +47,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexWake) {
RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = index << 2;
+ size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::Wake(isolate, array_buffer, addr, count);
}
@@ -67,8 +67,8 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexWakeOrRequeue) {
RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr1 = index1 << 2;
- size_t addr2 = index2 << 2;
+ size_t addr1 = (index1 << 2) + NumberToSize(isolate, sta->byte_offset());
+ size_t addr2 = (index2 << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::WakeOrRequeue(isolate, array_buffer, addr1, count,
value, addr2);
@@ -85,7 +85,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsFutexNumWaitersForTesting) {
RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = index << 2;
+ size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
}
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 208f7f6680..926cd3ce2d 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -209,14 +209,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
}
-RUNTIME_FUNCTION(Runtime_FunctionIsGenerator) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->is_generator());
-}
-
-
RUNTIME_FUNCTION(Runtime_GeneratorNext) {
UNREACHABLE(); // Optimization disabled in SetUpGenerators().
return NULL;
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 8b0c98f161..e1f0c8e959 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -389,10 +389,11 @@ RUNTIME_FUNCTION(Runtime_InternalDateParse) {
UDate date = date_format->parse(u_date, status);
if (U_FAILURE(status)) return isolate->heap()->undefined_value();
- Handle<Object> result;
+ Handle<JSDate> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::NewDate(isolate, static_cast<double>(date)));
- DCHECK(result->IsJSDate());
+ isolate, result,
+ JSDate::New(isolate->date_function(), isolate->date_function(),
+ static_cast<double>(date)));
return *result;
}
@@ -472,6 +473,8 @@ RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
+ isolate->CountUsage(v8::Isolate::UseCounterFeature::kIntlV8Parse);
+
v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
icu::DecimalFormat* number_format =
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 478a954b3e..ee664645d4 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -5,14 +5,14 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/parser.h"
-#include "src/prettyprinter.h"
+#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
@@ -92,7 +92,7 @@ RUNTIME_FUNCTION(Runtime_ReThrow) {
RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
+ DCHECK_LE(0, args.length());
return isolate->StackOverflow();
}
@@ -153,6 +153,14 @@ RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
}
+RUNTIME_FUNCTION(Runtime_ThrowIllegalInvocation) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIllegalInvocation));
+}
+
+
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -171,6 +179,16 @@ RUNTIME_FUNCTION(Runtime_ThrowStrongModeImplicitConversion) {
}
+RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<String> type = Object::TypeOf(isolate, object);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
+}
+
+
RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
DCHECK(args.length() == 3);
HandleScope scope(isolate);
@@ -284,18 +302,6 @@ RUNTIME_FUNCTION(Runtime_MessageGetScript) {
}
-RUNTIME_FUNCTION(Runtime_ErrorToStringRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, error, 0);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->error_tostring_helper()->Stringify(isolate, error));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_FormatMessageString) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
@@ -329,8 +335,8 @@ static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
}
-static inline Object* ReturnPositiveSmiOrNull(int value, Isolate* isolate) {
- if (value >= 0) return Smi::FromInt(value);
+static inline Object* ReturnPositiveNumberOrNull(int value, Isolate* isolate) {
+ if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
return isolate->heap()->null_value();
}
@@ -344,8 +350,8 @@ CALLSITE_GET(GetFileName, ReturnDereferencedHandle)
CALLSITE_GET(GetFunctionName, ReturnDereferencedHandle)
CALLSITE_GET(GetScriptNameOrSourceUrl, ReturnDereferencedHandle)
CALLSITE_GET(GetMethodName, ReturnDereferencedHandle)
-CALLSITE_GET(GetLineNumber, ReturnPositiveSmiOrNull)
-CALLSITE_GET(GetColumnNumber, ReturnPositiveSmiOrNull)
+CALLSITE_GET(GetLineNumber, ReturnPositiveNumberOrNull)
+CALLSITE_GET(GetColumnNumber, ReturnPositiveNumberOrNull)
CALLSITE_GET(IsNative, ReturnBoolean)
CALLSITE_GET(IsToplevel, ReturnBoolean)
CALLSITE_GET(IsEval, ReturnBoolean)
@@ -372,50 +378,46 @@ RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
}
-RUNTIME_FUNCTION(Runtime_HarmonyToString) {
- // TODO(caitp): Delete this runtime method when removing --harmony-tostring
- return isolate->heap()->ToBoolean(FLAG_harmony_tostring);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetTypeFeedbackVector) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- return function->shared()->feedback_vector();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetCallerJSFunction) {
- SealHandleScope shs(isolate);
- StackFrameIterator it(isolate);
- RUNTIME_ASSERT(it.frame()->type() == StackFrame::STUB);
- it.Advance();
- RUNTIME_ASSERT(it.frame()->type() == StackFrame::JAVA_SCRIPT);
- return JavaScriptFrame::cast(it.frame())->function();
-}
-
+namespace {
-RUNTIME_FUNCTION(Runtime_GetCodeStubExportsObject) {
- HandleScope shs(isolate);
- return isolate->heap()->code_stub_exports_object();
+bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
+ JavaScriptFrameIterator it(isolate);
+ if (!it.done()) {
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* fun = frame->function();
+ Object* script = fun->shared()->script();
+ if (script->IsScript() &&
+ !(Script::cast(script)->source()->IsUndefined())) {
+ Handle<Script> casted_script(Script::cast(script));
+ // Compute the location from the function and the relocation info of the
+ // baseline code. For optimized code this will use the deoptimization
+ // information to get canonical location information.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ it.frame()->Summarize(&frames);
+ FrameSummary& summary = frames.last();
+ int pos = summary.code()->SourcePosition(summary.pc());
+ *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
+ return true;
+ }
+ }
+ return false;
}
-namespace {
-
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
- if (isolate->ComputeLocation(&location)) {
+ if (ComputeLocation(isolate, &location)) {
Zone zone;
base::SmartPointer<ParseInfo> info(
location.function()->shared()->is_function()
? new ParseInfo(&zone, location.function())
: new ParseInfo(&zone, location.script()));
if (Parser::ParseStatic(info.get())) {
- CallPrinter printer(isolate);
+ CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
const char* string = printer.Print(info->literal(), location.start_pos());
- return isolate->factory()->NewStringFromAsciiChecked(string);
+ if (strlen(string) > 0) {
+ return isolate->factory()->NewStringFromAsciiChecked(string);
+ }
} else {
isolate->clear_pending_exception();
}
@@ -435,5 +437,37 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
}
+
+RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<String> callsite = RenderCallSite(isolate, object);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor, callsite));
+}
+
+
+// ES6 section 7.3.17 CreateListFromArrayLike (obj)
+RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<FixedArray> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::CreateListFromArrayLike(isolate, object, ElementTypes::kAll));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(counter, 0);
+ isolate->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(counter));
+ return isolate->heap()->undefined_value();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index ef86869ccc..d061a4916d 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -150,18 +150,24 @@ RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, property_names, 1);
- Handle<Object> cache_type = property_names;
- Handle<Map> cache_type_map = handle(property_names->map(), isolate);
- Handle<Map> receiver_map = handle(receiver->map(), isolate);
+ Object* property_names = Runtime_GetPropertyNamesFast(
+ 1, Handle<Object>::cast(receiver).location(), isolate);
+ if (isolate->has_pending_exception()) {
+ return property_names;
+ }
+ Handle<Object> cache_type(property_names, isolate);
Handle<FixedArray> cache_array;
int cache_length;
- if (cache_type_map.is_identical_to(isolate->factory()->meta_map())) {
+ Handle<Map> receiver_map = handle(receiver->map(), isolate);
+ if (cache_type->IsMap()) {
+ Handle<Map> cache_type_map =
+ handle(Handle<Map>::cast(cache_type)->map(), isolate);
+ DCHECK(cache_type_map.is_identical_to(isolate->factory()->meta_map()));
int enum_length = cache_type_map->EnumLength();
DescriptorArray* descriptors = receiver_map->instance_descriptors();
if (enum_length > 0 && descriptors->HasEnumCache()) {
@@ -175,9 +181,8 @@ RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
cache_array = Handle<FixedArray>::cast(cache_type);
cache_length = cache_array->length();
- STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- if (receiver_map->instance_type() <= LAST_JS_PROXY_TYPE) {
- DCHECK_GE(receiver_map->instance_type(), LAST_JS_PROXY_TYPE);
+ STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+ if (receiver_map->instance_type() == JS_PROXY_TYPE) {
// Zero indicates proxy
cache_type = Handle<Object>(Smi::FromInt(0), isolate);
} else {
@@ -186,14 +191,12 @@ RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
}
}
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(4);
- result->set(0, *receiver);
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(3);
+ result->set(0, *cache_type);
result->set(1, *cache_array);
- result->set(2, *cache_type);
- result->set(3, Smi::FromInt(cache_length));
+ result->set(2, Smi::FromInt(cache_length));
return *result;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
index 07232d59b8..45f8183052 100644
--- a/deps/v8/src/runtime/runtime-json.cc
+++ b/deps/v8/src/runtime/runtime-json.cc
@@ -7,9 +7,9 @@
#include "src/arguments.h"
#include "src/char-predicates-inl.h"
#include "src/isolate-inl.h"
-#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/objects-inl.h"
+#include "src/parsing/json-parser.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 903e2feb53..b0e41dcdaa 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -6,9 +6,9 @@
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/isolate-inl.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -159,11 +159,9 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
DisallowHeapAllocation no_gc;
DCHECK(IsFastElementsKind(constant_elements_kind));
Context* native_context = isolate->context()->native_context();
- Object* maps_array = is_strong
- ? native_context->js_array_strong_maps()
- : native_context->js_array_maps();
- DCHECK(!maps_array->IsUndefined());
- Object* map = FixedArray::cast(maps_array)->get(constant_elements_kind);
+ Strength strength = is_strong ? Strength::STRONG : Strength::WEAK;
+ Object* map = native_context->get(
+ Context::ArrayMapIndex(constant_elements_kind, strength));
object->set_map(Map::cast(map));
}
@@ -236,13 +234,33 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
}
+RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
+ CONVERT_SMI_ARG_CHECKED(index, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
+ CONVERT_SMI_ARG_CHECKED(flags, 3);
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> boilerplate(closure->literals()->literal(index), isolate);
+ if (boilerplate->IsUndefined()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
+ closure->literals()->set_literal(index, *boilerplate);
+ }
+ return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
+}
+
+
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
+ Handle<LiteralsArray> literals(closure->literals(), isolate);
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
@@ -351,13 +369,14 @@ static MaybeHandle<JSObject> CreateArrayLiteralImpl(
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
+ DCHECK_EQ(4, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<JSObject> result;
+ Handle<LiteralsArray> literals(closure->literals(), isolate);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, CreateArrayLiteralImpl(isolate, literals, literals_index,
elements, flags));
@@ -367,12 +386,13 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
Handle<JSObject> result;
+ Handle<LiteralsArray> literals(closure->literals(), isolate);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index dd3405a44c..189ec08d33 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -200,22 +200,34 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
- RUNTIME_ASSERT(shared_array->length()->IsSmi());
- RUNTIME_ASSERT(shared_array->HasFastElements())
- int array_length = Smi::cast(shared_array->length())->value();
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, old_shared_array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, new_shared_array, 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 2);
+ USE(new_shared_array);
+ RUNTIME_ASSERT(old_shared_array->length()->IsSmi());
+ RUNTIME_ASSERT(new_shared_array->length() == old_shared_array->length());
+ RUNTIME_ASSERT(old_shared_array->HasFastElements())
+ RUNTIME_ASSERT(new_shared_array->HasFastElements())
+ int array_length = Smi::cast(old_shared_array->length())->value();
for (int i = 0; i < array_length; i++) {
- Handle<Object> element;
+ Handle<Object> old_element;
+ Handle<Object> new_element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, old_element, Object::GetElement(isolate, old_shared_array, i));
+ RUNTIME_ASSERT(
+ old_element->IsJSValue() &&
+ Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element, Object::GetElement(isolate, shared_array, i));
+ isolate, new_element, Object::GetElement(isolate, new_shared_array, i));
RUNTIME_ASSERT(
- element->IsJSValue() &&
- Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo());
+ new_element->IsUndefined() ||
+ (new_element->IsJSValue() &&
+ Handle<JSValue>::cast(new_element)->value()->IsSharedFunctionInfo()));
}
- return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
+ return *LiveEdit::CheckAndDropActivations(old_shared_array, new_shared_array,
+ do_drop);
}
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 70c587d745..427d2b868a 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -7,6 +7,7 @@
#include "src/arguments.h"
#include "src/assembler.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/third_party/fdlibm/fdlibm.h"
@@ -106,8 +107,8 @@ RUNTIME_FUNCTION(Runtime_MathExpRT) {
isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_exp();
- return *isolate->factory()->NewNumber(fast_exp(x));
+ lazily_initialize_fast_exp(isolate);
+ return *isolate->factory()->NewNumber(fast_exp(x, isolate));
}
@@ -149,7 +150,7 @@ RUNTIME_FUNCTION(Runtime_MathPow) {
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result = power_helper(x, y);
+ double result = power_helper(isolate, x, y);
if (std::isnan(result)) return isolate->heap()->nan_value();
return *isolate->factory()->NewNumber(result);
}
@@ -223,7 +224,8 @@ RUNTIME_FUNCTION(Runtime_MathSqrt) {
isolate->counters()->math_sqrt()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(fast_sqrt(x));
+ lazily_initialize_fast_sqrt(isolate);
+ return *isolate->factory()->NewNumber(fast_sqrt(x, isolate));
}
@@ -247,18 +249,50 @@ RUNTIME_FUNCTION(Runtime_IsMinusZero) {
}
-RUNTIME_FUNCTION(Runtime_InitializeRNG) {
+RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- static const int kSize = 4;
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(kSize);
- uint16_t seeds[kSize];
- do {
- isolate->random_number_generator()->NextBytes(seeds,
- kSize * sizeof(*seeds));
- } while (!(seeds[0] && seeds[1] && seeds[2] && seeds[3]));
- for (int i = 0; i < kSize; i++) array->set(i, Smi::FromInt(seeds[i]));
- return *isolate->factory()->NewJSArrayWithElements(array);
+ DCHECK(args.length() == 1);
+ // Random numbers in the snapshot are not really that random.
+ DCHECK(!isolate->bootstrapper()->IsActive());
+ static const int kState0Offset = 0;
+ static const int kState1Offset = 1;
+ static const int kRandomBatchSize = 64;
+ CONVERT_ARG_HANDLE_CHECKED(Object, maybe_typed_array, 0);
+ Handle<JSTypedArray> typed_array;
+ // Allocate typed array if it does not yet exist.
+ if (maybe_typed_array->IsJSTypedArray()) {
+ typed_array = Handle<JSTypedArray>::cast(maybe_typed_array);
+ } else {
+ static const int kByteLength = kRandomBatchSize * kDoubleSize;
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+ JSArrayBuffer::SetupAllocatingData(buffer, isolate, kByteLength, true,
+ SharedFlag::kNotShared);
+ typed_array = isolate->factory()->NewJSTypedArray(
+ kExternalFloat64Array, buffer, 0, kRandomBatchSize);
+ }
+
+ DisallowHeapAllocation no_gc;
+ double* array =
+ reinterpret_cast<double*>(typed_array->GetBuffer()->backing_store());
+ // Fetch existing state.
+ uint64_t state0 = double_to_uint64(array[kState0Offset]);
+ uint64_t state1 = double_to_uint64(array[kState1Offset]);
+ // Initialize state if not yet initialized.
+ while (state0 == 0 || state1 == 0) {
+ isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
+ isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
+ }
+ // Create random numbers.
+ for (int i = kState1Offset + 1; i < kRandomBatchSize; i++) {
+ // Generate random numbers using xorshift128+.
+ base::RandomNumberGenerator::XorShift128(&state0, &state1);
+ array[i] = base::RandomNumberGenerator::ToDouble(state0, state1);
+ }
+ // Persist current state.
+ array[kState0Offset] = uint64_to_double(state0);
+ array[kState1Offset] = uint64_to_double(state1);
+ return *typed_array;
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index f976df951c..46fbff3463 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -114,11 +114,13 @@ RUNTIME_FUNCTION(Runtime_StringToNumber) {
}
+// ES6 18.2.5 parseInt(string, radix) slow path
RUNTIME_FUNCTION(Runtime_StringParseInt) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
+ // Step 8.a. is already handled in the JS function.
RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
subject = String::Flatten(subject);
@@ -128,7 +130,6 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
DisallowHeapAllocation no_gc;
String::FlatContent flat = subject->GetFlatContent();
- // ECMA-262 section 15.1.2.3, empty string is NaN
if (flat.IsOneByte()) {
value =
StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(), radix);
@@ -141,6 +142,7 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
}
+// ES6 18.2.4 parseFloat(string)
RUNTIME_FUNCTION(Runtime_StringParseFloat) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -316,5 +318,20 @@ RUNTIME_FUNCTION(Runtime_GetRootNaN) {
return isolate->heap()->nan_value();
}
+
+RUNTIME_FUNCTION(Runtime_GetHoleNaNUpper) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ return *isolate->factory()->NewNumberFromUint(kHoleNanUpper32);
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetHoleNaNLower) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ return *isolate->factory()->NewNumberFromUint(kHoleNanLower32);
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index a16e1295b9..75ddb7bc22 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -9,6 +9,7 @@
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/property-descriptor.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -117,14 +118,14 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
}
-MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> key,
- LanguageMode language_mode) {
+Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> key,
+ LanguageMode language_mode) {
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, receiver, key, &success, LookupIterator::HIDDEN);
- if (!success) return MaybeHandle<Object>();
+ if (!success) return Nothing<bool>();
return JSReceiver::DeleteProperty(&it, language_mode);
}
@@ -158,7 +159,10 @@ RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- return *Object::GetPrototype(isolate, obj);
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
+ Object::GetPrototype(isolate, obj));
+ return *prototype;
}
@@ -205,14 +209,13 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
- PropertyAttributes attrs;
// Get attributes.
LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name,
LookupIterator::HIDDEN);
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return MaybeHandle<Object>();
- attrs = maybe.FromJust();
+ PropertyAttributes attrs = maybe.FromJust();
if (attrs == ABSENT) return factory->undefined_value();
DCHECK(!isolate->has_pending_exception());
@@ -250,7 +253,8 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
// [false, value, Writeable, Enumerable, Configurable]
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
+// TODO(jkummerow): Deprecated. Remove all callers and delete.
+RUNTIME_FUNCTION(Runtime_GetOwnProperty_Legacy) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
@@ -262,21 +266,28 @@ RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
}
-RUNTIME_FUNCTION(Runtime_PreventExtensions) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- if (JSReceiver::PreventExtensions(obj, Object::THROW_ON_ERROR).IsNothing())
- return isolate->heap()->exception();
- return *obj;
-}
-
-
-RUNTIME_FUNCTION(Runtime_IsExtensible) {
+// ES6 19.1.2.6
+RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- return isolate->heap()->ToBoolean(JSObject::IsExtensible(obj));
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, raw_name, 1);
+ // 1. Let obj be ? ToObject(O).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+ Execution::ToObject(isolate, object));
+ // 2. Let key be ? ToPropertyKey(P).
+ Handle<Name> key;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToName(isolate, raw_name));
+
+ // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, Handle<JSReceiver>::cast(object), key, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ // 4. Return FromPropertyDescriptor(desc).
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
}
@@ -295,36 +306,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
}
-RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- // %ObjectFreeze is a fast path and these cases are handled elsewhere.
- RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed() && !object->IsJSProxy());
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Freeze(object));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectSeal) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- // %ObjectSeal is a fast path and these cases are handled elsewhere.
- RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed() && !object->IsJSProxy());
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Seal(object));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -586,11 +567,10 @@ Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode));
- return *result;
+ Maybe<bool> result =
+ Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
} // namespace
@@ -625,7 +605,7 @@ static Object* HasOwnPropertyImplementation(Isolate* isolate,
// look like they are on this object.
PrototypeIterator iter(isolate, object);
if (!iter.IsAtEnd() &&
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))
+ PrototypeIterator::GetCurrent<HeapObject>(iter)
->map()
->is_hidden_prototype()) {
// TODO(verwaest): The recursion is not necessary for keys that are array
@@ -633,8 +613,7 @@ static Object* HasOwnPropertyImplementation(Isolate* isolate,
// Casting to JSObject is fine because JSProxies are never used as
// hidden prototypes.
return HasOwnPropertyImplementation(
- isolate, Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
- key);
+ isolate, PrototypeIterator::GetCurrent<JSObject>(iter), key);
}
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->false_value();
@@ -656,9 +635,13 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
+ // TODO(jkummerow): Make JSReceiver::HasOwnProperty fast enough to
+ // handle all cases directly (without this custom fast path).
Maybe<bool> maybe = Nothing<bool>();
if (key_is_array_index) {
- maybe = JSObject::HasOwnElement(js_obj, index);
+ LookupIterator it(js_obj->GetIsolate(), js_obj, index,
+ LookupIterator::HIDDEN);
+ maybe = JSReceiver::HasProperty(&it);
} else {
maybe = JSObject::HasRealNamedProperty(js_obj, key);
}
@@ -681,6 +664,11 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
if (index < static_cast<uint32_t>(string->length())) {
return isolate->heap()->true_value();
}
+ } else if (object->IsJSProxy()) {
+ Maybe<bool> result =
+ JSReceiver::HasOwnProperty(Handle<JSProxy>::cast(object), key);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ return isolate->heap()->ToBoolean(result.FromJust());
}
return isolate->heap()->false_value();
}
@@ -713,17 +701,17 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
}
-RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) {
+RUNTIME_FUNCTION(Runtime_PropertyIsEnumerable) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(object, key);
if (!maybe.IsJust()) return isolate->heap()->exception();
- if (maybe.FromJust() == ABSENT) maybe = Just(DONT_ENUM);
+ if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
@@ -744,8 +732,8 @@ RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
Handle<JSReceiver> object(raw_object);
Handle<FixedArray> content;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, content,
- JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS));
+ isolate, content, JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS,
+ ENUMERABLE_STRINGS));
// Test again, since cache may have been built by preceding call.
if (object->IsSimpleEnum()) return object->map();
@@ -754,136 +742,19 @@ RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
}
-// Return the names of the own named properties.
-// args[0]: object
-// args[1]: PropertyAttributes as int
-RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
+RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_SMI_ARG_CHECKED(filter_value, 1);
- PropertyAttributes filter = static_cast<PropertyAttributes>(filter_value);
-
- // Find the number of own properties for each of the objects.
- int total_property_count = 0;
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- // Casting to JSObject is fine because |object| is guaranteed to be one,
- // and we'll only look at hidden prototypes which are never JSProxies.
- Handle<JSObject> jsproto = PrototypeIterator::GetCurrent<JSObject>(iter);
- total_property_count += jsproto->NumberOfOwnProperties(filter);
- }
-
- // Allocate an array with storage for all the property names.
- Handle<FixedArray> names =
- isolate->factory()->NewFixedArray(total_property_count);
-
- // Get the property names.
- int next_copy_index = 0;
- int hidden_strings = 0;
- Handle<Object> hidden_string = isolate->factory()->hidden_string();
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
- // Casting to JSObject is fine because |object| is guaranteed to be one,
- // and we'll only look at hidden prototypes which are never JSProxies.
- Handle<JSObject> jsproto = PrototypeIterator::GetCurrent<JSObject>(iter);
- int own = jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
- // Names from hidden prototypes may already have been added
- // for inherited function template instances. Count the duplicates
- // and stub them out; the final copy pass at the end ignores holes.
- for (int j = next_copy_index; j < next_copy_index + own; j++) {
- Object* name_from_hidden_proto = names->get(j);
- if (isolate->IsInternallyUsedPropertyName(name_from_hidden_proto)) {
- hidden_strings++;
- } else {
- for (int k = 0; k < next_copy_index; k++) {
- Object* name = names->get(k);
- if (name_from_hidden_proto == name) {
- names->set(j, *hidden_string);
- hidden_strings++;
- break;
- }
- }
- }
- }
- next_copy_index += own;
- }
-
- CHECK_EQ(total_property_count, next_copy_index);
-
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), object)) {
- for (int i = 0; i < total_property_count; i++) {
- Handle<Name> name(Name::cast(names->get(i)));
- if (name.is_identical_to(hidden_string)) continue;
- LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- if (!JSObject::AllCanRead(&it)) {
- names->set(i, *hidden_string);
- hidden_strings++;
- }
- }
- }
-
- // Filter out name of hidden properties object and
- // hidden prototype duplicates.
- if (hidden_strings > 0) {
- if (hidden_strings == total_property_count) {
- names = isolate->factory()->empty_fixed_array();
- } else {
- int i;
- for (i = 0; i < total_property_count; i++) {
- Object* name = names->get(i);
- if (name == *hidden_string) break;
- }
- int dest_pos = i;
- for (; i < total_property_count; i++) {
- Object* name = names->get(i);
- if (name == *hidden_string) continue;
- names->set(dest_pos++, name);
- }
-
- isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *names, hidden_strings);
- }
- }
-
- return *isolate->factory()->NewJSArrayWithElements(names);
-}
-
-
-// Return the names of the own indexed properties.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetOwnElementNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return isolate->heap()->undefined_value();
- }
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ PropertyFilter filter = static_cast<PropertyFilter>(filter_value);
- // TODO(cbruni): implement proper prototype lookup like in GetOwnPropertyNames
- if (object->IsJSGlobalProxy()) {
- // All the elements are stored on the globa_object and not directly on the
- // global object proxy.
- PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_PROTOTYPE);
- if (iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- return *isolate->factory()->NewJSArray(0);
- }
- // Casting to JSObject is fine because |object| is guaranteed to be one,
- // and we'll only look at hidden prototypes which are never JSProxies.
- object = PrototypeIterator::GetCurrent<JSObject>(iter);
- }
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, filter,
+ CONVERT_TO_STRING));
- int n = object->NumberOfOwnElements(NONE);
- Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
- object->GetOwnElementKeys(*names, NONE);
- return *isolate->factory()->NewJSArrayWithElements(names);
+ return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -905,54 +776,6 @@ RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
}
-// Return property names from named interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetNamedInterceptorPropertyNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- if (obj->HasNamedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForNamedInterceptor(obj, obj).ToHandle(&result)) {
- return *result;
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-// Return element names from indexed interceptor.
-// args[0]: object
-RUNTIME_FUNCTION(Runtime_GetIndexedInterceptorElementNames) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-
- if (obj->HasIndexedInterceptor()) {
- Handle<JSObject> result;
- if (JSObject::GetKeysForIndexedInterceptor(obj, obj).ToHandle(&result)) {
- return *result;
- }
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_OwnKeys) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
- Handle<JSObject> object(raw_object);
-
- Handle<FixedArray> contents;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, contents, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY,
- SKIP_SYMBOLS, CONVERT_TO_STRING));
- return *isolate->factory()->NewJSArrayWithElements(contents);
-}
-
-
RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -972,65 +795,15 @@ RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
}
-static Object* Runtime_NewObjectHelper(Isolate* isolate,
- Handle<Object> constructor,
- Handle<Object> original_constructor,
- Handle<AllocationSite> site) {
- // If the constructor isn't a proper function we throw a type error.
- if (!constructor->IsJSFunction()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor, constructor));
- }
-
- Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
-
- CHECK(original_constructor->IsJSFunction());
- Handle<JSFunction> original_function =
- Handle<JSFunction>::cast(original_constructor);
-
-
- // Check that function is a constructor.
- if (!function->IsConstructor()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor, constructor));
- }
-
- Debug* debug = isolate->debug();
- // Handle stepping into constructors if step into is active.
- if (debug->StepInActive()) debug->HandleStepIn(function, true);
-
- // The function should be compiled for the optimization hints to be
- // available.
- Compiler::Compile(function, CLEAR_EXCEPTION);
-
- JSFunction::EnsureHasInitialMap(function);
- if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
- // The 'Function' function ignores the receiver object when
- // called using 'new' and creates a new JSFunction object that
- // is returned.
- return isolate->heap()->undefined_value();
- }
-
- Handle<Map> initial_map =
- JSFunction::EnsureDerivedHasInitialMap(original_function, function);
-
- Handle<JSObject> result =
- isolate->factory()->NewJSObjectFromMap(initial_map, NOT_TENURED, site);
-
- isolate->counters()->constructed_objects()->Increment();
- isolate->counters()->constructed_objects_runtime()->Increment();
-
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_NewObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, original_constructor, 1);
- return Runtime_NewObjectHelper(isolate, constructor, original_constructor,
- Handle<AllocationSite>::null());
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ return *result;
}
@@ -1218,22 +991,6 @@ RUNTIME_FUNCTION(Runtime_JSValueGetValue) {
}
-RUNTIME_FUNCTION(Runtime_HeapObjectGetMap) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(HeapObject, obj, 0);
- return obj->map();
-}
-
-
-RUNTIME_FUNCTION(Runtime_MapGetInstanceType) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Map, map, 0);
- return Smi::FromInt(map->instance_type());
-}
-
-
RUNTIME_FUNCTION(Runtime_ObjectEquals) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
@@ -1243,11 +1000,11 @@ RUNTIME_FUNCTION(Runtime_ObjectEquals) {
}
-RUNTIME_FUNCTION(Runtime_IsSpecObject) {
+RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsSpecObject());
+ return isolate->heap()->ToBoolean(obj->IsJSReceiver());
}
@@ -1492,14 +1249,12 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
if (!object->IsJSReceiver()) {
return isolate->heap()->false_value();
}
- // Check if {callable} is bound, if so, get [[BoundFunction]] from it and use
- // that instead of {callable}.
- if (callable->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- if (function->shared()->bound()) {
- Handle<BindingsArray> bindings(function->function_bindings(), isolate);
- callable = handle(bindings->bound_function(), isolate);
- }
+ // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
+ // and use that instead of {callable}.
+ while (callable->IsJSBoundFunction()) {
+ callable =
+ handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+ isolate);
}
DCHECK(callable->IsCallable());
// Get the "prototype" of {callable}; raise an error if it's not a receiver.
@@ -1513,18 +1268,20 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
}
// Return whether or not {prototype} is in the prototype chain of {object}.
- return isolate->heap()->ToBoolean(
- object->HasInPrototypeChain(isolate, *prototype));
+ Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
- SealHandleScope scope(isolate);
+ HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
- CONVERT_ARG_CHECKED(Object, prototype, 1);
- return isolate->heap()->ToBoolean(
- object->HasInPrototypeChain(isolate, prototype));
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+ Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -1561,7 +1318,10 @@ RUNTIME_FUNCTION(Runtime_ObjectDefineProperties) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, properties, 1);
- return JSReceiver::DefineProperties(isolate, o, properties);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, o, JSReceiver::DefineProperties(isolate, o, properties));
+ return *o;
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-observe.cc b/deps/v8/src/runtime/runtime-observe.cc
index df0b2a330c..0407b8a9df 100644
--- a/deps/v8/src/runtime/runtime-observe.cc
+++ b/deps/v8/src/runtime/runtime-observe.cc
@@ -56,7 +56,7 @@ RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, argument, 1);
v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
// We should send a message on uncaught exception thrown during
@@ -65,16 +65,8 @@ RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
catcher.SetVerbose(true);
Handle<Object> argv[] = {argument};
- // Allow stepping into the observer callback.
- Debug* debug = isolate->debug();
- if (debug->is_active() && debug->IsStepping() &&
- debug->last_step_action() == StepIn) {
- // Previous StepIn may have activated a StepOut if it was at the frame exit.
- // In this case to be able to step into the callback again, we need to clear
- // the step out first.
- debug->ClearStepOut();
- debug->FloodWithOneShot(callback);
- }
+ // If we are in step-in mode, flood the handler.
+ isolate->debug()->EnableStepIn();
USE(Execution::Call(isolate, callback, isolate->factory()->undefined_value(),
arraysize(argv), argv));
@@ -104,11 +96,18 @@ static bool ContextsHaveSameOrigin(Handle<Context> context1,
RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, observer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2);
- Handle<Context> observer_context(observer->context()->native_context());
+ while (observer->IsJSBoundFunction()) {
+ observer = handle(
+ Handle<JSBoundFunction>::cast(observer)->bound_target_function());
+ }
+ if (!observer->IsJSFunction()) return isolate->heap()->false_value();
+
+ Handle<Context> observer_context(
+ Handle<JSFunction>::cast(observer)->context()->native_context());
Handle<Context> object_context(object->GetCreationContext());
Handle<Context> record_context(record->GetCreationContext());
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 4699647b80..3a521c6b7c 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -5,33 +5,139 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/elements.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_CreateJSProxy) {
+
+// ES6 9.5.13 [[Call]] (thisArgument, argumentsList)
+RUNTIME_FUNCTION(Runtime_JSProxyCall) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
- return *isolate->factory()->NewJSProxy(handler, prototype);
+ DCHECK_LE(2, args.length());
+ // thisArgument == receiver
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, args.length() - 1);
+ Handle<String> trap_name = isolate->factory()->apply_string();
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ }
+ // 3. Assert: Type(handler) is Object.
+ DCHECK(handler->IsJSReceiver());
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate);
+ // 5. Let trap be ? GetMethod(handler, "apply").
+ Handle<Object> trap;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
+ // 6. If trap is undefined, then
+ int const arguments_length = args.length() - 2;
+ if (trap->IsUndefined()) {
+ // 6.a. Return Call(target, thisArgument, argumentsList).
+ ScopedVector<Handle<Object>> argv(arguments_length);
+ for (int i = 0; i < arguments_length; ++i) {
+ argv[i] = args.at<Object>(i + 1);
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, target, receiver,
+ arguments_length, argv.start()));
+ return *result;
+ }
+ // 7. Let argArray be CreateArrayFromList(argumentsList).
+ Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, arguments_length, arguments_length);
+ ElementsAccessor* accessor = arg_array->GetElementsAccessor();
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements = arg_array->elements();
+ for (int i = 0; i < arguments_length; i++) {
+ accessor->Set(elements, i, args[i + 1]);
+ }
+ }
+ // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
+ Handle<Object> trap_result;
+ Handle<Object> trap_args[] = {target, receiver, arg_array};
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, trap_result,
+ Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
+ return *trap_result;
}
-RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
+// 9.5.14 [[Construct]] (argumentsList, newTarget)
+RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, call_trap, 1);
- RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, construct_trap, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 3);
- if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
- return *isolate->factory()->NewJSFunctionProxy(handler, call_trap,
- construct_trap, prototype);
+ DCHECK_LE(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, args.length() - 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_target, args.length() - 1);
+ Handle<String> trap_name = isolate->factory()->construct_string();
+
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate);
+ // 2. If handler is null, throw a TypeError exception.
+ if (proxy->IsRevoked()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProxyRevoked, trap_name));
+ }
+ // 3. Assert: Type(handler) is Object.
+ DCHECK(handler->IsJSReceiver());
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(JSReceiver::cast(proxy->target()), isolate);
+ // 5. Let trap be ? GetMethod(handler, "construct").
+ Handle<Object> trap;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
+ // 6. If trap is undefined, then
+ int const arguments_length = args.length() - 3;
+ if (trap->IsUndefined()) {
+ // 6.a. Assert: target has a [[Construct]] internal method.
+ DCHECK(target->IsConstructor());
+ // 6.b. Return Construct(target, argumentsList, newTarget).
+ ScopedVector<Handle<Object>> argv(arguments_length);
+ for (int i = 0; i < arguments_length; ++i) {
+ argv[i] = args.at<Object>(i + 1);
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Execution::New(isolate, target, new_target,
+ arguments_length, argv.start()));
+ return *result;
+ }
+ // 7. Let argArray be CreateArrayFromList(argumentsList).
+ Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, arguments_length, arguments_length);
+ ElementsAccessor* accessor = arg_array->GetElementsAccessor();
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements = arg_array->elements();
+ for (int i = 0; i < arguments_length; i++) {
+ accessor->Set(elements, i, args[i + 1]);
+ }
+ }
+ // 8. Let newObj be ? Call(trap, handler, «target, argArray, newTarget »).
+ Handle<Object> new_object;
+ Handle<Object> trap_args[] = {target, arg_array, new_target};
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_object,
+ Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
+ // 9. If Type(newObj) is not Object, throw a TypeError exception.
+ if (!new_object->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kProxyConstructNonObject, new_object));
+ }
+ // 10. Return newObj.
+ return *new_object;
}
@@ -43,15 +149,7 @@ RUNTIME_FUNCTION(Runtime_IsJSProxy) {
}
-RUNTIME_FUNCTION(Runtime_IsJSFunctionProxy) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetHandler) {
+RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
@@ -59,28 +157,21 @@ RUNTIME_FUNCTION(Runtime_GetHandler) {
}
-RUNTIME_FUNCTION(Runtime_GetCallTrap) {
+RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
- return proxy->call_trap();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetConstructTrap) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
- return proxy->construct_trap();
+ CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+ return proxy->target();
}
-RUNTIME_FUNCTION(Runtime_Fix) {
+RUNTIME_FUNCTION(Runtime_JSProxyRevoke) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
- JSProxy::Fix(proxy);
+ JSProxy::Revoke(proxy);
return isolate->heap()->undefined_value();
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index b4cf184c40..138b4dc71c 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -658,7 +658,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
subject = String::Flatten(subject);
@@ -825,163 +825,16 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
}
-static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
- bool* success) {
- uint32_t value = JSRegExp::NONE;
- int length = flags->length();
- // A longer flags string cannot be valid.
- if (length > 5) return JSRegExp::Flags(0);
- for (int i = 0; i < length; i++) {
- uint32_t flag = JSRegExp::NONE;
- switch (flags->Get(i)) {
- case 'g':
- flag = JSRegExp::GLOBAL;
- break;
- case 'i':
- flag = JSRegExp::IGNORE_CASE;
- break;
- case 'm':
- flag = JSRegExp::MULTILINE;
- break;
- case 'u':
- if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
- flag = JSRegExp::UNICODE_ESCAPES;
- break;
- case 'y':
- if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
- flag = JSRegExp::STICKY;
- break;
- default:
- return JSRegExp::Flags(0);
- }
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
- *success = true;
- return JSRegExp::Flags(value);
-}
-
-
-template <typename Char>
-inline int CountRequiredEscapes(Handle<String> source) {
- DisallowHeapAllocation no_gc;
- int escapes = 0;
- Vector<const Char> src = source->GetCharVector<Char>();
- for (int i = 0; i < src.length(); i++) {
- if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
- }
- return escapes;
-}
-
-
-template <typename Char, typename StringType>
-inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
- Handle<StringType> result) {
- DisallowHeapAllocation no_gc;
- Vector<const Char> src = source->GetCharVector<Char>();
- Vector<Char> dst(result->GetChars(), result->length());
- int s = 0;
- int d = 0;
- while (s < src.length()) {
- if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
- dst[d++] = src[s++];
- }
- DCHECK_EQ(result->length(), d);
- return result;
-}
-
-
-MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
- Handle<String> source) {
- String::Flatten(source);
- if (source->length() == 0) return isolate->factory()->query_colon_string();
- bool one_byte = source->IsOneByteRepresentationUnderneath();
- int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
- : CountRequiredEscapes<uc16>(source);
- if (escapes == 0) return source;
- int length = source->length() + escapes;
- if (one_byte) {
- Handle<SeqOneByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- isolate->factory()->NewRawOneByteString(length),
- String);
- return WriteEscapedRegExpSource<uint8_t>(source, result);
- } else {
- Handle<SeqTwoByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- isolate->factory()->NewRawTwoByteString(length),
- String);
- return WriteEscapedRegExpSource<uc16>(source, result);
- }
-}
-
-
RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 2);
- Factory* factory = isolate->factory();
- // If source is the empty string we set it to "(?:)" instead as
- // suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = factory->query_colon_string();
-
- bool success = false;
- JSRegExp::Flags flags = RegExpFlagsFromString(flags_string, &success);
- if (!success) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string));
- }
-
- Handle<String> escaped_source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, escaped_source,
- EscapeRegExpSource(isolate, source));
-
- regexp->set_source(*escaped_source);
- regexp->set_flags(Smi::FromInt(flags.value()));
+ CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
- Map* map = regexp->map();
- Object* constructor = map->GetConstructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->initial_map() == map) {
- // If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0), SKIP_WRITE_BARRIER);
- } else {
- // Map has changed, so use generic, but slower, method.
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->last_index_string(),
- Handle<Smi>(Smi::FromInt(0), isolate), writable)
- .Check();
- }
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSRegExp::Initialize(regexp, source, flags));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpImpl::Compile(regexp, source, flags));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
-
- Handle<JSFunction> constructor = isolate->regexp_function();
- // Compute the regular expression literal.
- Handle<Object> regexp;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, regexp,
- RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags));
- literals->set_literal(index, *regexp);
return *regexp;
}
@@ -1147,7 +1000,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
RUNTIME_ASSERT(result_array->HasFastObjectElements());
subject = String::Flatten(subject);
- RUNTIME_ASSERT(regexp->GetFlags().is_global());
+ RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
if (regexp->CaptureCount() == 0) {
return SearchRegExpMultiple<false>(isolate, subject, regexp,
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index ecbe5cd17d..094f1a10ed 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -6,11 +6,12 @@
#include "src/accessors.h"
#include "src/arguments.h"
+#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
+#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/scopeinfo.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -231,7 +232,8 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
&binding_flags);
if (attributes != ABSENT &&
(binding_flags == MUTABLE_CHECK_INITIALIZED ||
- binding_flags == IMMUTABLE_CHECK_INITIALIZED)) {
+ binding_flags == IMMUTABLE_CHECK_INITIALIZED ||
+ binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY)) {
return ThrowRedeclarationError(isolate, name);
}
attr = static_cast<PropertyAttributes>(attr & ~EVAL_DECLARED);
@@ -293,7 +295,7 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
DCHECK(context->IsBlockContext());
object = isolate->factory()->NewJSObject(
isolate->context_extension_function());
- Handle<Object> extension =
+ Handle<HeapObject> extension =
isolate->factory()->NewSloppyBlockWithEvalContextExtension(
handle(context->scope_info()), object);
context->set_extension(*extension);
@@ -411,6 +413,68 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
namespace {
+// Find the arguments of the JavaScript function invocation that called
+// into C++ code. Collect these in a newly allocated array of handles (possibly
+// prefixed by a number of empty handles).
+base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
+ int prefix_argc,
+ int* total_argc) {
+ // Find frame containing arguments passed to the caller.
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ List<JSFunction*> functions(2);
+ frame->GetFunctions(&functions);
+ if (functions.length() > 1) {
+ int inlined_jsframe_index = functions.length() - 1;
+ TranslatedState translated_values(frame);
+ translated_values.Prepare(false, frame->fp());
+
+ int argument_count = 0;
+ TranslatedFrame* translated_frame =
+ translated_values.GetArgumentsInfoFromJSFrameIndex(
+ inlined_jsframe_index, &argument_count);
+ TranslatedFrame::iterator iter = translated_frame->begin();
+
+ // Skip the function.
+ iter++;
+
+ // Skip the receiver.
+ iter++;
+ argument_count--;
+
+ *total_argc = prefix_argc + argument_count;
+ base::SmartArrayPointer<Handle<Object>> param_data(
+ NewArray<Handle<Object>>(*total_argc));
+ bool should_deoptimize = false;
+ for (int i = 0; i < argument_count; i++) {
+ should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
+ Handle<Object> value = iter->GetValue();
+ param_data[prefix_argc + i] = value;
+ iter++;
+ }
+
+ if (should_deoptimize) {
+ translated_values.StoreMaterializedValuesAndDeopt();
+ }
+
+ return param_data;
+ } else {
+ it.AdvanceToArgumentsFrame();
+ frame = it.frame();
+ int args_count = frame->ComputeParametersCount();
+
+ *total_argc = prefix_argc + args_count;
+ base::SmartArrayPointer<Handle<Object>> param_data(
+ NewArray<Handle<Object>>(*total_argc));
+ for (int i = 0; i < args_count; i++) {
+ Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
+ param_data[prefix_argc + i] = val;
+ }
+ return param_data;
+ }
+}
+
+
template <typename T>
Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
T parameters, int argument_count) {
@@ -520,6 +584,26 @@ Handle<JSObject> NewStrictArguments(Isolate* isolate, Handle<JSFunction> callee,
}
+template <typename T>
+Handle<JSObject> NewRestArguments(Isolate* isolate, Handle<JSFunction> callee,
+ T parameters, int argument_count,
+ int start_index) {
+ int num_elements = std::max(0, argument_count - start_index);
+ Handle<JSObject> result = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(result->elements());
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < num_elements; i++) {
+ elements->set(i, parameters[i + start_index], mode);
+ }
+ }
+ return result;
+}
+
+
class HandleArguments BASE_EMBEDDED {
public:
explicit HandleArguments(Handle<Object>* array) : array_(array) {}
@@ -547,10 +631,10 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
// This generic runtime function can also be used when the caller has been
- // inlined, we use the slow but accurate {Runtime::GetCallerArguments}.
+ // inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, 0, &argument_count);
HandleArguments argument_getter(arguments.get());
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
@@ -561,15 +645,31 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments_Generic) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
// This generic runtime function can also be used when the caller has been
- // inlined, we use the slow but accurate {Runtime::GetCallerArguments}.
+ // inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, 0, &argument_count);
HandleArguments argument_getter(arguments.get());
return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
}
+RUNTIME_FUNCTION(Runtime_NewRestArguments_Generic) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
+ CONVERT_SMI_ARG_CHECKED(start_index, 1);
+ // This generic runtime function can also be used when the caller has been
+ // inlined, we use the slow but accurate {GetCallerArguments}.
+ int argument_count = 0;
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ GetCallerArguments(isolate, 0, &argument_count);
+ HandleArguments argument_getter(arguments.get());
+ return *NewRestArguments(isolate, callee, argument_getter, argument_count,
+ start_index);
+}
+
+
RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -604,6 +704,25 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
}
+RUNTIME_FUNCTION(Runtime_NewRestParam) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_SMI_ARG_CHECKED(num_params, 0);
+ Object** parameters = reinterpret_cast<Object**>(args[1]);
+ CONVERT_SMI_ARG_CHECKED(rest_index, 2);
+#ifdef DEBUG
+ // This runtime function does not materialize the correct arguments when the
+ // caller has been inlined, better make sure we are not hitting that case.
+ JavaScriptFrameIterator it(isolate);
+ DCHECK(!it.frame()->HasInlinedFrames());
+#endif // DEBUG
+ Handle<JSFunction> callee;
+ ParameterArguments argument_getter(parameters);
+ return *NewRestArguments(isolate, callee, argument_getter, num_params,
+ rest_index);
+}
+
+
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -704,31 +823,9 @@ RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(Runtime_PushWithContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- Handle<JSReceiver> extension_object;
- if (args[0]->IsJSReceiver()) {
- extension_object = args.at<JSReceiver>(0);
- } else {
- // Try to convert the object to a proper JavaScript object.
- MaybeHandle<JSReceiver> maybe_object =
- Object::ToObject(isolate, args.at<Object>(0));
- if (!maybe_object.ToHandle(&extension_object)) {
- Handle<Object> handle = args.at<Object>(0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kWithExpression, handle));
- }
- }
-
- Handle<JSFunction> function;
- if (args[1]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = handle(isolate->native_context()->closure());
- } else {
- function = args.at<JSFunction>(1);
- }
-
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, extension_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
Handle<Context> current(isolate->context());
Handle<Context> context =
isolate->factory()->NewWithContext(function, current, extension_object);
@@ -739,18 +836,10 @@ RUNTIME_FUNCTION(Runtime_PushWithContext) {
RUNTIME_FUNCTION(Runtime_PushCatchContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1);
- Handle<JSFunction> function;
- if (args[2]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = handle(isolate->native_context()->closure());
- } else {
- function = args.at<JSFunction>(2);
- }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
Handle<Context> current(isolate->context());
Handle<Context> context = isolate->factory()->NewCatchContext(
function, current, name, thrown_object);
@@ -761,17 +850,9 @@ RUNTIME_FUNCTION(Runtime_PushCatchContext) {
RUNTIME_FUNCTION(Runtime_PushBlockContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
- Handle<JSFunction> function;
- if (args[1]->IsSmi()) {
- // A smi sentinel indicates a context nested inside global code rather
- // than some function. There is a canonical empty function that can be
- // gotten from the native context.
- function = handle(isolate->native_context()->closure());
- } else {
- function = args.at<JSFunction>(1);
- }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
Handle<Context> current(isolate->context());
Handle<Context> context =
isolate->factory()->NewBlockContext(function, current, scope_info);
@@ -813,7 +894,7 @@ RUNTIME_FUNCTION(Runtime_PushModuleContext) {
Context* previous = isolate->context();
context->set_previous(previous);
context->set_closure(previous->closure());
- context->set_global_object(previous->global_object());
+ context->set_native_context(previous->native_context());
isolate->set_context(*context);
// Find hosting scope and initialize internal variable holding module there.
@@ -863,8 +944,10 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
}
}
- if (JSObject::PreventExtensions(module, Object::THROW_ON_ERROR).IsNothing())
+ if (JSObject::PreventExtensions(module, Object::THROW_ON_ERROR)
+ .IsNothing()) {
DCHECK(false);
+ }
}
DCHECK(!isolate->has_pending_exception());
@@ -898,14 +981,13 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
return isolate->heap()->false_value();
}
- // The slot was found in a JSObject, either a context extension object,
+ // The slot was found in a JSReceiver, either a context extension object,
// the global object, or the subject of a with. Try to delete it
// (respecting DONT_DELETE).
- Handle<JSObject> object = Handle<JSObject>::cast(holder);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSReceiver::DeleteProperty(object, name));
- return *result;
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
+ Maybe<bool> result = JSReceiver::DeleteProperty(object, name);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -1093,7 +1175,7 @@ RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
int argument_count = 0;
- Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, 0, &argument_count);
return Smi::FromInt(argument_count);
}
@@ -1106,7 +1188,7 @@ RUNTIME_FUNCTION(Runtime_Arguments) {
// Determine the actual arguments passed to the function.
int argument_count_signed = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- Runtime::GetCallerArguments(isolate, 0, &argument_count_signed);
+ GetCallerArguments(isolate, 0, &argument_count_signed);
const uint32_t argument_count = argument_count_signed;
// Try to convert the key to an index. If successful and within
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
index 0a1034dfc2..59e4fa1edb 100644
--- a/deps/v8/src/runtime/runtime-simd.cc
+++ b/deps/v8/src/runtime/runtime-simd.cc
@@ -212,10 +212,19 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
CONVERT_INT32_ARG_CHECKED(name, index); \
RUNTIME_ASSERT(name >= 0 && name < lanes);
+#define CONVERT_SIMD_ARG_HANDLE_THROW(Type, name, index) \
+ Handle<Type> name; \
+ if (args[index]->Is##Type()) { \
+ name = args.at<Type>(index); \
+ } else { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
+ }
+
#define SIMD_UNARY_OP(type, lane_type, lane_count, op, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = op(a->get_lane(i)); \
@@ -225,8 +234,8 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define SIMD_BINARY_OP(type, lane_type, lane_count, op, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = op(a->get_lane(i), b->get_lane(i)); \
@@ -236,8 +245,8 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, op, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
bool lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = a->get_lane(i) op b->get_lane(i); \
@@ -283,7 +292,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
RUNTIME_FUNCTION(Runtime_##type##ExtractLane) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, lane_count); \
return *isolate->factory()->extract(a->get_lane(lane)); \
}
@@ -293,7 +302,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(type, simd, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, simd, 0); \
CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, kLaneCount); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
@@ -307,7 +316,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define SIMD_CHECK_FUNCTION(type, lane_type, lane_count, extract, replace) \
RUNTIME_FUNCTION(Runtime_##type##Check) { \
HandleScope scope(isolate); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
return *a; \
}
@@ -316,7 +325,7 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 1 + kLaneCount); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 1, kLaneCount); \
@@ -331,8 +340,8 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2 + kLaneCount); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 2, kLaneCount * 2); \
@@ -437,7 +446,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
if (shift < lane_bits) { \
@@ -454,7 +463,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
if (shift < lane_bits) { \
@@ -472,7 +481,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
if (shift >= lane_bits) shift = lane_bits - 1; \
lane_type lanes[kLaneCount]; \
@@ -502,7 +511,7 @@ SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##AnyTrue) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
bool result = false; \
for (int i = 0; i < lane_count; i++) { \
if (a->get_lane(i)) { \
@@ -517,7 +526,7 @@ SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##AllTrue) { \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
bool result = true; \
for (int i = 0; i < lane_count; i++) { \
if (!a->get_lane(i)) { \
@@ -759,9 +768,9 @@ SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(bool_type, mask, 0); \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 1); \
- CONVERT_ARG_HANDLE_CHECKED(type, b, 2); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(bool_type, mask, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 1); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 2); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = mask->get_lane(i) ? a->get_lane(i) : b->get_lane(i); \
@@ -812,7 +821,7 @@ SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(from_type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
from_ctype a_value = a->get_lane(i); \
@@ -875,7 +884,7 @@ SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(from_type, a, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
lane_type lanes[kLaneCount]; \
a->CopyBits(lanes); \
Handle<type> result = isolate->factory()->New##type(lanes); \
@@ -900,7 +909,7 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 2); \
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
CONVERT_INT32_ARG_CHECKED(index, 1) \
size_t bpe = tarray->element_size(); \
uint32_t bytes = count * sizeof(lane_type); \
@@ -918,9 +927,9 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
#define SIMD_STORE(type, lane_type, lane_count, count, a) \
static const int kLaneCount = lane_count; \
DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2); \
CONVERT_INT32_ARG_CHECKED(index, 1) \
- CONVERT_ARG_HANDLE_CHECKED(type, a, 2); \
size_t bpe = tarray->element_size(); \
uint32_t bytes = count * sizeof(lane_type); \
size_t byte_length = NumberToSize(isolate, tarray->byte_length()); \
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index dd4983e75f..bd4dd699b4 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -1245,12 +1245,5 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
}
-
-RUNTIME_FUNCTION(Runtime_StringGetLength) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
- return Smi::FromInt(s->length());
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index fdfa42a6af..3b92d7f6ee 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -378,8 +378,7 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
DCHECK(args.length() == 0);
return Smi::FromInt(Natives::GetBuiltinsCount() +
- ExtraNatives::GetBuiltinsCount() +
- CodeStubNatives::GetBuiltinsCount());
+ ExtraNatives::GetBuiltinsCount());
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index f39e37072d..a82b71ddf2 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -13,32 +13,6 @@
namespace v8 {
namespace internal {
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(is_shared, 2);
- if (!holder->byte_length()->IsUndefined()) {
- // ArrayBuffer is already initialized; probably a fuzz test.
- return *holder;
- }
- size_t allocated_length = 0;
- if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
- if (!JSArrayBuffer::SetupAllocatingData(
- holder, isolate, allocated_length, true,
- is_shared ? SharedFlag::kShared : SharedFlag::kNotShared)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
- }
- return *holder;
-}
-
-
RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -49,14 +23,16 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
RUNTIME_ASSERT(!source.is_identical_to(target));
- size_t start = 0;
+ size_t start = 0, target_length = 0;
RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
- size_t target_length = NumberToSize(isolate, target->byte_length());
+ RUNTIME_ASSERT(TryNumberToSize(isolate, *new_length, &target_length));
+ RUNTIME_ASSERT(NumberToSize(isolate, target->byte_length()) >= target_length);
if (target_length == 0) return isolate->heap()->undefined_value();
@@ -70,14 +46,6 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
}
-RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(object->IsJSArrayBufferView());
-}
-
-
RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -433,6 +401,19 @@ RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
}
+RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ if (!args[0]->IsJSTypedArray()) {
+ return isolate->heap()->false_value();
+ }
+
+ Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
+ return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
+ obj->type() == kExternalInt32Array);
+}
+
+
RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 23f9bdc1f7..283087ae06 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -30,27 +30,28 @@ namespace internal {
// Entries have the form F(name, number of arguments, number of values):
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 0, 1) \
- F(TransitionElementsKind, 2, 1) \
- F(PushIfAbsent, 2, 1) \
- F(RemoveArrayHoles, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(ArrayConstructor, -1, 1) \
- F(ArrayConstructorWithSubclassing, -1, 1) \
- F(InternalArrayConstructor, -1, 1) \
- F(NormalizeElements, 1, 1) \
- F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
- F(IsArray, 1, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
- F(FixedArrayGet, 2, 1) \
- F(FixedArraySet, 3, 1) \
- F(FastOneByteArrayJoin, 2, 1)
+#define FOR_EACH_INTRINSIC_ARRAY(F) \
+ F(FinishArrayPrototypeSetup, 1, 1) \
+ F(SpecialArrayFunctions, 0, 1) \
+ F(TransitionElementsKind, 2, 1) \
+ F(PushIfAbsent, 2, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(ArrayConstructor, -1, 1) \
+ F(NewArray, -1 /* >= 3 */, 1) \
+ F(InternalArrayConstructor, -1, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(HasCachedArrayIndex, 1, 1) \
+ F(GetCachedArrayIndex, 1, 1) \
+ F(FixedArrayGet, 2, 1) \
+ F(FixedArraySet, 3, 1) \
+ F(FastOneByteArrayJoin, 2, 1) \
+ F(ArraySpeciesConstructor, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
@@ -73,26 +74,24 @@ namespace internal {
F(AtomicsFutexNumWaitersForTesting, 2, 1)
-#define FOR_EACH_INTRINSIC_CLASSES(F) \
- F(ThrowNonMethodError, 0, 1) \
- F(ThrowUnsupportedSuperError, 0, 1) \
- F(ThrowConstructorNonCallableError, 0, 1) \
- F(ThrowArrayNotSubclassableError, 0, 1) \
- F(ThrowStaticPrototypeError, 0, 1) \
- F(ThrowIfStaticPrototype, 1, 1) \
- F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 5, 1) \
- F(FinalizeClassDefinition, 2, 1) \
- F(DefineClassMethod, 3, 1) \
- F(ClassGetSourceCode, 1, 1) \
- F(LoadFromSuper, 4, 1) \
- F(LoadKeyedFromSuper, 4, 1) \
- F(StoreToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(HandleStepInForDerivedConstructors, 1, 1) \
- F(DefaultConstructorCallSuper, 2, 1)
+#define FOR_EACH_INTRINSIC_CLASSES(F) \
+ F(ThrowNonMethodError, 0, 1) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(ThrowConstructorNonCallableError, 1, 1) \
+ F(ThrowArrayNotSubclassableError, 0, 1) \
+ F(ThrowStaticPrototypeError, 0, 1) \
+ F(ThrowIfStaticPrototype, 1, 1) \
+ F(HomeObjectSymbol, 0, 1) \
+ F(DefineClass, 5, 1) \
+ F(FinalizeClassDefinition, 2, 1) \
+ F(DefineClassMethod, 3, 1) \
+ F(LoadFromSuper, 4, 1) \
+ F(LoadKeyedFromSuper, 4, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1) \
+ F(StoreKeyedToSuper_Strict, 4, 1) \
+ F(StoreKeyedToSuper_Sloppy, 4, 1) \
+ F(GetSuperConstructor, 1, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
@@ -126,28 +125,21 @@ namespace internal {
F(ObservationWeakMapCreate, 0, 1)
-#define FOR_EACH_INTRINSIC_COMPILER(F) \
- F(CompileLazy, 1, 1) \
- F(CompileOptimized, 2, 1) \
- F(NotifyStubFailure, 0, 1) \
- F(NotifyDeoptimized, 1, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(TryInstallOptimizedCode, 1, 1) \
- F(CompileString, 2, 1) \
+#define FOR_EACH_INTRINSIC_COMPILER(F) \
+ F(CompileLazy, 1, 1) \
+ F(CompileOptimized_Concurrent, 1, 1) \
+ F(CompileOptimized_NotConcurrent, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
+ F(NotifyDeoptimized, 1, 1) \
+ F(CompileForOnStackReplacement, 1, 1) \
+ F(TryInstallOptimizedCode, 1, 1) \
F(ResolvePossiblyDirectEval, 5, 1)
#define FOR_EACH_INTRINSIC_DATE(F) \
- F(DateMakeDay, 2, 1) \
- F(DateSetValue, 3, 1) \
F(IsDate, 1, 1) \
- F(ThrowNotDateError, 0, 1) \
F(DateCurrentTime, 0, 1) \
- F(DateParseString, 2, 1) \
- F(DateLocalTimezone, 1, 1) \
- F(DateToUTC, 1, 1) \
- F(DateCacheVersion, 0, 1) \
- F(DateField, 2 /* date object, field index */, 1)
+ F(ThrowNotDateError, 0, 1)
#define FOR_EACH_INTRINSIC_DEBUG(F) \
@@ -183,7 +175,7 @@ namespace internal {
F(ClearBreakPoint, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 4, 1) \
+ F(PrepareStep, 2, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 6, 1) \
F(DebugEvaluateGlobal, 4, 1) \
@@ -193,15 +185,15 @@ namespace internal {
F(DebugGetPrototype, 1, 1) \
F(DebugSetScriptSource, 2, 1) \
F(FunctionGetInferredName, 1, 1) \
+ F(FunctionGetDebugName, 1, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
F(ExecuteInDebugContext, 1, 1) \
F(GetDebugContext, 0, 1) \
F(CollectGarbage, 1, 1) \
F(GetHeapUsage, 0, 1) \
F(GetScript, 1, 1) \
- F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
- F(DebugPushPromise, 3, 1) \
+ F(DebugPushPromise, 2, 1) \
F(DebugPopPromise, 0, 1) \
F(DebugPromiseEvent, 1, 1) \
F(DebugAsyncTaskEvent, 1, 1) \
@@ -232,37 +224,29 @@ namespace internal {
F(InterpreterForInPrepare, 1, 1)
-#define FOR_EACH_INTRINSIC_FUNCTION(F) \
- F(FunctionGetName, 1, 1) \
- F(FunctionSetName, 2, 1) \
- F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
- F(CompleteFunctionConstruction, 3, 1) \
- F(FunctionIsArrow, 1, 1) \
- F(FunctionIsConciseMethod, 1, 1) \
- F(FunctionRemovePrototype, 1, 1) \
- F(FunctionGetScript, 1, 1) \
- F(FunctionGetSourceCode, 1, 1) \
- F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetPositionForOffset, 2, 1) \
- F(FunctionSetInstanceClassName, 2, 1) \
- F(FunctionSetLength, 2, 1) \
- F(FunctionSetPrototype, 2, 1) \
- F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionHidesSource, 1, 1) \
- F(SetCode, 2, 1) \
- F(SetNativeFlag, 1, 1) \
- F(ThrowStrongModeTooFewArguments, 0, 1) \
- F(IsConstructor, 1, 1) \
- F(SetForceInlineFlag, 1, 1) \
- F(FunctionBindArguments, 4, 1) \
- F(BoundFunctionGetBindings, 1, 1) \
- F(NewObjectFromBound, 1, 1) \
- F(Call, -1 /* >= 2 */, 1) \
- F(Apply, 5, 1) \
- F(GetOriginalConstructor, 0, 1) \
- F(ConvertReceiver, 1, 1) \
- F(IsConstructCall, 0, 1) \
- F(IsFunction, 1, 1)
+#define FOR_EACH_INTRINSIC_FUNCTION(F) \
+ F(FunctionGetName, 1, 1) \
+ F(FunctionSetName, 2, 1) \
+ F(FunctionRemovePrototype, 1, 1) \
+ F(FunctionGetScript, 1, 1) \
+ F(FunctionGetSourceCode, 1, 1) \
+ F(FunctionGetScriptSourcePosition, 1, 1) \
+ F(FunctionGetPositionForOffset, 2, 1) \
+ F(FunctionSetInstanceClassName, 2, 1) \
+ F(FunctionSetLength, 2, 1) \
+ F(FunctionSetPrototype, 2, 1) \
+ F(FunctionIsAPIFunction, 1, 1) \
+ F(SetCode, 2, 1) \
+ F(SetNativeFlag, 1, 1) \
+ F(ThrowStrongModeTooFewArguments, 0, 1) \
+ F(IsConstructor, 1, 1) \
+ F(SetForceInlineFlag, 1, 1) \
+ F(Call, -1 /* >= 2 */, 1) \
+ F(TailCall, -1 /* >= 2 */, 1) \
+ F(Apply, 5, 1) \
+ F(ConvertReceiver, 1, 1) \
+ F(IsFunction, 1, 1) \
+ F(FunctionToString, 1, 1)
#define FOR_EACH_INTRINSIC_GENERATOR(F) \
@@ -275,7 +259,6 @@ namespace internal {
F(GeneratorGetReceiver, 1, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
- F(FunctionIsGenerator, 1, 1) \
F(GeneratorNext, 2, 1) \
F(GeneratorThrow, 2, 1)
@@ -320,9 +303,11 @@ namespace internal {
F(UnwindAndFindExceptionHandler, 0, 1) \
F(PromoteScheduledException, 0, 1) \
F(ThrowReferenceError, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
F(NewTypeError, 2, 1) \
F(NewSyntaxError, 2, 1) \
F(NewReferenceError, 2, 1) \
+ F(ThrowIllegalInvocation, 0, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowStrongModeImplicitConversion, 0, 1) \
@@ -335,7 +320,6 @@ namespace internal {
F(CollectStackTrace, 2, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
- F(ErrorToStringRT, 1, 1) \
F(FormatMessageString, 4, 1) \
F(CallSiteGetFileNameRT, 1, 1) \
F(CallSiteGetFunctionNameRT, 1, 1) \
@@ -349,11 +333,10 @@ namespace internal {
F(CallSiteIsConstructorRT, 1, 1) \
F(IS_VAR, 1, 1) \
F(IncrementStatsCounter, 1, 1) \
- F(HarmonyToString, 0, 1) \
- F(GetTypeFeedbackVector, 1, 1) \
- F(GetCallerJSFunction, 0, 1) \
- F(GetCodeStubExportsObject, 0, 1) \
- F(ThrowCalledNonCallable, 1, 1)
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(IncrementUseCounter, 1, 1)
#define FOR_EACH_INTRINSIC_JSON(F) \
@@ -363,6 +346,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_LITERALS(F) \
+ F(CreateRegExpLiteral, 4, 1) \
F(CreateObjectLiteral, 4, 1) \
F(CreateArrayLiteral, 4, 1) \
F(CreateArrayLiteralStubBailout, 3, 1) \
@@ -378,7 +362,7 @@ namespace internal {
F(LiveEditFunctionSetScript, 2, 1) \
F(LiveEditReplaceRefToNestedFunction, 3, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditCheckAndDropActivations, 2, 1) \
+ F(LiveEditCheckAndDropActivations, 3, 1) \
F(LiveEditCompareStrings, 2, 1) \
F(LiveEditRestartFrame, 2, 1)
@@ -402,7 +386,7 @@ namespace internal {
F(MathSqrt, 1, 1) \
F(MathFround, 1, 1) \
F(IsMinusZero, 1, 1) \
- F(InitializeRNG, 0, 1)
+ F(GenerateRandomNumbers, 1, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
@@ -422,7 +406,9 @@ namespace internal {
F(SmiLexicographicCompare, 2, 1) \
F(MaxSmi, 0, 1) \
F(IsSmi, 1, 1) \
- F(GetRootNaN, 0, 1)
+ F(GetRootNaN, 0, 1) \
+ F(GetHoleNaNUpper, 0, 1) \
+ F(GetHoleNaNLower, 0, 1)
#define FOR_EACH_INTRINSIC_OBJECT(F) \
@@ -430,11 +416,8 @@ namespace internal {
F(InternalSetPrototype, 2, 1) \
F(SetPrototype, 2, 1) \
F(GetOwnProperty, 2, 1) \
- F(PreventExtensions, 1, 1) \
- F(IsExtensible, 1, 1) \
+ F(GetOwnProperty_Legacy, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(ObjectFreeze, 1, 1) \
- F(ObjectSeal, 1, 1) \
F(GetProperty, 2, 1) \
F(GetPropertyStrong, 2, 1) \
F(KeyedGetProperty, 2, 1) \
@@ -450,14 +433,10 @@ namespace internal {
F(DeleteProperty_Strict, 2, 1) \
F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
- F(IsPropertyEnumerable, 2, 1) \
+ F(PropertyIsEnumerable, 2, 1) \
F(GetPropertyNamesFast, 1, 1) \
- F(GetOwnPropertyNames, 2, 1) \
- F(GetOwnElementNames, 1, 1) \
+ F(GetOwnPropertyKeys, 2, 1) \
F(GetInterceptorInfo, 1, 1) \
- F(GetNamedInterceptorPropertyNames, 1, 1) \
- F(GetIndexedInterceptorElementNames, 1, 1) \
- F(OwnKeys, 1, 1) \
F(ToFastProperties, 1, 1) \
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
@@ -474,10 +453,8 @@ namespace internal {
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
F(JSValueGetValue, 1, 1) \
- F(HeapObjectGetMap, 1, 1) \
- F(MapGetInstanceType, 1, 1) \
F(ObjectEquals, 2, 1) \
- F(IsSpecObject, 1, 1) \
+ F(IsJSReceiver, 1, 1) \
F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
@@ -541,16 +518,13 @@ namespace internal {
F(BitwiseXor, 2, 1) \
F(BitwiseXor_Strong, 2, 1)
-#define FOR_EACH_INTRINSIC_PROXY(F) \
- F(CreateJSProxy, 2, 1) \
- F(CreateJSFunctionProxy, 4, 1) \
- F(IsJSProxy, 1, 1) \
- F(IsJSFunctionProxy, 1, 1) \
- F(GetHandler, 1, 1) \
- F(GetCallTrap, 1, 1) \
- F(GetConstructTrap, 1, 1) \
- F(Fix, 1, 1)
-
+#define FOR_EACH_INTRINSIC_PROXY(F) \
+ F(IsJSProxy, 1, 1) \
+ F(JSProxyCall, -1 /* >= 2 */, 1) \
+ F(JSProxyConstruct, -1 /* >= 3 */, 1) \
+ F(JSProxyGetTarget, 1, 1) \
+ F(JSProxyGetHandler, 1, 1) \
+ F(JSProxyRevoke, 1, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F) \
F(StringReplaceGlobalRegExpWithString, 4, 1) \
@@ -560,7 +534,6 @@ namespace internal {
F(RegExpSource, 1, 1) \
F(RegExpConstructResult, 3, 1) \
F(RegExpInitializeAndCompile, 3, 1) \
- F(MaterializeRegExpLiteral, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpExecReThrow, 4, 1) \
F(IsRegExp, 1, 1)
@@ -575,8 +548,10 @@ namespace internal {
F(InitializeLegacyConstLookupSlot, 3, 1) \
F(NewSloppyArguments_Generic, 1, 1) \
F(NewStrictArguments_Generic, 1, 1) \
+ F(NewRestArguments_Generic, 2, 1) \
F(NewSloppyArguments, 3, 1) \
F(NewStrictArguments, 3, 1) \
+ F(NewRestParam, 3, 1) \
F(NewClosure, 1, 1) \
F(NewClosure_Tenured, 1, 1) \
F(NewScriptContext, 2, 1) \
@@ -900,36 +875,35 @@ namespace internal {
F(Bool8x16Shuffle, 18, 1)
-#define FOR_EACH_INTRINSIC_STRINGS(F) \
- F(StringReplaceOneCharWithString, 3, 1) \
- F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
- F(SubString, 3, 1) \
- F(StringAdd, 2, 1) \
- F(InternalizeString, 1, 1) \
- F(StringMatch, 3, 1) \
- F(StringCharCodeAtRT, 2, 1) \
- F(StringCompare, 2, 1) \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
- F(StringToArray, 2, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
- F(StringTrim, 3, 1) \
- F(TruncateString, 2, 1) \
- F(NewString, 2, 1) \
- F(StringEquals, 2, 1) \
- F(FlattenString, 1, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(OneByteSeqStringGetChar, 2, 1) \
- F(OneByteSeqStringSetChar, 3, 1) \
- F(TwoByteSeqStringGetChar, 2, 1) \
- F(TwoByteSeqStringSetChar, 3, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(StringGetLength, 1, 1)
+#define FOR_EACH_INTRINSIC_STRINGS(F) \
+ F(StringReplaceOneCharWithString, 3, 1) \
+ F(StringIndexOf, 3, 1) \
+ F(StringLastIndexOf, 3, 1) \
+ F(StringLocaleCompare, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringAdd, 2, 1) \
+ F(InternalizeString, 1, 1) \
+ F(StringMatch, 3, 1) \
+ F(StringCharCodeAtRT, 2, 1) \
+ F(StringCompare, 2, 1) \
+ F(StringBuilderConcat, 3, 1) \
+ F(StringBuilderJoin, 3, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
+ F(StringToArray, 2, 1) \
+ F(StringToLowerCase, 1, 1) \
+ F(StringToUpperCase, 1, 1) \
+ F(StringTrim, 3, 1) \
+ F(TruncateString, 2, 1) \
+ F(NewString, 2, 1) \
+ F(StringEquals, 2, 1) \
+ F(FlattenString, 1, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringGetChar, 2, 1) \
+ F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringGetChar, 2, 1) \
+ F(TwoByteSeqStringSetChar, 3, 1) \
+ F(StringCharCodeAt, 2, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
@@ -991,10 +965,8 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferInitialize, 3, 1) \
F(ArrayBufferGetByteLength, 1, 1) \
- F(ArrayBufferSliceImpl, 3, 1) \
- F(ArrayBufferIsView, 1, 1) \
+ F(ArrayBufferSliceImpl, 4, 1) \
F(ArrayBufferNeuter, 1, 1) \
F(TypedArrayInitialize, 6, 1) \
F(TypedArrayInitializeFromArrayLike, 4, 1) \
@@ -1008,6 +980,7 @@ namespace internal {
F(IsTypedArray, 1, 1) \
F(IsSharedTypedArray, 1, 1) \
F(IsSharedIntegerTypedArray, 1, 1) \
+ F(IsSharedInteger32TypedArray, 1, 1) \
F(DataViewInitialize, 4, 1) \
F(DataViewGetUint8, 3, 1) \
F(DataViewGetInt8, 3, 1) \
@@ -1040,29 +1013,29 @@ namespace internal {
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
#define FOR_EACH_INTRINSIC_IC(F) \
- F(LoadIC_Miss, 3, 1) \
- F(KeyedLoadIC_Miss, 3, 1) \
+ F(BinaryOpIC_Miss, 2, 1) \
+ F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
F(CallIC_Miss, 3, 1) \
- F(StoreIC_Miss, 3, 1) \
- F(StoreIC_Slow, 3, 1) \
- F(KeyedStoreIC_Miss, 3, 1) \
- F(KeyedStoreIC_Slow, 3, 1) \
- F(StoreCallbackProperty, 5, 1) \
- F(LoadPropertyWithInterceptorOnly, 3, 1) \
- F(LoadPropertyWithInterceptor, 3, 1) \
- F(LoadElementWithInterceptor, 2, 1) \
- F(StorePropertyWithInterceptor, 3, 1) \
F(CompareIC_Miss, 3, 1) \
- F(BinaryOpIC_Miss, 2, 1) \
F(CompareNilIC_Miss, 1, 1) \
- F(Unreachable, 0, 1) \
- F(ToBooleanIC_Miss, 1, 1) \
+ F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
+ F(KeyedLoadIC_Miss, 4, 1) \
F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
- F(KeyedStoreIC_MissFromStubFailure, 3, 1) \
- F(StoreIC_MissFromStubFailure, 3, 1) \
- F(ElementsTransitionAndStoreIC_Miss, 4, 1) \
- F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
- F(LoadIC_MissFromStubFailure, 0, 1)
+ F(KeyedStoreIC_Miss, 5, 1) \
+ F(KeyedStoreIC_MissFromStubFailure, 5, 1) \
+ F(KeyedStoreIC_Slow, 5, 1) \
+ F(LoadElementWithInterceptor, 2, 1) \
+ F(LoadIC_Miss, 4, 1) \
+ F(LoadIC_MissFromStubFailure, 4, 1) \
+ F(LoadPropertyWithInterceptor, 3, 1) \
+ F(LoadPropertyWithInterceptorOnly, 3, 1) \
+ F(StoreCallbackProperty, 5, 1) \
+ F(StoreIC_Miss, 5, 1) \
+ F(StoreIC_MissFromStubFailure, 5, 1) \
+ F(StoreIC_Slow, 5, 1) \
+ F(StorePropertyWithInterceptor, 3, 1) \
+ F(ToBooleanIC_Miss, 1, 1) \
+ F(Unreachable, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
@@ -1166,7 +1139,7 @@ class Runtime : public AllStatic {
// Get the runtime intrinsic function table.
static const Function* RuntimeFunctionTable(Isolate* isolate);
- MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
+ MUST_USE_RESULT static Maybe<bool> DeleteObjectProperty(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
LanguageMode language_mode);
@@ -1204,15 +1177,6 @@ class Runtime : public AllStatic {
static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
Handle<Object>);
-
- // Find the arguments of the JavaScript function invocation that called
- // into C++ code. Collect these in a newly allocated array of handles
- // (possibly prefixed by a number of empty handles).
- // TODO(mstarzinger): Temporary workaround until this is only used by the
- // %_Arguments and %_ArgumentsLength intrinsics. Make this function local to
- // runtime-scopes.cc then.
- static base::SmartArrayPointer<Handle<Object>> GetCallerArguments(
- Isolate* isolate, int prefix_argc, int* total_argc);
};
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index d027ec9dc7..f30e794009 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -35,12 +35,6 @@ FixedArray* NativesCollection<EXPERIMENTAL_EXTRAS>::GetSourceCache(Heap* heap) {
}
-template <>
-FixedArray* NativesCollection<CODE_STUB>::GetSourceCache(Heap* heap) {
- return heap->code_stub_natives_source_cache();
-}
-
-
template <NativeType type>
void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
for (int i = 0; i < GetBuiltinsCount(); i++) {
@@ -54,7 +48,6 @@ void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
// Explicit template instantiations.
template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<CODE_STUB>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXPERIMENTAL_EXTRAS>::UpdateSourceCache(
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index 7e5e6c7ba0..6505d15571 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -93,23 +93,17 @@ class NativesStore {
return Vector<const char>::cast(name);
}
- bool ReadNameAndContentPair(SnapshotByteSource* bytes) {
+ void ReadNameAndContentPair(SnapshotByteSource* bytes) {
const byte* id;
- int id_length;
const byte* source;
- int source_length;
- bool success = bytes->GetBlob(&id, &id_length) &&
- bytes->GetBlob(&source, &source_length);
- if (success) {
- Vector<const char> id_vector(reinterpret_cast<const char*>(id),
- id_length);
- Vector<const char> source_vector(
- reinterpret_cast<const char*>(source), source_length);
- native_ids_.Add(id_vector);
- native_source_.Add(source_vector);
- native_names_.Add(NameFromId(id, id_length));
- }
- return success;
+ int id_length = bytes->GetBlob(&id);
+ int source_length = bytes->GetBlob(&source);
+ Vector<const char> id_vector(reinterpret_cast<const char*>(id), id_length);
+ Vector<const char> source_vector(reinterpret_cast<const char*>(source),
+ source_length);
+ native_ids_.Add(id_vector);
+ native_source_.Add(source_vector);
+ native_names_.Add(NameFromId(id, id_length));
}
List<Vector<const char> > native_ids_;
@@ -125,11 +119,11 @@ template<NativeType type>
class NativesHolder {
public:
static NativesStore* get() {
- DCHECK(holder_);
+ CHECK(holder_);
return holder_;
}
static void set(NativesStore* store) {
- DCHECK(store);
+ CHECK(store);
holder_ = store;
}
static bool empty() { return holder_ == NULL; }
@@ -157,7 +151,6 @@ void ReadNatives() {
if (natives_blob_ && NativesHolder<CORE>::empty()) {
SnapshotByteSource bytes(natives_blob_->data, natives_blob_->raw_size);
NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
- NativesHolder<CODE_STUB>::set(NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXPERIMENTAL>::set(
NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXTRAS>::set(NativesStore::MakeFromScriptsSource(&bytes));
@@ -188,7 +181,6 @@ void SetNativesFromFile(StartupData* natives_blob) {
*/
void DisposeNatives() {
NativesHolder<CORE>::Dispose();
- NativesHolder<CODE_STUB>::Dispose();
NativesHolder<EXPERIMENTAL>::Dispose();
NativesHolder<EXTRAS>::Dispose();
NativesHolder<EXPERIMENTAL_EXTRAS>::Dispose();
@@ -241,7 +233,6 @@ Vector<const char> NativesCollection<type>::GetScriptsSource() {
template Vector<const char> NativesCollection<T>::GetScriptName(int i); \
template Vector<const char> NativesCollection<T>::GetScriptsSource();
INSTANTIATE_TEMPLATES(CORE)
-INSTANTIATE_TEMPLATES(CODE_STUB)
INSTANTIATE_TEMPLATES(EXPERIMENTAL)
INSTANTIATE_TEMPLATES(EXTRAS)
INSTANTIATE_TEMPLATES(EXPERIMENTAL_EXTRAS)
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index c1e2fcde62..07f6b1aed3 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -15,7 +15,6 @@ namespace internal {
enum NativeType {
CORE,
- CODE_STUB,
EXPERIMENTAL,
EXTRAS,
EXPERIMENTAL_EXTRAS,
@@ -49,7 +48,6 @@ class NativesCollection {
};
typedef NativesCollection<CORE> Natives;
-typedef NativesCollection<CODE_STUB> CodeStubNatives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
typedef NativesCollection<EXTRAS> ExtraNatives;
typedef NativesCollection<EXPERIMENTAL_EXTRAS> ExperimentalExtraNatives;
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index 4ccadd256d..421cf0721c 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -15,7 +15,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/objects.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/profiler/cpu-profiler.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/natives.h"
@@ -60,8 +60,6 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
"Heap::NewSpaceAllocationTopAddress()");
- Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
- "Debug::step_in_fp_addr()");
Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
"mod_two_doubles");
// Keyed lookup cache.
@@ -139,11 +137,10 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
"Debug::after_break_target_address()");
- Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
- .address(),
- "Debug::restarter_frame_function_pointer_address()");
Add(ExternalReference::debug_is_active_address(isolate).address(),
"Debug::is_active_address()");
+ Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
+ "Debug::step_in_enabled_address()");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
@@ -540,8 +537,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
isolate_->heap()->set_native_contexts_list(
- isolate_->heap()->code_stub_context());
-
+ isolate_->heap()->undefined_value());
// The allocation site list is build during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
@@ -552,7 +548,6 @@ void Deserializer::Deserialize(Isolate* isolate) {
// Update data pointers to the external strings containing natives sources.
Natives::UpdateSourceCache(isolate_->heap());
ExtraNatives::UpdateSourceCache(isolate_->heap());
- CodeStubNatives::UpdateSourceCache(isolate_->heap());
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
@@ -561,8 +556,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
MaybeHandle<Object> Deserializer::DeserializePartial(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
Initialize(isolate);
if (!ReserveSpace()) {
V8::FatalProcessOutOfMemory("deserialize context");
@@ -579,18 +573,13 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
OldSpace* code_space = isolate_->heap()->code_space();
Address start_address = code_space->top();
Object* root;
- Object* outdated_contexts;
VisitPointer(&root);
DeserializeDeferredObjects();
- VisitPointer(&outdated_contexts);
// There's no code deserialized here. If this assert fires then that's
// changed and logging should be added to notify the profiler et al of the
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
- CHECK(outdated_contexts->IsFixedArray());
- *outdated_contexts_out =
- Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate);
return Handle<Object>(root, isolate);
}
@@ -984,7 +973,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
if (how == kFromCode) { \
Address location_of_branch_data = reinterpret_cast<Address>(current); \
Assembler::deserialization_set_special_target_at( \
- location_of_branch_data, \
+ isolate, location_of_branch_data, \
Code::cast(HeapObject::FromAddress(current_object_address)), \
reinterpret_cast<Address>(new_object)); \
location_of_branch_data += Assembler::kSpecialTargetSize; \
@@ -1123,9 +1112,9 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
Address pc = code->entry() + pc_offset;
Address target = code->entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
- pc, target, data == kInternalReference
- ? RelocInfo::INTERNAL_REFERENCE
- : RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ isolate, pc, target, data == kInternalReference
+ ? RelocInfo::INTERNAL_REFERENCE
+ : RelocInfo::INTERNAL_REFERENCE_ENCODED);
break;
}
@@ -1174,11 +1163,6 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
ExtraNatives::GetScriptSource(source_.Get()), current);
break;
- case kCodeStubNativesStringResource:
- current = CopyInNativesSource(
- CodeStubNatives::GetScriptSource(source_.Get()), current);
- break;
-
// Deserialize raw data of variable length.
case kVariableRawData: {
int size_in_bytes = source_.GetInt();
@@ -1484,39 +1468,10 @@ void PartialSerializer::Serialize(Object** o) {
}
VisitPointer(o);
SerializeDeferredObjects();
- SerializeOutdatedContextsAsFixedArray();
Pad();
}
-void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
- int length = outdated_contexts_.length();
- if (length == 0) {
- FixedArray* empty = isolate_->heap()->empty_fixed_array();
- SerializeObject(empty, kPlain, kStartOfObject, 0);
- } else {
- // Serialize an imaginary fixed array containing outdated contexts.
- int size = FixedArray::SizeFor(length);
- Allocate(NEW_SPACE, size);
- sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray");
- sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words");
- Map* map = isolate_->heap()->fixed_array_map();
- SerializeObject(map, kPlain, kStartOfObject, 0);
- Smi* length_smi = Smi::FromInt(length);
- sink_->Put(kOnePointerRawData, "Smi");
- for (int i = 0; i < kPointerSize; i++) {
- sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
- }
- for (int i = 0; i < length; i++) {
- Context* context = outdated_contexts_[i];
- BackReference back_reference = back_reference_map_.Lookup(context);
- sink_->Put(kBackref + back_reference.space(), "BackRef");
- PutBackReference(context, back_reference);
- }
- }
-}
-
-
bool Serializer::ShouldBeSkipped(Object** current) {
Object** roots = isolate()->heap()->roots_array_start();
return current == &roots[Heap::kStoreBufferTopRootIndex]
@@ -1714,10 +1669,7 @@ StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
- // Make sure that all functions are derived from the code-stub context
- DCHECK(!obj->IsJSFunction() ||
- JSFunction::cast(obj)->GetCreationContext() ==
- isolate()->heap()->code_stub_context());
+ DCHECK(!obj->IsJSFunction());
int root_index = root_index_map_.Lookup(obj);
// We can only encode roots as such if it has already been serialized.
@@ -1844,7 +1796,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
// Clear literal boilerplates.
- if (obj->IsJSFunction() && !JSFunction::cast(obj)->shared()->bound()) {
+ if (obj->IsJSFunction()) {
FixedArray* literals = JSFunction::cast(obj)->literals();
for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
}
@@ -1852,13 +1804,6 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
serializer.Serialize();
-
- if (obj->IsContext() &&
- Context::cast(obj)->global_object() == global_object_) {
- // Context refers to the current global object. This reference will
- // become outdated after deserialization.
- outdated_contexts_.Add(Context::cast(obj));
- }
}
@@ -1974,7 +1919,7 @@ class UnlinkWeakCellScope {
if (object->IsWeakCell()) {
weak_cell_ = WeakCell::cast(object);
next_ = weak_cell_->next();
- weak_cell_->clear_next(object->GetHeap());
+ weak_cell_->clear_next(object->GetHeap()->the_hole_value());
}
}
@@ -2249,12 +2194,6 @@ void Serializer::ObjectSerializer::VisitExternalOneByteString(
kExtraNativesStringResource)) {
return;
}
- if (SerializeExternalNativeSourceString(
- CodeStubNatives::GetBuiltinsCount(), resource_pointer,
- CodeStubNatives::GetSourceCache(serializer_->isolate()->heap()),
- kCodeStubNativesStringResource)) {
- return;
- }
// One of the strings in the natives cache should match the resource. We
// don't expect any other kinds of external strings here.
UNREACHABLE();
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index f68ad3739a..7f4676eafa 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -136,8 +136,6 @@ class SerializerDeserializer: public ObjectVisitor {
public:
static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
- static int nop() { return kNop; }
-
// No reservation for large object space necessary.
static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
static const int kNumberOfSpaces = LAST_SPACE + 1;
@@ -221,10 +219,7 @@ class SerializerDeserializer: public ObjectVisitor {
static const int kNativesStringResource = 0x5d;
// Used for the source code for compiled stubs, which is in the executable,
// but is referred to from external strings in the snapshot.
- static const int kCodeStubNativesStringResource = 0x5e;
- // Used for the source code for V8 extras, which is in the executable,
- // but is referred to from external strings in the snapshot.
- static const int kExtraNativesStringResource = 0x5f;
+ static const int kExtraNativesStringResource = 0x5e;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
@@ -373,9 +368,8 @@ class Deserializer: public SerializerDeserializer {
void Deserialize(Isolate* isolate);
// Deserialize a single object and the objects reachable from it.
- MaybeHandle<Object> DeserializePartial(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out);
+ MaybeHandle<Object> DeserializePartial(Isolate* isolate,
+ Handle<JSGlobalProxy> global_proxy);
// Deserialize a shared function info. Fail gracefully.
MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
@@ -618,7 +612,6 @@ class PartialSerializer : public Serializer {
SnapshotByteSink* sink)
: Serializer(isolate, sink),
startup_serializer_(startup_snapshot_serializer),
- outdated_contexts_(0),
global_object_(NULL) {
InitializeCodeAddressMap();
}
@@ -634,10 +627,7 @@ class PartialSerializer : public Serializer {
int PartialSnapshotCacheIndex(HeapObject* o);
bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
- void SerializeOutdatedContextsAsFixedArray();
-
Serializer* startup_serializer_;
- List<Context*> outdated_contexts_;
Object* global_object_;
PartialCacheIndexMap partial_cache_index_map_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
@@ -661,10 +651,6 @@ class StartupSerializer : public Serializer {
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void SerializeWeakReferencesAndDeferred();
- void Serialize() {
- SerializeStrongReferences();
- SerializeWeakReferencesAndDeferred();
- }
private:
intptr_t root_index_wave_front_;
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 0b7e11d1ec..97e7c6b506 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -66,8 +66,7 @@ bool Snapshot::Initialize(Isolate* isolate) {
MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
if (!isolate->snapshot_available()) return Handle<Context>();
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -77,15 +76,11 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
- MaybeHandle<Object> maybe_context = deserializer.DeserializePartial(
- isolate, global_proxy, outdated_contexts_out);
+ MaybeHandle<Object> maybe_context =
+ deserializer.DeserializePartial(isolate, global_proxy);
Handle<Object> result;
if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>();
CHECK(result->IsContext());
- // If the snapshot does not contain a custom script, we need to update
- // the global object for exactly two contexts: the builtins context and the
- // script context that has the global "this" binding.
- CHECK(EmbedsScript(isolate) || (*outdated_contexts_out)->length() == 2);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = context_data.length();
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index 7048c355ec..812de5e2a8 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -40,27 +40,12 @@ void SnapshotByteSink::PutRaw(const byte* data, int number_of_bytes,
}
-bool SnapshotByteSource::AtEOF() {
- if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
- for (int x = position_; x < length_; x++) {
- if (data_[x] != SerializerDeserializer::nop()) return false;
- }
- return true;
-}
-
-
-bool SnapshotByteSource::GetBlob(const byte** data, int* number_of_bytes) {
+int SnapshotByteSource::GetBlob(const byte** data) {
int size = GetInt();
- *number_of_bytes = size;
-
- if (position_ + size <= length_) {
- *data = &data_[position_];
- Advance(size);
- return true;
- } else {
- Advance(length_ - position_); // proceed until end.
- return false;
- }
+ CHECK(position_ + size <= length_);
+ *data = &data_[position_];
+ Advance(size);
+ return size;
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index af617ccee1..360ec76bb6 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -57,9 +57,8 @@ class SnapshotByteSource final {
return answer;
}
- bool GetBlob(const byte** data, int* number_of_bytes);
-
- bool AtEOF();
+ // Returns length.
+ int GetBlob(const byte** data);
int position() { return position_; }
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index f0b90bbacd..d99f118bff 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -37,8 +37,7 @@ class Snapshot : public AllStatic {
static bool Initialize(Isolate* isolate);
// Create a new context using the internal partial snapshot.
static MaybeHandle<Context> NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- Handle<FixedArray>* outdated_contexts_out);
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy);
static bool HaveASnapshotToStartFrom(Isolate* isolate);
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/startup-data-util.cc
index 92c4b5b3e9..4e0ad97a0c 100644
--- a/deps/v8/src/startup-data-util.cc
+++ b/deps/v8/src/startup-data-util.cc
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/utils.h"
namespace v8 {
@@ -44,10 +45,13 @@ void Load(const char* blob_file, v8::StartupData* startup_data,
void (*setter_fn)(v8::StartupData*)) {
ClearStartupData(startup_data);
- if (!blob_file) return;
+ CHECK(blob_file);
FILE* file = fopen(blob_file, "rb");
- if (!file) return;
+ if (!file) {
+ PrintF(stderr, "Failed to open startup resource '%s'.\n", blob_file);
+ return;
+ }
fseek(file, 0, SEEK_END);
startup_data->raw_size = static_cast<int>(ftell(file));
@@ -58,7 +62,11 @@ void Load(const char* blob_file, v8::StartupData* startup_data,
1, startup_data->raw_size, file));
fclose(file);
- if (startup_data->raw_size == read_size) (*setter_fn)(startup_data);
+ if (startup_data->raw_size == read_size) {
+ (*setter_fn)(startup_data);
+ } else {
+ PrintF(stderr, "Corrupted startup resource '%s'.\n", blob_file);
+ }
}
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
index 92df29a82b..6adf365689 100644
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -37,10 +37,6 @@
'dependencies': [
'../../../tools/gyp/v8.gyp:v8',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'sources': [
'ittnotify_config.h',
'ittnotify_types.h',
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
new file mode 100644
index 0000000000..04f1f2e2ea
--- /dev/null
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/trace-event.h"
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+v8::Platform* TraceEventHelper::GetCurrentPlatform() {
+ return v8::internal::V8::GetCurrentPlatform();
+}
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
new file mode 100644
index 0000000000..d2d423c3be
--- /dev/null
+++ b/deps/v8/src/tracing/trace-event.h
@@ -0,0 +1,535 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_TRACING_TRACE_EVENT_H_
+#define SRC_TRACING_TRACE_EVENT_H_
+
+#include <stddef.h>
+
+#include "base/trace_event/common/trace_event_common.h"
+#include "include/v8-platform.h"
+#include "src/base/atomicops.h"
+
+// This header file defines implementation details of how the trace macros in
+// trace_event_common.h collect and store trace events. Anything not
+// implementation-specific should go in trace_macros_common.h instead of here.
+
+
+// The pointer returned from GetCategoryGroupEnabled() points to a
+// value with zero or more of the following bits. Used in this class only.
+// The TRACE_EVENT macros should only use the value as a bool.
+// These values must be in sync with macro values in trace_log.h in
+// chromium.
+enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
+ // Category group enabled to export events to ETW.
+ kEnabledForETWExport_CategoryGroupEnabledFlags = 1 << 3,
+};
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) v8::internal::tracing::TraceStringWithCopy(str)
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) v8::internal::tracing::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id)
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \
+ category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
+ name) \
+ v8::internal::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (kEnabledForRecording_CategoryGroupEnabledFlags | \
+ kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// The following macro has no implementation, but it needs to exist since
+// it gets called from scoped trace events. It cannot call UNIMPLEMENTED()
+// since an empty implementation is a valid one.
+#define INTERNAL_TRACE_MEMORY(category, name)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->GetCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->getNumTracesRecorded
+
+// Add a trace event to the platform tracing system.
+// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id,
+// uint64_t bind_id,
+// int num_args,
+// const char** arg_names,
+// const uint8_t* arg_types,
+// const uint64_t* arg_values,
+// unsigned int flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform()->AddTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const uint8_t* category_group_enabled,
+// const char* name,
+// uint64_t id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
+ ->UpdateTraceEventDuration
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var))
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
+ v8::base::NoBarrier_Store(&(var), (value))
+
+// The thread buckets for the sampling profiler.
+extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+// TODO(fmeawad): This implementation contradicts that we can have a different
+// configuration for each isolate,
+// https://code.google.com/p/v8/issues/detail?id=4563
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \
+ if (!category_group_enabled) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE( \
+ atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ v8::internal::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, flags, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ uint64_t h = v8::internal::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kNoId, v8::internal::tracing::kNoId, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ v8::internal::tracing::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flow_flags; \
+ v8::internal::tracing::TraceID trace_event_bind_id(bind_id, \
+ &trace_event_flags); \
+ uint64_t h = v8::internal::tracing::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ v8::internal::tracing::kNoId, trace_event_bind_id.data(), \
+ trace_event_flags, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned int trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ v8::internal::tracing::TraceID trace_event_trace_id(id, \
+ &trace_event_flags); \
+ v8::internal::tracing::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.data(), v8::internal::tracing::kNoId, \
+ trace_event_flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Adds a trace event with a given timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
+ timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id and timestamp. Not Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_AND_TIMESTAMP( \
+ phase, category_group, name, id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+// Adds a trace event with a given id, thread_id, and timestamp. Not
+// Implemented.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ UNIMPLEMENTED()
+
+namespace v8 {
+namespace internal {
+namespace tracing {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const uint64_t kNoId = 0;
+
+class TraceEventHelper {
+ public:
+ static v8::Platform* GetCurrentPlatform();
+};
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+ class DontMangle {
+ public:
+ explicit DontMangle(const void* id)
+ : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {}
+ explicit DontMangle(uint64_t id) : data_(id) {}
+ explicit DontMangle(unsigned int id) : data_(id) {}
+ explicit DontMangle(uint16_t id) : data_(id) {}
+ explicit DontMangle(unsigned char id) : data_(id) {}
+ explicit DontMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(int id) : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit DontMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+ };
+
+ class ForceMangle {
+ public:
+ explicit ForceMangle(uint64_t id) : data_(id) {}
+ explicit ForceMangle(unsigned int id) : data_(id) {}
+ explicit ForceMangle(uint16_t id) : data_(id) {}
+ explicit ForceMangle(unsigned char id) : data_(id) {}
+ explicit ForceMangle(int64_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(int id) : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(int16_t id) : data_(static_cast<uint64_t>(id)) {}
+ explicit ForceMangle(signed char id) : data_(static_cast<uint64_t>(id)) {}
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+ };
+
+ TraceID(const void* id, unsigned int* flags)
+ : data_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(DontMangle id, unsigned int* flags) : data_(id.data()) {}
+ TraceID(uint64_t id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(unsigned int id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(uint16_t id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(unsigned char id, unsigned int* flags) : data_(id) { (void)flags; }
+ TraceID(int64_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ TraceID(int id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ TraceID(int16_t id, unsigned int* flags) : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+ TraceID(signed char id, unsigned int* flags)
+ : data_(static_cast<uint64_t>(id)) {
+ (void)flags;
+ }
+
+ uint64_t data() const { return data_; }
+
+ private:
+ uint64_t data_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+ bool as_bool;
+ uint64_t as_uint;
+ int64_t as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ operator const char*() const { return str_; }
+
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, union_member, \
+ value_type_id) \
+ static V8_INLINE void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely casted.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, value_type_id) \
+ static V8_INLINE void SetTraceValue(actual_type arg, unsigned char* type, \
+ uint64_t* value) { \
+ *type = value_type_id; \
+ *value = static_cast<uint64_t>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint16_t, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int64_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int16_t, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
+ TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// These AddTraceEvent template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str and pass through to the tracing API,
+// the arg_values must live throughout these procedures.
+
+static V8_INLINE uint64_t AddTraceEvent(char phase,
+ const uint8_t* category_group_enabled,
+ const char* name, uint64_t id,
+ uint64_t bind_id, unsigned int flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ id, bind_id, kZeroNumArgs, NULL, NULL,
+ NULL, flags);
+}
+
+template <class ARG1_TYPE>
+static V8_INLINE uint64_t AddTraceEvent(char phase,
+ const uint8_t* category_group_enabled,
+ const char* name, uint64_t id,
+ uint64_t bind_id, unsigned int flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ uint8_t arg_types[1];
+ uint64_t arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ id, bind_id, num_args, &arg1_name,
+ arg_types, arg_values, flags);
+}
+
+template <class ARG1_TYPE, class ARG2_TYPE>
+static V8_INLINE uint64_t AddTraceEvent(
+ char phase, const uint8_t* category_group_enabled, const char* name,
+ uint64_t id, uint64_t bind_id, unsigned int flags, const char* arg1_name,
+ const ARG1_TYPE& arg1_val, const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = {arg1_name, arg2_name};
+ unsigned char arg_types[2];
+ uint64_t arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
+ id, bind_id, num_args, arg_names,
+ arg_types, arg_values, flags);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(NULL) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const uint8_t* category_group_enabled, const char* name,
+ uint64_t event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+ // This Data struct workaround is to avoid initializing all the members
+ // in Data during construction of this object, since this object is always
+ // constructed, even when tracing is disabled. If the members of Data were
+ // members of this class instead, compiler warnings occur about potential
+ // uninitialized accesses.
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ uint64_t event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class ScopedTraceBinaryEfficient {
+ public:
+ ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+ ~ScopedTraceBinaryEfficient();
+
+ private:
+ const uint8_t* category_group_enabled_;
+ const char* name_;
+ uint64_t event_handle_;
+};
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exists, it restores
+// the sampling state having recorded.
+template <size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+ explicit TraceEventSamplingStateScope(const char* category_and_name) {
+ previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+ TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+ }
+
+ ~TraceEventSamplingStateScope() {
+ TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+ }
+
+ static V8_INLINE const char* Current() {
+ return reinterpret_cast<const char*>(
+ TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
+ }
+
+ static V8_INLINE void Set(const char* category_and_name) {
+ TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+ const_cast<char*>(category_and_name)));
+ }
+
+ private:
+ const char* previous_state_;
+};
+
+} // namespace tracing
+} // namespace internal
+} // namespace v8
+
+#endif // SRC_TRACING_TRACE_EVENT_H_
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index a5ec52f667..96d9495bf4 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -17,6 +17,14 @@ TransitionArray* TransitionArray::cast(Object* object) {
}
+Object* TransitionArray::next_link() { return get(kNextLinkIndex); }
+
+
+void TransitionArray::set_next_link(Object* next, WriteBarrierMode mode) {
+ return set(kNextLinkIndex, next, mode);
+}
+
+
bool TransitionArray::HasPrototypeTransitions() {
return get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
}
@@ -29,10 +37,9 @@ FixedArray* TransitionArray::GetPrototypeTransitions() {
}
-void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
- WriteBarrierMode mode) {
+void TransitionArray::SetPrototypeTransitions(FixedArray* transitions) {
DCHECK(transitions->IsFixedArray());
- set(kPrototypeTransitionsIndex, transitions, mode);
+ set(kPrototypeTransitionsIndex, transitions);
}
@@ -104,6 +111,8 @@ bool TransitionArray::IsSpecialTransition(Name* name) {
return name == heap->nonextensible_symbol() ||
name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
name == heap->elements_transition_symbol() ||
+ name == heap->strict_function_transition_symbol() ||
+ name == heap->strong_function_transition_symbol() ||
name == heap->observed_symbol();
}
#endif
@@ -158,13 +167,9 @@ PropertyDetails TransitionArray::GetTargetDetails(Name* name, Map* target) {
}
-void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number,
- Name* key,
- Map* target) {
- FixedArray::NoIncrementalWriteBarrierSet(
- this, ToKeyIndex(transition_number), key);
- FixedArray::NoIncrementalWriteBarrierSet(
- this, ToTargetIndex(transition_number), target);
+void TransitionArray::Set(int transition_number, Name* key, Map* target) {
+ set(ToKeyIndex(transition_number), key);
+ set(ToTargetIndex(transition_number), target);
}
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 64b8133528..fc24b28867 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -51,8 +51,7 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
// Re-read existing data; the allocation might have caused it to be cleared.
if (IsSimpleTransition(map->raw_transitions())) {
old_target = GetSimpleTransition(map->raw_transitions());
- result->NoIncrementalWriteBarrierSet(
- 0, GetSimpleTransitionKey(old_target), old_target);
+ result->Set(0, GetSimpleTransitionKey(old_target), old_target);
} else {
result->SetNumberOfTransitions(0);
}
@@ -145,11 +144,11 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
DCHECK_NE(kNotFound, insertion_index);
for (int i = 0; i < insertion_index; ++i) {
- result->NoIncrementalWriteBarrierCopyFrom(array, i, i);
+ result->Set(i, array->GetKey(i), array->GetTarget(i));
}
- result->NoIncrementalWriteBarrierSet(insertion_index, *name, *target);
+ result->Set(insertion_index, *name, *target);
for (int i = insertion_index; i < number_of_transitions; ++i) {
- result->NoIncrementalWriteBarrierCopyFrom(array, i, i + 1);
+ result->Set(i + 1, array->GetKey(i), array->GetTarget(i));
}
SLOW_DCHECK(result->IsSortedNoDuplicates());
@@ -234,6 +233,61 @@ bool TransitionArray::CanHaveMoreTransitions(Handle<Map> map) {
// static
+bool TransitionArray::CompactPrototypeTransitionArray(FixedArray* array) {
+ const int header = kProtoTransitionHeaderSize;
+ int number_of_transitions = NumberOfPrototypeTransitions(array);
+ if (number_of_transitions == 0) {
+ // Empty array cannot be compacted.
+ return false;
+ }
+ int new_number_of_transitions = 0;
+ for (int i = 0; i < number_of_transitions; i++) {
+ Object* cell = array->get(header + i);
+ if (!WeakCell::cast(cell)->cleared()) {
+ if (new_number_of_transitions != i) {
+ array->set(header + new_number_of_transitions, cell);
+ }
+ new_number_of_transitions++;
+ }
+ }
+ // Fill slots that became free with undefined value.
+ for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
+ array->set_undefined(header + i);
+ }
+ if (number_of_transitions != new_number_of_transitions) {
+ SetNumberOfPrototypeTransitions(array, new_number_of_transitions);
+ }
+ return new_number_of_transitions < number_of_transitions;
+}
+
+
+// static
+Handle<FixedArray> TransitionArray::GrowPrototypeTransitionArray(
+ Handle<FixedArray> array, int new_capacity, Isolate* isolate) {
+ // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
+ int capacity = array->length() - kProtoTransitionHeaderSize;
+ new_capacity = Min(kMaxCachedPrototypeTransitions, new_capacity);
+ DCHECK_GT(new_capacity, capacity);
+ int grow_by = new_capacity - capacity;
+ array = isolate->factory()->CopyFixedArrayAndGrow(array, grow_by, TENURED);
+ if (capacity < 0) {
+ // There was no prototype transitions array before, so the size
+ // couldn't be copied. Initialize it explicitly.
+ SetNumberOfPrototypeTransitions(*array, 0);
+ }
+ return array;
+}
+
+
+// static
+int TransitionArray::NumberOfPrototypeTransitionsForTest(Map* map) {
+ FixedArray* transitions = GetPrototypeTransitions(map);
+ CompactPrototypeTransitionArray(transitions);
+ return TransitionArray::NumberOfPrototypeTransitions(transitions);
+}
+
+
+// static
void TransitionArray::PutPrototypeTransition(Handle<Map> map,
Handle<Object> prototype,
Handle<Map> target_map) {
@@ -252,23 +306,16 @@ void TransitionArray::PutPrototypeTransition(Handle<Map> map,
int transitions = NumberOfPrototypeTransitions(*cache) + 1;
if (transitions > capacity) {
- // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
- int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
- if (new_capacity == capacity) return;
- int grow_by = new_capacity - capacity;
-
- Isolate* isolate = map->GetIsolate();
- cache = isolate->factory()->CopyFixedArrayAndGrow(cache, grow_by);
- if (capacity < 0) {
- // There was no prototype transitions array before, so the size
- // couldn't be copied. Initialize it explicitly.
- SetNumberOfPrototypeTransitions(*cache, 0);
+ // Grow the array if compacting it doesn't free space.
+ if (!CompactPrototypeTransitionArray(*cache)) {
+ if (capacity == kMaxCachedPrototypeTransitions) return;
+ cache = GrowPrototypeTransitionArray(cache, 2 * transitions,
+ map->GetIsolate());
+ SetPrototypeTransitions(map, cache);
}
-
- SetPrototypeTransitions(map, cache);
}
- // Reload number of transitions as GC might shrink them.
+ // Reload number of transitions as they might have been compacted.
int last = NumberOfPrototypeTransitions(*cache);
int entry = header + last;
@@ -344,27 +391,23 @@ int TransitionArray::Capacity(Object* raw_transitions) {
Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
int number_of_transitions,
int slack) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> array = isolate->factory()->NewTransitionArray(
LengthFor(number_of_transitions + slack));
+ array->set(kNextLinkIndex, isolate->heap()->undefined_value());
array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
return Handle<TransitionArray>::cast(array);
}
-void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition) {
- NoIncrementalWriteBarrierSet(target_transition,
- origin->GetKey(origin_transition),
- origin->GetTarget(origin_transition));
-}
-
-
-static void ZapTransitionArray(TransitionArray* transitions) {
- MemsetPointer(transitions->data_start(),
+// static
+void TransitionArray::ZapTransitionArray(TransitionArray* transitions) {
+ // Do not zap the next link that is used by GC.
+ STATIC_ASSERT(kNextLinkIndex + 1 == kPrototypeTransitionsIndex);
+ MemsetPointer(transitions->data_start() + kPrototypeTransitionsIndex,
transitions->GetHeap()->the_hole_value(),
- transitions->length());
+ transitions->length() - kPrototypeTransitionsIndex);
+ transitions->SetNumberOfTransitions(0);
}
@@ -387,25 +430,9 @@ void TransitionArray::ReplaceTransitions(Handle<Map> map,
}
-static void ZapPrototypeTransitions(Object* raw_transitions) {
- DCHECK(TransitionArray::IsFullTransitionArray(raw_transitions));
- TransitionArray* transitions = TransitionArray::cast(raw_transitions);
- if (!transitions->HasPrototypeTransitions()) return;
- FixedArray* proto_transitions = transitions->GetPrototypeTransitions();
- MemsetPointer(proto_transitions->data_start(),
- proto_transitions->GetHeap()->the_hole_value(),
- proto_transitions->length());
-}
-
-
void TransitionArray::SetPrototypeTransitions(
Handle<Map> map, Handle<FixedArray> proto_transitions) {
EnsureHasFullTransitionArray(map);
- if (Heap::ShouldZapGarbage()) {
- Object* raw_transitions = map->raw_transitions();
- DCHECK(raw_transitions != *proto_transitions);
- ZapPrototypeTransitions(raw_transitions);
- }
TransitionArray* transitions = TransitionArray::cast(map->raw_transitions());
transitions->SetPrototypeTransitions(*proto_transitions);
}
@@ -427,7 +454,7 @@ void TransitionArray::EnsureHasFullTransitionArray(Handle<Map> map) {
} else if (nof == 1) {
Map* target = GetSimpleTransition(raw_transitions);
Name* key = GetSimpleTransitionKey(target);
- result->NoIncrementalWriteBarrierSet(0, key, target);
+ result->Set(0, key, target);
}
ReplaceTransitions(map, *result);
}
@@ -444,8 +471,10 @@ void TransitionArray::TraverseTransitionTreeInternal(Map* map,
for (int i = 0; i < NumberOfPrototypeTransitions(proto_trans); ++i) {
int index = TransitionArray::kProtoTransitionHeaderSize + i;
WeakCell* cell = WeakCell::cast(proto_trans->get(index));
- TraverseTransitionTreeInternal(Map::cast(cell->value()), callback,
- data);
+ if (!cell->cleared()) {
+ TraverseTransitionTreeInternal(Map::cast(cell->value()), callback,
+ data);
+ }
}
}
for (int i = 0; i < transitions->number_of_transitions(); ++i) {
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 1fcd3860d0..73aca7864e 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -113,14 +113,13 @@ class TransitionArray: public FixedArray {
Object* raw = proto_transitions->get(kProtoTransitionNumberOfEntriesOffset);
return Smi::cast(raw)->value();
}
+ static int NumberOfPrototypeTransitionsForTest(Map* map);
static void SetNumberOfPrototypeTransitions(FixedArray* proto_transitions,
int value);
inline FixedArray* GetPrototypeTransitions();
- inline void SetPrototypeTransitions(
- FixedArray* prototype_transitions,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetPrototypeTransitions(FixedArray* prototype_transitions);
inline Object** GetPrototypeTransitionsSlot();
inline bool HasPrototypeTransitions();
@@ -164,8 +163,11 @@ class TransitionArray: public FixedArray {
static int Capacity(Object* raw_transitions);
- // Casting.
- static inline TransitionArray* cast(Object* obj);
+ inline static TransitionArray* cast(Object* object);
+
+ // This field should be used only by GC.
+ inline void set_next_link(Object* next, WriteBarrierMode mode);
+ inline Object* next_link();
static const int kTransitionSize = 2;
static const int kProtoTransitionHeaderSize = 1;
@@ -179,6 +181,14 @@ class TransitionArray: public FixedArray {
bool print_header = true); // NOLINT
#endif
+#ifdef OBJECT_PRINT
+ void TransitionArrayPrint(std::ostream& os); // NOLINT
+#endif
+
+#ifdef VERIFY_HEAP
+ void TransitionArrayVerify();
+#endif
+
#ifdef DEBUG
bool IsSortedNoDuplicates(int valid_entries = -1);
static bool IsSortedNoDuplicates(Map* map);
@@ -198,9 +208,10 @@ class TransitionArray: public FixedArray {
private:
// Layout for full transition arrays.
- static const int kPrototypeTransitionsIndex = 0;
- static const int kTransitionLengthIndex = 1;
- static const int kFirstIndex = 2;
+ static const int kNextLinkIndex = 0;
+ static const int kPrototypeTransitionsIndex = 1;
+ static const int kTransitionLengthIndex = 2;
+ static const int kFirstIndex = 3;
// Layout of map transition entries in full transition arrays.
static const int kTransitionKey = 0;
@@ -272,6 +283,11 @@ class TransitionArray: public FixedArray {
static void SetPrototypeTransitions(Handle<Map> map,
Handle<FixedArray> proto_transitions);
+ static bool CompactPrototypeTransitionArray(FixedArray* array);
+
+ static Handle<FixedArray> GrowPrototypeTransitionArray(
+ Handle<FixedArray> array, int new_capacity, Isolate* isolate);
+
// Compares two tuples <key, kind, attributes>, returns -1 if
// tuple1 is "less" than tuple2, 0 if tuple1 equal to tuple2 and 1 otherwise.
static inline int CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
@@ -291,20 +307,14 @@ class TransitionArray: public FixedArray {
PropertyKind kind2,
PropertyAttributes attributes2);
- inline void NoIncrementalWriteBarrierSet(int transition_number,
- Name* key,
- Map* target);
-
- // Copy a single transition from the origin array.
- inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition);
+ inline void Set(int transition_number, Name* key, Map* target);
#ifdef DEBUG
static void CheckNewTransitionsAreConsistent(Handle<Map> map,
TransitionArray* old_transitions,
Object* transitions);
#endif
+ static void ZapTransitionArray(TransitionArray* transitions);
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
};
diff --git a/deps/v8/src/type-cache.h b/deps/v8/src/type-cache.h
index 5eed557f7d..1b3a26033b 100644
--- a/deps/v8/src/type-cache.h
+++ b/deps/v8/src/type-cache.h
@@ -42,6 +42,7 @@ class TypeCache final {
Type* const kSingletonZero = CreateRange(0.0, 0.0);
Type* const kSingletonOne = CreateRange(1.0, 1.0);
Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
Type* const kZeroish =
Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
@@ -52,9 +53,44 @@ class TypeCache final {
Type* const kIntegerOrMinusZeroOrNaN =
Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
+ Type* const kAdditiveSafeInteger =
+ CreateRange(-4503599627370496.0, 4503599627370496.0);
+ Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
- Type* const kIntegral32 = Type::Union(kInt32, kUint32, zone());
+ Type* const kUntaggedUndefined =
+ Type::Intersect(Type::Undefined(), Type::Untagged(), zone());
+
+ // Asm.js related types.
+ Type* const kAsmSigned = kInt32;
+ Type* const kAsmUnsigned = kUint32;
+ Type* const kAsmInt = Type::Union(kAsmSigned, kAsmUnsigned, zone());
+ Type* const kAsmFixnum = Type::Intersect(kAsmSigned, kAsmUnsigned, zone());
+ Type* const kAsmFloat = kFloat32;
+ Type* const kAsmDouble = kFloat64;
+ Type* const kAsmFloatQ = Type::Union(kAsmFloat, kUntaggedUndefined, zone());
+ Type* const kAsmDoubleQ = Type::Union(kAsmDouble, kUntaggedUndefined, zone());
+ // Not part of the Asm.js type hierarchy, but represents a part of what
+ // intish encompasses.
+ Type* const kAsmIntQ = Type::Union(kAsmInt, kUntaggedUndefined, zone());
+ Type* const kAsmFloatDoubleQ = Type::Union(kAsmFloatQ, kAsmDoubleQ, zone());
+ // Asm.js size unions.
+ Type* const kAsmSize8 = Type::Union(kInt8, kUint8, zone());
+ Type* const kAsmSize16 = Type::Union(kInt16, kUint16, zone());
+ Type* const kAsmSize32 =
+ Type::Union(Type::Union(kInt32, kUint32, zone()), kAsmFloat, zone());
+ Type* const kAsmSize64 = kFloat64;
+ // Asm.js other types.
+ Type* const kAsmComparable = Type::Union(
+ kAsmSigned,
+ Type::Union(kAsmUnsigned, Type::Union(kAsmDouble, kAsmFloat, zone()),
+ zone()),
+ zone());
+ Type* const kAsmIntArrayElement =
+ Type::Union(Type::Union(kInt8, kUint8, zone()),
+ Type::Union(Type::Union(kInt16, kUint16, zone()),
+ Type::Union(kInt32, kUint32, zone()), zone()),
+ zone());
// The FixedArray::length property always containts a smi in the range
// [0, FixedArray::kMaxLength].
@@ -76,6 +112,10 @@ class TypeCache final {
Type* const kStringLengthType =
CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
+ // When initializing arrays, we'll unfold the loop if the number of
+ // elements is known to be of this type.
+ Type* const kElementLoopUnrollType = CreateRange(0.0, 16.0);
+
#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
Type* const k##TypeName##Array = CreateArray(k##TypeName);
TYPED_ARRAYS(TYPED_ARRAY)
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index fed28b671e..97df1b9ae9 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -81,35 +81,6 @@ FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
}
-int TypeFeedbackVector::ic_with_type_info_count() {
- return length() > 0 ? Smi::cast(get(kWithTypesIndex))->value() : 0;
-}
-
-
-void TypeFeedbackVector::change_ic_with_type_info_count(int delta) {
- if (delta == 0) return;
- int value = ic_with_type_info_count() + delta;
- // Could go negative because of the debugger.
- if (value >= 0) {
- set(kWithTypesIndex, Smi::FromInt(value));
- }
-}
-
-
-int TypeFeedbackVector::ic_generic_count() {
- return length() > 0 ? Smi::cast(get(kGenericCountIndex))->value() : 0;
-}
-
-
-void TypeFeedbackVector::change_ic_generic_count(int delta) {
- if (delta == 0) return;
- int value = ic_generic_count() + delta;
- if (value >= 0) {
- set(kGenericCountIndex, Smi::FromInt(value));
- }
-}
-
-
int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
DCHECK(slot.ToInt() < slot_count());
return kReservedIndexCount + slot.ToInt();
@@ -135,6 +106,34 @@ void TypeFeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
}
+void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic) {
+ Object* uninitialized_sentinel =
+ TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
+ Object* megamorphic_sentinel =
+ *TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
+ int with = 0;
+ int gen = 0;
+ TypeFeedbackMetadataIterator iter(metadata());
+ while (iter.HasNext()) {
+ FeedbackVectorSlot slot = iter.Next();
+ FeedbackVectorSlotKind kind = iter.kind();
+
+ Object* obj = Get(slot);
+ if (obj != uninitialized_sentinel &&
+ kind != FeedbackVectorSlotKind::GENERAL) {
+ if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+ with++;
+ } else if (obj == megamorphic_sentinel) {
+ gen++;
+ }
+ }
+ }
+
+ *with_type_info = with;
+ *generic = gen;
+}
+
+
Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 9fb03bb673..698f2a6d17 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -140,8 +140,6 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set(kMetadataIndex, *metadata);
- array->set(kWithTypesIndex, Smi::FromInt(0));
- array->set(kGenericCountIndex, Smi::FromInt(0));
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
@@ -162,28 +160,6 @@ int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec* spec,
// static
-int TypeFeedbackVector::PushAppliedArgumentsIndex() {
- return kReservedIndexCount;
-}
-
-
-// static
-Handle<TypeFeedbackVector> TypeFeedbackVector::CreatePushAppliedArgumentsVector(
- Isolate* isolate) {
- StaticFeedbackVectorSpec spec;
- FeedbackVectorSlot slot = spec.AddKeyedLoadICSlot();
- // TODO(ishell): allocate this metadata only once.
- Handle<TypeFeedbackMetadata> feedback_metadata =
- TypeFeedbackMetadata::New(isolate, &spec);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::New(isolate, feedback_metadata);
- DCHECK_EQ(PushAppliedArgumentsIndex(), feedback_vector->GetIndex(slot));
- USE(slot);
- return feedback_vector;
-}
-
-
-// static
Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
Isolate* isolate, Handle<TypeFeedbackVector> vector) {
Handle<TypeFeedbackVector> result;
@@ -233,13 +209,11 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
break;
}
case FeedbackVectorSlotKind::STORE_IC: {
- DCHECK(FLAG_vector_stores);
StoreICNexus nexus(this, slot);
nexus.Clear(shared->code());
break;
}
case FeedbackVectorSlotKind::KEYED_STORE_IC: {
- DCHECK(FLAG_vector_stores);
KeyedStoreICNexus nexus(this, slot);
nexus.Clear(shared->code());
break;
@@ -269,7 +243,6 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
// static
void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
- DCHECK(FLAG_vector_stores);
SharedFunctionInfo::Iterator iterator(isolate);
SharedFunctionInfo* shared;
while ((shared = iterator.Next())) {
@@ -293,7 +266,6 @@ void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
if (kind != FeedbackVectorSlotKind::KEYED_STORE_IC) continue;
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- DCHECK(FLAG_vector_stores);
KeyedStoreICNexus nexus(this, slot);
nexus.Clear(host);
}
@@ -520,6 +492,19 @@ void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
}
+void CallICNexus::ConfigureMegamorphic() {
+ FeedbackNexus::ConfigureMegamorphic();
+}
+
+
+void CallICNexus::ConfigureMegamorphic(int call_count) {
+ SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(call_count * kCallCountIncrement),
+ SKIP_WRITE_BARRIER);
+}
+
+
void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
Handle<Code> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
@@ -536,8 +521,8 @@ void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
SetFeedback(*cell);
SetFeedbackExtra(*handler);
} else {
- SetFeedback(*name);
Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
array->set(0, *cell);
array->set(1, *handler);
}
@@ -560,8 +545,8 @@ void KeyedStoreICNexus::ConfigureMonomorphic(Handle<Name> name,
SetFeedback(*cell);
SetFeedbackExtra(*handler);
} else {
- SetFeedback(*name);
Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ SetFeedback(*name);
array->set(0, *cell);
array->set(1, *handler);
}
@@ -590,8 +575,8 @@ void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
} else {
- SetFeedback(*name);
array = EnsureExtraArrayOfSize(receiver_count * 2);
+ SetFeedback(*name);
}
InstallHandlers(array, maps, handlers);
@@ -620,8 +605,8 @@ void KeyedStoreICNexus::ConfigurePolymorphic(Handle<Name> name,
SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
} else {
- SetFeedback(*name);
array = EnsureExtraArrayOfSize(receiver_count * 2);
+ SetFeedback(*name);
}
InstallHandlers(array, maps, handlers);
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 9aca68f71c..d83b77fa3e 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -182,14 +182,9 @@ class TypeFeedbackVector : public FixedArray {
static inline TypeFeedbackVector* cast(Object* obj);
static const int kMetadataIndex = 0;
- static const int kWithTypesIndex = 1;
- static const int kGenericCountIndex = 2;
- static const int kReservedIndexCount = 3;
+ static const int kReservedIndexCount = 1;
- inline int ic_with_type_info_count();
- inline void change_ic_with_type_info_count(int delta);
- inline int ic_generic_count();
- inline void change_ic_generic_count(int delta);
+ inline void ComputeCounts(int* with_type_info, int* generic);
inline bool is_empty() const;
@@ -259,10 +254,6 @@ class TypeFeedbackVector : public FixedArray {
return FeedbackVectorSlot(dummyIndex);
}
- static int PushAppliedArgumentsIndex();
- static Handle<TypeFeedbackVector> CreatePushAppliedArgumentsVector(
- Isolate* isolate);
-
private:
void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
@@ -375,9 +366,9 @@ class FeedbackNexus {
inline Object* GetFeedback() const;
inline Object* GetFeedbackExtra() const;
- protected:
inline Isolate* GetIsolate() const;
+ protected:
inline void SetFeedback(Object* feedback,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void SetFeedbackExtra(Object* feedback_extra,
@@ -399,7 +390,7 @@ class FeedbackNexus {
};
-class CallICNexus : public FeedbackNexus {
+class CallICNexus final : public FeedbackNexus {
public:
// Monomorphic call ics store call counts. Platform code needs to increment
// the count appropriately (ie, by 2).
@@ -418,17 +409,19 @@ class CallICNexus : public FeedbackNexus {
void ConfigureMonomorphicArray();
void ConfigureMonomorphic(Handle<JSFunction> function);
+ void ConfigureMegamorphic() final;
+ void ConfigureMegamorphic(int call_count);
- InlineCacheState StateFromFeedback() const override;
+ InlineCacheState StateFromFeedback() const final;
- int ExtractMaps(MapHandleList* maps) const override {
+ int ExtractMaps(MapHandleList* maps) const final {
// CallICs don't record map feedback.
return 0;
}
- MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const override {
+ MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(CodeHandleList* code_list, int length = -1) const override {
+ bool FindHandlers(CodeHandleList* code_list, int length = -1) const final {
return length == 0;
}
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index c049af18cb..a8a406efde 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -4,7 +4,7 @@
#include "src/type-info.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/ic/ic.h"
@@ -72,19 +72,6 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
}
-InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(TypeFeedbackId id) {
- Handle<Object> maybe_code = GetInfo(id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- if (code->is_inline_cache_stub()) return code->ic_state();
- }
-
- // If we can't find an IC, assume we've seen *something*, but we don't know
- // what. PREMONOMORPHIC roughly encodes this meaning.
- return PREMONOMORPHIC;
-}
-
-
InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
FeedbackVectorSlot slot) {
if (!slot.IsInvalid()) {
@@ -104,14 +91,6 @@ InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
}
-bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
- Handle<Object> maybe_code = GetInfo(ast_id);
- if (!maybe_code->IsCode()) return false;
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- return code->ic_state() == UNINITIALIZED;
-}
-
-
bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
if (!slot.IsInvalid()) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
@@ -157,24 +136,6 @@ byte TypeFeedbackOracle::ForInType(FeedbackVectorSlot feedback_vector_slot) {
void TypeFeedbackOracle::GetStoreModeAndKeyType(
- TypeFeedbackId ast_id, KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type) {
- Handle<Object> maybe_code = GetInfo(ast_id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- if (code->kind() == Code::KEYED_STORE_IC) {
- ExtraICState extra_ic_state = code->extra_ic_state();
- *store_mode = KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state);
- *key_type = KeyedStoreIC::GetKeyType(extra_ic_state);
- return;
- }
- }
- *store_mode = STANDARD_STORE;
- *key_type = ELEMENT;
-}
-
-
-void TypeFeedbackOracle::GetStoreModeAndKeyType(
FeedbackVectorSlot slot, KeyedAccessStoreMode* store_mode,
IcCheckType* key_type) {
if (!slot.IsInvalid() &&
@@ -232,12 +193,6 @@ Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
}
-bool TypeFeedbackOracle::LoadIsBuiltin(
- TypeFeedbackId id, Builtins::Name builtin) {
- return *GetInfo(id) == isolate()->builtins()->builtin(builtin);
-}
-
-
void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Type** left_type,
Type** right_type,
@@ -290,9 +245,9 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
BinaryOpICState state(isolate(), code->extra_ic_state());
DCHECK_EQ(op, state.op());
- *left = state.GetLeftType(zone());
- *right = state.GetRightType(zone());
- *result = state.GetResultType(zone());
+ *left = state.GetLeftType();
+ *right = state.GetRightType();
+ *result = state.GetResultType();
*fixed_right_arg = state.fixed_right_arg();
AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
@@ -310,7 +265,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(isolate(), code->extra_ic_state());
- return state.GetLeftType(zone());
+ return state.GetLeftType();
}
@@ -351,15 +306,6 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
}
-void TypeFeedbackOracle::AssignmentReceiverTypes(TypeFeedbackId id,
- Handle<Name> name,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
- CollectReceiverTypes(id, name, flags, receiver_types);
-}
-
-
void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
@@ -370,15 +316,6 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
- TypeFeedbackId id, SmallMapList* receiver_types,
- KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
- receiver_types->Clear();
- CollectReceiverTypes(id, receiver_types);
- GetStoreModeAndKeyType(id, store_mode, key_type);
-}
-
-
-void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
FeedbackVectorSlot slot, SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
receiver_types->Clear();
@@ -387,13 +324,6 @@ void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
}
-void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types) {
- receiver_types->Clear();
- CollectReceiverTypes(id, receiver_types);
-}
-
-
void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -410,19 +340,6 @@ void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
}
-void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
- Handle<Name> name,
- Code::Flags flags,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (object->IsUndefined() || object->IsSmi()) return;
-
- DCHECK(object->IsCode());
- Handle<Code> code(Handle<Code>::cast(object));
- CollectReceiverTypes<Code>(*code, name, flags, types);
-}
-
-
template <class T>
void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
Code::Flags flags,
@@ -438,15 +355,6 @@ void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
}
-void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
- SmallMapList* types) {
- Handle<Object> object = GetInfo(ast_id);
- if (!object->IsCode()) return;
- Handle<Code> code = Handle<Code>::cast(object);
- CollectReceiverTypes<Code>(*code, types);
-}
-
-
void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* types) {
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 23cf40ff57..13a7f88b66 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -8,7 +8,7 @@
#include "src/allocation.h"
#include "src/contexts.h"
#include "src/globals.h"
-#include "src/token.h"
+#include "src/parsing/token.h"
#include "src/types.h"
#include "src/zone.h"
@@ -25,13 +25,10 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<TypeFeedbackVector> feedback_vector,
Handle<Context> native_context);
- InlineCacheState LoadInlineCacheState(TypeFeedbackId id);
InlineCacheState LoadInlineCacheState(FeedbackVectorSlot slot);
- bool StoreIsUninitialized(TypeFeedbackId id);
bool StoreIsUninitialized(FeedbackVectorSlot slot);
bool CallIsUninitialized(FeedbackVectorSlot slot);
bool CallIsMonomorphic(FeedbackVectorSlot slot);
- bool KeyedArrayCallIsHoley(TypeFeedbackId id);
bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
// TODO(1571) We can't use ForInStatement::ForInType as the return value due
@@ -40,9 +37,6 @@ class TypeFeedbackOracle: public ZoneObject {
// be possible.
byte ForInType(FeedbackVectorSlot feedback_vector_slot);
- void GetStoreModeAndKeyType(TypeFeedbackId id,
- KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type);
void GetStoreModeAndKeyType(FeedbackVectorSlot slot,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
@@ -52,26 +46,16 @@ class TypeFeedbackOracle: public ZoneObject {
void KeyedPropertyReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type);
- void AssignmentReceiverTypes(TypeFeedbackId id, Handle<Name> name,
- SmallMapList* receiver_types);
void AssignmentReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
- void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types,
- KeyedAccessStoreMode* store_mode,
- IcCheckType* key_type);
void KeyedAssignmentReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void CountReceiverTypes(TypeFeedbackId id,
- SmallMapList* receiver_types);
void CountReceiverTypes(FeedbackVectorSlot slot,
SmallMapList* receiver_types);
void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
- void CollectReceiverTypes(TypeFeedbackId id,
- SmallMapList* types);
template <class T>
void CollectReceiverTypes(T* obj, SmallMapList* types);
@@ -87,8 +71,6 @@ class TypeFeedbackOracle: public ZoneObject {
Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
- bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
-
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
// headers!! :-P
@@ -116,8 +98,6 @@ class TypeFeedbackOracle: public ZoneObject {
private:
void CollectReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
Code::Flags flags, SmallMapList* types);
- void CollectReceiverTypes(TypeFeedbackId id, Handle<Name> name,
- Code::Flags flags, SmallMapList* types);
template <class T>
void CollectReceiverTypes(T* obj, Handle<Name> name, Code::Flags flags,
SmallMapList* types);
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index b1002be26a..92610606d5 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -244,6 +244,8 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_ITERATOR_RESULT_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
if (map->is_undetectable()) return kUndetectable;
return kOtherObject;
case JS_FUNCTION_TYPE:
@@ -252,7 +254,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_REGEXP_TYPE:
return kOtherObject; // TODO(rossberg): there should be a RegExp type.
case JS_PROXY_TYPE:
- case JS_FUNCTION_PROXY_TYPE:
return kProxy;
case MAP_TYPE:
// When compiling stub templates, the meta map is used as a place holder
@@ -265,6 +266,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
// We ought to find a cleaner solution for compiling stubs parameterised
// over type or class variables, esp ones with bounds...
return kDetectable & kTaggedPointer;
+ case ALLOCATION_SITE_TYPE:
case DECLARED_ACCESSOR_INFO_TYPE:
case EXECUTABLE_ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
@@ -273,6 +275,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case FIXED_DOUBLE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
+ case TRANSITION_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
@@ -297,7 +300,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case OBJECT_TEMPLATE_INFO_TYPE:
case SIGNATURE_INFO_TYPE:
case TYPE_SWITCH_INFO_TYPE:
- case ALLOCATION_SITE_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case CODE_CACHE_TYPE:
case POLYMORPHIC_CODE_CACHE_TYPE:
diff --git a/deps/v8/src/typing-asm.cc b/deps/v8/src/typing-asm.cc
index b267113400..509ba7b125 100644
--- a/deps/v8/src/typing-asm.cc
+++ b/deps/v8/src/typing-asm.cc
@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/typing-asm.h"
-#include "src/ast.h"
+#include <limits>
+
+#include "src/v8.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
#include "src/type-cache.h"
namespace v8 {
@@ -38,15 +40,23 @@ namespace internal {
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root)
: zone_(zone),
+ isolate_(isolate),
script_(script),
root_(root),
valid_(true),
+ allow_simd_(false),
+ property_info_(NULL),
+ intish_(0),
stdlib_types_(zone),
stdlib_heap_types_(zone),
stdlib_math_types_(zone),
- global_variable_type_(HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
+#define V(NAME, Name, name, lane_count, lane_type) \
+ stdlib_simd_##name##_types_(zone),
+ SIMD128_TYPES(V)
+#undef V
+ global_variable_type_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
local_variable_type_(HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
@@ -68,6 +78,13 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
Scope* scope = fun->scope();
if (!scope->is_function_scope()) FAIL(fun, "not at function scope");
+ ExpressionStatement* use_asm = fun->body()->first()->AsExpressionStatement();
+ if (use_asm == NULL) FAIL(fun, "missing \"use asm\"");
+ Literal* use_asm_literal = use_asm->expression()->AsLiteral();
+ if (use_asm_literal == NULL) FAIL(fun, "missing \"use asm\"");
+ if (!use_asm_literal->raw_value()->AsString()->IsOneByteEqualTo("use asm"))
+ FAIL(fun, "missing \"use asm\"");
+
// Module parameters.
for (int i = 0; i < scope->num_parameters(); ++i) {
Variable* param = scope->parameter(i);
@@ -91,7 +108,10 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
if (decl != NULL) {
RECURSE(VisitFunctionAnnotation(decl->fun()));
Variable* var = decl->proxy()->var();
- DCHECK(GetType(var) == NULL);
+ if (property_info_ != NULL) {
+ SetVariableInfo(var, property_info_);
+ property_info_ = NULL;
+ }
SetType(var, computed_type_);
DCHECK(GetType(var) != NULL);
}
@@ -116,6 +136,9 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
// Validate exports.
ReturnStatement* stmt = fun->body()->last()->AsReturnStatement();
+ if (stmt == nullptr) {
+ FAIL(fun->body()->last(), "last statement in module is not a return");
+ }
RECURSE(VisitWithExpectation(stmt->expression(), Type::Object(),
"expected object export"));
}
@@ -139,6 +162,10 @@ void AsmTyper::VisitFunctionDeclaration(FunctionDeclaration* decl) {
if (in_function_) {
FAIL(decl, "function declared inside another");
}
+ // Set function type so global references to functions have some type
+ // (so they can give a more useful error).
+ Variable* var = decl->proxy()->var();
+ SetType(var, Type::Function(zone()));
}
@@ -149,7 +176,15 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
if (body->length() > 0) {
ReturnStatement* stmt = body->last()->AsReturnStatement();
if (stmt != NULL) {
- RECURSE(VisitExpressionAnnotation(stmt->expression()));
+ Literal* literal = stmt->expression()->AsLiteral();
+ Type* old_expected = expected_type_;
+ expected_type_ = Type::Any();
+ if (literal) {
+ RECURSE(VisitLiteral(literal, true));
+ } else {
+ RECURSE(VisitExpressionAnnotation(stmt->expression(), NULL, true));
+ }
+ expected_type_ = old_expected;
result_type = computed_type_;
}
}
@@ -171,7 +206,11 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
Variable* var = proxy->var();
if (var->location() != VariableLocation::PARAMETER || var->index() != i)
break;
- RECURSE(VisitExpressionAnnotation(expr->value()));
+ RECURSE(VisitExpressionAnnotation(expr->value(), var, false));
+ if (property_info_ != NULL) {
+ SetVariableInfo(var, property_info_);
+ property_info_ = NULL;
+ }
SetType(var, computed_type_);
type->InitParameter(i, computed_type_);
good = true;
@@ -182,24 +221,38 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
}
-void AsmTyper::VisitExpressionAnnotation(Expression* expr) {
+void AsmTyper::VisitExpressionAnnotation(Expression* expr, Variable* var,
+ bool is_return) {
// Normal +x or x|0 annotations.
BinaryOperation* bin = expr->AsBinaryOperation();
if (bin != NULL) {
+ if (var != NULL) {
+ VariableProxy* proxy = bin->left()->AsVariableProxy();
+ if (proxy == NULL) {
+ FAIL(bin->left(), "expected variable for type annotation");
+ }
+ if (proxy->var() != var) {
+ FAIL(proxy, "annotation source doesn't match destination");
+ }
+ }
Literal* right = bin->right()->AsLiteral();
if (right != NULL) {
switch (bin->op()) {
- case Token::MUL: // We encode +x as 1*x
+ case Token::MUL: // We encode +x as x*1.0
if (right->raw_value()->ContainsDot() &&
right->raw_value()->AsNumber() == 1.0) {
- SetResult(expr, cache_.kFloat64);
+ SetResult(expr, cache_.kAsmDouble);
return;
}
break;
case Token::BIT_OR:
if (!right->raw_value()->ContainsDot() &&
right->raw_value()->AsNumber() == 0.0) {
- SetResult(expr, cache_.kInt32);
+ if (is_return) {
+ SetResult(expr, cache_.kAsmSigned);
+ } else {
+ SetResult(expr, cache_.kAsmInt);
+ }
return;
}
break;
@@ -218,19 +271,28 @@ void AsmTyper::VisitExpressionAnnotation(Expression* expr) {
Call* call = expr->AsCall();
if (call != NULL) {
- if (call->expression()->IsVariableProxy()) {
- RECURSE(VisitWithExpectation(
- call->expression(), Type::Any(zone()),
- "only fround allowed on expression annotations"));
- if (!computed_type_->Is(
- Type::Function(cache_.kFloat32, Type::Number(zone()), zone()))) {
- FAIL(call->expression(),
- "only fround allowed on expression annotations");
+ VariableProxy* proxy = call->expression()->AsVariableProxy();
+ if (proxy != NULL) {
+ VariableInfo* info = GetVariableInfo(proxy->var(), false);
+ if (!info ||
+ (!info->is_check_function && !info->is_constructor_function)) {
+ if (allow_simd_) {
+ FAIL(call->expression(),
+ "only fround/SIMD.checks allowed on expression annotations");
+ } else {
+ FAIL(call->expression(),
+ "only fround allowed on expression annotations");
+ }
+ }
+ Type* type = info->type;
+ DCHECK(type->IsFunction());
+ if (info->is_check_function) {
+ DCHECK(type->AsFunction()->Arity() == 1);
}
- if (call->arguments()->length() != 1) {
- FAIL(call, "invalid argument count calling fround");
+ if (call->arguments()->length() != type->AsFunction()->Arity()) {
+ FAIL(call, "invalid argument count calling function");
}
- SetResult(expr, cache_.kFloat32);
+ SetResult(expr, type->AsFunction()->Result());
return;
}
}
@@ -274,7 +336,7 @@ void AsmTyper::VisitIfStatement(IfStatement* stmt) {
if (!in_function_) {
FAIL(stmt, "if statement inside module body");
}
- RECURSE(VisitWithExpectation(stmt->condition(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->condition(), cache_.kAsmSigned,
"if condition expected to be integer"));
RECURSE(Visit(stmt->then_statement()));
RECURSE(Visit(stmt->else_statement()));
@@ -300,9 +362,17 @@ void AsmTyper::VisitReturnStatement(ReturnStatement* stmt) {
if (!in_function_) {
return;
}
- RECURSE(
- VisitWithExpectation(stmt->expression(), return_type_,
- "return expression expected to have return type"));
+ Literal* literal = stmt->expression()->AsLiteral();
+ if (literal) {
+ VisitLiteral(literal, true);
+ } else {
+ RECURSE(
+ VisitWithExpectation(stmt->expression(), Type::Any(),
+ "return expression expected to have return type"));
+ }
+ if (!computed_type_->Is(return_type_) || !return_type_->Is(computed_type_)) {
+ FAIL(stmt->expression(), "return type does not match function signature");
+ }
}
@@ -315,23 +385,40 @@ void AsmTyper::VisitSwitchStatement(SwitchStatement* stmt) {
if (!in_function_) {
FAIL(stmt, "switch statement inside module body");
}
- RECURSE(VisitWithExpectation(stmt->tag(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->tag(), cache_.kAsmSigned,
"switch expression non-integer"));
ZoneList<CaseClause*>* clauses = stmt->cases();
+ ZoneSet<int32_t> cases(zone());
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
- Expression* label = clause->label();
- RECURSE(
- VisitWithExpectation(label, cache_.kInt32, "case label non-integer"));
- if (!label->IsLiteral()) FAIL(label, "non-literal case label");
- Handle<Object> value = label->AsLiteral()->value();
- int32_t value32;
- if (!value->ToInt32(&value32)) FAIL(label, "illegal case label value");
+ if (clause->is_default()) {
+ if (i != clauses->length() - 1) {
+ FAIL(clause, "default case out of order");
+ }
+ } else {
+ Expression* label = clause->label();
+ RECURSE(VisitWithExpectation(label, cache_.kAsmSigned,
+ "case label non-integer"));
+ if (!label->IsLiteral()) FAIL(label, "non-literal case label");
+ Handle<Object> value = label->AsLiteral()->value();
+ int32_t value32;
+ if (!value->ToInt32(&value32)) FAIL(label, "illegal case label value");
+ if (cases.find(value32) != cases.end()) {
+ FAIL(label, "duplicate case value");
+ }
+ cases.insert(value32);
+ }
// TODO(bradnelson): Detect duplicates.
ZoneList<Statement*>* stmts = clause->statements();
RECURSE(VisitStatements(stmts));
}
+ if (cases.size() > 0) {
+ int64_t min_case = *cases.begin();
+ int64_t max_case = *cases.rbegin();
+ if (max_case - min_case > std::numeric_limits<int32_t>::max()) {
+ FAIL(stmt, "case range too large");
+ }
+ }
}
@@ -343,7 +430,7 @@ void AsmTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
FAIL(stmt, "do statement inside module body");
}
RECURSE(Visit(stmt->body()));
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
"do condition expected to be integer"));
}
@@ -352,7 +439,7 @@ void AsmTyper::VisitWhileStatement(WhileStatement* stmt) {
if (!in_function_) {
FAIL(stmt, "while statement inside module body");
}
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
"while condition expected to be integer"));
RECURSE(Visit(stmt->body()));
}
@@ -366,7 +453,7 @@ void AsmTyper::VisitForStatement(ForStatement* stmt) {
RECURSE(Visit(stmt->init()));
}
if (stmt->cond() != NULL) {
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
"for condition expected to be integer"));
}
if (stmt->next() != NULL) {
@@ -436,56 +523,81 @@ void AsmTyper::VisitDoExpression(DoExpression* expr) {
void AsmTyper::VisitConditional(Conditional* expr) {
- RECURSE(VisitWithExpectation(expr->condition(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(expr->condition(), Type::Number(),
"condition expected to be integer"));
+ if (!computed_type_->Is(cache_.kAsmInt)) {
+ FAIL(expr->condition(), "condition must be of type int");
+ }
+
RECURSE(VisitWithExpectation(
expr->then_expression(), expected_type_,
"conditional then branch type mismatch with enclosing expression"));
- Type* then_type = computed_type_;
+ Type* then_type = StorageType(computed_type_);
+ if (intish_ != 0 || !then_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->then_expression(), "invalid type in ? then expression");
+ }
+
RECURSE(VisitWithExpectation(
expr->else_expression(), expected_type_,
"conditional else branch type mismatch with enclosing expression"));
- Type* else_type = computed_type_;
- Type* type = Type::Union(then_type, else_type, zone());
- if (!(type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
- type->Is(cache_.kFloat32) || type->Is(cache_.kFloat64))) {
- FAIL(expr, "ill-typed conditional");
+ Type* else_type = StorageType(computed_type_);
+ if (intish_ != 0 || !else_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->else_expression(), "invalid type in ? else expression");
}
- IntersectResult(expr, type);
+
+ if (!then_type->Is(else_type) || !else_type->Is(then_type)) {
+ FAIL(expr, "then and else expressions in ? must have the same type");
+ }
+
+ IntersectResult(expr, then_type);
}
void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->var();
- if (GetType(var) == NULL) {
- FAIL(expr, "unbound variable");
+ VariableInfo* info = GetVariableInfo(var, false);
+ if (info == NULL || info->type == NULL) {
+ if (var->mode() == TEMPORARY) {
+ SetType(var, Type::Any(zone()));
+ info = GetVariableInfo(var, false);
+ } else {
+ FAIL(expr, "unbound variable");
+ }
}
- Type* type = Type::Intersect(GetType(var), expected_type_, zone());
- if (type->Is(cache_.kInt32)) {
- type = cache_.kInt32;
+ if (property_info_ != NULL) {
+ SetVariableInfo(var, property_info_);
+ property_info_ = NULL;
}
- SetType(var, type);
+ Type* type = Type::Intersect(info->type, expected_type_, zone());
+ if (type->Is(cache_.kAsmInt)) {
+ type = cache_.kAsmInt;
+ }
+ info->type = type;
intish_ = 0;
IntersectResult(expr, type);
}
-void AsmTyper::VisitLiteral(Literal* expr) {
+void AsmTyper::VisitLiteral(Literal* expr, bool is_return) {
intish_ = 0;
Handle<Object> value = expr->value();
if (value->IsNumber()) {
int32_t i;
uint32_t u;
if (expr->raw_value()->ContainsDot()) {
- IntersectResult(expr, cache_.kFloat64);
- } else if (value->ToUint32(&u)) {
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmDouble);
+ } else if (!is_return && value->ToUint32(&u)) {
+ if (u <= 0x7fffffff) {
+ IntersectResult(expr, cache_.kAsmFixnum);
+ } else {
+ IntersectResult(expr, cache_.kAsmUnsigned);
+ }
} else if (value->ToInt32(&i)) {
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmSigned);
} else {
FAIL(expr, "illegal number");
}
- } else if (value->IsString()) {
+ } else if (!is_return && value->IsString()) {
IntersectResult(expr, Type::String());
} else if (value->IsUndefined()) {
IntersectResult(expr, Type::Undefined());
@@ -495,6 +607,9 @@ void AsmTyper::VisitLiteral(Literal* expr) {
}
+void AsmTyper::VisitLiteral(Literal* expr) { VisitLiteral(expr, false); }
+
+
void AsmTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
FAIL(expr, "regular expression encountered");
}
@@ -555,15 +670,23 @@ void AsmTyper::VisitAssignment(Assignment* expr) {
Type* type = expected_type_;
RECURSE(VisitWithExpectation(
expr->value(), type, "assignment value expected to match surrounding"));
+ Type* target_type = StorageType(computed_type_);
if (intish_ != 0) {
- FAIL(expr, "value still an intish");
+ FAIL(expr, "intish or floatish assignment");
}
- RECURSE(VisitWithExpectation(expr->target(), computed_type_,
- "assignment target expected to match value"));
- if (intish_ != 0) {
- FAIL(expr, "value still an intish");
+ if (expr->target()->IsVariableProxy()) {
+ RECURSE(VisitWithExpectation(expr->target(), target_type,
+ "assignment target expected to match value"));
+ } else if (expr->target()->IsProperty()) {
+ Property* property = expr->target()->AsProperty();
+ RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
+ "bad propety object"));
+ if (!computed_type_->IsArray()) {
+ FAIL(property->obj(), "array expected");
+ }
+ VisitHeapAccess(property, true, target_type);
}
- IntersectResult(expr, computed_type_);
+ IntersectResult(expr, target_type);
}
@@ -578,137 +701,206 @@ void AsmTyper::VisitThrow(Throw* expr) {
int AsmTyper::ElementShiftSize(Type* type) {
- if (type->Is(cache_.kInt8) || type->Is(cache_.kUint8)) return 0;
- if (type->Is(cache_.kInt16) || type->Is(cache_.kUint16)) return 1;
- if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
- type->Is(cache_.kFloat32))
- return 2;
- if (type->Is(cache_.kFloat64)) return 3;
+ if (type->Is(cache_.kAsmSize8)) return 0;
+ if (type->Is(cache_.kAsmSize16)) return 1;
+ if (type->Is(cache_.kAsmSize32)) return 2;
+ if (type->Is(cache_.kAsmSize64)) return 3;
return -1;
}
-void AsmTyper::VisitHeapAccess(Property* expr) {
+Type* AsmTyper::StorageType(Type* type) {
+ if (type->Is(cache_.kAsmInt)) {
+ return cache_.kAsmInt;
+ } else {
+ return type;
+ }
+}
+
+
+void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
+ Type* assignment_type) {
Type::ArrayType* array_type = computed_type_->AsArray();
size_t size = array_size_;
Type* type = array_type->AsArray()->Element();
if (type->IsFunction()) {
+ if (assigning) {
+ FAIL(expr, "assigning to function table is illegal");
+ }
BinaryOperation* bin = expr->key()->AsBinaryOperation();
if (bin == NULL || bin->op() != Token::BIT_AND) {
FAIL(expr->key(), "expected & in call");
}
- RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
"array index expected to be integer"));
Literal* right = bin->right()->AsLiteral();
if (right == NULL || right->raw_value()->ContainsDot()) {
FAIL(right, "call mask must be integer");
}
- RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
"call mask expected to be integer"));
if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
FAIL(right, "call mask must match function table");
}
- bin->set_bounds(Bounds(cache_.kInt32));
+ bin->set_bounds(Bounds(cache_.kAsmSigned));
+ IntersectResult(expr, type);
} else {
Literal* literal = expr->key()->AsLiteral();
if (literal) {
- RECURSE(VisitWithExpectation(literal, cache_.kInt32,
+ RECURSE(VisitWithExpectation(literal, cache_.kAsmSigned,
"array index expected to be integer"));
} else {
BinaryOperation* bin = expr->key()->AsBinaryOperation();
if (bin == NULL || bin->op() != Token::SAR) {
FAIL(expr->key(), "expected >> in heap access");
}
- RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
"array index expected to be integer"));
Literal* right = bin->right()->AsLiteral();
if (right == NULL || right->raw_value()->ContainsDot()) {
FAIL(right, "heap access shift must be integer");
}
- RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
"array shift expected to be integer"));
int n = static_cast<int>(right->raw_value()->AsNumber());
int expected_shift = ElementShiftSize(type);
if (expected_shift < 0 || n != expected_shift) {
FAIL(right, "heap access shift must match element size");
}
- bin->set_bounds(Bounds(cache_.kInt32));
+ bin->set_bounds(Bounds(cache_.kAsmSigned));
+ }
+ Type* result_type;
+ if (type->Is(cache_.kAsmIntArrayElement)) {
+ result_type = cache_.kAsmIntQ;
+ intish_ = kMaxUncombinedAdditiveSteps;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ if (assigning) {
+ result_type = cache_.kAsmFloatDoubleQ;
+ } else {
+ result_type = cache_.kAsmFloatQ;
+ }
+ intish_ = 0;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ if (assigning) {
+ result_type = cache_.kAsmFloatDoubleQ;
+ if (intish_ != 0) {
+ FAIL(expr, "Assignment of floatish to Float64Array");
+ }
+ } else {
+ result_type = cache_.kAsmDoubleQ;
+ }
+ intish_ = 0;
+ } else {
+ UNREACHABLE();
+ }
+ if (assigning) {
+ if (!assignment_type->Is(result_type)) {
+ FAIL(expr, "illegal type in assignment");
+ }
+ } else {
+ IntersectResult(expr, expected_type_);
+ IntersectResult(expr, result_type);
}
}
- IntersectResult(expr, type);
}
-void AsmTyper::VisitProperty(Property* expr) {
- // stdlib.Math.x
- Property* inner_prop = expr->obj()->AsProperty();
- if (inner_prop != NULL) {
- // Get property name.
- Literal* key = expr->key()->AsLiteral();
- if (key == NULL || !key->IsPropertyName())
- FAIL(expr, "invalid type annotation on property 2");
- Handle<String> name = key->AsPropertyName();
-
- // Check that inner property name is "Math".
- Literal* math_key = inner_prop->key()->AsLiteral();
- if (math_key == NULL || !math_key->IsPropertyName() ||
- !math_key->AsPropertyName()->IsUtf8EqualTo(CStrVector("Math")))
- FAIL(expr, "invalid type annotation on stdlib (a1)");
-
- // Check that object is stdlib.
- VariableProxy* proxy = inner_prop->obj()->AsVariableProxy();
- if (proxy == NULL) FAIL(expr, "invalid type annotation on stdlib (a2)");
- Variable* var = proxy->var();
- if (var->location() != VariableLocation::PARAMETER || var->index() != 0)
- FAIL(expr, "invalid type annotation on stdlib (a3)");
+bool AsmTyper::IsStdlibObject(Expression* expr) {
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy == NULL) {
+ return false;
+ }
+ Variable* var = proxy->var();
+ VariableInfo* info = GetVariableInfo(var, false);
+ if (info) {
+ if (info->standard_member == kStdlib) return true;
+ }
+ if (var->location() != VariableLocation::PARAMETER || var->index() != 0) {
+ return false;
+ }
+ info = GetVariableInfo(var, true);
+ info->type = Type::Object();
+ info->standard_member = kStdlib;
+ return true;
+}
+
+
+Expression* AsmTyper::GetReceiverOfPropertyAccess(Expression* expr,
+ const char* name) {
+ Property* property = expr->AsProperty();
+ if (property == NULL) {
+ return NULL;
+ }
+ Literal* key = property->key()->AsLiteral();
+ if (key == NULL || !key->IsPropertyName() ||
+ !key->AsPropertyName()->IsUtf8EqualTo(CStrVector(name))) {
+ return NULL;
+ }
+ return property->obj();
+}
+
+
+bool AsmTyper::IsMathObject(Expression* expr) {
+ Expression* obj = GetReceiverOfPropertyAccess(expr, "Math");
+ return obj && IsStdlibObject(obj);
+}
+
+
+bool AsmTyper::IsSIMDObject(Expression* expr) {
+ Expression* obj = GetReceiverOfPropertyAccess(expr, "SIMD");
+ return obj && IsStdlibObject(obj);
+}
+
+
+bool AsmTyper::IsSIMDTypeObject(Expression* expr, const char* name) {
+ Expression* obj = GetReceiverOfPropertyAccess(expr, name);
+ return obj && IsSIMDObject(obj);
+}
- // Look up library type.
- Type* type = LibType(stdlib_math_types_, name);
- if (type == NULL) FAIL(expr, "unknown standard function 3 ");
- SetResult(expr, type);
+
+void AsmTyper::VisitProperty(Property* expr) {
+ if (IsMathObject(expr->obj())) {
+ VisitLibraryAccess(&stdlib_math_types_, expr);
+ return;
+ }
+#define V(NAME, Name, name, lane_count, lane_type) \
+ if (IsSIMDTypeObject(expr->obj(), #Name)) { \
+ VisitLibraryAccess(&stdlib_simd_##name##_types_, expr); \
+ return; \
+ } \
+ if (IsSIMDTypeObject(expr, #Name)) { \
+ VariableInfo* info = stdlib_simd_##name##_constructor_type_; \
+ SetResult(expr, info->type); \
+ property_info_ = info; \
+ return; \
+ }
+ SIMD128_TYPES(V)
+#undef V
+ if (IsStdlibObject(expr->obj())) {
+ VisitLibraryAccess(&stdlib_types_, expr);
return;
}
+ property_info_ = NULL;
+
// Only recurse at this point so that we avoid needing
// stdlib.Math to have a real type.
- RECURSE(VisitWithExpectation(expr->obj(), Type::Any(),
- "property holder expected to be object"));
+ RECURSE(VisitWithExpectation(expr->obj(), Type::Any(), "bad propety object"));
// For heap view or function table access.
if (computed_type_->IsArray()) {
- VisitHeapAccess(expr);
+ VisitHeapAccess(expr, false, NULL);
return;
}
- // Get property name.
- Literal* key = expr->key()->AsLiteral();
- if (key == NULL || !key->IsPropertyName())
- FAIL(expr, "invalid type annotation on property 3");
- Handle<String> name = key->AsPropertyName();
-
// stdlib.x or foreign.x
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy != NULL) {
Variable* var = proxy->var();
- if (var->location() != VariableLocation::PARAMETER) {
- FAIL(expr, "invalid type annotation on variable");
- }
- switch (var->index()) {
- case 0: {
- // Object is stdlib, look up library type.
- Type* type = LibType(stdlib_types_, name);
- if (type == NULL) {
- FAIL(expr, "unknown standard function 4");
- }
- SetResult(expr, type);
- return;
- }
- case 1:
- // Object is foreign lib.
- SetResult(expr, expected_type_);
- return;
- default:
- FAIL(expr, "invalid type annotation on parameter");
+ if (var->location() == VariableLocation::PARAMETER && var->index() == 1) {
+ // foreign.x is ok.
+ SetResult(expr, expected_type_);
+ return;
}
}
@@ -719,8 +911,20 @@ void AsmTyper::VisitProperty(Property* expr) {
void AsmTyper::VisitCall(Call* expr) {
RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
"callee expected to be any"));
+ StandardMember standard_member = kNone;
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ if (proxy) {
+ standard_member = VariableAsStandardMember(proxy->var());
+ }
+ if (!in_function_ && (proxy == NULL || standard_member != kMathFround)) {
+ FAIL(expr, "calls forbidden outside function bodies");
+ }
+ if (proxy == NULL && !expr->expression()->IsProperty()) {
+ FAIL(expr, "calls must be to bound variables or function tables");
+ }
if (computed_type_->IsFunction()) {
Type::FunctionType* fun_type = computed_type_->AsFunction();
+ Type* result_type = fun_type->Result();
ZoneList<Expression*>* args = expr->arguments();
if (fun_type->Arity() != args->length()) {
FAIL(expr, "call with wrong arity");
@@ -730,8 +934,36 @@ void AsmTyper::VisitCall(Call* expr) {
RECURSE(VisitWithExpectation(
arg, fun_type->Parameter(i),
"call argument expected to match callee parameter"));
+ if (standard_member != kNone && standard_member != kMathFround &&
+ i == 0) {
+ result_type = computed_type_;
+ }
}
- IntersectResult(expr, fun_type->Result());
+ // Handle polymorphic stdlib functions specially.
+ if (standard_member == kMathCeil || standard_member == kMathFloor ||
+ standard_member == kMathSqrt) {
+ if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
+ FAIL(expr, "illegal function argument type");
+ }
+ } else if (standard_member == kMathAbs || standard_member == kMathMin ||
+ standard_member == kMathMax) {
+ if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
+ FAIL(expr, "illegal function argument type");
+ }
+ if (args->length() > 1) {
+ Type* other = Type::Intersect(args->at(0)->bounds().upper,
+ args->at(1)->bounds().upper, zone());
+ if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
+ !other->Is(cache_.kAsmSigned)) {
+ FAIL(expr, "function arguments types don't match");
+ }
+ }
+ }
+ intish_ = 0;
+ IntersectResult(expr, result_type);
} else if (computed_type_->Is(Type::Any())) {
// For foreign calls.
ZoneList<Expression*>* args = expr->arguments();
@@ -740,6 +972,7 @@ void AsmTyper::VisitCall(Call* expr) {
RECURSE(VisitWithExpectation(arg, Type::Any(),
"foreign call argument expected to be any"));
}
+ intish_ = kMaxUncombinedAdditiveSteps;
IntersectResult(expr, Type::Number());
} else {
FAIL(expr, "invalid callee");
@@ -780,9 +1013,9 @@ void AsmTyper::VisitCallRuntime(CallRuntime* expr) {
void AsmTyper::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT: // Used to encode != and !==
- RECURSE(VisitWithExpectation(expr->expression(), cache_.kInt32,
+ RECURSE(VisitWithExpectation(expr->expression(), cache_.kAsmInt,
"operand expected to be integer"));
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmSigned);
return;
case Token::DELETE:
FAIL(expr, "delete operator encountered");
@@ -805,24 +1038,40 @@ void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
Type* left_expected,
Type* right_expected,
Type* result_type, bool conversion) {
- RECURSE(VisitWithExpectation(expr->left(), left_expected,
- "left bit operand expected to be integer"));
+ RECURSE(VisitWithExpectation(expr->left(), Type::Number(),
+ "left bitwise operand expected to be a number"));
int left_intish = intish_;
Type* left_type = computed_type_;
- RECURSE(VisitWithExpectation(expr->right(), right_expected,
- "right bit operand expected to be integer"));
+ if (!left_type->Is(left_expected)) {
+ FAIL(expr->left(), "left bitwise operand expected to be an integer");
+ }
+ if (left_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr->left(), "too many consecutive additive ops");
+ }
+
+ RECURSE(
+ VisitWithExpectation(expr->right(), Type::Number(),
+ "right bitwise operand expected to be a number"));
int right_intish = intish_;
Type* right_type = computed_type_;
- if (left_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
+ if (!right_type->Is(right_expected)) {
+ FAIL(expr->right(), "right bitwise operand expected to be an integer");
}
if (right_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
+ FAIL(expr->right(), "too many consecutive additive ops");
}
+
intish_ = 0;
+
+ if (left_type->Is(cache_.kAsmFixnum) && right_type->Is(cache_.kAsmInt)) {
+ left_type = right_type;
+ }
+ if (right_type->Is(cache_.kAsmFixnum) && left_type->Is(cache_.kAsmInt)) {
+ right_type = left_type;
+ }
if (!conversion) {
if (!left_type->Is(right_type) || !right_type->Is(left_type)) {
- FAIL(expr, "ill typed bitwise operation");
+ FAIL(expr, "ill-typed bitwise operation");
}
}
IntersectResult(expr, result_type);
@@ -841,29 +1090,42 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
}
case Token::OR:
case Token::AND:
- FAIL(expr, "logical operator encountered");
+ FAIL(expr, "illegal logical operator");
case Token::BIT_OR: {
// BIT_OR allows Any since it is used as a type coercion.
- VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kIntegral32,
- cache_.kInt32, true);
+ VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmInt,
+ cache_.kAsmSigned, true);
return;
}
case Token::BIT_XOR: {
+ // Handle booleans specially to handle de-sugared !
+ Literal* left = expr->left()->AsLiteral();
+ if (left && left->value()->IsBoolean()) {
+ if (left->ToBooleanIsTrue()) {
+ left->set_bounds(Bounds(cache_.kSingletonOne));
+ RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmInt,
+ "not operator expects an integer"));
+ IntersectResult(expr, cache_.kAsmSigned);
+ return;
+ } else {
+ FAIL(left, "unexpected false");
+ }
+ }
// BIT_XOR allows Number since it is used as a type coercion (via ~~).
- VisitIntegerBitwiseOperator(expr, Type::Number(), cache_.kIntegral32,
- cache_.kInt32, true);
+ VisitIntegerBitwiseOperator(expr, Type::Number(), cache_.kAsmInt,
+ cache_.kAsmSigned, true);
return;
}
case Token::SHR: {
- VisitIntegerBitwiseOperator(expr, cache_.kIntegral32, cache_.kIntegral32,
- cache_.kUint32, false);
+ VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
+ cache_.kAsmUnsigned, false);
return;
}
case Token::SHL:
case Token::SAR:
case Token::BIT_AND: {
- VisitIntegerBitwiseOperator(expr, cache_.kIntegral32, cache_.kIntegral32,
- cache_.kInt32, false);
+ VisitIntegerBitwiseOperator(expr, cache_.kAsmInt, cache_.kAsmInt,
+ cache_.kAsmSigned, false);
return;
}
case Token::ADD:
@@ -882,13 +1144,25 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
Type* right_type = computed_type_;
int right_intish = intish_;
Type* type = Type::Union(left_type, right_type, zone());
- if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32)) {
+ if (type->Is(cache_.kAsmInt)) {
if (expr->op() == Token::MUL) {
- if (!expr->left()->IsLiteral() && !expr->right()->IsLiteral()) {
+ Literal* right = expr->right()->AsLiteral();
+ if (!right) {
FAIL(expr, "direct integer multiply forbidden");
}
- intish_ = 0;
- IntersectResult(expr, cache_.kInt32);
+ if (!right->value()->IsNumber()) {
+ FAIL(expr, "multiply must be by an integer");
+ }
+ int32_t i;
+ if (!right->value()->ToInt32(&i)) {
+ FAIL(expr, "multiply must be a signed integer");
+ }
+ i = abs(i);
+ if (i >= 1 << 20) {
+ FAIL(expr, "multiply must be by value in -2^20 < n < 2^20");
+ }
+ intish_ = i;
+ IntersectResult(expr, cache_.kAsmInt);
return;
} else {
intish_ = left_intish + right_intish + 1;
@@ -901,20 +1175,23 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
FAIL(expr, "too many consecutive multiplicative ops");
}
}
- IntersectResult(expr, cache_.kInt32);
+ IntersectResult(expr, cache_.kAsmInt);
return;
}
- } else if (expr->op() == Token::MUL &&
- left_type->Is(cache_.kIntegral32) &&
- right_type->Is(cache_.kFloat64)) {
+ } else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
+ right_type->Is(cache_.kAsmDouble)) {
// For unary +, expressed as x * 1.0
- IntersectResult(expr, cache_.kFloat64);
+ IntersectResult(expr, cache_.kAsmDouble);
return;
- } else if (type->Is(cache_.kFloat32) && expr->op() != Token::MOD) {
- IntersectResult(expr, cache_.kFloat32);
+ } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
+ if (left_intish != 0 || right_intish != 0) {
+ FAIL(expr, "float operation before required fround");
+ }
+ IntersectResult(expr, cache_.kAsmFloat);
+ intish_ = 1;
return;
- } else if (type->Is(cache_.kFloat64)) {
- IntersectResult(expr, cache_.kFloat64);
+ } else if (type->Is(cache_.kAsmDouble)) {
+ IntersectResult(expr, cache_.kAsmDouble);
return;
} else {
FAIL(expr, "ill-typed arithmetic operation");
@@ -927,22 +1204,33 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
+ Token::Value op = expr->op();
+ if (op != Token::EQ && op != Token::NE && op != Token::LT &&
+ op != Token::LTE && op != Token::GT && op != Token::GTE) {
+ FAIL(expr, "illegal comparison operator");
+ }
+
RECURSE(
VisitWithExpectation(expr->left(), Type::Number(),
"left comparison operand expected to be number"));
Type* left_type = computed_type_;
+ if (!left_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->left(), "bad type on left side of comparison");
+ }
+
RECURSE(
VisitWithExpectation(expr->right(), Type::Number(),
"right comparison operand expected to be number"));
Type* right_type = computed_type_;
- Type* type = Type::Union(left_type, right_type, zone());
- expr->set_combined_type(type);
- if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
- type->Is(cache_.kFloat32) || type->Is(cache_.kFloat64)) {
- IntersectResult(expr, cache_.kInt32);
- } else {
- FAIL(expr, "ill-typed comparison operation");
+ if (!right_type->Is(cache_.kAsmComparable)) {
+ FAIL(expr->right(), "bad type on right side of comparison");
+ }
+
+ if (!left_type->Is(right_type) && !right_type->Is(left_type)) {
+ FAIL(expr, "left and right side of comparison must match");
}
+
+ IntersectResult(expr, cache_.kAsmSigned);
}
@@ -987,64 +1275,115 @@ void AsmTyper::VisitSuperCallReference(SuperCallReference* expr) {
}
+void AsmTyper::InitializeStdlibSIMD() {
+#define V(NAME, Name, name, lane_count, lane_type) \
+ { \
+ Type* type = Type::Function(Type::Name(isolate_, zone()), Type::Any(), \
+ lane_count, zone()); \
+ for (int i = 0; i < lane_count; ++i) { \
+ type->AsFunction()->InitParameter(i, Type::Number()); \
+ } \
+ stdlib_simd_##name##_constructor_type_ = new (zone()) VariableInfo(type); \
+ stdlib_simd_##name##_constructor_type_->is_constructor_function = true; \
+ }
+ SIMD128_TYPES(V)
+#undef V
+}
+
+
void AsmTyper::InitializeStdlib() {
+ if (allow_simd_) {
+ InitializeStdlibSIMD();
+ }
Type* number_type = Type::Number(zone());
- Type* double_type = cache_.kFloat64;
+ Type* double_type = cache_.kAsmDouble;
Type* double_fn1_type = Type::Function(double_type, double_type, zone());
Type* double_fn2_type =
Type::Function(double_type, double_type, double_type, zone());
- Type* fround_type = Type::Function(cache_.kFloat32, number_type, zone());
+ Type* fround_type = Type::Function(cache_.kAsmFloat, number_type, zone());
Type* imul_type =
- Type::Function(cache_.kInt32, cache_.kInt32, cache_.kInt32, zone());
+ Type::Function(cache_.kAsmSigned, cache_.kAsmInt, cache_.kAsmInt, zone());
// TODO(bradnelson): currently only approximating the proper intersection type
// (which we cannot currently represent).
- Type* abs_type = Type::Function(number_type, number_type, zone());
+ Type* number_fn1_type = Type::Function(number_type, number_type, zone());
+ Type* number_fn2_type =
+ Type::Function(number_type, number_type, number_type, zone());
struct Assignment {
const char* name;
+ StandardMember standard_member;
Type* type;
};
- const Assignment math[] = {
- {"PI", double_type}, {"E", double_type},
- {"LN2", double_type}, {"LN10", double_type},
- {"LOG2E", double_type}, {"LOG10E", double_type},
- {"SQRT2", double_type}, {"SQRT1_2", double_type},
- {"imul", imul_type}, {"abs", abs_type},
- {"ceil", double_fn1_type}, {"floor", double_fn1_type},
- {"fround", fround_type}, {"pow", double_fn2_type},
- {"exp", double_fn1_type}, {"log", double_fn1_type},
- {"min", double_fn2_type}, {"max", double_fn2_type},
- {"sqrt", double_fn1_type}, {"cos", double_fn1_type},
- {"sin", double_fn1_type}, {"tan", double_fn1_type},
- {"acos", double_fn1_type}, {"asin", double_fn1_type},
- {"atan", double_fn1_type}, {"atan2", double_fn2_type}};
+ const Assignment math[] = {{"PI", kMathPI, double_type},
+ {"E", kMathE, double_type},
+ {"LN2", kMathLN2, double_type},
+ {"LN10", kMathLN10, double_type},
+ {"LOG2E", kMathLOG2E, double_type},
+ {"LOG10E", kMathLOG10E, double_type},
+ {"SQRT2", kMathSQRT2, double_type},
+ {"SQRT1_2", kMathSQRT1_2, double_type},
+ {"imul", kMathImul, imul_type},
+ {"abs", kMathAbs, number_fn1_type},
+ {"ceil", kMathCeil, number_fn1_type},
+ {"floor", kMathFloor, number_fn1_type},
+ {"fround", kMathFround, fround_type},
+ {"pow", kMathPow, double_fn2_type},
+ {"exp", kMathExp, double_fn1_type},
+ {"log", kMathLog, double_fn1_type},
+ {"min", kMathMin, number_fn2_type},
+ {"max", kMathMax, number_fn2_type},
+ {"sqrt", kMathSqrt, number_fn1_type},
+ {"cos", kMathCos, double_fn1_type},
+ {"sin", kMathSin, double_fn1_type},
+ {"tan", kMathTan, double_fn1_type},
+ {"acos", kMathAcos, double_fn1_type},
+ {"asin", kMathAsin, double_fn1_type},
+ {"atan", kMathAtan, double_fn1_type},
+ {"atan2", kMathAtan2, double_fn2_type}};
for (unsigned i = 0; i < arraysize(math); ++i) {
- stdlib_math_types_[math[i].name] = math[i].type;
+ stdlib_math_types_[math[i].name] = new (zone()) VariableInfo(math[i].type);
+ stdlib_math_types_[math[i].name]->standard_member = math[i].standard_member;
}
+ stdlib_math_types_["fround"]->is_check_function = true;
- stdlib_types_["Infinity"] = double_type;
- stdlib_types_["NaN"] = double_type;
+ stdlib_types_["Infinity"] = new (zone()) VariableInfo(double_type);
+ stdlib_types_["Infinity"]->standard_member = kInfinity;
+ stdlib_types_["NaN"] = new (zone()) VariableInfo(double_type);
+ stdlib_types_["NaN"]->standard_member = kNaN;
Type* buffer_type = Type::Any(zone());
#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- stdlib_types_[#TypeName "Array"] = \
- Type::Function(cache_.k##TypeName##Array, buffer_type, zone());
+ stdlib_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
+ Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
TYPED_ARRAYS(TYPED_ARRAY)
#undef TYPED_ARRAY
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- stdlib_heap_types_[#TypeName "Array"] = \
- Type::Function(cache_.k##TypeName##Array, buffer_type, zone());
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ stdlib_heap_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
+ Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
TYPED_ARRAYS(TYPED_ARRAY)
#undef TYPED_ARRAY
}
-Type* AsmTyper::LibType(ObjectTypeMap map, Handle<String> name) {
+void AsmTyper::VisitLibraryAccess(ObjectTypeMap* map, Property* expr) {
+ Literal* key = expr->key()->AsLiteral();
+ if (key == NULL || !key->IsPropertyName())
+ FAIL(expr, "invalid key used on stdlib member");
+ Handle<String> name = key->AsPropertyName();
+ VariableInfo* info = LibType(map, name);
+ if (info == NULL || info->type == NULL) FAIL(expr, "unknown stdlib function");
+ SetResult(expr, info->type);
+ property_info_ = info;
+}
+
+
+AsmTyper::VariableInfo* AsmTyper::LibType(ObjectTypeMap* map,
+ Handle<String> name) {
base::SmartArrayPointer<char> aname = name->ToCString();
- ObjectTypeMap::iterator i = map.find(std::string(aname.get()));
- if (i == map.end()) {
+ ObjectTypeMap::iterator i = map->find(std::string(aname.get()));
+ if (i == map->end()) {
return NULL;
}
return i->second;
@@ -1052,32 +1391,62 @@ Type* AsmTyper::LibType(ObjectTypeMap map, Handle<String> name) {
void AsmTyper::SetType(Variable* variable, Type* type) {
- ZoneHashMap::Entry* entry;
- if (in_function_) {
- entry = local_variable_type_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
- } else {
- entry = global_variable_type_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
- }
- entry->value = reinterpret_cast<void*>(type);
+ VariableInfo* info = GetVariableInfo(variable, true);
+ info->type = type;
}
Type* AsmTyper::GetType(Variable* variable) {
- i::ZoneHashMap::Entry* entry = NULL;
+ VariableInfo* info = GetVariableInfo(variable, false);
+ if (!info) return NULL;
+ return info->type;
+}
+
+
+AsmTyper::VariableInfo* AsmTyper::GetVariableInfo(Variable* variable,
+ bool setting) {
+ ZoneHashMap::Entry* entry;
+ ZoneHashMap* map;
if (in_function_) {
- entry = local_variable_type_.Lookup(variable, ComputePointerHash(variable));
- }
- if (entry == NULL) {
- entry =
- global_variable_type_.Lookup(variable, ComputePointerHash(variable));
+ map = &local_variable_type_;
+ } else {
+ map = &global_variable_type_;
}
- if (entry == NULL) {
- return NULL;
+ if (setting) {
+ entry = map->LookupOrInsert(variable, ComputePointerHash(variable),
+ ZoneAllocationPolicy(zone()));
} else {
- return reinterpret_cast<Type*>(entry->value);
+ entry = map->Lookup(variable, ComputePointerHash(variable));
+ if (!entry && in_function_) {
+ entry =
+ global_variable_type_.Lookup(variable, ComputePointerHash(variable));
+ if (entry && entry->value) {
+ }
+ }
+ }
+ if (!entry) return NULL;
+ if (!entry->value) {
+ if (!setting) return NULL;
+ entry->value = new (zone()) VariableInfo;
}
+ return reinterpret_cast<VariableInfo*>(entry->value);
+}
+
+
+void AsmTyper::SetVariableInfo(Variable* variable, const VariableInfo* info) {
+ VariableInfo* dest = GetVariableInfo(variable, true);
+ dest->type = info->type;
+ dest->is_check_function = info->is_check_function;
+ dest->is_constructor_function = info->is_constructor_function;
+ dest->standard_member = info->standard_member;
+}
+
+
+AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(
+ Variable* variable) {
+ VariableInfo* info = GetVariableInfo(variable, false);
+ if (!info) return kNone;
+ return info->standard_member;
}
@@ -1111,5 +1480,13 @@ void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
}
expected_type_ = save;
}
+
+
+void AsmTyper::VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ RECURSE(Visit(expr->expression()));
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/typing-asm.h b/deps/v8/src/typing-asm.h
index a80fec5fba..b7f53831e6 100644
--- a/deps/v8/src/typing-asm.h
+++ b/deps/v8/src/typing-asm.h
@@ -6,7 +6,7 @@
#define V8_TYPING_ASM_H_
#include "src/allocation.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/effects.h"
#include "src/type-info.h"
#include "src/types.h"
@@ -22,28 +22,90 @@ class AsmTyper : public AstVisitor {
explicit AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root);
bool Validate();
+ void set_allow_simd(bool simd);
const char* error_message() { return error_message_; }
+ enum StandardMember {
+ kNone = 0,
+ kStdlib,
+ kInfinity,
+ kNaN,
+ kMathAcos,
+ kMathAsin,
+ kMathAtan,
+ kMathCos,
+ kMathSin,
+ kMathTan,
+ kMathExp,
+ kMathLog,
+ kMathCeil,
+ kMathFloor,
+ kMathSqrt,
+ kMathAbs,
+ kMathMin,
+ kMathMax,
+ kMathAtan2,
+ kMathPow,
+ kMathImul,
+ kMathFround,
+ kMathE,
+ kMathLN10,
+ kMathLN2,
+ kMathLOG2E,
+ kMathLOG10E,
+ kMathPI,
+ kMathSQRT1_2,
+ kMathSQRT2,
+ };
+
+ StandardMember VariableAsStandardMember(Variable* variable);
+
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
Zone* zone_;
+ Isolate* isolate_;
Script* script_;
FunctionLiteral* root_;
bool valid_;
+ bool allow_simd_;
+
+ struct VariableInfo : public ZoneObject {
+ Type* type;
+ bool is_check_function;
+ bool is_constructor_function;
+ StandardMember standard_member;
+
+ VariableInfo()
+ : type(NULL),
+ is_check_function(false),
+ is_constructor_function(false),
+ standard_member(kNone) {}
+ explicit VariableInfo(Type* t)
+ : type(t),
+ is_check_function(false),
+ is_constructor_function(false),
+ standard_member(kNone) {}
+ };
// Information for bi-directional typing with a cap on nesting depth.
Type* expected_type_;
Type* computed_type_;
+ VariableInfo* property_info_;
int intish_; // How many ops we've gone without a x|0.
Type* return_type_; // Return type of last function.
size_t array_size_; // Array size of last ArrayLiteral.
- typedef ZoneMap<std::string, Type*> ObjectTypeMap;
+ typedef ZoneMap<std::string, VariableInfo*> ObjectTypeMap;
ObjectTypeMap stdlib_types_;
ObjectTypeMap stdlib_heap_types_;
ObjectTypeMap stdlib_math_types_;
+#define V(NAME, Name, name, lane_count, lane_type) \
+ ObjectTypeMap stdlib_simd_##name##_types_; \
+ VariableInfo* stdlib_simd_##name##_constructor_type_;
+ SIMD128_TYPES(V)
+#undef V
// Map from Variable* to global/local variable Type*.
ZoneHashMap global_variable_type_;
@@ -61,22 +123,35 @@ class AsmTyper : public AstVisitor {
static const int kMaxUncombinedMultiplicativeSteps = 1;
void InitializeStdlib();
+ void InitializeStdlibSIMD();
void VisitDeclarations(ZoneList<Declaration*>* d) override;
void VisitStatements(ZoneList<Statement*>* s) override;
- void VisitExpressionAnnotation(Expression* e);
+ void VisitExpressionAnnotation(Expression* e, Variable* var, bool is_return);
void VisitFunctionAnnotation(FunctionLiteral* f);
void VisitAsmModule(FunctionLiteral* f);
- void VisitHeapAccess(Property* expr);
+ void VisitHeapAccess(Property* expr, bool assigning, Type* assignment_type);
+
+ Expression* GetReceiverOfPropertyAccess(Expression* expr, const char* name);
+ bool IsMathObject(Expression* expr);
+ bool IsSIMDObject(Expression* expr);
+ bool IsSIMDTypeObject(Expression* expr, const char* name);
+ bool IsStdlibObject(Expression* expr);
+
+ void VisitSIMDProperty(Property* expr);
int ElementShiftSize(Type* type);
+ Type* StorageType(Type* type);
void SetType(Variable* variable, Type* type);
Type* GetType(Variable* variable);
+ VariableInfo* GetVariableInfo(Variable* variable, bool setting);
+ void SetVariableInfo(Variable* variable, const VariableInfo* info);
- Type* LibType(ObjectTypeMap map, Handle<String> name);
+ VariableInfo* LibType(ObjectTypeMap* map, Handle<String> name);
+ void VisitLibraryAccess(ObjectTypeMap* map, Property* expr);
void SetResult(Expression* expr, Type* type);
void IntersectResult(Expression* expr, Type* type);
@@ -84,6 +159,8 @@ class AsmTyper : public AstVisitor {
void VisitWithExpectation(Expression* expr, Type* expected_type,
const char* msg);
+ void VisitLiteral(Literal* expr, bool is_return);
+
void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
Type* right_expected, Type* result_type,
bool conversion);
diff --git a/deps/v8/src/typing-reset.cc b/deps/v8/src/typing-reset.cc
index b67b23507c..c22f7a9276 100644
--- a/deps/v8/src/typing-reset.cc
+++ b/deps/v8/src/typing-reset.cc
@@ -6,9 +6,9 @@
#include "src/typing-reset.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/typing-reset.h b/deps/v8/src/typing-reset.h
index 84e51773e7..3e1969d9ed 100644
--- a/deps/v8/src/typing-reset.h
+++ b/deps/v8/src/typing-reset.h
@@ -5,7 +5,7 @@
#ifndef V8_TYPING_RESET_H_
#define V8_TYPING_RESET_H_
-#include "src/ast-expression-visitor.h"
+#include "src/ast/ast-expression-visitor.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 4077ae7217..c46028f059 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -368,7 +368,7 @@ static void MemMoveWrapper(void* dest, const void* src, size_t size) {
static MemMoveFunction memmove_function = &MemMoveWrapper;
// Defined in codegen-ia32.cc.
-MemMoveFunction CreateMemMoveFunction();
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate);
// Copy memory area to disjoint memory area.
void MemMove(void* dest, const void* src, size_t size) {
@@ -392,29 +392,38 @@ MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
MemCopyUint16Uint8Function memcopy_uint16_uint8_function =
&MemCopyUint16Uint8Wrapper;
// Defined in codegen-arm.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub);
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
- MemCopyUint16Uint8Function stub);
+ Isolate* isolate, MemCopyUint16Uint8Function stub);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;
// Defined in codegen-mips.cc.
-MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub);
+MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
+ MemCopyUint8Function stub);
#endif
-void init_memcopy_functions() {
+static bool g_memcopy_functions_initialized = false;
+
+
+void init_memcopy_functions(Isolate* isolate) {
+ if (g_memcopy_functions_initialized) return;
+ g_memcopy_functions_initialized = true;
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
- MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ MemMoveFunction generated_memmove = CreateMemMoveFunction(isolate);
if (generated_memmove != NULL) {
memmove_function = generated_memmove;
}
#elif V8_OS_POSIX && V8_HOST_ARCH_ARM
- memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
+ memcopy_uint8_function =
+ CreateMemCopyUint8Function(isolate, &MemCopyUint8Wrapper);
memcopy_uint16_uint8_function =
- CreateMemCopyUint16Uint8Function(&MemCopyUint16Uint8Wrapper);
+ CreateMemCopyUint16Uint8Function(isolate, &MemCopyUint16Uint8Wrapper);
#elif V8_OS_POSIX && V8_HOST_ARCH_MIPS
- memcopy_uint8_function = CreateMemCopyUint8Function(&MemCopyUint8Wrapper);
+ memcopy_uint8_function =
+ CreateMemCopyUint8Function(isolate, &MemCopyUint8Wrapper);
#endif
}
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 1fe6a3213c..1ea2d56fbf 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -26,6 +26,16 @@ namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
+// Returns the value (0 .. 15) of a hexadecimal character c.
+// If c is not a legal hexadecimal character, returns a value < 0.
+inline int HexValue(uc32 c) {
+ c -= '0';
+ if (static_cast<unsigned>(c) <= 9) return c;
+ c = (c | 0x20) - ('a' - '0'); // detect 0x11..0x16 and 0x31..0x36.
+ if (static_cast<unsigned>(c) <= 5) return c + 10;
+ return -1;
+}
+
inline int BoolToInt(bool b) { return b ? 1 : 0; }
@@ -366,9 +376,8 @@ inline uint32_t ComputePointerHash(void* ptr) {
// ----------------------------------------------------------------------------
// Generated memcpy/memmove
-// Initializes the codegen support that depends on CPU features. This is
-// called after CPU initialization.
-void init_memcopy_functions();
+// Initializes the codegen support that depends on CPU features.
+void init_memcopy_functions(Isolate* isolate);
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
// Limit below which the extra overhead of the MemCopy function is likely
@@ -1042,6 +1051,13 @@ class TypeFeedbackId {
int id_;
};
+inline bool operator<(TypeFeedbackId lhs, TypeFeedbackId rhs) {
+ return lhs.ToInt() < rhs.ToInt();
+}
+inline bool operator>(TypeFeedbackId lhs, TypeFeedbackId rhs) {
+ return lhs.ToInt() > rhs.ToInt();
+}
+
class FeedbackVectorSlot {
public:
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 9f8e60c294..31b48780e4 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -79,13 +79,6 @@ void V8::InitializeOncePerProcessImpl() {
Sampler::SetUp();
CpuFeatures::Probe(false);
- init_memcopy_functions();
- // The custom exp implementation needs 16KB of lookup data; initialize it
- // on demand.
- init_fast_sqrt_function();
-#ifdef _WIN64
- init_modulo_function();
-#endif
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
new file mode 100644
index 0000000000..a9d24ade28
--- /dev/null
+++ b/deps/v8/src/wasm/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/deps/v8/src/wasm/asm-wasm-builder.cc b/deps/v8/src/wasm/asm-wasm-builder.cc
new file mode 100644
index 0000000000..30f84642f8
--- /dev/null
+++ b/deps/v8/src/wasm/asm-wasm-builder.cc
@@ -0,0 +1,1045 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/codegen.h"
+#include "src/type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define RECURSE(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ call; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+
+class AsmWasmBuilderImpl : public AstVisitor {
+ public:
+ AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal)
+ : local_variables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ functions_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ global_variables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ in_function_(false),
+ is_set_op_(false),
+ marking_exported(false),
+ builder_(new (zone) WasmModuleBuilder(zone)),
+ current_function_builder_(nullptr),
+ literal_(literal),
+ isolate_(isolate),
+ zone_(zone),
+ cache_(TypeCache::Get()),
+ breakable_blocks_(zone),
+ block_size_(0),
+ init_function_index(0) {
+ InitializeAstVisitor(isolate);
+ }
+
+ void InitializeInitFunction() {
+ unsigned char init[] = "__init__";
+ init_function_index = builder_->AddFunction();
+ current_function_builder_ = builder_->FunctionAt(init_function_index);
+ current_function_builder_->SetName(init, 8);
+ current_function_builder_->ReturnType(kAstStmt);
+ current_function_builder_->Exported(1);
+ current_function_builder_ = nullptr;
+ }
+
+ void Compile() {
+ InitializeInitFunction();
+ RECURSE(VisitFunctionLiteral(literal_));
+ }
+
+ void VisitVariableDeclaration(VariableDeclaration* decl) {}
+
+ void VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ DCHECK(!in_function_);
+ DCHECK(current_function_builder_ == nullptr);
+ uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
+ current_function_builder_ = builder_->FunctionAt(index);
+ in_function_ = true;
+ RECURSE(Visit(decl->fun()));
+ in_function_ = false;
+ current_function_builder_ = nullptr;
+ local_variables_.Clear();
+ }
+
+ void VisitImportDeclaration(ImportDeclaration* decl) {}
+
+ void VisitExportDeclaration(ExportDeclaration* decl) {}
+
+ void VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
+ }
+ }
+
+ void VisitBlock(Block* stmt) {
+ if (stmt->statements()->length() == 1) {
+ ExpressionStatement* expr =
+ stmt->statements()->at(0)->AsExpressionStatement();
+ if (expr != nullptr) {
+ if (expr->expression()->IsAssignment()) {
+ RECURSE(VisitExpressionStatement(expr));
+ return;
+ }
+ }
+ }
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
+ static_cast<byte>(stmt->statements()->length()));
+ RECURSE(VisitStatements(stmt->statements()));
+ DCHECK(block_size_ >= 0);
+ }
+
+ class BlockVisitor {
+ private:
+ int prev_block_size_;
+ uint32_t index_;
+ AsmWasmBuilderImpl* builder_;
+
+ public:
+ BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
+ WasmOpcode opcode, bool is_loop, int initial_block_size)
+ : builder_(builder) {
+ builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
+ builder_->current_function_builder_->Emit(opcode);
+ index_ = builder_->current_function_builder_->EmitEditableImmediate(0);
+ prev_block_size_ = builder_->block_size_;
+ builder_->block_size_ = initial_block_size;
+ }
+ ~BlockVisitor() {
+ builder_->current_function_builder_->EditImmediate(index_,
+ builder_->block_size_);
+ builder_->block_size_ = prev_block_size_;
+ builder_->breakable_blocks_.pop_back();
+ }
+ };
+
+ void VisitExpressionStatement(ExpressionStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+ }
+
+ void VisitEmptyStatement(EmptyStatement* stmt) {}
+
+ void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
+
+ void VisitIfStatement(IfStatement* stmt) {
+ DCHECK(in_function_);
+ if (stmt->HasElseStatement()) {
+ current_function_builder_->Emit(kExprIfElse);
+ } else {
+ current_function_builder_->Emit(kExprIf);
+ }
+ RECURSE(Visit(stmt->condition()));
+ if (stmt->HasThenStatement()) {
+ RECURSE(Visit(stmt->then_statement()));
+ } else {
+ current_function_builder_->Emit(kExprNop);
+ }
+ if (stmt->HasElseStatement()) {
+ RECURSE(Visit(stmt->else_statement()));
+ }
+ }
+
+ void VisitContinueStatement(ContinueStatement* stmt) {
+ DCHECK(in_function_);
+ DCHECK(stmt->target() != NULL);
+ int i = static_cast<int>(breakable_blocks_.size()) - 1;
+ int block_distance = 0;
+ for (; i >= 0; i--) {
+ auto elem = breakable_blocks_.at(i);
+ if (elem.first == stmt->target()) {
+ DCHECK(elem.second);
+ break;
+ } else if (elem.second) {
+ block_distance += 2;
+ } else {
+ block_distance += 1;
+ }
+ }
+ DCHECK(i >= 0);
+ current_function_builder_->EmitWithU8(kExprBr, block_distance);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitBreakStatement(BreakStatement* stmt) {
+ DCHECK(in_function_);
+ DCHECK(stmt->target() != NULL);
+ int i = static_cast<int>(breakable_blocks_.size()) - 1;
+ int block_distance = 0;
+ for (; i >= 0; i--) {
+ auto elem = breakable_blocks_.at(i);
+ if (elem.first == stmt->target()) {
+ if (elem.second) {
+ block_distance++;
+ }
+ break;
+ } else if (elem.second) {
+ block_distance += 2;
+ } else {
+ block_distance += 1;
+ }
+ }
+ DCHECK(i >= 0);
+ current_function_builder_->EmitWithU8(kExprBr, block_distance);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitReturnStatement(ReturnStatement* stmt) {
+ if (in_function_) {
+ current_function_builder_->Emit(kExprReturn);
+ } else {
+ marking_exported = true;
+ }
+ RECURSE(Visit(stmt->expression()));
+ if (!in_function_) {
+ marking_exported = false;
+ }
+ }
+
+ void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
+
+ void SetLocalTo(uint16_t index, int value) {
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index, true);
+ byte code[] = {WASM_I32(value)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ block_size_++;
+ }
+
+ void CompileCase(CaseClause* clause, uint16_t fall_through,
+ VariableProxy* tag) {
+ Literal* label = clause->label()->AsLiteral();
+ DCHECK(label != nullptr);
+ block_size_++;
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->Emit(kExprI32Ior);
+ current_function_builder_->Emit(kExprI32Eq);
+ VisitVariableProxy(tag);
+ VisitLiteral(label);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(fall_through, true);
+ BlockVisitor visitor(this, nullptr, kExprBlock, false, 0);
+ SetLocalTo(fall_through, 1);
+ ZoneList<Statement*>* stmts = clause->statements();
+ block_size_ += stmts->length();
+ RECURSE(VisitStatements(stmts));
+ }
+
+ void VisitSwitchStatement(SwitchStatement* stmt) {
+ VariableProxy* tag = stmt->tag()->AsVariableProxy();
+ DCHECK(tag != NULL);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
+ 0);
+ uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
+ SetLocalTo(fall_through, 0);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default()) {
+ CompileCase(clause, fall_through, tag);
+ } else {
+ ZoneList<Statement*>* stmts = clause->statements();
+ block_size_ += stmts->length();
+ RECURSE(VisitStatements(stmts));
+ }
+ }
+ }
+
+ void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
+
+ void VisitDoWhileStatement(DoWhileStatement* stmt) {
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 2);
+ RECURSE(Visit(stmt->body()));
+ current_function_builder_->Emit(kExprIf);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitWhileStatement(WhileStatement* stmt) {
+ DCHECK(in_function_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 1);
+ current_function_builder_->Emit(kExprIf);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ RECURSE(Visit(stmt->body()));
+ }
+
+ void VisitForStatement(ForStatement* stmt) {
+ DCHECK(in_function_);
+ if (stmt->init() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->init()));
+ }
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
+ 0);
+ if (stmt->cond() != nullptr) {
+ block_size_++;
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->Emit(kExprBoolNot);
+ RECURSE(Visit(stmt->cond()));
+ current_function_builder_->EmitWithU8(kExprBr, 1);
+ current_function_builder_->Emit(kExprNop);
+ }
+ if (stmt->body() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->body()));
+ }
+ if (stmt->next() != nullptr) {
+ block_size_++;
+ RECURSE(Visit(stmt->next()));
+ }
+ block_size_++;
+ current_function_builder_->EmitWithU8(kExprBr, 0);
+ current_function_builder_->Emit(kExprNop);
+ }
+
+ void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
+
+ void VisitForOfStatement(ForOfStatement* stmt) { UNREACHABLE(); }
+
+ void VisitTryCatchStatement(TryCatchStatement* stmt) { UNREACHABLE(); }
+
+ void VisitTryFinallyStatement(TryFinallyStatement* stmt) { UNREACHABLE(); }
+
+ void VisitDebuggerStatement(DebuggerStatement* stmt) { UNREACHABLE(); }
+
+ void VisitFunctionLiteral(FunctionLiteral* expr) {
+ Scope* scope = expr->scope();
+ if (in_function_) {
+ if (expr->bounds().lower->IsFunction()) {
+ Type::FunctionType* func_type = expr->bounds().lower->AsFunction();
+ LocalType return_type = TypeFrom(func_type->Result());
+ current_function_builder_->ReturnType(return_type);
+ for (int i = 0; i < expr->parameter_count(); i++) {
+ LocalType type = TypeFrom(func_type->Parameter(i));
+ DCHECK(type != kAstStmt);
+ LookupOrInsertLocal(scope->parameter(i), type);
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
+ RECURSE(VisitDeclarations(scope->declarations()));
+ RECURSE(VisitStatements(expr->body()));
+ }
+
+ void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitConditional(Conditional* expr) {
+ DCHECK(in_function_);
+ current_function_builder_->Emit(kExprIfElse);
+ RECURSE(Visit(expr->condition()));
+ RECURSE(Visit(expr->then_expression()));
+ RECURSE(Visit(expr->else_expression()));
+ }
+
+ void VisitVariableProxy(VariableProxy* expr) {
+ if (in_function_) {
+ Variable* var = expr->var();
+ if (var->is_function()) {
+ DCHECK(!is_set_op_);
+ std::vector<uint8_t> index =
+ UnsignedLEB128From(LookupOrInsertFunction(var));
+ current_function_builder_->EmitCode(
+ &index[0], static_cast<uint32_t>(index.size()));
+ } else {
+ if (is_set_op_) {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprStoreGlobal);
+ } else {
+ current_function_builder_->Emit(kExprSetLocal);
+ }
+ is_set_op_ = false;
+ } else {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprLoadGlobal);
+ } else {
+ current_function_builder_->Emit(kExprGetLocal);
+ }
+ }
+ LocalType var_type = TypeOf(expr);
+ DCHECK(var_type != kAstStmt);
+ if (var->IsContextSlot()) {
+ AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+ } else {
+ AddLeb128(LookupOrInsertLocal(var, var_type), true);
+ }
+ }
+ }
+ }
+
+ void VisitLiteral(Literal* expr) {
+ if (in_function_) {
+ if (expr->raw_value()->IsNumber()) {
+ LocalType type = TypeOf(expr);
+ switch (type) {
+ case kAstI32: {
+ int val = static_cast<int>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ case kAstF32: {
+ float val = static_cast<float>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_F32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ case kAstF64: {
+ double val = static_cast<double>(expr->raw_value()->AsNumber());
+ byte code[] = {WASM_F64(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+
+ void VisitRegExpLiteral(RegExpLiteral* expr) { UNREACHABLE(); }
+
+ void VisitObjectLiteral(ObjectLiteral* expr) {
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ DCHECK(marking_exported);
+ VariableProxy* expr = prop->value()->AsVariableProxy();
+ DCHECK(expr != nullptr);
+ Variable* var = expr->var();
+ Literal* name = prop->key()->AsLiteral();
+ DCHECK(name != nullptr);
+ DCHECK(name->IsPropertyName());
+ const AstRawString* raw_name = name->AsRawPropertyName();
+ if (var->is_function()) {
+ uint16_t index = LookupOrInsertFunction(var);
+ builder_->FunctionAt(index)->Exported(1);
+ builder_->FunctionAt(index)
+ ->SetName(raw_name->raw_data(), raw_name->length());
+ }
+ }
+ }
+
+ void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
+
+ void LoadInitFunction() {
+ current_function_builder_ = builder_->FunctionAt(init_function_index);
+ in_function_ = true;
+ }
+
+ void UnLoadInitFunction() {
+ in_function_ = false;
+ current_function_builder_ = nullptr;
+ }
+
+ void VisitAssignment(Assignment* expr) {
+ bool in_init = false;
+ if (!in_function_) {
+ // TODO(bradnelson): Get rid of this.
+ if (TypeOf(expr->value()) == kAstStmt) {
+ return;
+ }
+ in_init = true;
+ LoadInitFunction();
+ }
+ BinaryOperation* value_op = expr->value()->AsBinaryOperation();
+ if (value_op != nullptr && MatchBinaryOperation(value_op) == kAsIs) {
+ VariableProxy* target_var = expr->target()->AsVariableProxy();
+ VariableProxy* effective_value_var = GetLeft(value_op)->AsVariableProxy();
+ if (target_var != nullptr && effective_value_var != nullptr &&
+ target_var->var() == effective_value_var->var()) {
+ block_size_--;
+ return;
+ }
+ }
+ is_set_op_ = true;
+ RECURSE(Visit(expr->target()));
+ DCHECK(!is_set_op_);
+ RECURSE(Visit(expr->value()));
+ if (in_init) {
+ UnLoadInitFunction();
+ }
+ }
+
+ void VisitYield(Yield* expr) { UNREACHABLE(); }
+
+ void VisitThrow(Throw* expr) { UNREACHABLE(); }
+
+ void VisitProperty(Property* expr) {
+ Expression* obj = expr->obj();
+ DCHECK(obj->bounds().lower == obj->bounds().upper);
+ TypeImpl<ZoneTypeConfig>* type = obj->bounds().lower;
+ MachineType mtype;
+ int size;
+ if (type->Is(cache_.kUint8Array)) {
+ mtype = MachineType::Uint8();
+ size = 1;
+ } else if (type->Is(cache_.kInt8Array)) {
+ mtype = MachineType::Int8();
+ size = 1;
+ } else if (type->Is(cache_.kUint16Array)) {
+ mtype = MachineType::Uint16();
+ size = 2;
+ } else if (type->Is(cache_.kInt16Array)) {
+ mtype = MachineType::Int16();
+ size = 2;
+ } else if (type->Is(cache_.kUint32Array)) {
+ mtype = MachineType::Uint32();
+ size = 4;
+ } else if (type->Is(cache_.kInt32Array)) {
+ mtype = MachineType::Int32();
+ size = 4;
+ } else if (type->Is(cache_.kUint32Array)) {
+ mtype = MachineType::Uint32();
+ size = 4;
+ } else if (type->Is(cache_.kFloat32Array)) {
+ mtype = MachineType::Float32();
+ size = 4;
+ } else if (type->Is(cache_.kFloat64Array)) {
+ mtype = MachineType::Float64();
+ size = 8;
+ } else {
+ UNREACHABLE();
+ }
+ current_function_builder_->EmitWithU8(
+ WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
+ WasmOpcodes::LoadStoreAccessOf(false));
+ is_set_op_ = false;
+ Literal* value = expr->key()->AsLiteral();
+ if (value) {
+ DCHECK(value->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(value));
+ int val = static_cast<int>(value->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val * size)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return;
+ }
+ BinaryOperation* binop = expr->key()->AsBinaryOperation();
+ if (binop) {
+ DCHECK(Token::SAR == binop->op());
+ DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+ DCHECK(size ==
+ 1 << static_cast<int>(
+ binop->right()->AsLiteral()->raw_value()->AsNumber()));
+ // Mask bottom bits to match asm.js behavior.
+ current_function_builder_->Emit(kExprI32And);
+ byte code[] = {WASM_I8(~(size - 1))};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ RECURSE(Visit(binop->left()));
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ void VisitCall(Call* expr) {
+ Call::CallType call_type = expr->GetCallType(isolate_);
+ switch (call_type) {
+ case Call::OTHER_CALL: {
+ DCHECK(in_function_);
+ current_function_builder_->Emit(kExprCallFunction);
+ RECURSE(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
+
+ void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
+
+ void VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::NOT: {
+ DCHECK(TypeOf(expr->expression()) == kAstI32);
+ current_function_builder_->Emit(kExprBoolNot);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->expression()));
+ }
+
+ void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
+
+ bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
+ int32_t val) {
+ DCHECK(expr->right() != nullptr);
+ if (expr->op() == op && expr->right()->IsLiteral() &&
+ TypeOf(expr) == kAstI32) {
+ Literal* right = expr->right()->AsLiteral();
+ DCHECK(right->raw_value()->IsNumber());
+ if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool MatchDoubleBinaryOperation(BinaryOperation* expr, Token::Value op,
+ double val) {
+ DCHECK(expr->right() != nullptr);
+ if (expr->op() == op && expr->right()->IsLiteral() &&
+ TypeOf(expr) == kAstF64) {
+ Literal* right = expr->right()->AsLiteral();
+ DCHECK(right->raw_value()->IsNumber());
+ if (right->raw_value()->AsNumber() == val) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
+
+ ConvertOperation MatchOr(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0)) {
+ return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchShr(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
+ // TODO(titzer): this probably needs to be kToUint
+ return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchXor(BinaryOperation* expr) {
+ if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
+ DCHECK(TypeOf(expr->left()) == kAstI32);
+ DCHECK(TypeOf(expr->right()) == kAstI32);
+ BinaryOperation* op = expr->left()->AsBinaryOperation();
+ if (op != nullptr) {
+ if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
+ DCHECK(TypeOf(op->right()) == kAstI32);
+ if (TypeOf(op->left()) != kAstI32) {
+ return kToInt;
+ } else {
+ return kAsIs;
+ }
+ }
+ }
+ }
+ return kNone;
+ }
+
+ ConvertOperation MatchMul(BinaryOperation* expr) {
+ if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
+ DCHECK(TypeOf(expr->right()) == kAstF64);
+ if (TypeOf(expr->left()) != kAstF64) {
+ return kToDouble;
+ } else {
+ return kAsIs;
+ }
+ } else {
+ return kNone;
+ }
+ }
+
+ ConvertOperation MatchBinaryOperation(BinaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::BIT_OR:
+ return MatchOr(expr);
+ case Token::SHR:
+ return MatchShr(expr);
+ case Token::BIT_XOR:
+ return MatchXor(expr);
+ case Token::MUL:
+ return MatchMul(expr);
+ default:
+ return kNone;
+ }
+ }
+
+// Work around Mul + Div being defined in PPC assembler.
+#ifdef Mul
+#undef Mul
+#endif
+#ifdef Div
+#undef Div
+#endif
+
+#define NON_SIGNED_BINOP(op) \
+ static WasmOpcode opcodes[] = { \
+ kExprI32##op, \
+ kExprI32##op, \
+ kExprF32##op, \
+ kExprF64##op \
+ }
+
+#define SIGNED_BINOP(op) \
+ static WasmOpcode opcodes[] = { \
+ kExprI32##op##S, \
+ kExprI32##op##U, \
+ kExprF32##op, \
+ kExprF64##op \
+ }
+
+#define NON_SIGNED_INT_BINOP(op) \
+ static WasmOpcode opcodes[] = { kExprI32##op, kExprI32##op }
+
+#define BINOP_CASE(token, op, V, ignore_sign) \
+ case token: { \
+ V(op); \
+ int type = TypeIndexOf(expr->left(), expr->right(), ignore_sign); \
+ current_function_builder_->Emit(opcodes[type]); \
+ break; \
+ }
+
+ Expression* GetLeft(BinaryOperation* expr) {
+ if (expr->op() == Token::BIT_XOR) {
+ return expr->left()->AsBinaryOperation()->left();
+ } else {
+ return expr->left();
+ }
+ }
+
+ void VisitBinaryOperation(BinaryOperation* expr) {
+ ConvertOperation convertOperation = MatchBinaryOperation(expr);
+ if (convertOperation == kToDouble) {
+ TypeIndex type = TypeIndexOf(expr->left());
+ if (type == kInt32 || type == kFixnum) {
+ current_function_builder_->Emit(kExprF64SConvertI32);
+ } else if (type == kUint32) {
+ current_function_builder_->Emit(kExprF64UConvertI32);
+ } else if (type == kFloat32) {
+ current_function_builder_->Emit(kExprF64ConvertF32);
+ } else {
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ } else if (convertOperation == kToInt) {
+ TypeIndex type = TypeIndexOf(GetLeft(expr));
+ if (type == kFloat32) {
+ current_function_builder_->Emit(kExprI32SConvertF32);
+ } else if (type == kFloat64) {
+ current_function_builder_->Emit(kExprI32SConvertF64);
+ } else {
+ UNREACHABLE();
+ }
+ RECURSE(Visit(GetLeft(expr)));
+ } else if (convertOperation == kAsIs) {
+ RECURSE(Visit(GetLeft(expr)));
+ } else {
+ switch (expr->op()) {
+ BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
+ BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
+ BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::SHR, ShrU, NON_SIGNED_INT_BINOP, true);
+ case Token::MOD: {
+ TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
+ if (type == kInt32) {
+ current_function_builder_->Emit(kExprI32RemS);
+ } else if (type == kUint32) {
+ current_function_builder_->Emit(kExprI32RemU);
+ } else if (type == kFloat64) {
+ ModF64(expr);
+ return;
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ }
+ }
+
+ void ModF64(BinaryOperation* expr) {
+ current_function_builder_->EmitWithU8(kExprBlock, 3);
+ uint16_t index_0 = current_function_builder_->AddLocal(kAstF64);
+ uint16_t index_1 = current_function_builder_->AddLocal(kAstF64);
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index_0, true);
+ RECURSE(Visit(expr->left()));
+ current_function_builder_->Emit(kExprSetLocal);
+ AddLeb128(index_1, true);
+ RECURSE(Visit(expr->right()));
+ current_function_builder_->Emit(kExprF64Sub);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_0, true);
+ current_function_builder_->Emit(kExprF64Mul);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_1, true);
+ // Use trunc instead of two casts
+ current_function_builder_->Emit(kExprF64SConvertI32);
+ current_function_builder_->Emit(kExprI32SConvertF64);
+ current_function_builder_->Emit(kExprF64Div);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_0, true);
+ current_function_builder_->Emit(kExprGetLocal);
+ AddLeb128(index_1, true);
+ }
+
+ void AddLeb128(uint32_t index, bool is_local) {
+ std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
+ if (is_local) {
+ uint32_t pos_of_index[1] = {0};
+ current_function_builder_->EmitCode(
+ &index_vec[0], static_cast<uint32_t>(index_vec.size()), pos_of_index,
+ 1);
+ } else {
+ current_function_builder_->EmitCode(
+ &index_vec[0], static_cast<uint32_t>(index_vec.size()));
+ }
+ }
+
+ void VisitCompareOperation(CompareOperation* expr) {
+ switch (expr->op()) {
+ BINOP_CASE(Token::EQ, Eq, NON_SIGNED_BINOP, false);
+ BINOP_CASE(Token::LT, Lt, SIGNED_BINOP, false);
+ BINOP_CASE(Token::LTE, Le, SIGNED_BINOP, false);
+ BINOP_CASE(Token::GT, Gt, SIGNED_BINOP, false);
+ BINOP_CASE(Token::GTE, Ge, SIGNED_BINOP, false);
+ default:
+ UNREACHABLE();
+ }
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+ }
+
+#undef BINOP_CASE
+#undef NON_SIGNED_INT_BINOP
+#undef SIGNED_BINOP
+#undef NON_SIGNED_BINOP
+
+ enum TypeIndex {
+ kInt32 = 0,
+ kUint32 = 1,
+ kFloat32 = 2,
+ kFloat64 = 3,
+ kFixnum = 4
+ };
+
+ TypeIndex TypeIndexOf(Expression* left, Expression* right, bool ignore_sign) {
+ TypeIndex left_index = TypeIndexOf(left);
+ TypeIndex right_index = TypeIndexOf(right);
+ if (left_index == kFixnum) {
+ left_index = right_index;
+ }
+ if (right_index == kFixnum) {
+ right_index = left_index;
+ }
+ if (left_index == kFixnum && right_index == kFixnum) {
+ left_index = kInt32;
+ right_index = kInt32;
+ }
+ DCHECK((left_index == right_index) ||
+ (ignore_sign && (left_index <= 1) && (right_index <= 1)));
+ return left_index;
+ }
+
+ TypeIndex TypeIndexOf(Expression* expr) {
+ DCHECK(expr->bounds().lower == expr->bounds().upper);
+ TypeImpl<ZoneTypeConfig>* type = expr->bounds().lower;
+ if (type->Is(cache_.kAsmFixnum)) {
+ return kFixnum;
+ } else if (type->Is(cache_.kAsmSigned)) {
+ return kInt32;
+ } else if (type->Is(cache_.kAsmUnsigned)) {
+ return kUint32;
+ } else if (type->Is(cache_.kAsmInt)) {
+ return kInt32;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ return kFloat32;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ return kFloat64;
+ } else {
+ UNREACHABLE();
+ return kInt32;
+ }
+ }
+
+#undef CASE
+#undef NON_SIGNED_INT
+#undef SIGNED
+#undef NON_SIGNED
+
+ void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
+
+ void VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+ }
+
+ void VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
+
+ void VisitSpread(Spread* expr) { UNREACHABLE(); }
+
+ void VisitSuperPropertyReference(SuperPropertyReference* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitSuperCallReference(SuperCallReference* expr) { UNREACHABLE(); }
+
+ void VisitSloppyBlockFunctionStatement(SloppyBlockFunctionStatement* expr) {
+ UNREACHABLE();
+ }
+
+ void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
+
+ void VisitRewritableAssignmentExpression(
+ RewritableAssignmentExpression* expr) {
+ UNREACHABLE();
+ }
+
+ struct IndexContainer : public ZoneObject {
+ uint16_t index;
+ };
+
+ uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
+ DCHECK(current_function_builder_ != nullptr);
+ ZoneHashMap::Entry* entry =
+ local_variables_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index;
+ if (v->IsParameter()) {
+ index = current_function_builder_->AddParam(type);
+ } else {
+ index = current_function_builder_->AddLocal(type);
+ }
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ uint16_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+ ZoneHashMap::Entry* entry =
+ global_variables_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index =
+ builder_->AddGlobal(WasmOpcodes::MachineTypeFor(type), 0);
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = global_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ uint16_t LookupOrInsertFunction(Variable* v) {
+ DCHECK(builder_ != nullptr);
+ ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
+ if (entry == nullptr) {
+ uint16_t index = builder_->AddFunction();
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+ return (reinterpret_cast<IndexContainer*>(entry->value))->index;
+ }
+
+ LocalType TypeOf(Expression* expr) {
+ DCHECK(expr->bounds().lower == expr->bounds().upper);
+ return TypeFrom(expr->bounds().lower);
+ }
+
+ LocalType TypeFrom(TypeImpl<ZoneTypeConfig>* type) {
+ if (type->Is(cache_.kAsmInt)) {
+ return kAstI32;
+ } else if (type->Is(cache_.kAsmFloat)) {
+ return kAstF32;
+ } else if (type->Is(cache_.kAsmDouble)) {
+ return kAstF64;
+ } else {
+ return kAstStmt;
+ }
+ }
+
+ Zone* zone() { return zone_; }
+
+ ZoneHashMap local_variables_;
+ ZoneHashMap functions_;
+ ZoneHashMap global_variables_;
+ bool in_function_;
+ bool is_set_op_;
+ bool marking_exported;
+ WasmModuleBuilder* builder_;
+ WasmFunctionBuilder* current_function_builder_;
+ FunctionLiteral* literal_;
+ Isolate* isolate_;
+ Zone* zone_;
+ TypeCache const& cache_;
+ ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
+ int block_size_;
+ uint16_t init_function_index;
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
+};
+
+AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
+ FunctionLiteral* literal)
+ : isolate_(isolate), zone_(zone), literal_(literal) {}
+
+// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
+// that zone in constructor may be thrown away once wasm module is written.
+WasmModuleIndex* AsmWasmBuilder::Run() {
+ AsmWasmBuilderImpl impl(isolate_, zone_, literal_);
+ impl.Compile();
+ WasmModuleWriter* writer = impl.builder_->Build(zone_);
+ return writer->WriteTo(zone_);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/asm-wasm-builder.h b/deps/v8/src/wasm/asm-wasm-builder.h
new file mode 100644
index 0000000000..cb568db77c
--- /dev/null
+++ b/deps/v8/src/wasm/asm-wasm-builder.h
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_ASM_WASM_BUILDER_H_
+#define V8_WASM_ASM_WASM_BUILDER_H_
+
+#include "src/allocation.h"
+#include "src/wasm/encoder.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class FunctionLiteral;
+
+namespace wasm {
+
+class AsmWasmBuilder {
+ public:
+ explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ WasmModuleIndex* Run();
+
+ private:
+ Isolate* isolate_;
+ Zone* zone_;
+ FunctionLiteral* literal_;
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_ASM_WASM_BUILDER_H_
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
new file mode 100644
index 0000000000..ffb815771a
--- /dev/null
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -0,0 +1,1583 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/signature.h"
+
+#include "src/flags.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+// The root of a decoded tree.
+struct Tree {
+ LocalType type; // tree type.
+ uint32_t count; // number of children.
+ const byte* pc; // start of the syntax tree.
+ TFNode* node; // node in the TurboFan graph.
+ Tree* children[1]; // pointers to children.
+
+ WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
+};
+
+
+// A production represents an incomplete decoded tree in the LR decoder.
+struct Production {
+ Tree* tree; // the root of the syntax tree.
+ int index; // the current index into the children of the tree.
+
+ WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc()); }
+ const byte* pc() const { return tree->pc; }
+ bool done() const { return index >= static_cast<int>(tree->count); }
+ Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
+};
+
+
+// An SsaEnv environment carries the current local variable renaming
+// as well as the current effect and control dependency in the TF graph.
+// It maintains a control state that tracks whether the environment
+// is reachable, has reached a control end, or has been merged.
+struct SsaEnv {
+ enum State { kControlEnd, kUnreachable, kReached, kMerged };
+
+ State state;
+ TFNode* control;
+ TFNode* effect;
+ TFNode** locals;
+
+ bool go() { return state >= kReached; }
+ void Kill(State new_state = kControlEnd) {
+ state = new_state;
+ locals = nullptr;
+ control = nullptr;
+ effect = nullptr;
+ }
+};
+
+
+// An entry in the stack of blocks during decoding.
+struct Block {
+ SsaEnv* ssa_env; // SSA renaming environment.
+ int stack_depth; // production stack depth.
+};
+
+
+// An entry in the stack of ifs during decoding.
+struct IfEnv {
+ SsaEnv* false_env;
+ SsaEnv* merge_env;
+ SsaEnv** case_envs;
+};
+
+
+// Macros that build nodes only if there is a graph and the current SSA
+// environment is reachable from start. This avoids problems with malformed
+// TF graphs when decoding inputs that have unreachable code.
+#define BUILD(func, ...) (build() ? builder_->func(__VA_ARGS__) : nullptr)
+#define BUILD0(func) (build() ? builder_->func() : nullptr)
+
+
+// A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
+// shift-reduce strategy with multiple internal stacks.
+class LR_WasmDecoder : public Decoder {
+ public:
+ LR_WasmDecoder(Zone* zone, TFBuilder* builder)
+ : Decoder(nullptr, nullptr),
+ zone_(zone),
+ builder_(builder),
+ trees_(zone),
+ stack_(zone),
+ blocks_(zone),
+ ifs_(zone) {}
+
+ TreeResult Decode(FunctionEnv* function_env, const byte* base, const byte* pc,
+ const byte* end) {
+ base::ElapsedTimer decode_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ decode_timer.Start();
+ }
+ trees_.clear();
+ stack_.clear();
+ blocks_.clear();
+ ifs_.clear();
+
+ if (end < pc) {
+ error(pc, "function body end < start");
+ return result_;
+ }
+
+ base_ = base;
+ Reset(pc, end);
+ function_env_ = function_env;
+
+ InitSsaEnv();
+ DecodeFunctionBody();
+
+ Tree* tree = nullptr;
+ if (ok()) {
+ if (ssa_env_->go()) {
+ if (stack_.size() > 0) {
+ error(stack_.back().pc(), end, "fell off end of code");
+ }
+ AddImplicitReturnAtEnd();
+ }
+ if (trees_.size() == 0) {
+ if (function_env_->sig->return_count() > 0) {
+ error(start_, "no trees created");
+ }
+ } else {
+ tree = trees_[0];
+ }
+ }
+
+ if (ok()) {
+ if (FLAG_trace_wasm_decode_time) {
+ double ms = decode_timer.Elapsed().InMillisecondsF();
+ PrintF(" - decoding took %0.3f ms\n", ms);
+ }
+ TRACE("wasm-decode ok\n\n");
+ } else {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
+ startrel(error_pc_), error_msg_.get());
+ }
+ return toResult(tree);
+ }
+
+ private:
+ static const size_t kErrorMsgSize = 128;
+
+ Zone* zone_;
+ TFBuilder* builder_;
+ const byte* base_;
+ TreeResult result_;
+
+ SsaEnv* ssa_env_;
+ FunctionEnv* function_env_;
+
+ ZoneVector<Tree*> trees_;
+ ZoneVector<Production> stack_;
+ ZoneVector<Block> blocks_;
+ ZoneVector<IfEnv> ifs_;
+
+ inline bool build() { return builder_ && ssa_env_->go(); }
+
+ void InitSsaEnv() {
+ FunctionSig* sig = function_env_->sig;
+ int param_count = static_cast<int>(sig->parameter_count());
+ TFNode* start = nullptr;
+ SsaEnv* ssa_env = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * EnvironmentCount();
+ ssa_env->state = SsaEnv::kReached;
+ ssa_env->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
+
+ int pos = 0;
+ if (builder_) {
+ start = builder_->Start(param_count + 1);
+ // Initialize parameters.
+ for (int i = 0; i < param_count; i++) {
+ ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
+ }
+ // Initialize int32 locals.
+ if (function_env_->local_int32_count > 0) {
+ TFNode* zero = builder_->Int32Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_int32_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize int64 locals.
+ if (function_env_->local_int64_count > 0) {
+ TFNode* zero = builder_->Int64Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_int64_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize float32 locals.
+ if (function_env_->local_float32_count > 0) {
+ TFNode* zero = builder_->Float32Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_float32_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ // Initialize float64 locals.
+ if (function_env_->local_float64_count > 0) {
+ TFNode* zero = builder_->Float64Constant(0);
+ for (uint32_t i = 0; i < function_env_->local_float64_count; i++) {
+ ssa_env->locals[pos++] = zero;
+ }
+ }
+ DCHECK_EQ(function_env_->total_locals, pos);
+ DCHECK_EQ(EnvironmentCount(), pos);
+ builder_->set_module(function_env_->module);
+ }
+ ssa_env->control = start;
+ ssa_env->effect = start;
+ SetEnv("initial", ssa_env);
+ }
+
+ void Leaf(LocalType type, TFNode* node = nullptr) {
+ size_t size = sizeof(Tree);
+ Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
+ tree->type = type;
+ tree->count = 0;
+ tree->pc = pc_;
+ tree->node = node;
+ tree->children[0] = nullptr;
+ Reduce(tree);
+ }
+
+ void Shift(LocalType type, uint32_t count) {
+ size_t size =
+ sizeof(Tree) + (count == 0 ? 0 : ((count - 1) * sizeof(Tree*)));
+ Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
+ tree->type = type;
+ tree->count = count;
+ tree->pc = pc_;
+ tree->node = nullptr;
+ for (uint32_t i = 0; i < count; i++) tree->children[i] = nullptr;
+ if (count == 0) {
+ Production p = {tree, 0};
+ Reduce(&p);
+ Reduce(tree);
+ } else {
+ stack_.push_back({tree, 0});
+ }
+ }
+
+ void Reduce(Tree* tree) {
+ while (true) {
+ if (stack_.size() == 0) {
+ trees_.push_back(tree);
+ break;
+ }
+ Production* p = &stack_.back();
+ p->tree->children[p->index++] = tree;
+ Reduce(p);
+ if (p->done()) {
+ tree = p->tree;
+ stack_.pop_back();
+ } else {
+ break;
+ }
+ }
+ }
+
+ char* indentation() {
+ static const int kMaxIndent = 64;
+ static char bytes[kMaxIndent + 1];
+ for (int i = 0; i < kMaxIndent; i++) bytes[i] = ' ';
+ bytes[kMaxIndent] = 0;
+ if (stack_.size() < kMaxIndent / 2) {
+ bytes[stack_.size() * 2] = 0;
+ }
+ return bytes;
+ }
+
+ // Decodes the body of a function, producing reduced trees into {result}.
+ void DecodeFunctionBody() {
+ TRACE("wasm-decode %p...%p (%d bytes) %s\n",
+ reinterpret_cast<const void*>(start_),
+ reinterpret_cast<const void*>(limit_),
+ static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
+
+ if (pc_ >= limit_) return; // Nothing to do.
+
+ while (true) { // decoding loop.
+ int len = 1;
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
+ TRACE("wasm-decode module+%-6d %s func+%d: 0x%02x %s\n", baserel(pc_),
+ indentation(), startrel(pc_), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ // A simple expression with a fixed signature.
+ Shift(sig->GetReturn(), static_cast<uint32_t>(sig->parameter_count()));
+ pc_ += len;
+ if (pc_ >= limit_) {
+ // End of code reached or exceeded.
+ if (pc_ > limit_ && ok()) {
+ error("Beyond end of code");
+ }
+ return;
+ }
+ continue; // back to decoding loop.
+ }
+
+ switch (opcode) {
+ case kExprNop:
+ Leaf(kAstStmt);
+ break;
+ case kExprBlock: {
+ int length = Operand<uint8_t>(pc_);
+ if (length < 1) {
+ Leaf(kAstStmt);
+ } else {
+ Shift(kAstEnd, length);
+ // The break environment is the outer environment.
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SetEnv("block:start", Steal(break_env));
+ }
+ len = 2;
+ break;
+ }
+ case kExprLoop: {
+ int length = Operand<uint8_t>(pc_);
+ if (length < 1) {
+ Leaf(kAstStmt);
+ } else {
+ Shift(kAstEnd, length);
+ // The break environment is the outer environment.
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SsaEnv* cont_env = Steal(break_env);
+ // The continue environment is the inner environment.
+ PrepareForLoop(cont_env);
+ SetEnv("loop:start", Split(cont_env));
+ if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
+ PushBlock(cont_env);
+ blocks_.back().stack_depth = -1; // no production for inner block.
+ }
+ len = 2;
+ break;
+ }
+ case kExprIf:
+ Shift(kAstStmt, 2);
+ break;
+ case kExprIfElse:
+ Shift(kAstEnd, 3); // Result type is typeof(x) in {c ? x : y}.
+ break;
+ case kExprSelect:
+ Shift(kAstStmt, 3); // Result type is typeof(x) in {c ? x : y}.
+ break;
+ case kExprBr: {
+ uint32_t depth = Operand<uint8_t>(pc_);
+ Shift(kAstEnd, 1);
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ }
+ len = 2;
+ break;
+ }
+ case kExprBrIf: {
+ uint32_t depth = Operand<uint8_t>(pc_);
+ Shift(kAstStmt, 2);
+ if (depth >= blocks_.size()) {
+ error("improperly nested conditional branch");
+ }
+ len = 2;
+ break;
+ }
+ case kExprTableSwitch: {
+ if (!checkAvailable(5)) {
+ error("expected #tableswitch <cases> <table>, fell off end");
+ break;
+ }
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc_ + 1);
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc_ + 3);
+ len = 5 + table_count * 2;
+
+ if (table_count == 0) {
+ error("tableswitch with 0 entries");
+ break;
+ }
+
+ if (!checkAvailable(len)) {
+ error("expected #tableswitch <cases> <table>, fell off end");
+ break;
+ }
+
+ Shift(kAstEnd, 1 + case_count);
+
+ // Verify table.
+ for (int i = 0; i < table_count; i++) {
+ uint16_t target =
+ *reinterpret_cast<const uint16_t*>(pc_ + 5 + i * 2);
+ if (target >= 0x8000) {
+ size_t depth = target - 0x8000;
+ if (depth > blocks_.size()) {
+ error(pc_ + 5 + i * 2, "improper branch in tableswitch");
+ }
+ } else {
+ if (target >= case_count) {
+ error(pc_ + 5 + i * 2, "invalid case target in tableswitch");
+ }
+ }
+ }
+ break;
+ }
+ case kExprReturn: {
+ int count = static_cast<int>(function_env_->sig->return_count());
+ if (count == 0) {
+ BUILD(Return, 0, builder_->Buffer(0));
+ ssa_env_->Kill();
+ Leaf(kAstEnd);
+ } else {
+ Shift(kAstEnd, count);
+ }
+ break;
+ }
+ case kExprUnreachable: {
+ BUILD0(Unreachable);
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ Leaf(kAstEnd, nullptr);
+ break;
+ }
+ case kExprI8Const: {
+ int32_t value = Operand<int8_t>(pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, value));
+ len = 2;
+ break;
+ }
+ case kExprI32Const: {
+ int32_t value = Operand<int32_t>(pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, value));
+ len = 5;
+ break;
+ }
+ case kExprI64Const: {
+ int64_t value = Operand<int64_t>(pc_);
+ Leaf(kAstI64, BUILD(Int64Constant, value));
+ len = 9;
+ break;
+ }
+ case kExprF32Const: {
+ float value = Operand<float>(pc_);
+ Leaf(kAstF32, BUILD(Float32Constant, value));
+ len = 5;
+ break;
+ }
+ case kExprF64Const: {
+ double value = Operand<double>(pc_);
+ Leaf(kAstF64, BUILD(Float64Constant, value));
+ len = 9;
+ break;
+ }
+ case kExprGetLocal: {
+ uint32_t index;
+ LocalType type = LocalOperand(pc_, &index, &len);
+ TFNode* val =
+ build() && type != kAstStmt ? ssa_env_->locals[index] : nullptr;
+ Leaf(type, val);
+ break;
+ }
+ case kExprSetLocal: {
+ uint32_t index;
+ LocalType type = LocalOperand(pc_, &index, &len);
+ Shift(type, 1);
+ break;
+ }
+ case kExprLoadGlobal: {
+ uint32_t index;
+ LocalType type = GlobalOperand(pc_, &index, &len);
+ Leaf(type, BUILD(LoadGlobal, index));
+ break;
+ }
+ case kExprStoreGlobal: {
+ uint32_t index;
+ LocalType type = GlobalOperand(pc_, &index, &len);
+ Shift(type, 1);
+ break;
+ }
+ case kExprI32LoadMem8S:
+ case kExprI32LoadMem8U:
+ case kExprI32LoadMem16S:
+ case kExprI32LoadMem16U:
+ case kExprI32LoadMem:
+ len = DecodeLoadMem(pc_, kAstI32);
+ break;
+ case kExprI64LoadMem8S:
+ case kExprI64LoadMem8U:
+ case kExprI64LoadMem16S:
+ case kExprI64LoadMem16U:
+ case kExprI64LoadMem32S:
+ case kExprI64LoadMem32U:
+ case kExprI64LoadMem:
+ len = DecodeLoadMem(pc_, kAstI64);
+ break;
+ case kExprF32LoadMem:
+ len = DecodeLoadMem(pc_, kAstF32);
+ break;
+ case kExprF64LoadMem:
+ len = DecodeLoadMem(pc_, kAstF64);
+ break;
+ case kExprI32StoreMem8:
+ case kExprI32StoreMem16:
+ case kExprI32StoreMem:
+ len = DecodeStoreMem(pc_, kAstI32);
+ break;
+ case kExprI64StoreMem8:
+ case kExprI64StoreMem16:
+ case kExprI64StoreMem32:
+ case kExprI64StoreMem:
+ len = DecodeStoreMem(pc_, kAstI64);
+ break;
+ case kExprF32StoreMem:
+ len = DecodeStoreMem(pc_, kAstF32);
+ break;
+ case kExprF64StoreMem:
+ len = DecodeStoreMem(pc_, kAstF64);
+ break;
+ case kExprMemorySize:
+ Leaf(kAstI32, BUILD(MemSize, 0));
+ break;
+ case kExprGrowMemory:
+ Shift(kAstI32, 1);
+ break;
+ case kExprCallFunction: {
+ uint32_t unused;
+ FunctionSig* sig = FunctionSigOperand(pc_, &unused, &len);
+ if (sig) {
+ LocalType type =
+ sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ Shift(type, static_cast<int>(sig->parameter_count()));
+ } else {
+ Leaf(kAstI32); // error
+ }
+ break;
+ }
+ case kExprCallIndirect: {
+ uint32_t unused;
+ FunctionSig* sig = SigOperand(pc_, &unused, &len);
+ if (sig) {
+ LocalType type =
+ sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ Shift(type, static_cast<int>(1 + sig->parameter_count()));
+ } else {
+ Leaf(kAstI32); // error
+ }
+ break;
+ }
+ default:
+ error("Invalid opcode");
+ return;
+ }
+ pc_ += len;
+ if (pc_ >= limit_) {
+ // End of code reached or exceeded.
+ if (pc_ > limit_ && ok()) {
+ error("Beyond end of code");
+ }
+ return;
+ }
+ }
+ }
+
+ void PushBlock(SsaEnv* ssa_env) {
+ blocks_.push_back({ssa_env, static_cast<int>(stack_.size() - 1)});
+ }
+
+ int DecodeLoadMem(const byte* pc, LocalType type) {
+ int length = 2;
+ uint32_t offset;
+ MemoryAccessOperand(pc, &length, &offset);
+ Shift(type, 1);
+ return length;
+ }
+
+ int DecodeStoreMem(const byte* pc, LocalType type) {
+ int length = 2;
+ uint32_t offset;
+ MemoryAccessOperand(pc, &length, &offset);
+ Shift(type, 2);
+ return length;
+ }
+
+ void AddImplicitReturnAtEnd() {
+ int retcount = static_cast<int>(function_env_->sig->return_count());
+ if (retcount == 0) {
+ BUILD0(ReturnVoid);
+ return;
+ }
+
+ if (static_cast<int>(trees_.size()) < retcount) {
+ error(limit_, nullptr,
+ "ImplicitReturn expects %d arguments, only %d remain", retcount,
+ static_cast<int>(trees_.size()));
+ return;
+ }
+
+ TRACE("wasm-decode implicit return of %d args\n", retcount);
+
+ TFNode** buffer = BUILD(Buffer, retcount);
+ for (int index = 0; index < retcount; index++) {
+ Tree* tree = trees_[trees_.size() - 1 - index];
+ if (buffer) buffer[index] = tree->node;
+ LocalType expected = function_env_->sig->GetReturn(index);
+ if (tree->type != expected) {
+ error(limit_, tree->pc,
+ "ImplicitReturn[%d] expected type %s, found %s of type %s", index,
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::OpcodeName(tree->opcode()),
+ WasmOpcodes::TypeName(tree->type));
+ return;
+ }
+ }
+
+ BUILD(Return, retcount, buffer);
+ }
+
+ int baserel(const byte* ptr) {
+ return base_ ? static_cast<int>(ptr - base_) : 0;
+ }
+
+ int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
+
+ void Reduce(Production* p) {
+ WasmOpcode opcode = p->opcode();
+ TRACE("-----reduce module+%-6d %s func+%d: 0x%02x %s\n", baserel(p->pc()),
+ indentation(), startrel(p->pc()), opcode,
+ WasmOpcodes::OpcodeName(opcode));
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (sig) {
+ // A simple expression with a fixed signature.
+ TypeCheckLast(p, sig->GetParam(p->index - 1));
+ if (p->done() && build()) {
+ if (sig->parameter_count() == 2) {
+ p->tree->node = builder_->Binop(opcode, p->tree->children[0]->node,
+ p->tree->children[1]->node);
+ } else if (sig->parameter_count() == 1) {
+ p->tree->node = builder_->Unop(opcode, p->tree->children[0]->node);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ return;
+ }
+
+ switch (opcode) {
+ case kExprBlock: {
+ if (p->done()) {
+ Block* last = &blocks_.back();
+ DCHECK_EQ(stack_.size() - 1, last->stack_depth);
+ // fallthrough with the last expression.
+ ReduceBreakToExprBlock(p, last);
+ SetEnv("block:end", last->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+ case kExprLoop: {
+ if (p->done()) {
+ // Pop the continue environment.
+ blocks_.pop_back();
+ // Get the break environment.
+ Block* last = &blocks_.back();
+ DCHECK_EQ(stack_.size() - 1, last->stack_depth);
+ // fallthrough with the last expression.
+ ReduceBreakToExprBlock(p, last);
+ SetEnv("loop:end", last->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+ case kExprIf: {
+ if (p->index == 1) {
+ // Condition done. Split environment for true branch.
+ TypeCheckLast(p, kAstI32);
+ SsaEnv* false_env = ssa_env_;
+ SsaEnv* true_env = Split(ssa_env_);
+ ifs_.push_back({nullptr, false_env, nullptr});
+ BUILD(Branch, p->last()->node, &true_env->control,
+ &false_env->control);
+ SetEnv("if:true", true_env);
+ } else if (p->index == 2) {
+ // True block done. Merge true and false environments.
+ IfEnv* env = &ifs_.back();
+ SsaEnv* merge = env->merge_env;
+ if (merge->go()) {
+ merge->state = SsaEnv::kReached;
+ Goto(ssa_env_, merge);
+ }
+ SetEnv("if:merge", merge);
+ ifs_.pop_back();
+ }
+ break;
+ }
+ case kExprIfElse: {
+ if (p->index == 1) {
+ // Condition done. Split environment for true and false branches.
+ TypeCheckLast(p, kAstI32);
+ SsaEnv* merge_env = ssa_env_;
+ TFNode* if_true = nullptr;
+ TFNode* if_false = nullptr;
+ BUILD(Branch, p->last()->node, &if_true, &if_false);
+ SsaEnv* false_env = Split(ssa_env_);
+ SsaEnv* true_env = Steal(ssa_env_);
+ false_env->control = if_false;
+ true_env->control = if_true;
+ ifs_.push_back({false_env, merge_env, nullptr});
+ SetEnv("if_else:true", true_env);
+ } else if (p->index == 2) {
+ // True expr done.
+ IfEnv* env = &ifs_.back();
+ MergeIntoProduction(p, env->merge_env, p->last());
+ // Switch to environment for false branch.
+ SsaEnv* false_env = ifs_.back().false_env;
+ SetEnv("if_else:false", false_env);
+ } else if (p->index == 3) {
+ // False expr done.
+ IfEnv* env = &ifs_.back();
+ MergeIntoProduction(p, env->merge_env, p->last());
+ SetEnv("if_else:merge", env->merge_env);
+ ifs_.pop_back();
+ }
+ break;
+ }
+ case kExprSelect: {
+ if (p->index == 1) {
+ // Condition done.
+ TypeCheckLast(p, kAstI32);
+ } else if (p->index == 2) {
+ // True expression done.
+ p->tree->type = p->last()->type;
+ if (p->tree->type == kAstStmt) {
+ error(p->pc(), p->tree->children[1]->pc,
+ "select operand should be expression");
+ }
+ } else {
+ // False expression done.
+ DCHECK(p->done());
+ TypeCheckLast(p, p->tree->type);
+ if (build()) {
+ TFNode* controls[2];
+ builder_->Branch(p->tree->children[0]->node, &controls[0],
+ &controls[1]);
+ TFNode* merge = builder_->Merge(2, controls);
+ TFNode* vals[2] = {p->tree->children[1]->node,
+ p->tree->children[2]->node};
+ TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
+ p->tree->node = phi;
+ ssa_env_->control = merge;
+ }
+ }
+ break;
+ }
+ case kExprBr: {
+ uint32_t depth = Operand<uint8_t>(p->pc());
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ break;
+ }
+ Block* block = &blocks_[blocks_.size() - depth - 1];
+ ReduceBreakToExprBlock(p, block);
+ break;
+ }
+ case kExprBrIf: {
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32);
+ } else if (p->done()) {
+ uint32_t depth = Operand<uint8_t>(p->pc());
+ if (depth >= blocks_.size()) {
+ error("improperly nested branch");
+ break;
+ }
+ Block* block = &blocks_[blocks_.size() - depth - 1];
+ SsaEnv* fenv = ssa_env_;
+ SsaEnv* tenv = Split(fenv);
+ BUILD(Branch, p->tree->children[0]->node, &tenv->control,
+ &fenv->control);
+ ssa_env_ = tenv;
+ ReduceBreakToExprBlock(p, block);
+ ssa_env_ = fenv;
+ }
+ break;
+ }
+ case kExprTableSwitch: {
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(p->pc() + 3);
+ if (table_count == 1) {
+ // Degenerate switch with only a default target.
+ if (p->index == 1) {
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SetEnv("switch:default", Steal(break_env));
+ }
+ if (p->done()) {
+ Block* block = &blocks_.back();
+ // fall through to the end.
+ ReduceBreakToExprBlock(p, block);
+ SetEnv("switch:end", block->ssa_env);
+ blocks_.pop_back();
+ }
+ break;
+ }
+
+ if (p->index == 1) {
+ // Switch key finished.
+ TypeCheckLast(p, kAstI32);
+
+ TFNode* sw = BUILD(Switch, table_count, p->last()->node);
+
+ // Allocate environments for each case.
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(p->pc() + 1);
+ SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(case_count);
+ for (int i = 0; i < case_count; i++) {
+ case_envs[i] = UnreachableEnv();
+ }
+
+ ifs_.push_back({nullptr, nullptr, case_envs});
+ SsaEnv* break_env = ssa_env_;
+ PushBlock(break_env);
+ SsaEnv* copy = Steal(break_env);
+ ssa_env_ = copy;
+
+ // Build the environments for each case based on the table.
+ const uint16_t* table =
+ reinterpret_cast<const uint16_t*>(p->pc() + 5);
+ for (int i = 0; i < table_count; i++) {
+ uint16_t target = table[i];
+ SsaEnv* env = Split(copy);
+ env->control = (i == table_count - 1) ? BUILD(IfDefault, sw)
+ : BUILD(IfValue, i, sw);
+ if (target >= 0x8000) {
+ // Targets an outer block.
+ int depth = target - 0x8000;
+ SsaEnv* tenv = blocks_[blocks_.size() - depth - 1].ssa_env;
+ Goto(env, tenv);
+ } else {
+ // Targets a case.
+ Goto(env, case_envs[target]);
+ }
+ }
+
+ // Switch to the environment for the first case.
+ SetEnv("switch:case", case_envs[0]);
+ } else {
+ // Switch case finished.
+ if (p->done()) {
+ // Last case. Fall through to the end.
+ Block* block = &blocks_.back();
+ ReduceBreakToExprBlock(p, block);
+ SsaEnv* next = block->ssa_env;
+ blocks_.pop_back();
+ ifs_.pop_back();
+ SetEnv("switch:end", next);
+ } else {
+ // Interior case. Maybe fall through to the next case.
+ SsaEnv* next = ifs_.back().case_envs[p->index - 1];
+ if (ssa_env_->go()) Goto(ssa_env_, next);
+ SetEnv("switch:case", next);
+ }
+ }
+ break;
+ }
+ case kExprReturn: {
+ TypeCheckLast(p, function_env_->sig->GetReturn(p->index - 1));
+ if (p->done()) {
+ if (build()) {
+ int count = p->tree->count;
+ TFNode** buffer = builder_->Buffer(count);
+ for (int i = 0; i < count; i++) {
+ buffer[i] = p->tree->children[i]->node;
+ }
+ BUILD(Return, count, buffer);
+ }
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ }
+ break;
+ }
+ case kExprSetLocal: {
+ int unused = 0;
+ uint32_t index;
+ LocalType type = LocalOperand(p->pc(), &index, &unused);
+ Tree* val = p->last();
+ if (type == val->type) {
+ if (build()) ssa_env_->locals[index] = val->node;
+ p->tree->node = val->node;
+ } else {
+ error(p->pc(), val->pc, "Typecheck failed in SetLocal");
+ }
+ break;
+ }
+ case kExprStoreGlobal: {
+ int unused = 0;
+ uint32_t index;
+ LocalType type = GlobalOperand(p->pc(), &index, &unused);
+ Tree* val = p->last();
+ if (type == val->type) {
+ BUILD(StoreGlobal, index, val->node);
+ p->tree->node = val->node;
+ } else {
+ error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
+ }
+ break;
+ }
+
+ case kExprI32LoadMem8S:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int8());
+ case kExprI32LoadMem8U:
+ return ReduceLoadMem(p, kAstI32, MachineType::Uint8());
+ case kExprI32LoadMem16S:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int16());
+ case kExprI32LoadMem16U:
+ return ReduceLoadMem(p, kAstI32, MachineType::Uint16());
+ case kExprI32LoadMem:
+ return ReduceLoadMem(p, kAstI32, MachineType::Int32());
+
+ case kExprI64LoadMem8S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int8());
+ case kExprI64LoadMem8U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint8());
+ case kExprI64LoadMem16S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int16());
+ case kExprI64LoadMem16U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint16());
+ case kExprI64LoadMem32S:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int32());
+ case kExprI64LoadMem32U:
+ return ReduceLoadMem(p, kAstI64, MachineType::Uint32());
+ case kExprI64LoadMem:
+ return ReduceLoadMem(p, kAstI64, MachineType::Int64());
+
+ case kExprF32LoadMem:
+ return ReduceLoadMem(p, kAstF32, MachineType::Float32());
+
+ case kExprF64LoadMem:
+ return ReduceLoadMem(p, kAstF64, MachineType::Float64());
+
+ case kExprI32StoreMem8:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int8());
+ case kExprI32StoreMem16:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int16());
+ case kExprI32StoreMem:
+ return ReduceStoreMem(p, kAstI32, MachineType::Int32());
+
+ case kExprI64StoreMem8:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int8());
+ case kExprI64StoreMem16:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int16());
+ case kExprI64StoreMem32:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int32());
+ case kExprI64StoreMem:
+ return ReduceStoreMem(p, kAstI64, MachineType::Int64());
+
+ case kExprF32StoreMem:
+ return ReduceStoreMem(p, kAstF32, MachineType::Float32());
+
+ case kExprF64StoreMem:
+ return ReduceStoreMem(p, kAstF64, MachineType::Float64());
+
+ case kExprGrowMemory:
+ TypeCheckLast(p, kAstI32);
+ // TODO(titzer): build node for GrowMemory
+ p->tree->node = BUILD(Int32Constant, 0);
+ return;
+
+ case kExprCallFunction: {
+ int len;
+ uint32_t index;
+ FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
+ if (!sig) break;
+ if (p->index > 0) {
+ TypeCheckLast(p, sig->GetParam(p->index - 1));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count + 1;
+ TFNode** buffer = builder_->Buffer(count);
+ FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
+ USE(sig);
+ buffer[0] = nullptr; // reserved for code object.
+ for (uint32_t i = 1; i < count; i++) {
+ buffer[i] = p->tree->children[i - 1]->node;
+ }
+ p->tree->node = builder_->CallDirect(index, buffer);
+ }
+ break;
+ }
+ case kExprCallIndirect: {
+ int len;
+ uint32_t index;
+ FunctionSig* sig = SigOperand(p->pc(), &index, &len);
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32);
+ } else {
+ TypeCheckLast(p, sig->GetParam(p->index - 2));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count;
+ TFNode** buffer = builder_->Buffer(count);
+ for (uint32_t i = 0; i < count; i++) {
+ buffer[i] = p->tree->children[i]->node;
+ }
+ p->tree->node = builder_->CallIndirect(index, buffer);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ void ReduceBreakToExprBlock(Production* p, Block* block) {
+ if (block->stack_depth < 0) {
+ // This is the inner loop block, which does not have a value.
+ Goto(ssa_env_, block->ssa_env);
+ } else {
+ // Merge the value into the production for the block.
+ Production* bp = &stack_[block->stack_depth];
+ MergeIntoProduction(bp, block->ssa_env, p->last());
+ }
+ }
+
+ void MergeIntoProduction(Production* p, SsaEnv* target, Tree* expr) {
+ if (!ssa_env_->go()) return;
+
+ bool first = target->state == SsaEnv::kUnreachable;
+ Goto(ssa_env_, target);
+ if (expr->type == kAstEnd) return;
+
+ if (first) {
+ // first merge to this environment; set the type and the node.
+ p->tree->type = expr->type;
+ p->tree->node = expr->node;
+ } else {
+ // merge with the existing value for this block.
+ LocalType type = p->tree->type;
+ if (expr->type != type) {
+ type = kAstStmt;
+ p->tree->type = kAstStmt;
+ p->tree->node = nullptr;
+ } else if (type != kAstStmt) {
+ p->tree->node = CreateOrMergeIntoPhi(type, target->control,
+ p->tree->node, expr->node);
+ }
+ }
+ }
+
+ void ReduceLoadMem(Production* p, LocalType type, MachineType mem_type) {
+ DCHECK_EQ(1, p->index);
+ TypeCheckLast(p, kAstI32); // index
+ if (build()) {
+ int length = 0;
+ uint32_t offset = 0;
+ MemoryAccessOperand(p->pc(), &length, &offset);
+ p->tree->node =
+ builder_->LoadMem(type, mem_type, p->last()->node, offset);
+ }
+ }
+
+ void ReduceStoreMem(Production* p, LocalType type, MachineType mem_type) {
+ if (p->index == 1) {
+ TypeCheckLast(p, kAstI32); // index
+ } else {
+ DCHECK_EQ(2, p->index);
+ TypeCheckLast(p, type);
+ if (build()) {
+ int length = 0;
+ uint32_t offset = 0;
+ MemoryAccessOperand(p->pc(), &length, &offset);
+ TFNode* val = p->tree->children[1]->node;
+ builder_->StoreMem(mem_type, p->tree->children[0]->node, offset, val);
+ p->tree->node = val;
+ }
+ }
+ }
+
+ void TypeCheckLast(Production* p, LocalType expected) {
+ LocalType result = p->last()->type;
+ if (result == expected) return;
+ if (result == kAstEnd) return;
+ if (expected != kAstStmt) {
+ error(p->pc(), p->last()->pc,
+ "%s[%d] expected type %s, found %s of type %s",
+ WasmOpcodes::OpcodeName(p->opcode()), p->index - 1,
+ WasmOpcodes::TypeName(expected),
+ WasmOpcodes::OpcodeName(p->last()->opcode()),
+ WasmOpcodes::TypeName(p->last()->type));
+ }
+ }
+
+ void SetEnv(const char* reason, SsaEnv* env) {
+ TRACE(" env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
+ static_cast<int>(blocks_.size()), reason);
+ if (env->control != nullptr && FLAG_trace_wasm_decoder) {
+ TRACE(", control = ");
+ compiler::WasmGraphBuilder::PrintDebugName(env->control);
+ }
+ TRACE("\n");
+ ssa_env_ = env;
+ if (builder_) {
+ builder_->set_control_ptr(&env->control);
+ builder_->set_effect_ptr(&env->effect);
+ }
+ }
+
+ void Goto(SsaEnv* from, SsaEnv* to) {
+ DCHECK_NOT_NULL(to);
+ if (!from->go()) return;
+ switch (to->state) {
+ case SsaEnv::kUnreachable: { // Overwrite destination.
+ to->state = SsaEnv::kReached;
+ to->locals = from->locals;
+ to->control = from->control;
+ to->effect = from->effect;
+ break;
+ }
+ case SsaEnv::kReached: { // Create a new merge.
+ to->state = SsaEnv::kMerged;
+ if (!builder_) break;
+ // Merge control.
+ TFNode* controls[] = {to->control, from->control};
+ TFNode* merge = builder_->Merge(2, controls);
+ to->control = merge;
+ // Merge effects.
+ if (from->effect != to->effect) {
+ TFNode* effects[] = {to->effect, from->effect, merge};
+ to->effect = builder_->EffectPhi(2, effects, merge);
+ }
+ // Merge SSA values.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ TFNode* a = to->locals[i];
+ TFNode* b = from->locals[i];
+ if (a != b) {
+ TFNode* vals[] = {a, b};
+ to->locals[i] =
+ builder_->Phi(function_env_->GetLocalType(i), 2, vals, merge);
+ }
+ }
+ break;
+ }
+ case SsaEnv::kMerged: {
+ if (!builder_) break;
+ TFNode* merge = to->control;
+ // Extend the existing merge.
+ builder_->AppendToMerge(merge, from->control);
+ // Merge effects.
+ if (builder_->IsPhiWithMerge(to->effect, merge)) {
+ builder_->AppendToPhi(merge, to->effect, from->effect);
+ } else if (to->effect != from->effect) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** effects = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ effects[j] = to->effect;
+ }
+ effects[count - 1] = from->effect;
+ to->effect = builder_->EffectPhi(count, effects, merge);
+ }
+ // Merge locals.
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ TFNode* tnode = to->locals[i];
+ TFNode* fnode = from->locals[i];
+ if (builder_->IsPhiWithMerge(tnode, merge)) {
+ builder_->AppendToPhi(merge, tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** vals = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) {
+ vals[j] = tnode;
+ }
+ vals[count - 1] = fnode;
+ to->locals[i] = builder_->Phi(function_env_->GetLocalType(i), count,
+ vals, merge);
+ }
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return from->Kill();
+ }
+
+ TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
+ TFNode* fnode) {
+ if (builder_->IsPhiWithMerge(tnode, merge)) {
+ builder_->AppendToPhi(merge, tnode, fnode);
+ } else if (tnode != fnode) {
+ uint32_t count = builder_->InputCount(merge);
+ TFNode** vals = builder_->Buffer(count);
+ for (uint32_t j = 0; j < count - 1; j++) vals[j] = tnode;
+ vals[count - 1] = fnode;
+ return builder_->Phi(type, count, vals, merge);
+ }
+ return tnode;
+ }
+
+ void BuildInfiniteLoop() {
+ if (ssa_env_->go()) {
+ PrepareForLoop(ssa_env_);
+ SsaEnv* cont_env = ssa_env_;
+ ssa_env_ = Split(ssa_env_);
+ ssa_env_->state = SsaEnv::kReached;
+ Goto(ssa_env_, cont_env);
+ }
+ }
+
+ void PrepareForLoop(SsaEnv* env) {
+ if (env->go()) {
+ env->state = SsaEnv::kMerged;
+ if (builder_) {
+ env->control = builder_->Loop(env->control);
+ env->effect = builder_->EffectPhi(1, &env->effect, env->control);
+ builder_->Terminate(env->effect, env->control);
+ for (int i = EnvironmentCount() - 1; i >= 0; i--) {
+ env->locals[i] = builder_->Phi(function_env_->GetLocalType(i), 1,
+ &env->locals[i], env->control);
+ }
+ }
+ }
+ }
+
+ // Create a complete copy of the {from}.
+ SsaEnv* Split(SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ size_t size = sizeof(TFNode*) * EnvironmentCount();
+ result->control = from->control;
+ result->effect = from->effect;
+ result->state = from->state == SsaEnv::kUnreachable ? SsaEnv::kUnreachable
+ : SsaEnv::kReached;
+
+ if (from->go()) {
+ result->state = SsaEnv::kReached;
+ result->locals =
+ size > 0 ? reinterpret_cast<TFNode**>(zone_->New(size)) : nullptr;
+ memcpy(result->locals, from->locals, size);
+ } else {
+ result->state = SsaEnv::kUnreachable;
+ result->locals = nullptr;
+ }
+
+ return result;
+ }
+
+ // Create a copy of {from} that steals its state and leaves {from}
+ // unreachable.
+ SsaEnv* Steal(SsaEnv* from) {
+ DCHECK_NOT_NULL(from);
+ if (!from->go()) return UnreachableEnv();
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kReached;
+ result->locals = from->locals;
+ result->control = from->control;
+ result->effect = from->effect;
+ from->Kill(SsaEnv::kUnreachable);
+ return result;
+ }
+
+ // Create an unreachable environment.
+ SsaEnv* UnreachableEnv() {
+ SsaEnv* result = reinterpret_cast<SsaEnv*>(zone_->New(sizeof(SsaEnv)));
+ result->state = SsaEnv::kUnreachable;
+ result->control = nullptr;
+ result->effect = nullptr;
+ result->locals = nullptr;
+ return result;
+ }
+
+ // Load an operand at [pc + 1].
+ template <typename V>
+ V Operand(const byte* pc) {
+ if ((limit_ - pc) < static_cast<int>(1 + sizeof(V))) {
+ const char* msg = "Expected operand following opcode";
+ switch (sizeof(V)) {
+ case 1:
+ msg = "Expected 1-byte operand following opcode";
+ break;
+ case 2:
+ msg = "Expected 2-byte operand following opcode";
+ break;
+ case 4:
+ msg = "Expected 4-byte operand following opcode";
+ break;
+ default:
+ break;
+ }
+ error(pc, msg);
+ return -1;
+ }
+ return *reinterpret_cast<const V*>(pc + 1);
+ }
+
+ int EnvironmentCount() {
+ if (builder_) return static_cast<int>(function_env_->GetLocalCount());
+ return 0; // if we aren't building a graph, don't bother with SSA renaming.
+ }
+
+ LocalType LocalOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->IsValidLocal(*index)) {
+ return function_env_->GetLocalType(*index);
+ }
+ error(pc, "invalid local variable index");
+ return kAstStmt;
+ }
+
+ LocalType GlobalOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidGlobal(*index)) {
+ return WasmOpcodes::LocalTypeFor(
+ function_env_->module->GetGlobalType(*index));
+ }
+ error(pc, "invalid global variable index");
+ return kAstStmt;
+ }
+
+ FunctionSig* FunctionSigOperand(const byte* pc, uint32_t* index,
+ int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidFunction(*index)) {
+ return function_env_->module->GetFunctionSignature(*index);
+ }
+ error(pc, "invalid function index");
+ return nullptr;
+ }
+
+ FunctionSig* SigOperand(const byte* pc, uint32_t* index, int* length) {
+ *index = UnsignedLEB128Operand(pc, length);
+ if (function_env_->module->IsValidSignature(*index)) {
+ return function_env_->module->GetSignature(*index);
+ }
+ error(pc, "invalid signature index");
+ return nullptr;
+ }
+
+ uint32_t UnsignedLEB128Operand(const byte* pc, int* length) {
+ uint32_t result = 0;
+ ReadUnsignedLEB128ErrorCode error_code =
+ ReadUnsignedLEB128Operand(pc + 1, limit_, length, &result);
+ if (error_code == kInvalidLEB128) error(pc, "invalid LEB128 varint");
+ if (error_code == kMissingLEB128) error(pc, "expected LEB128 varint");
+ (*length)++;
+ return result;
+ }
+
+ void MemoryAccessOperand(const byte* pc, int* length, uint32_t* offset) {
+ byte bitfield = Operand<uint8_t>(pc);
+ if (MemoryAccess::OffsetField::decode(bitfield)) {
+ *offset = UnsignedLEB128Operand(pc + 1, length);
+ (*length)++; // to account for the memory access byte
+ } else {
+ *offset = 0;
+ *length = 2;
+ }
+ }
+
+ virtual void onFirstError() {
+ limit_ = start_; // Terminate decoding loop.
+ builder_ = nullptr; // Don't build any more nodes.
+#if DEBUG
+ PrintStackForDebugging();
+#endif
+ }
+
+#if DEBUG
+ void PrintStackForDebugging() { PrintProduction(0); }
+
+ void PrintProduction(size_t depth) {
+ if (depth >= stack_.size()) return;
+ Production* p = &stack_[depth];
+ for (size_t d = 0; d < depth; d++) PrintF(" ");
+
+ PrintF("@%d %s [%d]\n", static_cast<int>(p->tree->pc - start_),
+ WasmOpcodes::OpcodeName(p->opcode()), p->tree->count);
+ for (int i = 0; i < p->index; i++) {
+ Tree* child = p->tree->children[i];
+ for (size_t d = 0; d <= depth; d++) PrintF(" ");
+ PrintF("@%d %s [%d]", static_cast<int>(child->pc - start_),
+ WasmOpcodes::OpcodeName(child->opcode()), child->count);
+ if (child->node) {
+ PrintF(" => TF");
+ compiler::WasmGraphBuilder::PrintDebugName(child->node);
+ }
+ PrintF("\n");
+ }
+ PrintProduction(depth + 1);
+ }
+#endif
+};
+
+
+TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
+ const byte* end) {
+ Zone zone;
+ LR_WasmDecoder decoder(&zone, nullptr);
+ TreeResult result = decoder.Decode(env, base, start, end);
+ return result;
+}
+
+
+TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
+ const byte* start, const byte* end) {
+ Zone zone;
+ LR_WasmDecoder decoder(&zone, builder);
+ TreeResult result = decoder.Decode(env, base, start, end);
+ return result;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Tree& tree) {
+ if (tree.pc == nullptr) {
+ os << "null";
+ return os;
+ }
+ PrintF("%s", WasmOpcodes::OpcodeName(tree.opcode()));
+ if (tree.count > 0) os << "(";
+ for (uint32_t i = 0; i < tree.count; i++) {
+ if (i > 0) os << ", ";
+ os << *tree.children[i];
+ }
+ if (tree.count > 0) os << ")";
+ return os;
+}
+
+
+ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
+ const byte* limit,
+ int* length,
+ uint32_t* result) {
+ *result = 0;
+ const byte* ptr = pc;
+ const byte* end = pc + 5; // maximum 5 bytes.
+ if (end > limit) end = limit;
+ int shift = 0;
+ byte b = 0;
+ while (ptr < end) {
+ b = *ptr++;
+ *result = *result | ((b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+ DCHECK_LE(ptr - pc, 5);
+ *length = static_cast<int>(ptr - pc);
+ if (ptr == end && (b & 0x80)) {
+ return kInvalidLEB128;
+ } else if (*length == 0) {
+ return kMissingLEB128;
+ } else {
+ return kNoError;
+ }
+}
+
+
+int OpcodeLength(const byte* pc) {
+ switch (static_cast<WasmOpcode>(*pc)) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+
+ case kExprI8Const:
+ case kExprBlock:
+ case kExprLoop:
+ case kExprBr:
+ case kExprBrIf:
+ return 2;
+ case kExprI32Const:
+ case kExprF32Const:
+ return 5;
+ case kExprI64Const:
+ case kExprF64Const:
+ return 9;
+ case kExprStoreGlobal:
+ case kExprSetLocal:
+ case kExprLoadGlobal:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ case kExprGetLocal: {
+ int length;
+ uint32_t result = 0;
+ ReadUnsignedLEB128Operand(pc + 1, pc + 6, &length, &result);
+ return 1 + length;
+ }
+ case kExprTableSwitch: {
+ uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc + 3);
+ return 5 + table_count * 2;
+ }
+
+ default:
+ return 1;
+ }
+}
+
+
+int OpcodeArity(FunctionEnv* env, const byte* pc) {
+#define DECLARE_ARITY(name, ...) \
+ static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static const int kArity_##name = \
+ static_cast<int>(arraysize(kTypes_##name) - 1);
+
+ FOREACH_SIGNATURE(DECLARE_ARITY);
+#undef DECLARE_ARITY
+
+ switch (static_cast<WasmOpcode>(*pc)) {
+ case kExprI8Const:
+ case kExprI32Const:
+ case kExprI64Const:
+ case kExprF64Const:
+ case kExprF32Const:
+ case kExprGetLocal:
+ case kExprLoadGlobal:
+ case kExprNop:
+ case kExprUnreachable:
+ return 0;
+
+ case kExprBr:
+ case kExprStoreGlobal:
+ case kExprSetLocal:
+ return 1;
+
+ case kExprIf:
+ case kExprBrIf:
+ return 2;
+ case kExprIfElse:
+ case kExprSelect:
+ return 3;
+ case kExprBlock:
+ case kExprLoop:
+ return *(pc + 1);
+
+ case kExprCallFunction: {
+ int index = *(pc + 1);
+ return static_cast<int>(
+ env->module->GetFunctionSignature(index)->parameter_count());
+ }
+ case kExprCallIndirect: {
+ int index = *(pc + 1);
+ return 1 + static_cast<int>(
+ env->module->GetSignature(index)->parameter_count());
+ }
+ case kExprReturn:
+ return static_cast<int>(env->sig->return_count());
+ case kExprTableSwitch: {
+ uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc + 1);
+ return 1 + case_count;
+ }
+
+#define DECLARE_OPCODE_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return kArity_##sig;
+
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
new file mode 100644
index 0000000000..5b95ad9f87
--- /dev/null
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_AST_DECODER_H_
+#define V8_WASM_AST_DECODER_H_
+
+#include "src/signature.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler { // external declarations from compiler.
+class WasmGraphBuilder;
+}
+
+namespace wasm {
+
+typedef compiler::WasmGraphBuilder TFBuilder;
+struct ModuleEnv; // forward declaration of module interface.
+
+// Interface the function environment during decoding, include the signature
+// and number of locals.
+struct FunctionEnv {
+ ModuleEnv* module; // module environment
+ FunctionSig* sig; // signature of this function
+ uint32_t local_int32_count; // number of int32 locals
+ uint32_t local_int64_count; // number of int64 locals
+ uint32_t local_float32_count; // number of float32 locals
+ uint32_t local_float64_count; // number of float64 locals
+ uint32_t total_locals; // sum of parameters and all locals
+
+ bool IsValidLocal(uint32_t index) { return index < total_locals; }
+ uint32_t GetLocalCount() { return total_locals; }
+ LocalType GetLocalType(uint32_t index) {
+ if (index < static_cast<uint32_t>(sig->parameter_count())) {
+ return sig->GetParam(index);
+ }
+ index -= static_cast<uint32_t>(sig->parameter_count());
+ if (index < local_int32_count) return kAstI32;
+ index -= local_int32_count;
+ if (index < local_int64_count) return kAstI64;
+ index -= local_int64_count;
+ if (index < local_float32_count) return kAstF32;
+ index -= local_float32_count;
+ if (index < local_float64_count) return kAstF64;
+ return kAstStmt;
+ }
+
+ void AddLocals(LocalType type, uint32_t count) {
+ switch (type) {
+ case kAstI32:
+ local_int32_count += count;
+ break;
+ case kAstI64:
+ local_int64_count += count;
+ break;
+ case kAstF32:
+ local_float32_count += count;
+ break;
+ case kAstF64:
+ local_float64_count += count;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ total_locals += count;
+ DCHECK(total_locals ==
+ (sig->parameter_count() + local_int32_count + local_int64_count +
+ local_float32_count + local_float64_count));
+ }
+
+ void SumLocals() {
+ total_locals = static_cast<uint32_t>(sig->parameter_count()) +
+ local_int32_count + local_int64_count + local_float32_count +
+ local_float64_count;
+ }
+};
+
+struct Tree;
+typedef Result<Tree*> TreeResult;
+
+std::ostream& operator<<(std::ostream& os, const Tree& tree);
+
+TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
+ const byte* end);
+TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
+ const byte* start, const byte* end);
+
+inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
+ const byte* end) {
+ return VerifyWasmCode(env, nullptr, start, end);
+}
+
+inline TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env,
+ const byte* start, const byte* end) {
+ return BuildTFGraph(builder, env, nullptr, start, end);
+}
+
+enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
+
+ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
+ int*, uint32_t*);
+
+// Computes the length of the opcode at the given address.
+int OpcodeLength(const byte* pc);
+
+// Computes the arity (number of sub-nodes) of the opcode at the given address.
+int OpcodeArity(FunctionEnv* env, const byte* pc);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_AST_DECODER_H_
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
new file mode 100644
index 0000000000..698919d6a0
--- /dev/null
+++ b/deps/v8/src/wasm/decoder.h
@@ -0,0 +1,233 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_DECODER_H_
+#define V8_WASM_DECODER_H_
+
+#include "src/base/smart-pointers.h"
+#include "src/flags.h"
+#include "src/signature.h"
+#include "src/wasm/wasm-result.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+// A helper utility to decode bytes, integers, fields, varints, etc, from
+// a buffer of bytes.
+class Decoder {
+ public:
+ Decoder(const byte* start, const byte* end)
+ : start_(start),
+ pc_(start),
+ limit_(end),
+ error_pc_(nullptr),
+ error_pt_(nullptr) {}
+
+ virtual ~Decoder() {}
+
+ // Reads a 8-bit unsigned integer (byte) and advances {pc_}.
+ uint8_t u8(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint8_t");
+ if (checkAvailable(1)) {
+ byte val = *(pc_++);
+ TRACE("%02x = %d\n", val, val);
+ return val;
+ } else {
+ error("expected 1 byte, but fell off end");
+ return traceOffEnd<uint8_t>();
+ }
+ }
+
+ // Reads a 16-bit unsigned integer (little endian) and advances {pc_}.
+ uint16_t u16(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint16_t");
+ if (checkAvailable(2)) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ byte b0 = pc_[0];
+ byte b1 = pc_[1];
+#else
+ byte b1 = pc_[0];
+ byte b0 = pc_[1];
+#endif
+ uint16_t val = static_cast<uint16_t>(b1 << 8) | b0;
+ TRACE("%02x %02x = %d\n", pc_[0], pc_[1], val);
+ pc_ += 2;
+ return val;
+ } else {
+ error("expected 2 bytes, but fell off end");
+ return traceOffEnd<uint16_t>();
+ }
+ }
+
+ // Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
+ uint32_t u32(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "uint32_t");
+ if (checkAvailable(4)) {
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ byte b0 = pc_[0];
+ byte b1 = pc_[1];
+ byte b2 = pc_[2];
+ byte b3 = pc_[3];
+#else
+ byte b3 = pc_[0];
+ byte b2 = pc_[1];
+ byte b1 = pc_[2];
+ byte b0 = pc_[3];
+#endif
+ uint32_t val = static_cast<uint32_t>(b3 << 24) |
+ static_cast<uint32_t>(b2 << 16) |
+ static_cast<uint32_t>(b1 << 8) | b0;
+ TRACE("%02x %02x %02x %02x = %u\n", pc_[0], pc_[1], pc_[2], pc_[3], val);
+ pc_ += 4;
+ return val;
+ } else {
+ error("expected 4 bytes, but fell off end");
+ return traceOffEnd<uint32_t>();
+ }
+ }
+
+ // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
+ uint32_t u32v(int* length, const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "varint");
+
+ if (!checkAvailable(1)) {
+ error("expected at least 1 byte, but fell off end");
+ return traceOffEnd<uint32_t>();
+ }
+
+ const byte* pos = pc_;
+ const byte* end = pc_ + 5;
+ if (end > limit_) end = limit_;
+
+ uint32_t result = 0;
+ int shift = 0;
+ byte b = 0;
+ while (pc_ < end) {
+ b = *pc_++;
+ TRACE("%02x ", b);
+ result = result | ((b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+
+ *length = static_cast<int>(pc_ - pos);
+ if (pc_ == end && (b & 0x80)) {
+ error(pc_ - 1, "varint too large");
+ } else {
+ TRACE("= %u\n", result);
+ }
+ return result;
+ }
+
+ // Check that at least {size} bytes exist between {pc_} and {limit_}.
+ bool checkAvailable(int size) {
+ if (pc_ < start_ || (pc_ + size) > limit_) {
+ error(pc_, nullptr, "expected %d bytes, fell off end", size);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void error(const char* msg) { error(pc_, nullptr, msg); }
+
+ void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
+
+ // Sets internal error state.
+ void error(const byte* pc, const byte* pt, const char* format, ...) {
+ if (ok()) {
+#if DEBUG
+ if (FLAG_wasm_break_on_decoder_error) {
+ base::OS::DebugBreak();
+ }
+#endif
+ const int kMaxErrorMsg = 256;
+ char* buffer = new char[kMaxErrorMsg];
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VSNPrintF(buffer, kMaxErrorMsg - 1, format, arguments);
+ va_end(arguments);
+ error_msg_.Reset(buffer);
+ error_pc_ = pc;
+ error_pt_ = pt;
+ onFirstError();
+ }
+ }
+
+ // Behavior triggered on first error, overridden in subclasses.
+ virtual void onFirstError() {}
+
+ // Debugging helper to print bytes up to the end.
+ template <typename T>
+ T traceOffEnd() {
+ T t = 0;
+ for (const byte* ptr = pc_; ptr < limit_; ptr++) {
+ TRACE("%02x ", *ptr);
+ }
+ TRACE("<end>\n");
+ pc_ = limit_;
+ return t;
+ }
+
+ // Converts the given value to a {Result}, copying the error if necessary.
+ template <typename T>
+ Result<T> toResult(T val) {
+ Result<T> result;
+ if (error_pc_) {
+ result.error_code = kError;
+ result.start = start_;
+ result.error_pc = error_pc_;
+ result.error_pt = error_pt_;
+ result.error_msg = error_msg_;
+ error_msg_.Reset(nullptr);
+ } else {
+ result.error_code = kSuccess;
+ }
+ result.val = val;
+ return result;
+ }
+
+ // Resets the boundaries of this decoder.
+ void Reset(const byte* start, const byte* end) {
+ start_ = start;
+ pc_ = start;
+ limit_ = end;
+ error_pc_ = nullptr;
+ error_pt_ = nullptr;
+ error_msg_.Reset(nullptr);
+ }
+
+ bool ok() const { return error_pc_ == nullptr; }
+ bool failed() const { return error_pc_ != nullptr; }
+
+ protected:
+ const byte* start_;
+ const byte* pc_;
+ const byte* limit_;
+ const byte* error_pc_;
+ const byte* error_pt_;
+ base::SmartArrayPointer<char> error_msg_;
+};
+
+#undef TRACE
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_DECODER_H_
diff --git a/deps/v8/src/wasm/encoder.cc b/deps/v8/src/wasm/encoder.cc
new file mode 100644
index 0000000000..d8d36338b1
--- /dev/null
+++ b/deps/v8/src/wasm/encoder.cc
@@ -0,0 +1,592 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/signature.h"
+
+#include "src/handles.h"
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "src/v8memory.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+/*TODO: add error cases for adding too many locals, too many functions and bad
+ indices in body */
+
+namespace {
+void EmitUint8(byte** b, uint8_t x) {
+ Memory::uint8_at(*b) = x;
+ *b += 1;
+}
+
+
+void EmitUint16(byte** b, uint16_t x) {
+ Memory::uint16_at(*b) = x;
+ *b += 2;
+}
+
+
+void EmitUint32(byte** b, uint32_t x) {
+ Memory::uint32_at(*b) = x;
+ *b += 4;
+}
+
+
+void EmitVarInt(byte** b, size_t val) {
+ while (true) {
+ size_t next = val >> 7;
+ byte out = static_cast<byte>(val & 0x7f);
+ if (next) {
+ *((*b)++) = 0x80 | out;
+ val = next;
+ } else {
+ *((*b)++) = out;
+ break;
+ }
+ }
+}
+} // namespace
+
+
+struct WasmFunctionBuilder::Type {
+ bool param_;
+ LocalType type_;
+};
+
+
+WasmFunctionBuilder::WasmFunctionBuilder(Zone* zone)
+ : return_type_(kAstI32),
+ locals_(zone),
+ exported_(0),
+ external_(0),
+ body_(zone),
+ local_indices_(zone),
+ name_(zone) {}
+
+
+uint16_t WasmFunctionBuilder::AddParam(LocalType type) {
+ return AddVar(type, true);
+}
+
+
+uint16_t WasmFunctionBuilder::AddLocal(LocalType type) {
+ return AddVar(type, false);
+}
+
+
+uint16_t WasmFunctionBuilder::AddVar(LocalType type, bool param) {
+ locals_.push_back({param, type});
+ return static_cast<uint16_t>(locals_.size() - 1);
+}
+
+
+void WasmFunctionBuilder::ReturnType(LocalType type) { return_type_ = type; }
+
+
+void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
+ EmitCode(code, code_size, nullptr, 0);
+}
+
+
+void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size,
+ const uint32_t* local_indices,
+ uint32_t indices_size) {
+ size_t size = body_.size();
+ for (size_t i = 0; i < code_size; i++) {
+ body_.push_back(code[i]);
+ }
+ for (size_t i = 0; i < indices_size; i++) {
+ local_indices_.push_back(local_indices[i] + static_cast<uint32_t>(size));
+ }
+}
+
+
+void WasmFunctionBuilder::Emit(WasmOpcode opcode) {
+ body_.push_back(static_cast<byte>(opcode));
+}
+
+
+void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
+ body_.push_back(static_cast<byte>(opcode));
+ body_.push_back(immediate);
+}
+
+
+void WasmFunctionBuilder::EmitWithLocal(WasmOpcode opcode) {
+ body_.push_back(static_cast<byte>(opcode));
+ local_indices_.push_back(static_cast<uint32_t>(body_.size()) - 1);
+}
+
+
+uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
+ body_.push_back(immediate);
+ return static_cast<uint32_t>(body_.size()) - 1;
+}
+
+
+void WasmFunctionBuilder::EditImmediate(uint32_t offset, const byte immediate) {
+ DCHECK(offset < body_.size());
+ body_[offset] = immediate;
+}
+
+
+void WasmFunctionBuilder::Exported(uint8_t flag) { exported_ = flag; }
+
+
+void WasmFunctionBuilder::External(uint8_t flag) { external_ = flag; }
+
+void WasmFunctionBuilder::SetName(const unsigned char* name, int name_length) {
+ name_.clear();
+ if (name_length > 0) {
+ for (int i = 0; i < name_length; i++) {
+ name_.push_back(*(name + i));
+ }
+ name_.push_back('\0');
+ }
+}
+
+
+WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
+ WasmModuleBuilder* mb) const {
+ WasmFunctionEncoder* e =
+ new (zone) WasmFunctionEncoder(zone, return_type_, exported_, external_);
+ uint16_t* var_index = zone->NewArray<uint16_t>(locals_.size());
+ IndexVars(e, var_index);
+ if (body_.size() > 0) {
+ // TODO(titzer): iterate over local indexes, not the bytes.
+ const byte* start = &body_[0];
+ const byte* end = start + body_.size();
+ size_t local_index = 0;
+ for (size_t i = 0; i < body_.size();) {
+ if (local_index < local_indices_.size() &&
+ i == local_indices_[local_index]) {
+ int length = 0;
+ uint32_t index;
+ ReadUnsignedLEB128Operand(start + i, end, &length, &index);
+ uint16_t new_index = var_index[index];
+ const std::vector<uint8_t>& index_vec = UnsignedLEB128From(new_index);
+ for (size_t j = 0; j < index_vec.size(); j++) {
+ e->body_.push_back(index_vec.at(j));
+ }
+ i += length;
+ local_index++;
+ } else {
+ e->body_.push_back(*(start + i));
+ i++;
+ }
+ }
+ }
+ FunctionSig::Builder sig(zone, return_type_ == kAstStmt ? 0 : 1,
+ e->params_.size());
+ if (return_type_ != kAstStmt) {
+ sig.AddReturn(static_cast<LocalType>(return_type_));
+ }
+ for (size_t i = 0; i < e->params_.size(); i++) {
+ sig.AddParam(static_cast<LocalType>(e->params_[i]));
+ }
+ e->signature_index_ = mb->AddSignature(sig.Build());
+ e->name_.insert(e->name_.begin(), name_.begin(), name_.end());
+ return e;
+}
+
+
+void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
+ uint16_t* var_index) const {
+ uint16_t param = 0;
+ uint16_t int32 = 0;
+ uint16_t int64 = 0;
+ uint16_t float32 = 0;
+ uint16_t float64 = 0;
+ for (size_t i = 0; i < locals_.size(); i++) {
+ if (locals_.at(i).param_) {
+ param++;
+ } else if (locals_.at(i).type_ == kAstI32) {
+ int32++;
+ } else if (locals_.at(i).type_ == kAstI64) {
+ int64++;
+ } else if (locals_.at(i).type_ == kAstF32) {
+ float32++;
+ } else if (locals_.at(i).type_ == kAstF64) {
+ float64++;
+ }
+ }
+ e->local_int32_count_ = int32;
+ e->local_int64_count_ = int64;
+ e->local_float32_count_ = float32;
+ e->local_float64_count_ = float64;
+ float64 = param + int32 + int64 + float32;
+ float32 = param + int32 + int64;
+ int64 = param + int32;
+ int32 = param;
+ param = 0;
+ for (size_t i = 0; i < locals_.size(); i++) {
+ if (locals_.at(i).param_) {
+ e->params_.push_back(locals_.at(i).type_);
+ var_index[i] = param++;
+ } else if (locals_.at(i).type_ == kAstI32) {
+ var_index[i] = int32++;
+ } else if (locals_.at(i).type_ == kAstI64) {
+ var_index[i] = int64++;
+ } else if (locals_.at(i).type_ == kAstF32) {
+ var_index[i] = float32++;
+ } else if (locals_.at(i).type_ == kAstF64) {
+ var_index[i] = float64++;
+ }
+ }
+}
+
+
+WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalType return_type,
+ bool exported, bool external)
+ : params_(zone),
+ exported_(exported),
+ external_(external),
+ body_(zone),
+ name_(zone) {}
+
+
+uint32_t WasmFunctionEncoder::HeaderSize() const {
+ uint32_t size = 3;
+ if (HasLocals()) size += 8;
+ if (!external_) size += 2;
+ if (HasName()) size += 4;
+ return size;
+}
+
+
+uint32_t WasmFunctionEncoder::BodySize(void) const {
+ return external_ ? 0 : static_cast<uint32_t>(body_.size());
+}
+
+
+uint32_t WasmFunctionEncoder::NameSize() const {
+ return exported_ ? static_cast<uint32_t>(name_.size()) : 0;
+}
+
+
+void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
+ byte** body) const {
+ uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
+ (external_ ? kDeclFunctionImport : 0) |
+ (HasLocals() ? kDeclFunctionLocals : 0) |
+ (HasName() ? kDeclFunctionName : 0);
+
+ EmitUint8(header, decl_bits);
+ EmitUint16(header, signature_index_);
+
+ if (HasName()) {
+ uint32_t name_offset = static_cast<uint32_t>(*body - buffer);
+ EmitUint32(header, name_offset);
+ std::memcpy(*body, &name_[0], name_.size());
+ (*body) += name_.size();
+ }
+
+ if (HasLocals()) {
+ EmitUint16(header, local_int32_count_);
+ EmitUint16(header, local_int64_count_);
+ EmitUint16(header, local_float32_count_);
+ EmitUint16(header, local_float64_count_);
+ }
+
+ if (!external_) {
+ EmitUint16(header, static_cast<uint16_t>(body_.size()));
+ if (body_.size() > 0) {
+ std::memcpy(*header, &body_[0], body_.size());
+ (*header) += body_.size();
+ }
+ }
+}
+
+
+WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
+ uint32_t size, uint32_t dest)
+ : data_(zone), dest_(dest) {
+ for (size_t i = 0; i < size; i++) {
+ data_.push_back(data[i]);
+ }
+}
+
+
+uint32_t WasmDataSegmentEncoder::HeaderSize() const {
+ static const int kDataSegmentSize = 13;
+ return kDataSegmentSize;
+}
+
+
+uint32_t WasmDataSegmentEncoder::BodySize() const {
+ return static_cast<uint32_t>(data_.size());
+}
+
+
+void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
+ byte** body) const {
+ uint32_t body_offset = static_cast<uint32_t>(*body - buffer);
+ EmitUint32(header, dest_);
+ EmitUint32(header, body_offset);
+ EmitUint32(header, static_cast<uint32_t>(data_.size()));
+ EmitUint8(header, 1); // init
+
+ std::memcpy(*body, &data_[0], data_.size());
+ (*body) += data_.size();
+}
+
+
+WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
+ : zone_(zone),
+ signatures_(zone),
+ functions_(zone),
+ data_segments_(zone),
+ indirect_functions_(zone),
+ globals_(zone),
+ signature_map_(zone) {}
+
+
+uint16_t WasmModuleBuilder::AddFunction() {
+ functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
+ return static_cast<uint16_t>(functions_.size() - 1);
+}
+
+
+WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
+ if (functions_.size() > index) {
+ return functions_.at(index);
+ } else {
+ return nullptr;
+ }
+}
+
+
+void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
+ data_segments_.push_back(data);
+}
+
+
+int WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
+ FunctionSig* b) const {
+ if (a->return_count() < b->return_count()) return -1;
+ if (a->return_count() > b->return_count()) return 1;
+ if (a->parameter_count() < b->parameter_count()) return -1;
+ if (a->parameter_count() > b->parameter_count()) return 1;
+ for (size_t r = 0; r < a->return_count(); r++) {
+ if (a->GetReturn(r) < b->GetReturn(r)) return -1;
+ if (a->GetReturn(r) > b->GetReturn(r)) return 1;
+ }
+ for (size_t p = 0; p < a->parameter_count(); p++) {
+ if (a->GetParam(p) < b->GetParam(p)) return -1;
+ if (a->GetParam(p) > b->GetParam(p)) return 1;
+ }
+ return 0;
+}
+
+
+uint16_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
+ SignatureMap::iterator pos = signature_map_.find(sig);
+ if (pos != signature_map_.end()) {
+ return pos->second;
+ } else {
+ uint16_t index = static_cast<uint16_t>(signatures_.size());
+ signature_map_[sig] = index;
+ signatures_.push_back(sig);
+ return index;
+ }
+}
+
+
+void WasmModuleBuilder::AddIndirectFunction(uint16_t index) {
+ indirect_functions_.push_back(index);
+}
+
+
+WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
+ WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
+ for (auto function : functions_) {
+ writer->functions_.push_back(function->Build(zone, this));
+ }
+ for (auto segment : data_segments_) {
+ writer->data_segments_.push_back(segment);
+ }
+ for (auto sig : signatures_) {
+ writer->signatures_.push_back(sig);
+ }
+ for (auto index : indirect_functions_) {
+ writer->indirect_functions_.push_back(index);
+ }
+ for (auto global : globals_) {
+ writer->globals_.push_back(global);
+ }
+ return writer;
+}
+
+
+// Adds a global variable of {type}; returns its index (position in the
+// globals table).
+uint32_t WasmModuleBuilder::AddGlobal(MachineType type, bool exported) {
+ globals_.push_back(std::make_pair(type, exported));
+ return static_cast<uint32_t>(globals_.size() - 1);
+}
+
+
+// All containers live in {zone}; the writer itself is zone-allocated by
+// WasmModuleBuilder::Build().
+WasmModuleWriter::WasmModuleWriter(Zone* zone)
+ : functions_(zone),
+ data_segments_(zone),
+ signatures_(zone),
+ indirect_functions_(zone),
+ globals_(zone) {}
+
+
+// Running totals of the serialized module's header and body byte counts,
+// used by WriteTo() to pre-compute the exact buffer size before emitting.
+struct Sizes {
+ size_t header_size;
+ size_t body_size;
+
+ size_t total() { return header_size + body_size; }
+
+ void Add(size_t header, size_t body) {
+ header_size += header;
+ body_size += body;
+ }
+
+ // Accounts for one section declaration: one byte for the section code
+ // plus one byte per 7 bits of {size} for the LEB128-encoded entry count.
+ // Empty sections are not emitted and cost nothing.
+ void AddSection(size_t size) {
+ if (size > 0) {
+ Add(1, 0);
+ while (size > 0) {
+ Add(1, 0);
+ size = size >> 7;
+ }
+ }
+ }
+};
+
+
+// Serializes the module into a single zone-allocated buffer in two passes:
+// first compute exact header/body sizes with {Sizes}, then emit through two
+// write cursors — {header} walking the declaration area and {body} walking
+// the code/data area that follows it. The size computation and the emission
+// below must stay in exact lockstep.
+WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
+ Sizes sizes = {0, 0};
+
+ sizes.Add(1, 0);
+ sizes.Add(kDeclMemorySize, 0);
+
+ sizes.AddSection(signatures_.size());
+ for (auto sig : signatures_) {
+ sizes.Add(2 + sig->parameter_count(), 0);
+ }
+
+ sizes.AddSection(globals_.size());
+ if (globals_.size() > 0) {
+ sizes.Add(kDeclGlobalSize * globals_.size(), 0);
+ }
+
+ sizes.AddSection(functions_.size());
+ for (auto function : functions_) {
+ sizes.Add(function->HeaderSize() + function->BodySize(),
+ function->NameSize());
+ }
+
+ sizes.AddSection(data_segments_.size());
+ for (auto segment : data_segments_) {
+ sizes.Add(segment->HeaderSize(), segment->BodySize());
+ }
+
+ sizes.AddSection(indirect_functions_.size());
+ sizes.Add(2 * static_cast<uint32_t>(indirect_functions_.size()), 0);
+
+ // Reserve one byte for the kDeclEnd marker emitted at the bottom.
+ if (sizes.body_size > 0) sizes.Add(1, 0);
+
+ ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + sizes.header_size;
+
+ // -- emit memory declaration ------------------------------------------------
+ EmitUint8(&header, kDeclMemory);
+ EmitUint8(&header, 16); // min memory size
+ EmitUint8(&header, 16); // max memory size
+ EmitUint8(&header, 0); // memory export
+
+ // -- emit globals -----------------------------------------------------------
+ if (globals_.size() > 0) {
+ EmitUint8(&header, kDeclGlobals);
+ EmitVarInt(&header, globals_.size());
+
+ for (auto global : globals_) {
+ EmitUint32(&header, 0);
+ EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
+ EmitUint8(&header, global.second);
+ }
+ }
+
+ // -- emit signatures --------------------------------------------------------
+ if (signatures_.size() > 0) {
+ EmitUint8(&header, kDeclSignatures);
+ EmitVarInt(&header, signatures_.size());
+
+ for (FunctionSig* sig : signatures_) {
+ EmitUint8(&header, static_cast<byte>(sig->parameter_count()));
+ if (sig->return_count() > 0) {
+ EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
+ } else {
+ EmitUint8(&header, kLocalVoid);
+ }
+ for (size_t j = 0; j < sig->parameter_count(); j++) {
+ EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
+ }
+ }
+ }
+
+ // -- emit functions ---------------------------------------------------------
+ if (functions_.size() > 0) {
+ EmitUint8(&header, kDeclFunctions);
+ EmitVarInt(&header, functions_.size());
+
+ for (auto func : functions_) {
+ func->Serialize(buffer, &header, &body);
+ }
+ }
+
+ // -- emit data segments -----------------------------------------------------
+ if (data_segments_.size() > 0) {
+ EmitUint8(&header, kDeclDataSegments);
+ EmitVarInt(&header, data_segments_.size());
+
+ for (auto segment : data_segments_) {
+ segment->Serialize(buffer, &header, &body);
+ }
+ }
+
+ // -- emit function table ----------------------------------------------------
+ if (indirect_functions_.size() > 0) {
+ EmitUint8(&header, kDeclFunctionTable);
+ EmitVarInt(&header, indirect_functions_.size());
+
+ for (auto index : indirect_functions_) {
+ EmitUint16(&header, index);
+ }
+ }
+
+ if (sizes.body_size > 0) EmitUint8(&header, kDeclEnd);
+
+ return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
+}
+
+
+// Encodes {result} as unsigned LEB128: 7 payload bits per byte, high bit
+// set on every byte except the last. Zero encodes as a single 0x00 byte.
+std::vector<uint8_t> UnsignedLEB128From(uint32_t result) {
+ std::vector<uint8_t> output;
+ uint32_t remaining = result;
+ for (;;) {
+ uint8_t piece = static_cast<uint8_t>(remaining & 0x7F);
+ remaining >>= 7;
+ if (remaining == 0) {
+ output.push_back(piece); // last byte: continuation bit clear
+ break;
+ }
+ output.push_back(piece | 0x80); // more bytes follow
+ }
+ return output;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/encoder.h b/deps/v8/src/wasm/encoder.h
new file mode 100644
index 0000000000..f0fabe998a
--- /dev/null
+++ b/deps/v8/src/wasm/encoder.h
@@ -0,0 +1,157 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_ENCODER_H_
+#define V8_WASM_ENCODER_H_
+
+#include "src/signature.h"
+#include "src/zone-containers.h"
+
+#include "src/base/smart-pointers.h"
+
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleBuilder;
+
+// Immutable, serializable form of one function, produced by
+// WasmFunctionBuilder::Build(). Knows the byte sizes of its declaration
+// header, code body, and optional name, and writes them through the
+// header/body cursors during WasmModuleWriter::WriteTo().
+class WasmFunctionEncoder : public ZoneObject {
+ public:
+ uint32_t HeaderSize() const;
+ uint32_t BodySize() const;
+ uint32_t NameSize() const;
+ void Serialize(byte* buffer, byte** header, byte** body) const;
+
+ private:
+ WasmFunctionEncoder(Zone* zone, LocalType return_type, bool exported,
+ bool external);
+ friend class WasmFunctionBuilder;
+ uint16_t signature_index_;
+ ZoneVector<LocalType> params_;
+ uint16_t local_int32_count_;
+ uint16_t local_int64_count_;
+ uint16_t local_float32_count_;
+ uint16_t local_float64_count_;
+ bool exported_;
+ bool external_;
+ ZoneVector<uint8_t> body_;
+ ZoneVector<char> name_;
+
+ bool HasLocals() const {
+ return (local_int32_count_ + local_int64_count_ + local_float32_count_ +
+ local_float64_count_) > 0;
+ }
+
+ // A name is only emitted for exported functions.
+ bool HasName() const { return exported_ && name_.size() > 0; }
+};
+
+// Mutable builder for a single function: accumulates parameters, locals,
+// raw body bytes (with patchable immediates), and export/import flags,
+// then freezes into a WasmFunctionEncoder via Build(). Created only by
+// WasmModuleBuilder::AddFunction().
+class WasmFunctionBuilder : public ZoneObject {
+ public:
+ uint16_t AddParam(LocalType type);
+ uint16_t AddLocal(LocalType type);
+ void ReturnType(LocalType type);
+ void EmitCode(const byte* code, uint32_t code_size);
+ void EmitCode(const byte* code, uint32_t code_size,
+ const uint32_t* local_indices, uint32_t indices_size);
+ void Emit(WasmOpcode opcode);
+ void EmitWithU8(WasmOpcode opcode, const byte immediate);
+ void EmitWithLocal(WasmOpcode opcode);
+ // Returns the body offset of the emitted immediate so it can be patched
+ // later with EditImmediate().
+ uint32_t EmitEditableImmediate(const byte immediate);
+ void EditImmediate(uint32_t offset, const byte immediate);
+ void Exported(uint8_t flag);
+ void External(uint8_t flag);
+ void SetName(const unsigned char* name, int name_length);
+ WasmFunctionEncoder* Build(Zone* zone, WasmModuleBuilder* mb) const;
+
+ private:
+ explicit WasmFunctionBuilder(Zone* zone);
+ friend class WasmModuleBuilder;
+ LocalType return_type_;
+ struct Type;
+ ZoneVector<Type> locals_;
+ uint8_t exported_;
+ uint8_t external_;
+ ZoneVector<uint8_t> body_;
+ ZoneVector<uint32_t> local_indices_;
+ ZoneVector<char> name_;
+ uint16_t AddVar(LocalType type, bool param);
+ void IndexVars(WasmFunctionEncoder* e, uint16_t* var_index) const;
+};
+
+// Encodes one data segment: a copy of {size} bytes from {data} together
+// with the destination address {dest} in the module's linear memory.
+class WasmDataSegmentEncoder : public ZoneObject {
+ public:
+ WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
+ uint32_t dest);
+ uint32_t HeaderSize() const;
+ uint32_t BodySize() const;
+ void Serialize(byte* buffer, byte** header, byte** body) const;
+
+ private:
+ ZoneVector<byte> data_; // segment payload, copied into the zone
+ uint32_t dest_; // destination address in linear memory
+};
+
+// The byte range [Begin(), End()) of a fully serialized module, produced by
+// WasmModuleWriter::WriteTo(). The bytes themselves are zone-allocated.
+class WasmModuleIndex : public ZoneObject {
+ public:
+ const byte* Begin() const { return begin_; }
+ const byte* End() const { return end_; }
+
+ private:
+ friend class WasmModuleWriter;
+ WasmModuleIndex(const byte* begin, const byte* end)
+ : begin_(begin), end_(end) {}
+ const byte* begin_;
+ const byte* end_;
+};
+
+// Immutable snapshot of a WasmModuleBuilder (created by its Build());
+// WriteTo() serializes all parts into one zone-allocated byte buffer.
+class WasmModuleWriter : public ZoneObject {
+ public:
+ WasmModuleIndex* WriteTo(Zone* zone) const;
+
+ private:
+ friend class WasmModuleBuilder;
+ explicit WasmModuleWriter(Zone* zone);
+ ZoneVector<WasmFunctionEncoder*> functions_;
+ ZoneVector<WasmDataSegmentEncoder*> data_segments_;
+ ZoneVector<FunctionSig*> signatures_;
+ ZoneVector<uint16_t> indirect_functions_;
+ ZoneVector<std::pair<MachineType, bool>> globals_;
+};
+
+// Accumulates the parts of a module under construction — functions,
+// deduplicated signatures, data segments, globals, and the indirect
+// function table — and snapshots them into a WasmModuleWriter via Build().
+class WasmModuleBuilder : public ZoneObject {
+ public:
+ explicit WasmModuleBuilder(Zone* zone);
+ uint16_t AddFunction();
+ uint32_t AddGlobal(MachineType type, bool exported);
+ WasmFunctionBuilder* FunctionAt(size_t index);
+ void AddDataSegment(WasmDataSegmentEncoder* data);
+ // Returns the index of {sig}; structurally equal signatures share one
+ // index (see CompareFunctionSigs).
+ uint16_t AddSignature(FunctionSig* sig);
+ void AddIndirectFunction(uint16_t index);
+ WasmModuleWriter* Build(Zone* zone);
+
+ private:
+ // Total order on signatures so ZoneMap can deduplicate them.
+ struct CompareFunctionSigs {
+ int operator()(FunctionSig* a, FunctionSig* b) const;
+ };
+ typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;
+
+ Zone* zone_;
+ ZoneVector<FunctionSig*> signatures_;
+ ZoneVector<WasmFunctionBuilder*> functions_;
+ ZoneVector<WasmDataSegmentEncoder*> data_segments_;
+ ZoneVector<uint16_t> indirect_functions_;
+ ZoneVector<std::pair<MachineType, bool>> globals_;
+ SignatureMap signature_map_;
+};
+
+std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_ENCODER_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
new file mode 100644
index 0000000000..24f39822f9
--- /dev/null
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -0,0 +1,547 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/module-decoder.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_decoder) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+
+// The main logic for decoding the bytes of a module.
+class ModuleDecoder : public Decoder {
+ public:
+ // {module_start}..{module_end} delimit the encoded module bytes; {asm_js}
+ // selects asm.js-origin validation of function bodies.
+ ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
+ bool asm_js)
+ : Decoder(module_start, module_end), module_zone(zone), asm_js_(asm_js) {
+ result_.start = start_;
+ if (limit_ < start_) {
+ error(start_, "end is less than start");
+ limit_ = start_; // guarantees the decoding loop terminates immediately
+ }
+ }
+
+ // Invoked by the base Decoder on the first recorded error.
+ virtual void onFirstError() {
+ pc_ = limit_; // On error, terminate section decoding loop.
+ }
+
+ // Decodes an entire module: reads one section declaration at a time until
+ // kDeclEnd or the end of the buffer. Each section may appear at most once,
+ // and some sections require a prerequisite section (signatures before
+ // functions, functions before the function table).
+ ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
+ pc_ = start_;
+ module->module_start = start_;
+ module->module_end = limit_;
+ module->min_mem_size_log2 = 0;
+ module->max_mem_size_log2 = 0;
+ module->mem_export = false;
+ module->mem_external = false;
+ // NOTE(review): raw-owned heap vectors; presumably released by
+ // ~WasmModule — confirm there is no leak on the error paths.
+ module->globals = new std::vector<WasmGlobal>();
+ module->signatures = new std::vector<FunctionSig*>();
+ module->functions = new std::vector<WasmFunction>();
+ module->data_segments = new std::vector<WasmDataSegment>();
+ module->function_table = new std::vector<uint16_t>();
+
+ // Tracks which sections have been seen, for duplicate/prerequisite checks.
+ bool sections[kMaxModuleSectionCode];
+ memset(sections, 0, sizeof(sections));
+
+ // Decode the module sections.
+ while (pc_ < limit_) {
+ TRACE("DecodeSection\n");
+ WasmSectionDeclCode section =
+ static_cast<WasmSectionDeclCode>(u8("section"));
+ // Each section should appear at most once.
+ if (section < kMaxModuleSectionCode) {
+ CheckForPreviousSection(sections, section, false);
+ sections[section] = true;
+ }
+
+ switch (section) {
+ case kDeclEnd:
+ // Terminate section decoding.
+ limit_ = pc_;
+ break;
+ case kDeclMemory:
+ module->min_mem_size_log2 = u8("min memory");
+ module->max_mem_size_log2 = u8("max memory");
+ module->mem_export = u8("export memory") != 0;
+ break;
+ case kDeclSignatures: {
+ int length;
+ uint32_t signatures_count = u32v(&length, "signatures count");
+ module->signatures->reserve(SafeReserve(signatures_count));
+ // Decode signatures.
+ for (uint32_t i = 0; i < signatures_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ FunctionSig* s = sig(); // read function sig.
+ module->signatures->push_back(s);
+ }
+ break;
+ }
+ case kDeclFunctions: {
+ // Functions require a signature table first.
+ CheckForPreviousSection(sections, kDeclSignatures, true);
+ int length;
+ uint32_t functions_count = u32v(&length, "functions count");
+ module->functions->reserve(SafeReserve(functions_count));
+ // Set up module environment for verification.
+ ModuleEnv menv;
+ menv.module = module;
+ menv.globals_area = 0;
+ menv.mem_start = 0;
+ menv.mem_end = 0;
+ menv.function_code = nullptr;
+ menv.asm_js = asm_js_;
+ // Decode functions.
+ for (uint32_t i = 0; i < functions_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeFunction[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->functions->push_back(
+ {nullptr, 0, 0, 0, 0, 0, 0, false, false});
+ WasmFunction* function = &module->functions->back();
+ DecodeFunctionInModule(module, function, false);
+ }
+ // Verification is a second pass so all declarations are available.
+ if (ok() && verify_functions) {
+ for (uint32_t i = 0; i < functions_count; i++) {
+ if (failed()) break;
+ WasmFunction* function = &module->functions->at(i);
+ if (!function->external) {
+ VerifyFunctionBody(i, &menv, function);
+ if (result_.failed())
+ error(result_.error_pc, result_.error_msg.get());
+ }
+ }
+ }
+ break;
+ }
+ case kDeclGlobals: {
+ int length;
+ uint32_t globals_count = u32v(&length, "globals count");
+ module->globals->reserve(SafeReserve(globals_count));
+ // Decode globals.
+ for (uint32_t i = 0; i < globals_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeGlobal[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->globals->push_back({0, MachineType::Int32(), 0, false});
+ WasmGlobal* global = &module->globals->back();
+ DecodeGlobalInModule(global);
+ }
+ break;
+ }
+ case kDeclDataSegments: {
+ int length;
+ uint32_t data_segments_count = u32v(&length, "data segments count");
+ module->data_segments->reserve(SafeReserve(data_segments_count));
+ // Decode data segments.
+ for (uint32_t i = 0; i < data_segments_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeDataSegment[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module->data_segments->push_back({0, 0, 0});
+ WasmDataSegment* segment = &module->data_segments->back();
+ DecodeDataSegmentInModule(segment);
+ }
+ break;
+ }
+ case kDeclFunctionTable: {
+ // An indirect function table requires functions first.
+ CheckForPreviousSection(sections, kDeclFunctions, true);
+ int length;
+ uint32_t function_table_count = u32v(&length, "function table count");
+ module->function_table->reserve(SafeReserve(function_table_count));
+ // Decode function table.
+ for (uint32_t i = 0; i < function_table_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeFunctionTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ uint16_t index = u16();
+ if (index >= module->functions->size()) {
+ error(pc_ - 2, "invalid function index");
+ break;
+ }
+ module->function_table->push_back(index);
+ }
+ break;
+ }
+ case kDeclWLL: {
+ // Reserved for experimentation by the Web Low-level Language project
+ // which is augmenting the binary encoding with source code meta
+ // information. This section does not affect the semantics of the code
+ // and can be ignored by the runtime. https://github.com/JSStats/wll
+ int length = 0;
+ uint32_t section_size = u32v(&length, "section size");
+ // The second comparison guards against pointer overflow on huge sizes.
+ if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
+ error(pc_ - length, "invalid section size");
+ break;
+ }
+ pc_ += section_size;
+ break;
+ }
+ default:
+ error(pc_ - 1, nullptr, "unrecognized section 0x%02x", section);
+ break;
+ }
+ }
+
+ return toResult(module);
+ }
+
+ // Clamps a decoded element count before vector::reserve so a corrupt or
+ // malicious count cannot trigger a huge allocation; the vectors still grow
+ // on demand if the real element count exceeds the clamp.
+ uint32_t SafeReserve(uint32_t count) {
+ // Avoid OOM by only reserving up to a certain size.
+ const uint32_t kMaxReserve = 20000;
+ return count < kMaxReserve ? count : kMaxReserve;
+ }
+
+ // Reports an error when {section}'s presence does not match {present}:
+ // with present=false a duplicate section, with present=true a missing
+ // prerequisite section.
+ void CheckForPreviousSection(bool* sections, WasmSectionDeclCode section,
+ bool present) {
+ if (section >= kMaxModuleSectionCode) return;
+ if (sections[section] == present) return;
+ const char* name = "";
+ switch (section) {
+ case kDeclMemory:
+ name = "memory";
+ break;
+ case kDeclSignatures:
+ name = "signatures";
+ break;
+ case kDeclFunctions:
+ name = "function declaration";
+ break;
+ case kDeclGlobals:
+ name = "global variable";
+ break;
+ case kDeclDataSegments:
+ name = "data segment";
+ break;
+ case kDeclFunctionTable:
+ name = "function table";
+ break;
+ default:
+ name = "";
+ break;
+ }
+ if (present) {
+ error(pc_ - 1, nullptr, "required %s section missing", name);
+ } else {
+ error(pc_ - 1, nullptr, "%s section already present", name);
+ }
+ }
+
+ // Decodes a single anonymous function starting at {start_}: inline
+ // signature, four u16 local counts, then the code up to {limit_}.
+ FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
+ WasmFunction* function) {
+ pc_ = start_;
+ function->sig = sig(); // read signature
+ function->name_offset = 0; // ---- name
+ // pc_ + 8 skips the four u16 local counts read just below.
+ function->code_start_offset = off(pc_ + 8); // ---- code start
+ function->code_end_offset = off(limit_); // ---- code end
+ function->local_int32_count = u16(); // read u16
+ function->local_int64_count = u16(); // read u16
+ function->local_float32_count = u16(); // read u16
+ function->local_float64_count = u16(); // read u16
+ function->exported = false; // ---- exported
+ function->external = false; // ---- external
+
+ if (ok()) VerifyFunctionBody(0, module_env, function);
+
+ FunctionResult result;
+ result.CopyFrom(result_); // Copy error code and location.
+ result.val = function;
+ return result;
+ }
+
+ // Decodes a single function signature at {start}; returns nullptr if the
+ // signature is malformed.
+ FunctionSig* DecodeFunctionSignature(const byte* start) {
+ pc_ = start;
+ FunctionSig* result = sig();
+ return ok() ? result : nullptr;
+ }
+
+ private:
+ Zone* module_zone;
+ ModuleResult result_;
+ bool asm_js_;
+
+ // Byte offset of {ptr} from the start of the module.
+ uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
+
+ // Decodes a single global entry inside a module starting at {pc_}:
+ // name offset, memory type, exported flag.
+ void DecodeGlobalInModule(WasmGlobal* global) {
+ global->name_offset = string("global name");
+ global->type = mem_type();
+ // Not encoded; presumably assigned when globals are laid out at
+ // instantiation time — NOTE(review): confirm.
+ global->offset = 0;
+ global->exported = u8("exported") != 0;
+ }
+
+ // Decodes a single function entry inside a module starting at {pc_}:
+ // a declaration-bits byte, a signature index, then (depending on the
+ // bits) a name offset, explicit local counts, and the sized code body.
+ // Imported functions carry no locals or body.
+ void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
+ bool verify_body = true) {
+ byte decl_bits = u8("function decl");
+
+ const byte* sigpos = pc_;
+ function->sig_index = u16("signature index");
+
+ if (function->sig_index >= module->signatures->size()) {
+ return error(sigpos, "invalid signature index");
+ } else {
+ function->sig = module->signatures->at(function->sig_index);
+ }
+
+ TRACE(" +%d <function attributes:%s%s%s%s%s>\n",
+ static_cast<int>(pc_ - start_),
+ decl_bits & kDeclFunctionName ? " name" : "",
+ decl_bits & kDeclFunctionImport ? " imported" : "",
+ decl_bits & kDeclFunctionLocals ? " locals" : "",
+ decl_bits & kDeclFunctionExport ? " exported" : "",
+ (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
+
+ if (decl_bits & kDeclFunctionName) {
+ function->name_offset = string("function name");
+ }
+
+ function->exported = decl_bits & kDeclFunctionExport;
+
+ // Imported functions have no locals or body.
+ if (decl_bits & kDeclFunctionImport) {
+ function->external = true;
+ return;
+ }
+
+ if (decl_bits & kDeclFunctionLocals) {
+ function->local_int32_count = u16("int32 count");
+ function->local_int64_count = u16("int64 count");
+ function->local_float32_count = u16("float32 count");
+ function->local_float64_count = u16("float64 count");
+ }
+
+ uint16_t size = u16("body size");
+ if (ok()) {
+ if ((pc_ + size) > limit_) {
+ return error(pc_, limit_,
+ "expected %d bytes for function body, fell off end", size);
+ }
+ // Record the body's range; the bytes are verified later in a second
+ // pass (see DecodeModule), not consumed here.
+ function->code_start_offset = static_cast<uint32_t>(pc_ - start_);
+ function->code_end_offset = function->code_start_offset + size;
+ TRACE(" +%d %-20s: (%d bytes)\n", static_cast<int>(pc_ - start_),
+ "function body", size);
+ pc_ += size;
+ }
+ }
+
+ // Decodes a single data segment entry inside a module starting at {pc_}:
+ // destination address, source offset (bounds-checked), size, init flag.
+ void DecodeDataSegmentInModule(WasmDataSegment* segment) {
+ segment->dest_addr =
+ u32("destination"); // TODO(titzer): check it's within the memory size.
+ segment->source_offset = offset("source offset");
+ segment->source_size =
+ u32("source size"); // TODO(titzer): check the size is reasonable.
+ segment->init = u8("init");
+ }
+
+ // Verifies the body (code) of a given function by running the AST decoder
+ // over its byte range; on failure, wraps the decoder's error with the
+ // function number and stores it in result_.
+ void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
+ WasmFunction* function) {
+ if (FLAG_trace_wasm_decode_time) {
+ // TODO(titzer): clean me up a bit.
+ OFStream os(stdout);
+ os << "Verifying WASM function:";
+ if (function->name_offset > 0) {
+ os << menv->module->GetName(function->name_offset);
+ }
+ os << std::endl;
+ }
+ // Build the per-function environment the body decoder needs.
+ FunctionEnv fenv;
+ fenv.module = menv;
+ fenv.sig = function->sig;
+ fenv.local_int32_count = function->local_int32_count;
+ fenv.local_int64_count = function->local_int64_count;
+ fenv.local_float32_count = function->local_float32_count;
+ fenv.local_float64_count = function->local_float64_count;
+ fenv.SumLocals();
+
+ TreeResult result =
+ VerifyWasmCode(&fenv, start_, start_ + function->code_start_offset,
+ start_ + function->code_end_offset);
+ if (result.failed()) {
+ // Wrap the error message from the function decoder.
+ std::ostringstream str;
+ str << "in function #" << func_num << ": ";
+ // TODO(titzer): add function name for the user?
+ str << result;
+ std::string strval = str.str();
+ const char* raw = strval.c_str();
+ // Copy into a heap buffer owned by result_.error_msg. Allocate
+ // strlen + 1 so the terminating NUL does not clobber the last
+ // character of the message (the previous code allocated strlen and
+ // truncated it). Matches the pattern used by ModuleError.
+ size_t len = strlen(raw) + 1;
+ char* buffer = new char[len];
+ strncpy(buffer, raw, len);
+ buffer[len - 1] = 0;
+
+ // Copy error code and location.
+ result_.CopyFrom(result);
+ result_.error_msg.Reset(buffer);
+ }
+ }
+
+ // Reads a single 32-bit unsigned integer interpreted as an offset, checking
+ // the offset is within bounds and advances. Note the (possibly
+ // out-of-bounds) value is returned even after the error is recorded.
+ uint32_t offset(const char* name = nullptr) {
+ uint32_t offset = u32(name ? name : "offset");
+ if (offset > static_cast<uint32_t>(limit_ - start_)) {
+ error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
+ }
+ return offset;
+ }
+
+ // Reads a single 32-bit unsigned integer interpreted as an offset into the
+ // data and validating the string there and advances. Currently only the
+ // offset bound is checked; the string bytes themselves are not validated.
+ uint32_t string(const char* name = nullptr) {
+ return offset(name ? name : "string"); // TODO(titzer): validate string
+ }
+
+ // Reads a single 8-bit integer, interpreting it as a local type code.
+ // Unknown codes record an error and fall back to kAstStmt.
+ LocalType local_type() {
+ byte val = u8("local type");
+ LocalTypeCode t = static_cast<LocalTypeCode>(val);
+ switch (t) {
+ case kLocalVoid:
+ return kAstStmt;
+ case kLocalI32:
+ return kAstI32;
+ case kLocalI64:
+ return kAstI64;
+ case kLocalF32:
+ return kAstF32;
+ case kLocalF64:
+ return kAstF64;
+ default:
+ error(pc_ - 1, "invalid local type");
+ return kAstStmt;
+ }
+ }
+
+ // Reads a single 8-bit integer, interpreting it as a memory type code.
+ // Unknown codes record an error and fall back to MachineType::None().
+ MachineType mem_type() {
+ byte val = u8("memory type");
+ MemTypeCode t = static_cast<MemTypeCode>(val);
+ switch (t) {
+ case kMemI8:
+ return MachineType::Int8();
+ case kMemU8:
+ return MachineType::Uint8();
+ case kMemI16:
+ return MachineType::Int16();
+ case kMemU16:
+ return MachineType::Uint16();
+ case kMemI32:
+ return MachineType::Int32();
+ case kMemU32:
+ return MachineType::Uint32();
+ case kMemI64:
+ return MachineType::Int64();
+ case kMemU64:
+ return MachineType::Uint64();
+ case kMemF32:
+ return MachineType::Float32();
+ case kMemF64:
+ return MachineType::Float64();
+ default:
+ error(pc_ - 1, "invalid memory type");
+ return MachineType::None();
+ }
+ }
+
+ // Parses an inline function signature: u8 param count, u8 return type
+ // (kLocalVoid meaning no return value), then one u8 type per parameter.
+ // The signature is allocated in {module_zone}.
+ FunctionSig* sig() {
+ byte count = u8("param count");
+ LocalType ret = local_type();
+ FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
+ if (ret != kAstStmt) builder.AddReturn(ret);
+
+ for (int i = 0; i < count; i++) {
+ LocalType param = local_type();
+ if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
+ builder.AddParam(param);
+ }
+ return builder.Build();
+ }
+};
+
+
+// Helpers for nice error messages.
+class ModuleError : public ModuleResult {
+ public:
+ explicit ModuleError(const char* msg) {
+ error_code = kError;
+ size_t len = strlen(msg) + 1;
+ char* result = new char[len];
+ strncpy(result, msg, len);
+ result[len - 1] = 0;
+ error_msg.Reset(result);
+ }
+};
+
+
+// Helpers for nice error messages.
+class FunctionError : public FunctionResult {
+ public:
+ explicit FunctionError(const char* msg) {
+ error_code = kError;
+ size_t len = strlen(msg) + 1;
+ char* result = new char[len];
+ strncpy(result, msg, len);
+ result[len - 1] = 0;
+ error_msg.Reset(result);
+ }
+};
+
+
+// Decodes (and, when {verify_functions}, verifies) an entire module between
+// {module_start} and {module_end}.
+ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+ const byte* module_start, const byte* module_end,
+ bool verify_functions, bool asm_js) {
+ size_t size = module_end - module_start;
+ if (module_start > module_end) return ModuleError("start > end");
+ // NOTE(review): '>=' rejects size == kMaxModuleSize, while
+ // DecodeWasmFunction uses '>' for its limit — confirm which bound is
+ // intended and make the two consistent.
+ if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
+ // Heap-allocated; ownership presumably transfers to the returned
+ // ModuleResult via toResult() — TODO confirm.
+ WasmModule* module = new WasmModule();
+ ModuleDecoder decoder(zone, module_start, module_end, asm_js);
+ return decoder.DecodeModule(module, verify_functions);
+}
+
+
+// Exposed for testing: decodes a single function signature from [start, end),
+// allocating it in {zone}; returns nullptr on malformed input.
+FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
+ const byte* end) {
+ ModuleDecoder decoder(zone, start, end, false);
+ return decoder.DecodeFunctionSignature(start);
+}
+
+
+// Decodes and verifies a single anonymous function between {function_start}
+// and {function_end} against {module_env}.
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
+ ModuleEnv* module_env,
+ const byte* function_start,
+ const byte* function_end) {
+ size_t size = function_end - function_start;
+ if (function_start > function_end) return FunctionError("start > end");
+ if (size > kMaxFunctionSize)
+ return FunctionError("size > maximum function size");
+ // Heap-allocated; presumably owned by the returned FunctionResult (its
+ // val field) — TODO confirm.
+ WasmFunction* function = new WasmFunction();
+ ModuleDecoder decoder(zone, function_start, function_end, false);
+ return decoder.DecodeSingleFunction(module_env, function);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
new file mode 100644
index 0000000000..3f469a500e
--- /dev/null
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_DECODER_H_
+#define V8_WASM_MODULE_DECODER_H_
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+// Decodes the bytes of a WASM module between {module_start} and {module_end}.
+ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+ const byte* module_start, const byte* module_end,
+ bool verify_functions, bool asm_js);
+
+// Exposed for testing. Decodes a single function signature, allocating it
+// in the given zone. Returns {nullptr} upon failure.
+FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
+ const byte* end);
+
+// Decodes the bytes of a WASM function between
+// {function_start} and {function_end}.
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
+ const byte* function_start,
+ const byte* function_end);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_DECODER_H_
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
new file mode 100644
index 0000000000..80d8bdb236
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -0,0 +1,345 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
+#include "src/api-natives.h"
+#include "src/assert-scope.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/factory.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/parsing/parser.h"
+#include "src/typing-asm.h"
+
+#include "src/wasm/asm-wasm-builder.h"
+#include "src/wasm/encoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+typedef uint8_t byte;
+
+using v8::internal::wasm::ErrorThrower;
+
+namespace v8 {
+
+namespace {
+struct RawBuffer {
+ const byte* start;
+ const byte* end;
+ size_t size() { return static_cast<size_t>(end - start); }
+};
+
+
+RawBuffer GetRawBufferArgument(
+ ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
+ thrower.Error("Argument 0 must be an array buffer");
+ return {nullptr, nullptr};
+ }
+ Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ // TODO(titzer): allow offsets into buffers, views, etc.
+
+ const byte* start = reinterpret_cast<const byte*>(contents.Data());
+ const byte* end = start + contents.ByteLength();
+
+ if (start == nullptr) {
+ thrower.Error("ArrayBuffer argument is empty");
+ }
+ return {start, end};
+}
+
+
+void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.verifyModule()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, true, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.verifyFunction()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ internal::wasm::FunctionResult result;
+ {
+ // Verification of a single function shouldn't allocate.
+ i::DisallowHeapAllocation no_allocation;
+ i::Zone zone;
+ result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
+ buffer.start, buffer.end);
+ }
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void CompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.compileRun()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (thrower.error()) return;
+
+ // Decode and pre-verify the functions before compiling and running.
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, true, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Compile and run!
+ int32_t retval = i::wasm::CompileAndRunWasmModule(isolate, result.val);
+ args.GetReturnValue().Set(retval);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(i::ParseInfo* info) {
+ info->set_global();
+ info->set_lazy(false);
+ info->set_allow_lazy_parsing(false);
+ info->set_toplevel(true);
+
+ if (!i::Compiler::ParseAndAnalyze(info)) {
+ return nullptr;
+ }
+
+ info->set_literal(
+ info->scope()->declarations()->at(0)->AsFunctionDeclaration()->fun());
+
+ v8::internal::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
+ info->literal());
+ if (!typer.Validate()) {
+ return nullptr;
+ }
+
+ auto module = v8::internal::wasm::AsmWasmBuilder(
+ info->isolate(), info->zone(), info->literal())
+ .Run();
+ return module;
+}
+
+
+void AsmCompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.asmCompileRun()");
+
+ if (args.Length() != 1) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+ if (!args[0]->IsString()) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+
+ i::Factory* factory = isolate->factory();
+ i::Zone zone;
+ Local<String> source = Local<String>::Cast(args[0]);
+ i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+ i::ParseInfo info(&zone, script);
+
+ auto module = TranslateAsmModule(&info);
+ if (module == nullptr) {
+ thrower.Error("Asm.js validation failed");
+ return;
+ }
+
+ int32_t result = v8::internal::wasm::CompileAndRunWasmModule(
+ isolate, module->Begin(), module->End(), true);
+ args.GetReturnValue().Set(result);
+}
+
+
+// TODO(aseemgarg): deal with arraybuffer and foreign functions
+void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
+
+ if (args.Length() != 1) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+ if (!args[0]->IsString()) {
+ thrower.Error("Invalid argument count");
+ return;
+ }
+
+ i::Factory* factory = isolate->factory();
+ i::Zone zone;
+ Local<String> source = Local<String>::Cast(args[0]);
+ i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
+ i::ParseInfo info(&zone, script);
+
+ auto module = TranslateAsmModule(&info);
+ if (module == nullptr) {
+ thrower.Error("Asm.js validation failed");
+ return;
+ }
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, module->Begin(), module->End(), false, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ ErrorThrower thrower(isolate, "WASM.instantiateModule()");
+
+ RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (buffer.start == nullptr) return;
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ Local<Object> obj = Local<Object>::Cast(args[2]);
+ i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+ memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+ }
+
+ // Decode but avoid a redundant pass over function bodies for verification.
+ // Verification will happen during compilation.
+ i::Zone zone;
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, buffer.start, buffer.end, false, false);
+
+ if (result.failed()) {
+ thrower.Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> obj = Local<Object>::Cast(args[1]);
+ ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+ }
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
+ }
+
+ if (result.val) delete result.val;
+}
+} // namespace
+
+
+// TODO(titzer): we use the API to create the function template because the
+// internal guts are too ugly to replicate here.
+static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
+ FunctionCallback func) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ Local<FunctionTemplate> local = FunctionTemplate::New(isolate, func);
+ return v8::Utils::OpenHandle(*local);
+}
+
+
+namespace internal {
+static Handle<String> v8_str(Isolate* isolate, const char* str) {
+ return isolate->factory()->NewStringFromAsciiChecked(str);
+}
+
+
+static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback func) {
+ Handle<String> name = v8_str(isolate, str);
+ Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ Handle<JSFunction> function =
+ ApiNatives::InstantiateFunction(temp).ToHandleChecked();
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ JSObject::AddProperty(object, name, function, attributes);
+}
+
+
+void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
+ // Setup wasm function map.
+ Handle<Context> context(global->native_context(), isolate);
+ InstallWasmFunctionMap(isolate, context);
+
+ // Bind the WASM object.
+ Factory* factory = isolate->factory();
+ Handle<String> name = v8_str(isolate, "_WASMEXP_");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons, Handle<Object>(context->initial_object_prototype(), isolate));
+ cons->shared()->set_instance_class_name(*name);
+ Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+ JSObject::AddProperty(global, name, wasm_object, attributes);
+
+ // Install functions on the WASM object.
+ InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
+ InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
+ InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
+ InstallFunc(isolate, wasm_object, "compileRun", CompileRun);
+ InstallFunc(isolate, wasm_object, "asmCompileRun", AsmCompileRun);
+ InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
+ InstantiateModuleFromAsm);
+}
+
+
+void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
+ if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
+ Handle<Map> wasm_function_map = isolate->factory()->NewMap(
+ JS_FUNCTION_TYPE, JSFunction::kSize + kPointerSize);
+ wasm_function_map->set_is_callable();
+ context->set_wasm_function_map(*wasm_function_map);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
new file mode 100644
index 0000000000..e7305aa164
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_JS_H_
+#define V8_WASM_JS_H_
+
+#ifndef V8_SHARED
+#include "src/allocation.h"
+#include "src/hashmap.h"
+#else
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#endif // !V8_SHARED
+
+namespace v8 {
+namespace internal {
+// Exposes a WASM API to JavaScript through the V8 API.
+class WasmJs {
+ public:
+ static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
+ static void InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context);
+};
+
+} // namespace internal
+} // namespace v8
+#endif
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
new file mode 100644
index 0000000000..470804a73d
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -0,0 +1,265 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MACRO_GEN_H_
+#define V8_WASM_MACRO_GEN_H_
+
+#include "src/wasm/wasm-opcodes.h"
+
+// Convenience macros for building Wasm bytecode directly into a byte array.
+
+//------------------------------------------------------------------------------
+// Control.
+//------------------------------------------------------------------------------
+#define WASM_NOP kExprNop
+
+#define WASM_BLOCK(count, ...) kExprBlock, static_cast<byte>(count), __VA_ARGS__
+#define WASM_INFINITE_LOOP kExprLoop, 1, kExprBr, 0, kExprNop
+#define WASM_LOOP(count, ...) kExprLoop, static_cast<byte>(count), __VA_ARGS__
+#define WASM_IF(cond, tstmt) kExprIf, cond, tstmt
+#define WASM_IF_ELSE(cond, tstmt, fstmt) kExprIfElse, cond, tstmt, fstmt
+#define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
+#define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define WASM_BR_IF(depth, cond) \
+ kExprBrIf, static_cast<byte>(depth), cond, kExprNop
+#define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
+#define WASM_BRV_IF(depth, cond, val) \
+ kExprBrIf, static_cast<byte>(depth), cond, val
+#define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
+#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
+#define WASM_RETURN0 kExprReturn
+#define WASM_RETURN(...) kExprReturn, __VA_ARGS__
+#define WASM_UNREACHABLE kExprUnreachable
+
+#define WASM_TABLESWITCH_OP(case_count, table_count, ...) \
+ kExprTableSwitch, static_cast<byte>(case_count), \
+ static_cast<byte>(case_count >> 8), static_cast<byte>(table_count), \
+ static_cast<byte>(table_count >> 8), __VA_ARGS__
+
+#define WASM_TABLESWITCH_BODY0(key) key
+
+#define WASM_TABLESWITCH_BODY(key, ...) key, __VA_ARGS__
+
+#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
+#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
+
+//------------------------------------------------------------------------------
+// Misc expressions.
+//------------------------------------------------------------------------------
+#define WASM_ID(...) __VA_ARGS__
+#define WASM_ZERO kExprI8Const, 0
+#define WASM_ONE kExprI8Const, 1
+#define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
+#define WASM_I32(val) \
+ kExprI32Const, static_cast<byte>(val), static_cast<byte>(val >> 8), \
+ static_cast<byte>(val >> 16), static_cast<byte>(val >> 24)
+#define WASM_I64(val) \
+ kExprI64Const, static_cast<byte>(static_cast<uint64_t>(val)), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 8), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 16), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 24), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 32), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 40), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 48), \
+ static_cast<byte>(static_cast<uint64_t>(val) >> 56)
+#define WASM_F32(val) \
+ kExprF32Const, \
+ static_cast<byte>(bit_cast<int32_t>(static_cast<float>(val))), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 8), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 16), \
+ static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 24)
+#define WASM_F64(val) \
+ kExprF64Const, static_cast<byte>(bit_cast<uint64_t>(val)), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 8), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 16), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 24), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 32), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 40), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 48), \
+ static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
+#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
+#define WASM_SET_LOCAL(index, val) kExprSetLocal, static_cast<byte>(index), val
+#define WASM_LOAD_GLOBAL(index) kExprLoadGlobal, static_cast<byte>(index)
+#define WASM_STORE_GLOBAL(index, val) \
+ kExprStoreGlobal, static_cast<byte>(index), val
+#define WASM_LOAD_MEM(type, index) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index
+#define WASM_STORE_MEM(type, index, val) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(false), index, val
+#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
+ static_cast<byte>(offset), index
+#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ v8::internal::wasm::WasmOpcodes::LoadStoreAccessOf(true), \
+ static_cast<byte>(offset), index, val
+#define WASM_CALL_FUNCTION(index, ...) \
+ kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
+#define WASM_CALL_INDIRECT(index, func, ...) \
+ kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
+#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT0(index, func) \
+ kExprCallIndirect, static_cast<byte>(index), func
+#define WASM_NOT(x) kExprBoolNot, x
+
+//------------------------------------------------------------------------------
+// Constructs that are composed of multiple bytecodes.
+//------------------------------------------------------------------------------
+#define WASM_WHILE(x, y) kExprLoop, 1, kExprIf, x, kExprBr, 0, y
+#define WASM_INC_LOCAL(index) \
+ kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
+ static_cast<byte>(index), kExprI8Const, 1
+#define WASM_INC_LOCAL_BY(index, count) \
+ kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
+ static_cast<byte>(index), kExprI8Const, static_cast<int8_t>(count)
+
+#define WASM_UNOP(opcode, x) static_cast<byte>(opcode), x
+#define WASM_BINOP(opcode, x, y) static_cast<byte>(opcode), x, y
+
+//------------------------------------------------------------------------------
+// Int32 operations
+//------------------------------------------------------------------------------
+#define WASM_I32_ADD(x, y) kExprI32Add, x, y
+#define WASM_I32_SUB(x, y) kExprI32Sub, x, y
+#define WASM_I32_MUL(x, y) kExprI32Mul, x, y
+#define WASM_I32_DIVS(x, y) kExprI32DivS, x, y
+#define WASM_I32_DIVU(x, y) kExprI32DivU, x, y
+#define WASM_I32_REMS(x, y) kExprI32RemS, x, y
+#define WASM_I32_REMU(x, y) kExprI32RemU, x, y
+#define WASM_I32_AND(x, y) kExprI32And, x, y
+#define WASM_I32_IOR(x, y) kExprI32Ior, x, y
+#define WASM_I32_XOR(x, y) kExprI32Xor, x, y
+#define WASM_I32_SHL(x, y) kExprI32Shl, x, y
+#define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
+#define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
+#define WASM_I32_EQ(x, y) kExprI32Eq, x, y
+#define WASM_I32_NE(x, y) kExprI32Ne, x, y
+#define WASM_I32_LTS(x, y) kExprI32LtS, x, y
+#define WASM_I32_LES(x, y) kExprI32LeS, x, y
+#define WASM_I32_LTU(x, y) kExprI32LtU, x, y
+#define WASM_I32_LEU(x, y) kExprI32LeU, x, y
+#define WASM_I32_GTS(x, y) kExprI32GtS, x, y
+#define WASM_I32_GES(x, y) kExprI32GeS, x, y
+#define WASM_I32_GTU(x, y) kExprI32GtU, x, y
+#define WASM_I32_GEU(x, y) kExprI32GeU, x, y
+#define WASM_I32_CLZ(x) kExprI32Clz, x
+#define WASM_I32_CTZ(x) kExprI32Ctz, x
+#define WASM_I32_POPCNT(x) kExprI32Popcnt, x
+
+//------------------------------------------------------------------------------
+// Int64 operations
+//------------------------------------------------------------------------------
+#define WASM_I64_ADD(x, y) kExprI64Add, x, y
+#define WASM_I64_SUB(x, y) kExprI64Sub, x, y
+#define WASM_I64_MUL(x, y) kExprI64Mul, x, y
+#define WASM_I64_DIVS(x, y) kExprI64DivS, x, y
+#define WASM_I64_DIVU(x, y) kExprI64DivU, x, y
+#define WASM_I64_REMS(x, y) kExprI64RemS, x, y
+#define WASM_I64_REMU(x, y) kExprI64RemU, x, y
+#define WASM_I64_AND(x, y) kExprI64And, x, y
+#define WASM_I64_IOR(x, y) kExprI64Ior, x, y
+#define WASM_I64_XOR(x, y) kExprI64Xor, x, y
+#define WASM_I64_SHL(x, y) kExprI64Shl, x, y
+#define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
+#define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
+#define WASM_I64_EQ(x, y) kExprI64Eq, x, y
+#define WASM_I64_NE(x, y) kExprI64Ne, x, y
+#define WASM_I64_LTS(x, y) kExprI64LtS, x, y
+#define WASM_I64_LES(x, y) kExprI64LeS, x, y
+#define WASM_I64_LTU(x, y) kExprI64LtU, x, y
+#define WASM_I64_LEU(x, y) kExprI64LeU, x, y
+#define WASM_I64_GTS(x, y) kExprI64GtS, x, y
+#define WASM_I64_GES(x, y) kExprI64GeS, x, y
+#define WASM_I64_GTU(x, y) kExprI64GtU, x, y
+#define WASM_I64_GEU(x, y) kExprI64GeU, x, y
+#define WASM_I64_CLZ(x) kExprI64Clz, x
+#define WASM_I64_CTZ(x) kExprI64Ctz, x
+#define WASM_I64_POPCNT(x) kExprI64Popcnt, x
+
+//------------------------------------------------------------------------------
+// Float32 operations
+//------------------------------------------------------------------------------
+#define WASM_F32_ADD(x, y) kExprF32Add, x, y
+#define WASM_F32_SUB(x, y) kExprF32Sub, x, y
+#define WASM_F32_MUL(x, y) kExprF32Mul, x, y
+#define WASM_F32_DIV(x, y) kExprF32Div, x, y
+#define WASM_F32_MIN(x, y) kExprF32Min, x, y
+#define WASM_F32_MAX(x, y) kExprF32Max, x, y
+#define WASM_F32_ABS(x) kExprF32Abs, x
+#define WASM_F32_NEG(x) kExprF32Neg, x
+#define WASM_F32_COPYSIGN(x, y) kExprF32CopySign, x, y
+#define WASM_F32_CEIL(x) kExprF32Ceil, x
+#define WASM_F32_FLOOR(x) kExprF32Floor, x
+#define WASM_F32_TRUNC(x) kExprF32Trunc, x
+#define WASM_F32_NEARESTINT(x) kExprF32NearestInt, x
+#define WASM_F32_SQRT(x) kExprF32Sqrt, x
+#define WASM_F32_EQ(x, y) kExprF32Eq, x, y
+#define WASM_F32_NE(x, y) kExprF32Ne, x, y
+#define WASM_F32_LT(x, y) kExprF32Lt, x, y
+#define WASM_F32_LE(x, y) kExprF32Le, x, y
+#define WASM_F32_GT(x, y) kExprF32Gt, x, y
+#define WASM_F32_GE(x, y) kExprF32Ge, x, y
+
+//------------------------------------------------------------------------------
+// Float64 operations
+//------------------------------------------------------------------------------
+#define WASM_F64_ADD(x, y) kExprF64Add, x, y
+#define WASM_F64_SUB(x, y) kExprF64Sub, x, y
+#define WASM_F64_MUL(x, y) kExprF64Mul, x, y
+#define WASM_F64_DIV(x, y) kExprF64Div, x, y
+#define WASM_F64_MIN(x, y) kExprF64Min, x, y
+#define WASM_F64_MAX(x, y) kExprF64Max, x, y
+#define WASM_F64_ABS(x) kExprF64Abs, x
+#define WASM_F64_NEG(x) kExprF64Neg, x
+#define WASM_F64_COPYSIGN(x, y) kExprF64CopySign, x, y
+#define WASM_F64_CEIL(x) kExprF64Ceil, x
+#define WASM_F64_FLOOR(x) kExprF64Floor, x
+#define WASM_F64_TRUNC(x) kExprF64Trunc, x
+#define WASM_F64_NEARESTINT(x) kExprF64NearestInt, x
+#define WASM_F64_SQRT(x) kExprF64Sqrt, x
+#define WASM_F64_EQ(x, y) kExprF64Eq, x, y
+#define WASM_F64_NE(x, y) kExprF64Ne, x, y
+#define WASM_F64_LT(x, y) kExprF64Lt, x, y
+#define WASM_F64_LE(x, y) kExprF64Le, x, y
+#define WASM_F64_GT(x, y) kExprF64Gt, x, y
+#define WASM_F64_GE(x, y) kExprF64Ge, x, y
+
+//------------------------------------------------------------------------------
+// Type conversions.
+//------------------------------------------------------------------------------
+#define WASM_I32_SCONVERT_F32(x) kExprI32SConvertF32, x
+#define WASM_I32_SCONVERT_F64(x) kExprI32SConvertF64, x
+#define WASM_I32_UCONVERT_F32(x) kExprI32UConvertF32, x
+#define WASM_I32_UCONVERT_F64(x) kExprI32UConvertF64, x
+#define WASM_I32_CONVERT_I64(x) kExprI32ConvertI64, x
+#define WASM_I64_SCONVERT_F32(x) kExprI64SConvertF32, x
+#define WASM_I64_SCONVERT_F64(x) kExprI64SConvertF64, x
+#define WASM_I64_UCONVERT_F32(x) kExprI64UConvertF32, x
+#define WASM_I64_UCONVERT_F64(x) kExprI64UConvertF64, x
+#define WASM_I64_SCONVERT_I32(x) kExprI64SConvertI32, x
+#define WASM_I64_UCONVERT_I32(x) kExprI64UConvertI32, x
+#define WASM_F32_SCONVERT_I32(x) kExprF32SConvertI32, x
+#define WASM_F32_UCONVERT_I32(x) kExprF32UConvertI32, x
+#define WASM_F32_SCONVERT_I64(x) kExprF32SConvertI64, x
+#define WASM_F32_UCONVERT_I64(x) kExprF32UConvertI64, x
+#define WASM_F32_CONVERT_F64(x) kExprF32ConvertF64, x
+#define WASM_F32_REINTERPRET_I32(x) kExprF32ReinterpretI32, x
+#define WASM_F64_SCONVERT_I32(x) kExprF64SConvertI32, x
+#define WASM_F64_UCONVERT_I32(x) kExprF64UConvertI32, x
+#define WASM_F64_SCONVERT_I64(x) kExprF64SConvertI64, x
+#define WASM_F64_UCONVERT_I64(x) kExprF64UConvertI64, x
+#define WASM_F64_CONVERT_F32(x) kExprF64ConvertF32, x
+#define WASM_F64_REINTERPRET_I64(x) kExprF64ReinterpretI64, x
+#define WASM_I32_REINTERPRET_F32(x) kExprI32ReinterpretF32, x
+#define WASM_I64_REINTERPRET_F64(x) kExprI64ReinterpretF64, x
+
+#endif // V8_WASM_MACRO_GEN_H_
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
new file mode 100644
index 0000000000..fd2428080b
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -0,0 +1,511 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "src/simulator.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/compiler/wasm-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+ os << "WASM module with ";
+ os << (1 << module.min_mem_size_log2) << " min mem";
+ os << (1 << module.max_mem_size_log2) << " max mem";
+ if (module.functions) os << module.functions->size() << " functions";
+ if (module.globals) os << module.functions->size() << " globals";
+ if (module.data_segments) os << module.functions->size() << " data segments";
+ return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+ os << "WASM function with signature ";
+
+ // TODO(titzer): factor out rendering of signatures.
+ if (function.sig->return_count() == 0) os << "v";
+ for (size_t i = 0; i < function.sig->return_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(function.sig->GetReturn(i));
+ }
+ os << "_";
+ if (function.sig->parameter_count() == 0) os << "v";
+ for (size_t i = 0; i < function.sig->parameter_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(function.sig->GetParam(i));
+ }
+ os << " locals: ";
+ if (function.local_int32_count)
+ os << function.local_int32_count << " int32s ";
+ if (function.local_int64_count)
+ os << function.local_int64_count << " int64s ";
+ if (function.local_float32_count)
+ os << function.local_float32_count << " float32s ";
+ if (function.local_float64_count)
+ os << function.local_float64_count << " float64s ";
+
+ os << " code bytes: "
+ << (function.code_end_offset - function.code_start_offset);
+ return os;
+}
+
+
+// A helper class for compiling multiple wasm functions that offers
+// placeholder code objects for calling functions that are not yet compiled.
+class WasmLinker {
+ public:
+ WasmLinker(Isolate* isolate, size_t size)
+ : isolate_(isolate), placeholder_code_(size), function_code_(size) {}
+
+ // Get the code object for a function, allocating a placeholder if it has
+ // not yet been compiled.
+ Handle<Code> GetFunctionCode(uint32_t index) {
+ DCHECK(index < function_code_.size());
+ if (function_code_[index].is_null()) {
+ // Create a placeholder code object and encode the corresponding index in
+ // the {constant_pool_offset} field of the code object.
+ // TODO(titzer): placeholder code objects are somewhat dangerous.
+ Handle<Code> self(nullptr, isolate_);
+ byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fake instructions.
+ CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr};
+ Handle<Code> code = isolate_->factory()->NewCode(
+ desc, Code::KindField::encode(Code::WASM_FUNCTION), self);
+ code->set_constant_pool_offset(index + kPlaceholderMarker);
+ placeholder_code_[index] = code;
+ function_code_[index] = code;
+ }
+ return function_code_[index];
+ }
+
+ void Finish(uint32_t index, Handle<Code> code) {
+ DCHECK(index < function_code_.size());
+ function_code_[index] = code;
+ }
+
+ void Link(Handle<FixedArray> function_table,
+ std::vector<uint16_t>* functions) {
+ for (size_t i = 0; i < function_code_.size(); i++) {
+ LinkFunction(function_code_[i]);
+ }
+ if (functions && !function_table.is_null()) {
+ int table_size = static_cast<int>(functions->size());
+ DCHECK_EQ(function_table->length(), table_size * 2);
+ for (int i = 0; i < table_size; i++) {
+ function_table->set(i + table_size, *function_code_[functions->at(i)]);
+ }
+ }
+ }
+
+ private:
+ static const int kPlaceholderMarker = 1000000000;
+
+ Isolate* isolate_;
+ std::vector<Handle<Code>> placeholder_code_;
+ std::vector<Handle<Code>> function_code_;
+
+ void LinkFunction(Handle<Code> code) {
+ bool modified = false;
+ int mode_mask = RelocInfo::kCodeTargetMask;
+ AllowDeferredHandleDereference embedding_raw_address;
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsCodeTarget(mode)) {
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION &&
+ target->constant_pool_offset() >= kPlaceholderMarker) {
+ // Patch direct calls to placeholder code objects.
+ uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+ CHECK(index < function_code_.size());
+ Handle<Code> new_target = function_code_[index];
+ if (target != *new_target) {
+ CHECK_EQ(*placeholder_code_[index], target);
+ it.rinfo()->set_target_address(new_target->instruction_start(),
+ SKIP_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ modified = true;
+ }
+ }
+ }
+ }
+ if (modified) {
+ Assembler::FlushICache(isolate_, code->instruction_start(),
+ code->instruction_size());
+ }
+ }
+};
+
+namespace {
+// Internal constants for the layout of the module object.
+const int kWasmModuleInternalFieldCount = 4;
+const int kWasmModuleFunctionTable = 0;
+const int kWasmModuleCodeTable = 1;
+const int kWasmMemArrayBuffer = 2;
+const int kWasmGlobalsArrayBuffer = 3;
+
+
+size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>* globals) {
+ uint32_t offset = 0;
+ if (!globals) return 0;
+ for (WasmGlobal& global : *globals) {
+ byte size = WasmOpcodes::MemSize(global.type);
+ offset = (offset + size - 1) & ~(size - 1); // align
+ global.offset = offset;
+ offset += size;
+ }
+ return offset;
+}
+
+
+void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
+ for (const WasmDataSegment& segment : *module->data_segments) {
+ if (!segment.init) continue;
+ CHECK_LT(segment.dest_addr, mem_size);
+ CHECK_LE(segment.source_size, mem_size);
+ CHECK_LE(segment.dest_addr + segment.source_size, mem_size);
+ byte* addr = mem_addr + segment.dest_addr;
+ memcpy(addr, module->module_start + segment.source_offset,
+ segment.source_size);
+ }
+}
+
+
+Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
+ if (!module->function_table || module->function_table->size() == 0) {
+ return Handle<FixedArray>::null();
+ }
+ int table_size = static_cast<int>(module->function_table->size());
+ Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
+ for (int i = 0; i < table_size; i++) {
+ WasmFunction* function =
+ &module->functions->at(module->function_table->at(i));
+ fixed->set(i, Smi::FromInt(function->sig_index));
+ }
+ return fixed;
+}
+
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, int size,
+ byte** backing_store) {
+ void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ if (!memory) return Handle<JSArrayBuffer>::null();
+ *backing_store = reinterpret_cast<byte*>(memory);
+
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ for (int i = 0; i < size; i++) {
+ DCHECK_EQ(0, (*backing_store)[i]);
+ }
+#endif
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, false, memory, size);
+ buffer->set_is_neuterable(false);
+ return buffer;
+}
+} // namespace
+
+
+WasmModule::WasmModule()
+ : globals(nullptr),
+ signatures(nullptr),
+ functions(nullptr),
+ data_segments(nullptr),
+ function_table(nullptr) {}
+
+
+WasmModule::~WasmModule() {
+ if (globals) delete globals;
+ if (signatures) delete signatures;
+ if (functions) delete functions;
+ if (data_segments) delete data_segments;
+ if (function_table) delete function_table;
+}
+
+
+// Instantiates a wasm module as a JSObject.
+// * allocates a backing store of {mem_size} bytes.
+// * installs a named property "memory" for that buffer if exported
+// * installs named properties on the object for exported functions
+// * compiles wasm code to machine code
+MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
+ Handle<JSObject> ffi,
+ Handle<JSArrayBuffer> memory) {
+ this->shared_isolate = isolate; // TODO(titzer): have a real shared isolate.
+ ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
+
+ Factory* factory = isolate->factory();
+ // Memory is bigger than maximum supported size.
+ if (memory.is_null() && min_mem_size_log2 > kMaxMemSize) {
+ thrower.Error("Out of memory: wasm memory too large");
+ return MaybeHandle<JSObject>();
+ }
+
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
+
+ //-------------------------------------------------------------------------
+ // Allocate the module object.
+ //-------------------------------------------------------------------------
+ Handle<JSObject> module = factory->NewJSObjectFromMap(map, TENURED);
+ Handle<FixedArray> code_table =
+ factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+
+ //-------------------------------------------------------------------------
+ // Allocate the linear memory.
+ //-------------------------------------------------------------------------
+ uint32_t mem_size = 1 << min_mem_size_log2;
+ byte* mem_addr = nullptr;
+ Handle<JSArrayBuffer> mem_buffer;
+ if (!memory.is_null()) {
+ memory->set_is_neuterable(false);
+ mem_addr = reinterpret_cast<byte*>(memory->backing_store());
+ mem_size = memory->byte_length()->Number();
+ mem_buffer = memory;
+ } else {
+ mem_buffer = NewArrayBuffer(isolate, mem_size, &mem_addr);
+ if (!mem_addr) {
+ // Not enough space for backing store of memory
+ thrower.Error("Out of memory: wasm memory");
+ return MaybeHandle<JSObject>();
+ }
+ }
+
+ // Load initialized data segments.
+ LoadDataSegments(this, mem_addr, mem_size);
+
+ module->SetInternalField(kWasmMemArrayBuffer, *mem_buffer);
+
+ if (mem_export) {
+ // Export the memory as a named property.
+ Handle<String> name = factory->InternalizeUtf8String("memory");
+ JSObject::AddProperty(module, name, mem_buffer, READ_ONLY);
+ }
+
+ //-------------------------------------------------------------------------
+ // Allocate the globals area if necessary.
+ //-------------------------------------------------------------------------
+ size_t globals_size = AllocateGlobalsOffsets(globals);
+ byte* globals_addr = nullptr;
+ if (globals_size > 0) {
+ Handle<JSArrayBuffer> globals_buffer =
+ NewArrayBuffer(isolate, mem_size, &globals_addr);
+ if (!globals_addr) {
+ // Not enough space for backing store of globals.
+ thrower.Error("Out of memory: wasm globals");
+ return MaybeHandle<JSObject>();
+ }
+
+ module->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
+ } else {
+ module->SetInternalField(kWasmGlobalsArrayBuffer, Smi::FromInt(0));
+ }
+
+ //-------------------------------------------------------------------------
+ // Compile all functions in the module.
+ //-------------------------------------------------------------------------
+ int index = 0;
+ WasmLinker linker(isolate, functions->size());
+ ModuleEnv module_env;
+ module_env.module = this;
+ module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr);
+ module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr) + mem_size;
+ module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr);
+ module_env.linker = &linker;
+ module_env.function_code = nullptr;
+ module_env.function_table = BuildFunctionTable(isolate, this);
+ module_env.memory = memory;
+ module_env.context = isolate->native_context();
+ module_env.asm_js = false;
+
+ // First pass: compile each function and initialize the code table.
+ for (const WasmFunction& func : *functions) {
+ if (thrower.error()) break;
+
+ const char* cstr = GetName(func.name_offset);
+ Handle<String> name = factory->InternalizeUtf8String(cstr);
+ Handle<Code> code = Handle<Code>::null();
+ Handle<JSFunction> function = Handle<JSFunction>::null();
+ if (func.external) {
+ // Lookup external function in FFI object.
+ if (!ffi.is_null()) {
+ MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+ if (!result.is_null()) {
+ Handle<Object> obj = result.ToHandleChecked();
+ if (obj->IsJSFunction()) {
+ function = Handle<JSFunction>::cast(obj);
+ code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
+ function, index);
+ } else {
+ thrower.Error("FFI function #%d:%s is not a JSFunction.", index,
+ cstr);
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ thrower.Error("FFI function #%d:%s not found.", index, cstr);
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ thrower.Error("FFI table is not an object.");
+ return MaybeHandle<JSObject>();
+ }
+ } else {
+ // Compile the function.
+ code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func,
+ index);
+ if (code.is_null()) {
+ thrower.Error("Compilation of #%d:%s failed.", index, cstr);
+ return MaybeHandle<JSObject>();
+ }
+ if (func.exported) {
+ function = compiler::CompileJSToWasmWrapper(isolate, &module_env, name,
+ code, module, index);
+ }
+ }
+ if (!code.is_null()) {
+ // Install the code into the linker table.
+ linker.Finish(index, code);
+ code_table->set(index, *code);
+ }
+ if (func.exported) {
+ // Exported functions are installed as read-only properties on the module.
+ JSObject::AddProperty(module, name, function, READ_ONLY);
+ }
+ index++;
+ }
+
+ // Second pass: patch all direct call sites.
+ linker.Link(module_env.function_table, this->function_table);
+
+ module->SetInternalField(kWasmModuleFunctionTable, Smi::FromInt(0));
+ module->SetInternalField(kWasmModuleCodeTable, *code_table);
+ return module;
+}
+
+
+Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ if (linker) return linker->GetFunctionCode(index);
+ if (function_code) return function_code->at(index);
+ return Handle<Code>::null();
+}
+
+
+compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
+ uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ // Always make a direct call to whatever is in the table at that location.
+ // A wrapper will be generated for FFI calls.
+ WasmFunction* function = &module->functions->at(index);
+ return GetWasmCallDescriptor(zone, function->sig);
+}
+
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js) {
+ HandleScope scope(isolate);
+ Zone zone;
+ // Decode the module, but don't verify function bodies, since we'll
+ // be compiling them anyway.
+ ModuleResult result =
+ DecodeWasmModule(isolate, &zone, module_start, module_end, false, false);
+ if (result.failed()) {
+ // Module verification failed. throw.
+ std::ostringstream str;
+ str << "WASM.compileRun() failed: " << result;
+ isolate->Throw(
+ *isolate->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+ return -1;
+ }
+
+ int32_t retval = CompileAndRunWasmModule(isolate, result.val);
+ delete result.val;
+ return retval;
+}
+
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+
+ // Allocate temporary linear memory and globals.
+ size_t mem_size = 1 << module->min_mem_size_log2;
+ size_t globals_size = AllocateGlobalsOffsets(module->globals);
+
+ base::SmartArrayPointer<byte> mem_addr(new byte[mem_size]);
+ base::SmartArrayPointer<byte> globals_addr(new byte[globals_size]);
+
+ memset(mem_addr.get(), 0, mem_size);
+ memset(globals_addr.get(), 0, globals_size);
+
+ // Create module environment.
+ WasmLinker linker(isolate, module->functions->size());
+ ModuleEnv module_env;
+ module_env.module = module;
+ module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr.get());
+ module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr.get()) + mem_size;
+ module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr.get());
+ module_env.linker = &linker;
+ module_env.function_code = nullptr;
+ module_env.function_table = BuildFunctionTable(isolate, module);
+ module_env.asm_js = false;
+
+ // Load data segments.
+ // TODO(titzer): throw instead of crashing if segments don't fit in memory?
+ LoadDataSegments(module, mem_addr.get(), mem_size);
+
+ // Compile all functions.
+ Handle<Code> main_code = Handle<Code>::null(); // record last code.
+ int index = 0;
+ for (const WasmFunction& func : *module->functions) {
+ if (!func.external) {
+ // Compile the function and install it in the code table.
+ Handle<Code> code = compiler::CompileWasmFunction(
+ thrower, isolate, &module_env, func, index);
+ if (!code.is_null()) {
+ if (func.exported) main_code = code;
+ linker.Finish(index, code);
+ }
+ if (thrower.error()) return -1;
+ }
+ index++;
+ }
+
+ if (!main_code.is_null()) {
+ linker.Link(module_env.function_table, module->function_table);
+#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
+ // Run the main code on arm64 simulator.
+ Simulator* simulator = Simulator::current(isolate);
+ Simulator::CallArgument args[] = {Simulator::CallArgument(0),
+ Simulator::CallArgument::End()};
+ return static_cast<int32_t>(simulator->CallInt64(main_code->entry(), args));
+#elif USE_SIMULATOR
+ // Run the main code on simulator.
+ Simulator* simulator = Simulator::current(isolate);
+ return static_cast<int32_t>(
+ simulator->Call(main_code->entry(), 4, 0, 0, 0, 0));
+#else
+ // Run the main code as raw machine code.
+ int32_t (*raw_func)() = reinterpret_cast<int32_t (*)()>(
+ reinterpret_cast<uintptr_t>(main_code->entry()));
+ return raw_func();
+#endif
+ } else {
+ // No main code was found.
+ isolate->Throw(*isolate->factory()->NewStringFromStaticChars(
+ "WASM.compileRun() failed: no valid main code produced."));
+ }
+ return -1;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
new file mode 100644
index 0000000000..5e2ba58a44
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -0,0 +1,192 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MODULE_H_
+#define V8_WASM_MODULE_H_
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/api.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CallDescriptor;
+}
+
+namespace wasm {
+const size_t kMaxModuleSize = 1024 * 1024 * 1024;
+const size_t kMaxFunctionSize = 128 * 1024;
+const size_t kMaxStringSize = 256;
+
+enum WasmSectionDeclCode {
+ kDeclMemory = 0x00,
+ kDeclSignatures = 0x01,
+ kDeclFunctions = 0x02,
+ kDeclGlobals = 0x03,
+ kDeclDataSegments = 0x04,
+ kDeclFunctionTable = 0x05,
+ kDeclWLL = 0x11,
+ kDeclEnd = 0x06,
+};
+
+static const int kMaxModuleSectionCode = 6;
+
+enum WasmFunctionDeclBit {
+ kDeclFunctionName = 0x01,
+ kDeclFunctionImport = 0x02,
+ kDeclFunctionLocals = 0x04,
+ kDeclFunctionExport = 0x08
+};
+
+// Constants for fixed-size elements within a module.
+static const size_t kDeclMemorySize = 3;
+static const size_t kDeclGlobalSize = 6;
+static const size_t kDeclDataSegmentSize = 13;
+
+// Static representation of a wasm function.
+struct WasmFunction {
+ FunctionSig* sig; // signature of the function.
+ uint16_t sig_index; // index into the signature table.
+ uint32_t name_offset; // offset in the module bytes of the name, if any.
+ uint32_t code_start_offset; // offset in the module bytes of code start.
+ uint32_t code_end_offset; // offset in the module bytes of code end.
+ uint16_t local_int32_count; // number of int32 local variables.
+ uint16_t local_int64_count; // number of int64 local variables.
+ uint16_t local_float32_count; // number of float32 local variables.
+ uint16_t local_float64_count; // number of float64 local variables.
+ bool exported; // true if this function is exported.
+ bool external; // true if this function is externally supplied.
+};
+
+struct ModuleEnv; // forward declaration of decoder interface.
+
+// Static representation of a wasm global variable.
+struct WasmGlobal {
+ uint32_t name_offset; // offset in the module bytes of the name, if any.
+ MachineType type; // type of the global.
+ uint32_t offset; // offset from beginning of globals area.
+ bool exported; // true if this global is exported.
+};
+
+// Static representation of a wasm data segment.
+struct WasmDataSegment {
+ uint32_t dest_addr; // destination memory address of the data.
+ uint32_t source_offset; // start offset in the module bytes.
+ uint32_t source_size; // end offset in the module bytes.
+ bool init; // true if loaded upon instantiation.
+};
+
+// Static representation of a module.
+struct WasmModule {
+ static const uint8_t kMinMemSize = 12; // Minimum memory size = 4kb
+ static const uint8_t kMaxMemSize = 30; // Maximum memory size = 1gb
+
+ Isolate* shared_isolate; // isolate for storing shared code.
+ const byte* module_start; // starting address for the module bytes.
+ const byte* module_end; // end address for the module bytes.
+ uint8_t min_mem_size_log2; // minimum size of the memory (log base 2).
+ uint8_t max_mem_size_log2; // maximum size of the memory (log base 2).
+ bool mem_export; // true if the memory is exported.
+ bool mem_external; // true if the memory is external.
+
+ std::vector<WasmGlobal>* globals; // globals in this module.
+ std::vector<FunctionSig*>* signatures; // signatures in this module.
+ std::vector<WasmFunction>* functions; // functions in this module.
+ std::vector<WasmDataSegment>* data_segments; // data segments in this module.
+ std::vector<uint16_t>* function_table; // function table.
+
+ WasmModule();
+ ~WasmModule();
+
+ // Get a pointer to a string stored in the module bytes representing a name.
+ const char* GetName(uint32_t offset) {
+ CHECK(BoundsCheck(offset, offset + 1));
+ if (offset == 0) return "<?>"; // no name.
+ return reinterpret_cast<const char*>(module_start + offset);
+ }
+
+ // Checks the given offset range is contained within the module bytes.
+ bool BoundsCheck(uint32_t start, uint32_t end) {
+ size_t size = module_end - module_start;
+ return start < size && end < size;
+ }
+
+ // Creates a new instantiation of the module in the given isolate.
+ MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSObject> ffi,
+ Handle<JSArrayBuffer> memory);
+};
+
+// forward declaration.
+class WasmLinker;
+
+// Interface provided to the decoder/graph builder which contains only
+// minimal information about the globals, functions, and function tables.
+struct ModuleEnv {
+ uintptr_t globals_area; // address of the globals area.
+ uintptr_t mem_start; // address of the start of linear memory.
+ uintptr_t mem_end; // address of the end of linear memory.
+
+ WasmModule* module;
+ WasmLinker* linker;
+ std::vector<Handle<Code>>* function_code;
+ Handle<FixedArray> function_table;
+ Handle<JSArrayBuffer> memory;
+ Handle<Context> context;
+ bool asm_js; // true if the module originated from asm.js.
+
+ bool IsValidGlobal(uint32_t index) {
+ return module && index < module->globals->size();
+ }
+ bool IsValidFunction(uint32_t index) {
+ return module && index < module->functions->size();
+ }
+ bool IsValidSignature(uint32_t index) {
+ return module && index < module->signatures->size();
+ }
+ MachineType GetGlobalType(uint32_t index) {
+ DCHECK(IsValidGlobal(index));
+ return module->globals->at(index).type;
+ }
+ FunctionSig* GetFunctionSignature(uint32_t index) {
+ DCHECK(IsValidFunction(index));
+ return module->functions->at(index).sig;
+ }
+ FunctionSig* GetSignature(uint32_t index) {
+ DCHECK(IsValidSignature(index));
+ return module->signatures->at(index);
+ }
+ size_t FunctionTableSize() {
+ return module ? module->function_table->size() : 0;
+ }
+
+ Handle<Code> GetFunctionCode(uint32_t index);
+ Handle<FixedArray> GetFunctionTable();
+
+ compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone, FunctionSig* sig);
+ compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
+};
+
+std::ostream& operator<<(std::ostream& os, const WasmModule& module);
+std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
+
+typedef Result<WasmModule*> ModuleResult;
+typedef Result<WasmFunction*> FunctionResult;
+
+// For testing. Decode, verify, and run the last exported function in the
+// given encoded module.
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js = false);
+
+// For testing. Decode, verify, and run the last exported function in the
+// given decoded module.
+int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MODULE_H_
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
new file mode 100644
index 0000000000..25eef034d7
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -0,0 +1,133 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+typedef Signature<LocalType> FunctionSig;
+
+const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return "Expr" #name;
+ FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+ default:
+ break;
+ }
+ return "Unknown";
+}
+
+
+#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
+
+
+enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
+
+
+// TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
+#define DECLARE_SIG(name, ...) \
+ static LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static const FunctionSig kSig_##name( \
+ 1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
+
+FOREACH_SIGNATURE(DECLARE_SIG)
+
+#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
+
+static const FunctionSig* kSimpleExprSigs[] = {
+ nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+
+static byte kSimpleExprSigTable[256];
+
+
+// Initialize the signature table.
+static void InitSigTable() {
+#define SET_SIG_TABLE(name, opcode, sig) \
+ kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
+ FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
+#undef SET_SIG_TABLE
+}
+
+
+FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
+ // TODO(titzer): use LazyInstance to make this thread safe.
+ if (kSimpleExprSigTable[kExprI32Add] == 0) InitSigTable();
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+}
+
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+
+bool WasmOpcodes::IsSupported(WasmOpcode opcode) {
+#if !WASM_64
+ switch (opcode) {
+ // Opcodes not supported on 32-bit platforms.
+ case kExprI64Add:
+ case kExprI64Sub:
+ case kExprI64Mul:
+ case kExprI64DivS:
+ case kExprI64DivU:
+ case kExprI64RemS:
+ case kExprI64RemU:
+ case kExprI64And:
+ case kExprI64Ior:
+ case kExprI64Xor:
+ case kExprI64Shl:
+ case kExprI64ShrU:
+ case kExprI64ShrS:
+ case kExprI64Eq:
+ case kExprI64Ne:
+ case kExprI64LtS:
+ case kExprI64LeS:
+ case kExprI64LtU:
+ case kExprI64LeU:
+ case kExprI64GtS:
+ case kExprI64GeS:
+ case kExprI64GtU:
+ case kExprI64GeU:
+
+ case kExprI32ConvertI64:
+ case kExprI64SConvertI32:
+ case kExprI64UConvertI32:
+
+ case kExprF64ReinterpretI64:
+ case kExprI64ReinterpretF64:
+
+ case kExprI64Clz:
+ case kExprI64Ctz:
+ case kExprI64Popcnt:
+
+ case kExprF32SConvertI64:
+ case kExprF32UConvertI64:
+ case kExprF64SConvertI64:
+ case kExprF64UConvertI64:
+ case kExprI64SConvertF32:
+ case kExprI64SConvertF64:
+ case kExprI64UConvertF32:
+ case kExprI64UConvertF64:
+
+ return false;
+ default:
+ return true;
+ }
+#else
+ return true;
+#endif
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
new file mode 100644
index 0000000000..ae2843a6c1
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -0,0 +1,476 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OPCODES_H_
+#define V8_WASM_OPCODES_H_
+
+#include "src/machine-type.h"
+#include "src/signature.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Binary encoding of local types.
+enum LocalTypeCode {
+ kLocalVoid = 0,
+ kLocalI32 = 1,
+ kLocalI64 = 2,
+ kLocalF32 = 3,
+ kLocalF64 = 4
+};
+
+// Binary encoding of memory types.
+enum MemTypeCode {
+ kMemI8 = 0,
+ kMemU8 = 1,
+ kMemI16 = 2,
+ kMemU16 = 3,
+ kMemI32 = 4,
+ kMemU32 = 5,
+ kMemI64 = 6,
+ kMemU64 = 7,
+ kMemF32 = 8,
+ kMemF64 = 9
+};
+
+// We reuse the internal machine type to represent WebAssembly AST types.
+// A typedef improves readability without adding a whole new type system.
+typedef MachineRepresentation LocalType;
+const LocalType kAstStmt = MachineRepresentation::kNone;
+const LocalType kAstI32 = MachineRepresentation::kWord32;
+const LocalType kAstI64 = MachineRepresentation::kWord64;
+const LocalType kAstF32 = MachineRepresentation::kFloat32;
+const LocalType kAstF64 = MachineRepresentation::kFloat64;
+// We use kTagged here because kNone is already used by kAstStmt.
+const LocalType kAstEnd = MachineRepresentation::kTagged;
+
+// Functionality related to encoding memory accesses.
+struct MemoryAccess {
+ // Atomicity annotations for access to the memory and globals.
+ enum Atomicity {
+ kNone = 0, // non-atomic
+ kSequential = 1, // sequential consistency
+ kAcquire = 2, // acquire semantics
+ kRelease = 3 // release semantics
+ };
+
+ // Alignment annotations for memory accesses.
+ enum Alignment { kAligned = 0, kUnaligned = 1 };
+
+ // Bitfields for the various annotations for memory accesses.
+ typedef BitField<Alignment, 7, 1> AlignmentField;
+ typedef BitField<Atomicity, 5, 2> AtomicityField;
+ typedef BitField<bool, 4, 1> OffsetField;
+};
+
+typedef Signature<LocalType> FunctionSig;
+
+// Control expressions and blocks.
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Nop, 0x00, _) \
+ V(Block, 0x01, _) \
+ V(Loop, 0x02, _) \
+ V(If, 0x03, _) \
+ V(IfElse, 0x04, _) \
+ V(Select, 0x05, _) \
+ V(Br, 0x06, _) \
+ V(BrIf, 0x07, _) \
+ V(TableSwitch, 0x08, _) \
+ V(Return, 0x14, _) \
+ V(Unreachable, 0x15, _)
+// TODO(titzer): numbering
+
+// Constants, locals, globals, and calls.
+#define FOREACH_MISC_OPCODE(V) \
+ V(I8Const, 0x09, _) \
+ V(I32Const, 0x0a, _) \
+ V(I64Const, 0x0b, _) \
+ V(F64Const, 0x0c, _) \
+ V(F32Const, 0x0d, _) \
+ V(GetLocal, 0x0e, _) \
+ V(SetLocal, 0x0f, _) \
+ V(LoadGlobal, 0x10, _) \
+ V(StoreGlobal, 0x11, _) \
+ V(CallFunction, 0x12, _) \
+ V(CallIndirect, 0x13, _)
+
+// Load memory expressions.
+#define FOREACH_LOAD_MEM_OPCODE(V) \
+ V(I32LoadMem8S, 0x20, i_i) \
+ V(I32LoadMem8U, 0x21, i_i) \
+ V(I32LoadMem16S, 0x22, i_i) \
+ V(I32LoadMem16U, 0x23, i_i) \
+ V(I64LoadMem8S, 0x24, l_i) \
+ V(I64LoadMem8U, 0x25, l_i) \
+ V(I64LoadMem16S, 0x26, l_i) \
+ V(I64LoadMem16U, 0x27, l_i) \
+ V(I64LoadMem32S, 0x28, l_i) \
+ V(I64LoadMem32U, 0x29, l_i) \
+ V(I32LoadMem, 0x2a, i_i) \
+ V(I64LoadMem, 0x2b, l_i) \
+ V(F32LoadMem, 0x2c, f_i) \
+ V(F64LoadMem, 0x2d, d_i)
+
+// Store memory expressions.
+#define FOREACH_STORE_MEM_OPCODE(V) \
+ V(I32StoreMem8, 0x2e, i_ii) \
+ V(I32StoreMem16, 0x2f, i_ii) \
+ V(I64StoreMem8, 0x30, l_il) \
+ V(I64StoreMem16, 0x31, l_il) \
+ V(I64StoreMem32, 0x32, l_il) \
+ V(I32StoreMem, 0x33, i_ii) \
+ V(I64StoreMem, 0x34, l_il) \
+ V(F32StoreMem, 0x35, f_if) \
+ V(F64StoreMem, 0x36, d_id)
+
+// Load memory expressions.
+#define FOREACH_MISC_MEM_OPCODE(V) \
+ V(MemorySize, 0x3b, i_v) \
+ V(GrowMemory, 0x39, i_i)
+
+// Expressions with signatures.
+#define FOREACH_SIMPLE_OPCODE(V) \
+ V(I32Add, 0x40, i_ii) \
+ V(I32Sub, 0x41, i_ii) \
+ V(I32Mul, 0x42, i_ii) \
+ V(I32DivS, 0x43, i_ii) \
+ V(I32DivU, 0x44, i_ii) \
+ V(I32RemS, 0x45, i_ii) \
+ V(I32RemU, 0x46, i_ii) \
+ V(I32And, 0x47, i_ii) \
+ V(I32Ior, 0x48, i_ii) \
+ V(I32Xor, 0x49, i_ii) \
+ V(I32Shl, 0x4a, i_ii) \
+ V(I32ShrU, 0x4b, i_ii) \
+ V(I32ShrS, 0x4c, i_ii) \
+ V(I32Eq, 0x4d, i_ii) \
+ V(I32Ne, 0x4e, i_ii) \
+ V(I32LtS, 0x4f, i_ii) \
+ V(I32LeS, 0x50, i_ii) \
+ V(I32LtU, 0x51, i_ii) \
+ V(I32LeU, 0x52, i_ii) \
+ V(I32GtS, 0x53, i_ii) \
+ V(I32GeS, 0x54, i_ii) \
+ V(I32GtU, 0x55, i_ii) \
+ V(I32GeU, 0x56, i_ii) \
+ V(I32Clz, 0x57, i_i) \
+ V(I32Ctz, 0x58, i_i) \
+ V(I32Popcnt, 0x59, i_i) \
+ V(BoolNot, 0x5a, i_i) \
+ V(I64Add, 0x5b, l_ll) \
+ V(I64Sub, 0x5c, l_ll) \
+ V(I64Mul, 0x5d, l_ll) \
+ V(I64DivS, 0x5e, l_ll) \
+ V(I64DivU, 0x5f, l_ll) \
+ V(I64RemS, 0x60, l_ll) \
+ V(I64RemU, 0x61, l_ll) \
+ V(I64And, 0x62, l_ll) \
+ V(I64Ior, 0x63, l_ll) \
+ V(I64Xor, 0x64, l_ll) \
+ V(I64Shl, 0x65, l_ll) \
+ V(I64ShrU, 0x66, l_ll) \
+ V(I64ShrS, 0x67, l_ll) \
+ V(I64Eq, 0x68, i_ll) \
+ V(I64Ne, 0x69, i_ll) \
+ V(I64LtS, 0x6a, i_ll) \
+ V(I64LeS, 0x6b, i_ll) \
+ V(I64LtU, 0x6c, i_ll) \
+ V(I64LeU, 0x6d, i_ll) \
+ V(I64GtS, 0x6e, i_ll) \
+ V(I64GeS, 0x6f, i_ll) \
+ V(I64GtU, 0x70, i_ll) \
+ V(I64GeU, 0x71, i_ll) \
+ V(I64Clz, 0x72, l_l) \
+ V(I64Ctz, 0x73, l_l) \
+ V(I64Popcnt, 0x74, l_l) \
+ V(F32Add, 0x75, f_ff) \
+ V(F32Sub, 0x76, f_ff) \
+ V(F32Mul, 0x77, f_ff) \
+ V(F32Div, 0x78, f_ff) \
+ V(F32Min, 0x79, f_ff) \
+ V(F32Max, 0x7a, f_ff) \
+ V(F32Abs, 0x7b, f_f) \
+ V(F32Neg, 0x7c, f_f) \
+ V(F32CopySign, 0x7d, f_ff) \
+ V(F32Ceil, 0x7e, f_f) \
+ V(F32Floor, 0x7f, f_f) \
+ V(F32Trunc, 0x80, f_f) \
+ V(F32NearestInt, 0x81, f_f) \
+ V(F32Sqrt, 0x82, f_f) \
+ V(F32Eq, 0x83, i_ff) \
+ V(F32Ne, 0x84, i_ff) \
+ V(F32Lt, 0x85, i_ff) \
+ V(F32Le, 0x86, i_ff) \
+ V(F32Gt, 0x87, i_ff) \
+ V(F32Ge, 0x88, i_ff) \
+ V(F64Add, 0x89, d_dd) \
+ V(F64Sub, 0x8a, d_dd) \
+ V(F64Mul, 0x8b, d_dd) \
+ V(F64Div, 0x8c, d_dd) \
+ V(F64Min, 0x8d, d_dd) \
+ V(F64Max, 0x8e, d_dd) \
+ V(F64Abs, 0x8f, d_d) \
+ V(F64Neg, 0x90, d_d) \
+ V(F64CopySign, 0x91, d_dd) \
+ V(F64Ceil, 0x92, d_d) \
+ V(F64Floor, 0x93, d_d) \
+ V(F64Trunc, 0x94, d_d) \
+ V(F64NearestInt, 0x95, d_d) \
+ V(F64Sqrt, 0x96, d_d) \
+ V(F64Eq, 0x97, i_dd) \
+ V(F64Ne, 0x98, i_dd) \
+ V(F64Lt, 0x99, i_dd) \
+ V(F64Le, 0x9a, i_dd) \
+ V(F64Gt, 0x9b, i_dd) \
+ V(F64Ge, 0x9c, i_dd) \
+ V(I32SConvertF32, 0x9d, i_f) \
+ V(I32SConvertF64, 0x9e, i_d) \
+ V(I32UConvertF32, 0x9f, i_f) \
+ V(I32UConvertF64, 0xa0, i_d) \
+ V(I32ConvertI64, 0xa1, i_l) \
+ V(I64SConvertF32, 0xa2, l_f) \
+ V(I64SConvertF64, 0xa3, l_d) \
+ V(I64UConvertF32, 0xa4, l_f) \
+ V(I64UConvertF64, 0xa5, l_d) \
+ V(I64SConvertI32, 0xa6, l_i) \
+ V(I64UConvertI32, 0xa7, l_i) \
+ V(F32SConvertI32, 0xa8, f_i) \
+ V(F32UConvertI32, 0xa9, f_i) \
+ V(F32SConvertI64, 0xaa, f_l) \
+ V(F32UConvertI64, 0xab, f_l) \
+ V(F32ConvertF64, 0xac, f_d) \
+ V(F32ReinterpretI32, 0xad, f_i) \
+ V(F64SConvertI32, 0xae, d_i) \
+ V(F64UConvertI32, 0xaf, d_i) \
+ V(F64SConvertI64, 0xb0, d_l) \
+ V(F64UConvertI64, 0xb1, d_l) \
+ V(F64ConvertF32, 0xb2, d_f) \
+ V(F64ReinterpretI64, 0xb3, d_l) \
+ V(I32ReinterpretF32, 0xb4, i_f) \
+ V(I64ReinterpretF64, 0xb5, l_d)
+
+// All opcodes.
+#define FOREACH_OPCODE(V) \
+ FOREACH_CONTROL_OPCODE(V) \
+ FOREACH_MISC_OPCODE(V) \
+ FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_STORE_MEM_OPCODE(V) \
+ FOREACH_LOAD_MEM_OPCODE(V) \
+ FOREACH_MISC_MEM_OPCODE(V)
+
+// All signatures.
+#define FOREACH_SIGNATURE(V) \
+ V(i_ii, kAstI32, kAstI32, kAstI32) \
+ V(i_i, kAstI32, kAstI32) \
+ V(i_v, kAstI32) \
+ V(i_ff, kAstI32, kAstF32, kAstF32) \
+ V(i_f, kAstI32, kAstF32) \
+ V(i_dd, kAstI32, kAstF64, kAstF64) \
+ V(i_d, kAstI32, kAstF64) \
+ V(i_l, kAstI32, kAstI64) \
+ V(l_ll, kAstI64, kAstI64, kAstI64) \
+ V(i_ll, kAstI32, kAstI64, kAstI64) \
+ V(l_l, kAstI64, kAstI64) \
+ V(l_i, kAstI64, kAstI32) \
+ V(l_f, kAstI64, kAstF32) \
+ V(l_d, kAstI64, kAstF64) \
+ V(f_ff, kAstF32, kAstF32, kAstF32) \
+ V(f_f, kAstF32, kAstF32) \
+ V(f_d, kAstF32, kAstF64) \
+ V(f_i, kAstF32, kAstI32) \
+ V(f_l, kAstF32, kAstI64) \
+ V(d_dd, kAstF64, kAstF64, kAstF64) \
+ V(d_d, kAstF64, kAstF64) \
+ V(d_f, kAstF64, kAstF32) \
+ V(d_i, kAstF64, kAstI32) \
+ V(d_l, kAstF64, kAstI64) \
+ V(d_id, kAstF64, kAstI32, kAstF64) \
+ V(f_if, kAstF32, kAstI32, kAstF32) \
+ V(l_il, kAstI64, kAstI32, kAstI64)
+
+enum WasmOpcode {
+// Declare expression opcodes.
+#define DECLARE_NAMED_ENUM(name, opcode, sig) kExpr##name = opcode,
+ FOREACH_OPCODE(DECLARE_NAMED_ENUM)
+#undef DECLARE_NAMED_ENUM
+};
+
+// A collection of opcode-related static methods.
+class WasmOpcodes {
+ public:
+ static bool IsSupported(WasmOpcode opcode);
+ static const char* OpcodeName(WasmOpcode opcode);
+ static FunctionSig* Signature(WasmOpcode opcode);
+
+ static byte MemSize(MachineType type) {
+ return 1 << ElementSizeLog2Of(type.representation());
+ }
+
+ static LocalTypeCode LocalTypeCodeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return kLocalI32;
+ case kAstI64:
+ return kLocalI64;
+ case kAstF32:
+ return kLocalF32;
+ case kAstF64:
+ return kLocalF64;
+ case kAstStmt:
+ return kLocalVoid;
+ default:
+ UNREACHABLE();
+ return kLocalVoid;
+ }
+ }
+
+ static MemTypeCode MemTypeCodeFor(MachineType type) {
+ if (type == MachineType::Int8()) {
+ return kMemI8;
+ } else if (type == MachineType::Uint8()) {
+ return kMemU8;
+ } else if (type == MachineType::Int16()) {
+ return kMemI16;
+ } else if (type == MachineType::Uint16()) {
+ return kMemU16;
+ } else if (type == MachineType::Int32()) {
+ return kMemI32;
+ } else if (type == MachineType::Uint32()) {
+ return kMemU32;
+ } else if (type == MachineType::Int64()) {
+ return kMemI64;
+ } else if (type == MachineType::Uint64()) {
+ return kMemU64;
+ } else if (type == MachineType::Float32()) {
+ return kMemF32;
+ } else if (type == MachineType::Float64()) {
+ return kMemF64;
+ } else {
+ UNREACHABLE();
+ return kMemI32;
+ }
+ }
+
+ static MachineType MachineTypeFor(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return MachineType::Int32();
+ case kAstI64:
+ return MachineType::Int64();
+ case kAstF32:
+ return MachineType::Float32();
+ case kAstF64:
+ return MachineType::Float64();
+ case kAstStmt:
+ return MachineType::None();
+ default:
+ UNREACHABLE();
+ return MachineType::None();
+ }
+ }
+
+ static LocalType LocalTypeFor(MachineType type) {
+ if (type == MachineType::Int8()) {
+ return kAstI32;
+ } else if (type == MachineType::Uint8()) {
+ return kAstI32;
+ } else if (type == MachineType::Int16()) {
+ return kAstI32;
+ } else if (type == MachineType::Uint16()) {
+ return kAstI32;
+ } else if (type == MachineType::Int32()) {
+ return kAstI32;
+ } else if (type == MachineType::Uint32()) {
+ return kAstI32;
+ } else if (type == MachineType::Int64()) {
+ return kAstI64;
+ } else if (type == MachineType::Uint64()) {
+ return kAstI64;
+ } else if (type == MachineType::Float32()) {
+ return kAstF32;
+ } else if (type == MachineType::Float64()) {
+ return kAstF64;
+ } else {
+ UNREACHABLE();
+ return kAstI32;
+ }
+ }
+
+ // TODO(titzer): remove this method
+ static WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
+ if (type == MachineType::Int8()) {
+ return store ? kExprI32StoreMem8 : kExprI32LoadMem8S;
+ } else if (type == MachineType::Uint8()) {
+ return store ? kExprI32StoreMem8 : kExprI32LoadMem8U;
+ } else if (type == MachineType::Int16()) {
+ return store ? kExprI32StoreMem16 : kExprI32LoadMem16S;
+ } else if (type == MachineType::Uint16()) {
+ return store ? kExprI32StoreMem16 : kExprI32LoadMem16U;
+ } else if (type == MachineType::Int32()) {
+ return store ? kExprI32StoreMem : kExprI32LoadMem;
+ } else if (type == MachineType::Uint32()) {
+ return store ? kExprI32StoreMem : kExprI32LoadMem;
+ } else if (type == MachineType::Int64()) {
+ return store ? kExprI64StoreMem : kExprI64LoadMem;
+ } else if (type == MachineType::Uint64()) {
+ return store ? kExprI64StoreMem : kExprI64LoadMem;
+ } else if (type == MachineType::Float32()) {
+ return store ? kExprF32StoreMem : kExprF32LoadMem;
+ } else if (type == MachineType::Float64()) {
+ return store ? kExprF64StoreMem : kExprF64LoadMem;
+ } else {
+ UNREACHABLE();
+ return kExprNop;
+ }
+ }
+
+ static byte LoadStoreAccessOf(bool with_offset) {
+ return MemoryAccess::OffsetField::encode(with_offset);
+ }
+
+ static char ShortNameOf(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return 'i';
+ case kAstI64:
+ return 'l';
+ case kAstF32:
+ return 'f';
+ case kAstF64:
+ return 'd';
+ case kAstStmt:
+ return 'v';
+ case kAstEnd:
+ return 'x';
+ default:
+ UNREACHABLE();
+ return '?';
+ }
+ }
+
+ static const char* TypeName(LocalType type) {
+ switch (type) {
+ case kAstI32:
+ return "i32";
+ case kAstI64:
+ return "i64";
+ case kAstF32:
+ return "f32";
+ case kAstF64:
+ return "f64";
+ case kAstStmt:
+ return "<stmt>";
+ case kAstEnd:
+ return "<end>";
+ default:
+ return "<unknown>";
+ }
+ }
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OPCODES_H_
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
new file mode 100644
index 0000000000..4fd17ee364
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-result.h"
+
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects.h"
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code) {
+ switch (error_code) {
+ case kSuccess:
+ os << "Success";
+ break;
+ default: // TODO(titzer): render error codes
+ os << "Error";
+ break;
+ }
+ return os;
+}
+
+
+void ErrorThrower::Error(const char* format, ...) {
+ if (error_) return; // only report the first error.
+ error_ = true;
+ char buffer[256];
+
+ va_list arguments;
+ va_start(arguments, format);
+ base::OS::VSNPrintF(buffer, 255, format, arguments);
+ va_end(arguments);
+
+ std::ostringstream str;
+ if (context_ != nullptr) {
+ str << context_ << ": ";
+ }
+ str << buffer;
+
+ isolate_->ScheduleThrow(
+ *isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
new file mode 100644
index 0000000000..59ab29ebe4
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_RESULT_H_
+#define V8_WASM_RESULT_H_
+
+#include "src/base/smart-pointers.h"
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+namespace wasm {
+
+// Error codes for programmatic checking of the decoder's verification.
+enum ErrorCode {
+ kSuccess,
+ kError, // TODO(titzer): remove me
+ kOutOfMemory, // decoder ran out of memory
+ kEndOfCode, // end of code reached prematurely
+ kInvalidOpcode, // found invalid opcode
+ kUnreachableCode, // found unreachable code
+ kImproperContinue, // improperly nested continue
+ kImproperBreak, // improperly nested break
+ kReturnCount, // return count mismatch
+ kTypeError, // type mismatch
+ kInvalidLocalIndex, // invalid local
+ kInvalidGlobalIndex, // invalid global
+ kInvalidFunctionIndex, // invalid function
+ kInvalidMemType // invalid memory type
+};
+
+// The overall result of decoding a function or a module.
+template <typename T>
+struct Result {
+ Result()
+ : val(nullptr), error_code(kSuccess), start(nullptr), error_pc(nullptr) {
+ error_msg.Reset(nullptr);
+ }
+
+ T val;
+ ErrorCode error_code;
+ const byte* start;
+ const byte* error_pc;
+ const byte* error_pt;
+ base::SmartArrayPointer<char> error_msg;
+
+ bool ok() const { return error_code == kSuccess; }
+ bool failed() const { return error_code != kSuccess; }
+
+ template <typename V>
+ void CopyFrom(Result<V>& that) {
+ error_code = that.error_code;
+ start = that.start;
+ error_pc = that.error_pc;
+ error_pt = that.error_pt;
+ error_msg = that.error_msg;
+ }
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const Result<T>& result) {
+ os << "Result = ";
+ if (result.ok()) {
+ if (result.val != nullptr) {
+ os << *result.val;
+ } else {
+ os << "success (no value)";
+ }
+ } else if (result.error_msg.get() != nullptr) {
+ ptrdiff_t offset = result.error_pc - result.start;
+ if (offset < 0) {
+ os << result.error_msg.get() << " @" << offset;
+ } else {
+ os << result.error_msg.get() << " @+" << offset;
+ }
+ } else {
+ os << result.error_code;
+ }
+ os << std::endl;
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
+
+// A helper for generating error messages that bubble up to JS exceptions.
+class ErrorThrower {
+ public:
+ ErrorThrower(Isolate* isolate, const char* context)
+ : isolate_(isolate), context_(context), error_(false) {}
+
+ void Error(const char* fmt, ...);
+
+ template <typename T>
+ void Failed(const char* error, Result<T>& result) {
+ std::ostringstream str;
+ str << error << result;
+ return Error(str.str().c_str());
+ }
+
+ bool error() const { return error_; }
+
+ private:
+ Isolate* isolate_;
+ const char* context_;
+ bool error_;
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 17376581b5..bfec51c462 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -272,18 +272,18 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc, sizeof(int32_t));
+ Assembler::FlushICache(isolate, pc, sizeof(int32_t));
}
}
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@@ -354,7 +354,8 @@ void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -404,7 +405,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -451,7 +452,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
@@ -469,7 +470,8 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -512,8 +514,8 @@ void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
}
@@ -527,8 +529,9 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
- Assembler::FlushICacheWithoutIsolate(
- pc_ + Assembler::kPatchDebugBreakSlotAddressOffset, sizeof(Address));
+ Assembler::FlushICache(isolate_,
+ pc_ + Assembler::kPatchDebugBreakSlotAddressOffset,
+ sizeof(Address));
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -541,7 +544,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index c8f99a11a6..9626efc4a7 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -292,6 +292,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -388,6 +389,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size =
static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos()));
@@ -3111,6 +3113,28 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
}
+void Assembler::cvttss2siq(Register dst, XMMRegister src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvttss2siq(Register dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3481,6 +3505,21 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
+ DCHECK(!IsEnabled(AVX));
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x3a);
+ emit(0x0a);
+ emit_sse_operand(dst, src);
+ // Mask precision exception.
+ emit(static_cast<byte>(mode) | 0x8);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
DCHECK(IsEnabled(SSE4_1));
@@ -4078,7 +4117,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// Don't record psuedo relocation info for code age sequence mode.
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 2182dbb3ff..799fa6fe9d 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -496,19 +496,18 @@ class Assembler : public AssemblerBase {
// the relative displacements stored in the code.
static inline Address target_address_at(Address pc, Address constant_pool);
static inline void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) {
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+ set_target_address_at(isolate, pc, constant_pool, target,
+ icache_flush_mode);
}
// Return the code target address at a call site from the return address
@@ -518,13 +517,14 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static inline RelocInfo::Mode RelocInfoNone() {
@@ -1077,6 +1077,8 @@ class Assembler : public AssemblerBase {
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
+ void cvttss2siq(Register dst, XMMRegister src);
+ void cvttss2siq(Register dst, const Operand& src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, const Operand& src);
@@ -1136,6 +1138,7 @@ class Assembler : public AssemblerBase {
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+ void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
// AVX instruction
@@ -1389,6 +1392,14 @@ class Assembler : public AssemblerBase {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
}
+ void vcvttss2siq(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
+ }
+ void vcvttss2siq(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF3, k0F, kW1);
+ }
void vcvttsd2siq(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF2, k0F, kW1);
@@ -1407,6 +1418,11 @@ class Assembler : public AssemblerBase {
void vucomisd(XMMRegister dst, const Operand& src) {
vsd(0x2e, dst, xmm0, src, k66, k0F, kWIG);
}
+ void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ RoundingMode mode) {
+ vsd(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
+ emit(static_cast<byte>(mode) | 0x8); // Mask precision exception.
+ }
void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
RoundingMode mode) {
vsd(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
@@ -1636,7 +1652,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 4efd3bfb23..cb092f2f2d 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -21,9 +21,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- rax : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- rdi : called function
+ // -- rdi : target
+ // -- rdx : new.target
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -- ...
@@ -36,50 +35,48 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
+ if (extra_args != BuiltinExtraArguments::kNone) {
__ PopReturnAddressTo(kScratchRegister);
- __ Push(rdi);
+ if (extra_args & BuiltinExtraArguments::kTarget) {
+ ++num_extra_args;
+ __ Push(rdi);
+ }
+ if (extra_args & BuiltinExtraArguments::kNewTarget) {
+ ++num_extra_args;
+ __ Push(rdx);
+ }
__ PushReturnAddressFrom(kScratchRegister);
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToExternalReference expects rax to contain the number of arguments
- // including the receiver and the extra arguments. But rax is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- Label argc, done_argc;
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(
- rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
- __ j(equal, &argc, Label::kNear);
- __ leap(rax, Operand(rbx, num_extra_args + 1));
- __ jmp(&done_argc, Label::kNear);
- __ bind(&argc);
+ // including the receiver and the extra arguments.
__ addp(rax, Immediate(num_extra_args + 1));
- __ bind(&done_argc);
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
+ __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
__ Push(rdi);
+ __ Push(rdx);
// Function is also the parameter to the runtime call.
__ Push(rdi);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ Pop(rdx);
__ Pop(rdi);
}
@@ -119,12 +116,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
// -- rbx: allocation site or undefined
- // -- rdx: original constructor
+ // -- rdx: new target
// -----------------------------------
// Enter a construct frame.
@@ -134,179 +132,167 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(rbx);
__ Push(rbx);
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
- __ Push(rdi);
- __ Push(rdx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ Move(kScratchRegister, debug_step_in_fp);
- __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
- __ j(not_equal, &rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // rdx: original constructor
- __ movp(rax, FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- DCHECK(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ cmpp(rdi, FieldOperand(rax, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // rdi: constructor
- // rax: initial map
- __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
- if (!is_api_function) {
- Label allocate;
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
- // Check if slack tracking is enabled.
- __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
- __ shrl(rsi, Immediate(Map::Counter::kShift));
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(less, &allocate);
- // Decrease generous allocation count.
- __ subl(FieldOperand(rax, Map::kBitField3Offset),
- Immediate(1 << Map::Counter::kShift));
-
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(not_equal, &allocate);
-
- __ Push(rax);
- __ Push(rdx);
- __ Push(rdi);
-
- __ Push(rax); // initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ Integer32ToSmi(rcx, rax);
+ __ Push(rcx);
+
+ if (create_implicit_receiver) {
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // rdx: new target
+ __ movp(rax,
+ FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ DCHECK(kSmiTag == 0);
+ __ JumpIfSmi(rax, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ cmpp(rdi, FieldOperand(rax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ __ movzxbp(r9, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shlp(r9, Immediate(kPointerSizeLog2));
+ // r9: size of new object
+ __ Allocate(r9, rbx, r9, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+ // Allocated the JSObject, now initialize the fields.
+ // rdi: constructor
+ // rdx: new target
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // r9: start of next object
+ __ movp(Operand(rbx, JSObject::kMapOffset), rax);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ orp(rbx, Immediate(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // rbx: JSObject (tagged)
+ // rcx: First in-object property of JSObject (not tagged)
+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ // Check if slack tracking is enabled.
+ __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
+ __ shrl(rsi, Immediate(Map::ConstructionCounter::kShift));
+ __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
+ __ Push(rsi); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ subl(FieldOperand(rax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Allocate object with a slack.
+ __ movzxbp(rsi, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ negp(rsi);
+ __ leap(rsi, Operand(r9, rsi, times_pointer_size, 0));
+ // rsi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmpp(rcx, rsi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(rcx, rsi, r11);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(rcx, r9, r11);
+
+ __ Pop(rsi); // Restore allocation count value before decreasing.
+ __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
+ __ j(not_equal, &allocated);
+
+ // Push the constructor, new_target and the object to the stack,
+ // and then the initial map as an argument to the runtime call.
+ __ Push(rdi);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ __ Push(rax); // initial map
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+
+ // Continue with JSObject being successfully allocated.
+ // rdi: constructor
+ // rdx: new target
+ // rbx: JSObject (tagged)
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
+ }
- __ Pop(rdi);
- __ Pop(rdx);
- __ Pop(rax);
- __ movl(rsi, Immediate(Map::kSlackTrackingCounterEnd - 1));
+ __ InitializeFieldsWithFiller(rcx, r9, r11);
- __ bind(&allocate);
+ // Continue with JSObject being successfully allocated
+ // rdi: constructor
+ // rdx: new target
+ // rbx: JSObject (tagged)
+ __ jmp(&allocated);
}
- // Now allocate the JSObject on the heap.
- __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shlp(rdi, Immediate(kPointerSizeLog2));
- // rdi: size of new object
- __ Allocate(rdi,
- rbx,
- rdi,
- no_reg,
- &rt_call,
- NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
- __ movp(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
- // Set extra fields in the newly allocated object.
- // rax: initial map
- // rbx: JSObject
- // rdi: start of next object
- // rsi: slack tracking counter (non-API function case)
- __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
- __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(less, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ movzxbp(
- rsi,
- FieldOperand(
- rax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ movzxbp(rax, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ subp(rsi, rax);
- __ leap(rsi,
- Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
- // rsi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmpp(rsi, rdi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(rcx, rsi, rdx);
- __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
- // Fill the remaining fields with one pointer filler map.
+ // Allocate the new receiver object using the runtime call.
+ // rdi: constructor
+ // rdx: new target
+ __ bind(&rt_call);
- __ bind(&no_inobject_slack_tracking);
- }
+ // Must restore rsi (context) before calling runtime.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ // Push the constructor and new_target twice, second pair as arguments
+ // to the runtime call.
+ __ Push(rdi);
+ __ Push(rdx);
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ movp(rbx, rax); // store result in rbx
+ __ Pop(rdx);
+ __ Pop(rdi);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- // rbx: JSObject (untagged)
- __ orp(rbx, Immediate(kHeapObjectTag));
+ // Receiver for constructor call allocated.
+ // rdi: constructor
+ // rdx: new target
+ // rbx: newly allocated object
+ __ bind(&allocated);
- // Continue with JSObject being successfully allocated
- // rbx: JSObject (tagged)
- __ jmp(&allocated);
+ // Retrieve smi-tagged arguments count from the stack.
+ __ movp(rax, Operand(rsp, 0));
+ __ SmiToInteger32(rax, rax);
}
- // Allocate the new receiver object using the runtime call.
- // rdx: original constructor
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore rsi (context) and rdi (constructor) before calling runtime.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movp(rdi, Operand(rsp, offset));
- __ Push(rdi); // constructor function
- __ Push(rdx); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ movp(rbx, rax); // store result in rbx
-
- // New object allocated.
- // rbx: newly allocated object
- __ bind(&allocated);
-
- // Restore the parameters.
- __ Pop(rdx);
- __ Pop(rdi);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ movp(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
-
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ Push(rdx);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(rbx);
- __ Push(rbx);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(rbx);
+ __ Push(rbx);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -329,39 +315,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
+ __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movp(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame. The arguments
- // count is stored below the reciever and the new.target.
- __ bind(&exit);
- __ movp(rbx, Operand(rsp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(rax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movp(rax, Operand(rsp, 0));
+
+ // Restore the arguments count and leave the construct frame. The
+ // arguments count is stored below the receiver.
+ __ bind(&exit);
+ __ movp(rbx, Operand(rsp, 1 * kPointerSize));
+ } else {
+ __ movp(rbx, Operand(rsp, 0));
+ }
// Leave construct frame.
}
@@ -371,95 +362,33 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
+ if (create_implicit_receiver) {
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->constructed_objects(), 1);
+ }
__ ret(0);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax: number of arguments
- // -- rdi: constructor function
- // -- rbx: allocation site or undefined
- // -- rdx: original constructor
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve allocation site.
- __ AssertUndefinedOrAllocationSite(rbx);
- __ Push(rbx);
-
- // Store a smi-tagged arguments count on the stack.
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
- __ SmiToInteger32(rax, rax);
-
- // Push new.target
- __ Push(rdx);
-
- // receiver is the hole.
- __ Push(masm->isolate()->factory()->the_hole_value());
-
- // Set up pointer to last argument.
- __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ movp(rcx, rax);
- __ jmp(&entry);
- __ bind(&loop);
- __ Push(Operand(rbx, rcx, times_pointer_size, 0));
- __ bind(&entry);
- __ decp(rcx);
- __ j(greater_equal, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ Move(kScratchRegister, debug_step_in_fp);
- __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
- __ j(equal, &skip_step_in);
-
- __ Push(rax);
- __ Push(rdi);
- __ Push(rdi);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ Pop(rdi);
- __ Pop(rax);
-
- __ bind(&skip_step_in);
-
- // Call the function.
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Get arguments count, skipping over new.target.
- __ movp(rbx, Operand(rsp, kPointerSize)); // Get arguments count.
- } // Leave construct frame.
- // Remove caller arguments from the stack and return.
- __ PopReturnAddressTo(rcx);
- SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ ret(0);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -494,7 +423,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -650,6 +579,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o rdi: the JS function object being called
+// o rdx: the new target
// o rsi: our context
// o rbp: the caller's frame pointer
// o rsp: stack pointer (pointing to return address)
@@ -667,6 +597,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(rbp, rsp);
__ Push(rsi); // Callee's context.
__ Push(rdi); // Callee's JS function.
+ __ Push(rdx); // Callee's new target.
+
+ // Push zero for bytecode array offset.
+ __ Push(Immediate(0));
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
@@ -694,7 +628,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ subp(rdx, rcx);
__ CompareRoot(rdx, Heap::kRealStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -724,7 +658,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
__ Push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ Pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -733,9 +667,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ movp(kInterpreterRegisterFileRegister, rbp);
- __ subp(
- kInterpreterRegisterFileRegister,
- Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ addp(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterDispatchTableRegister,
@@ -833,7 +766,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (either the same as the constructor or
+ // -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -853,40 +786,112 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ PushReturnAddressFrom(kScratchRegister);
// Call the constructor (rax, rdx, rdi passed on).
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts) and push PC at top
+ // of stack (to simulate initial call to bytecode handler in interpreter entry
+ // trampoline).
+ __ Pop(rbx);
+ __ Drop(1);
+ __ Push(rbx);
+
+ // Initialize register file register and dispatch table register.
+ __ movp(kInterpreterRegisterFileRegister, rbp);
+ __ addp(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ addp(kInterpreterDispatchTableRegister,
+ Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ movp(kContextRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // Get the bytecode array pointer from the frame.
+ __ movp(rbx,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(kInterpreterBytecodeArrayRegister,
+ FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ rbx);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ movp(
+ kInterpreterBytecodeOffsetRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister);
+
+ // Dispatch to the target bytecode.
+ __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
+ times_pointer_size, 0));
+ __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rbx);
}
-static void CallCompileOptimized(MacroAssembler* masm,
- bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- __ Push(rdi);
- // Function is also the parameter to the runtime call.
- __ Push(rdi);
- // Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ Pop(rdi);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -979,7 +984,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ Popad();
// Tear down internal frame.
}
@@ -1008,7 +1013,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
@@ -1048,7 +1053,138 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into rax and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(rax, args.GetReceiverOperand());
+ __ JumpIfSmi(rax, &receiver_not_date);
+ __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
+ __ j(not_equal, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ movp(rax, FieldOperand(rax, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ Load(rdx, ExternalReference::date_cache_stamp(masm->isolate()));
+ __ cmpp(rdx, FieldOperand(rax, JSDate::kCacheStampOffset));
+ __ j(not_equal, &stamp_mismatch, Label::kNear);
+ __ movp(rax, FieldOperand(
+ rax, JSDate::kValueOffset + field_index * kPointerSize));
+ __ ret(1 * kPointerSize);
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2);
+ __ Move(arg_reg_1, rax);
+ __ Move(arg_reg_2, Smi::FromInt(field_index));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ ret(1 * kPointerSize);
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowNotDateError);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : argArray
+ // -- rsp[16] : thisArg
+ // -- rsp[24] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into rdi, argArray into rax (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg_array, no_this_arg;
+ StackArgumentsAccessor args(rsp, rax);
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ movp(rbx, rdx);
+ __ movp(rdi, args.GetReceiverOperand());
+ __ testp(rax, rax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ {
+ __ movp(rdx, args.GetArgumentOperand(1));
+ __ cmpp(rax, Immediate(1));
+ __ j(equal, &no_arg_array, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(2));
+ __ bind(&no_arg_array);
+ }
+ __ bind(&no_this_arg);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
+
+ // ----------- S t a t e -------------
+ // -- rax : argArray
+ // -- rdi : receiver
+ // -- rsp[0] : return address
+ // -- rsp[8] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(rdi, &receiver_not_callable, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
+ __ j(zero, &receiver_not_callable, Label::kNear);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(rax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ Label::kNear);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Set(rax, 0);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
// rsp[8] : Argument n
@@ -1098,202 +1234,150 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- __ movp(key, Operand(rbp, indexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movp(receiver, Operand(rbp, argumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ Move(slot, Smi::FromInt(slot_index));
- __ movp(vector, Operand(rbp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ Push(rax);
-
- // Update the index on the stack and in register key.
- __ movp(key, Operand(rbp, indexOffset));
- __ SmiAddConstant(key, key, Smi::FromInt(1));
- __ movp(Operand(rbp, indexOffset), key);
-
- __ bind(&entry);
- __ cmpp(key, Operand(rbp, limitOffset));
- __ j(not_equal, &loop);
-
- // On exit, the pushed arguments count is in rax, untagged
- __ SmiToInteger64(rax, key);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
-
- // Stack at entry:
- // rsp : return address
- // rsp[8] : arguments
- // rsp[16] : receiver ("this")
- // rsp[24] : function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp : Old base pointer
- // rbp[8] : return address
- // rbp[16] : function arguments
- // rbp[24] : receiver
- // rbp[32] : function
- static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ movp(rdi, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdi, FieldOperand(rdi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(rdi);
-
- __ Push(Operand(rbp, kFunctionOffset));
- __ Push(Operand(rbp, kArgumentsOffset));
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : argumentsList
+ // -- rsp[16] : thisArgument
+ // -- rsp[24] : target
+ // -- rsp[32] : receiver
+ // -----------------------------------
- Generate_CheckStackOverflow(masm, kRaxIsSmiTagged);
+ // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label done;
+ StackArgumentsAccessor args(rsp, rax);
+ __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+ __ movp(rdx, rdi);
+ __ movp(rbx, rdi);
+ __ cmpp(rax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ movp(rdi, args.GetArgumentOperand(1)); // target
+ __ j(equal, &done, Label::kNear);
+ __ movp(rdx, args.GetArgumentOperand(2)); // thisArgument
+ __ cmpp(rax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(3)); // argumentsList
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
- // Push current index and limit, and receiver.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(rax); // limit
- __ Push(Immediate(0)); // index
- __ Push(Operand(rbp, kReceiverOffset)); // receiver
+ // ----------- S t a t e -------------
+ // -- rax : argumentsList
+ // -- rdi : target
+ // -- rsp[0] : return address
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(rdi, &target_not_callable, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
+ __ j(zero, &target_not_callable, Label::kNear);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ movp(rdi, Operand(rbp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Leave internal frame.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
}
-// Used by ReflectConstruct
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rsp[0] : return address
+ // -- rsp[8] : new.target (optional)
+ // -- rsp[16] : argumentsList
+ // -- rsp[24] : target
+ // -- rsp[32] : receiver
+ // -----------------------------------
- // Stack at entry:
- // rsp : return address
- // rsp[8] : original constructor (new.target)
- // rsp[16] : arguments
- // rsp[24] : constructor
+ // 1. Load target into rdi (if present), argumentsList into rax (if present),
+ // new.target into rdx (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push thisArgument
+ // (if present) instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // rbp : Old base pointer
- // rbp[8] : return address
- // rbp[16] : original constructor (new.target)
- // rbp[24] : arguments
- // rbp[32] : constructor
- static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
-
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ movp(rdi, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdi, FieldOperand(rdi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ Push(rdi);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ movp(rax, Operand(rbp, kNewTargetOffset));
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &validate_arguments, Label::kNear);
- __ movp(rax, Operand(rbp, kFunctionOffset));
- __ movp(Operand(rbp, kNewTargetOffset), rax);
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ Push(Operand(rbp, kFunctionOffset));
- __ Push(Operand(rbp, kArgumentsOffset));
- __ Push(Operand(rbp, kNewTargetOffset));
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, kRaxIsSmiTagged);
-
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(rax); // limit
- __ Push(Immediate(0)); // index
- // Push the constructor function as callee.
- __ Push(Operand(rbp, kFunctionOffset));
-
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ movp(rdi, Operand(rbp, kFunctionOffset));
- __ movp(rcx, Operand(rbp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label done;
+ StackArgumentsAccessor args(rsp, rax);
+ __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
+ __ movp(rdx, rdi);
+ __ movp(rbx, rdi);
+ __ cmpp(rax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ movp(rdi, args.GetArgumentOperand(1)); // target
+ __ movp(rdx, rdi); // new.target defaults to target
+ __ j(equal, &done, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(2)); // argumentsList
+ __ cmpp(rax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ movp(rdx, args.GetArgumentOperand(3)); // new.target
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
}
- // remove this, target, arguments and newTarget
- __ ret(kStackSize * kPointerSize);
-}
+ // ----------- S t a t e -------------
+ // -- rax : argumentsList
+ // -- rdx : new.target
+ // -- rdi : target
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(rdi, &target_not_constructor, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &target_not_constructor, Label::kNear);
+ // 3. Make sure the target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &new_target_not_constructor, Label::kNear);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdx);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1306,7 +1390,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
Label generic_array_code;
// Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
@@ -1336,7 +1420,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
Label generic_array_code;
// Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
@@ -1359,6 +1443,115 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : constructor function
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into rax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ StackArgumentsAccessor args(rsp, rax);
+ __ testp(rax, rax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in rax).
+ __ bind(&no_arguments);
+ __ ret(1 * kPointerSize);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : constructor function
+ // -- rdx : new target
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into rbx and get rid of the rest (including the
+ // receiver).
+ {
+ StackArgumentsAccessor args(rsp, rax);
+ Label no_arguments, done;
+ __ testp(rax, rax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ Move(rbx, Smi::FromInt(0));
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ }
+
+ // 3. Make sure rbx is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(rbx, &done_convert);
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &done_convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdx);
+ __ Push(rdi);
+ __ Move(rax, rbx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(rbx, rax);
+ __ Pop(rdi);
+ __ Pop(rdx);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmpp(rdx, rdi);
+ __ j(not_equal, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx); // the first argument
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(rax, JSValue::kValueOffset));
+ }
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
@@ -1414,7 +1607,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx);
__ Push(rax);
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -1424,13 +1617,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
- // -- rdx : original constructor
+ // -- rdx : new target
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- // 1. Load the first argument into rbx and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into rbx and get rid of the rest (including the
// receiver).
{
StackArgumentsAccessor args(rsp, rax);
@@ -1447,7 +1643,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
- // 2. Make sure rbx is a string.
+ // 3. Make sure rbx is a string.
{
Label convert, done_convert;
__ JumpIfSmi(rbx, &convert, Label::kNear);
@@ -1468,60 +1664,26 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- rbx : the first argument
- // -- rdi : constructor function
- // -- rdx : original constructor
- // -----------------------------------
- Label allocate, done_allocate, rt_call;
-
- // Fall back to runtime if the original constructor and constructor differ.
- __ cmpp(rdx, rdi);
- __ j(not_equal, &rt_call);
-
- __ Allocate(JSValue::kSize, rax, rcx, no_reg, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in rax.
- __ LoadGlobalFunctionInitialMap(rdi, rcx);
- __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
- __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmpp(rdx, rdi);
+ __ j(not_equal, &new_object);
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rbx);
- __ Push(rdi);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(rdi);
- __ Pop(rbx);
- }
- __ jmp(&done_allocate);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
+ __ Ret();
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rbx);
- __ Push(rdi);
- __ Push(rdi); // constructor function
- __ Push(rdx); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(rdi);
- __ Pop(rbx);
- }
- __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx); // the first argument
+ __ Push(rdi); // constructor function
+ __ Push(rdx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(rax, JSValue::kValueOffset));
}
+ __ Ret();
}
@@ -1530,23 +1692,24 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rdi: function (passed through to callee)
+ // -- rdx : new target (passed through to callee)
+ // -- rdi : function (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
- __ LoadRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
- __ subp(rcx, rdx);
- // Make rdx the space we need for the array when it is unrolled onto the
+ __ subp(rcx, r8);
+ // Make r8 the space we need for the array when it is unrolled onto the
// stack.
- __ movp(rdx, rbx);
- __ shlp(rdx, Immediate(kPointerSizeLog2));
+ __ movp(r8, rbx);
+ __ shlp(r8, Immediate(kPointerSizeLog2));
// Check if the arguments will overflow the stack.
- __ cmpp(rcx, rdx);
+ __ cmpp(rcx, r8);
__ j(less_equal, stack_overflow); // Signed comparison.
}
@@ -1589,18 +1752,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
- // -- rdi: function (passed through to callee)
+ // -- rdx : new target (passed through to callee)
+ // -- rdi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->arguments_adaptors(), 1);
- Label stack_overflow;
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
Label enough, too_few;
- __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpp(rax, rbx);
__ j(less, &too_few);
__ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
@@ -1609,6 +1769,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1658,11 +1819,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1694,8 +1856,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
__ movp(rax, rbx);
// rax : expected number of arguments
- // rdi: function (passed through to callee)
- __ call(rdx);
+ // rdx : new target (passed through to callee)
+ // rdi : function (passed through to callee)
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ call(rcx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1708,19 +1872,146 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(rdx);
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ jmp(rcx);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
}
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argumentsList
+ // -- rdi : target
+ // -- rdx : new.target (checked to be constructor or undefined)
+ // -- rsp[0] : return address.
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(rax, &create_runtime);
+
+ // Load the map of argumentsList into rcx.
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+
+ // Load native context into rbx.
+ __ movp(rbx, NativeContextOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ cmpp(rcx, ContextOperand(rbx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+ __ cmpp(rcx, ContextOperand(rbx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+ __ j(equal, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ Push(rdx);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ SmiToInteger32(rbx, FieldOperand(rax, FixedArray::kLengthOffset));
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ movp(rbx,
+ FieldOperand(rax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
+ __ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(not_equal, &create_runtime);
+ __ SmiToInteger32(rbx, rbx);
+ __ movp(rax, rcx);
+ __ jmp(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(rcx);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmpl(rcx, Immediate(FAST_ELEMENTS));
+ __ j(above, &create_runtime);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+ __ j(equal, &create_runtime);
+ __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subp(rcx, kScratchRegister);
+ __ sarp(rcx, Immediate(kPointerSizeLog2));
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rbx);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- rdi : target
+ // -- rax : args (a FixedArray built from argumentsList)
+ // -- rbx : len (number of elements to push from args)
+ // -- rdx : new.target (checked to be constructor or undefined)
+ // -- rsp[0] : return address.
+ // -- rsp[8] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ PopReturnAddressTo(r8);
+ __ Set(rcx, 0);
+ Label done, loop;
+ __ bind(&loop);
+ __ cmpl(rcx, rbx);
+ __ j(equal, &done, Label::kNear);
+ __ Push(
+ FieldOperand(rax, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ incl(rcx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(r8);
+ __ Move(rax, rcx);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1820,17 +2111,129 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadSharedFunctionInfoSpecialField(
rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount actual(rax);
ParameterCount expected(rbx);
- __ InvokeCode(rdx, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+
+ __ InvokeFunctionCode(rdi, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : new.target (only in case of [[Construct]])
+ // -- rdi : target (checked to be a JSBoundFunction)
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into rcx and length of that into rbx.
+ Label no_bound_arguments;
+ __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ testl(rbx, rbx);
+ __ j(zero, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : new.target (only in case of [[Construct]])
+ // -- rdi : target (checked to be a JSBoundFunction)
+ // -- rcx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ leap(kScratchRegister, Operand(rbx, times_pointer_size, 0));
+ __ subp(rsp, kScratchRegister);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(rsp, Heap::kRealStackLimitRootIndex);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ // Restore the stack pointer.
+ __ leap(rsp, Operand(rsp, rbx, times_pointer_size, 0));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Adjust effective number of arguments to include return address.
+ __ incl(rax);
+
+ // Relocate arguments and return address down the stack.
+ {
+ Label loop;
+ __ Set(rcx, 0);
+ __ leap(rbx, Operand(rsp, rbx, times_pointer_size, 0));
+ __ bind(&loop);
+ __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+ __ movp(Operand(rsp, rcx, times_pointer_size, 0), kScratchRegister);
+ __ incl(rcx);
+ __ cmpl(rcx, rax);
+ __ j(less, &loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ movp(rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ bind(&loop);
+ __ decl(rbx);
+ __ movp(kScratchRegister, FieldOperand(rcx, rbx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ movp(Operand(rsp, rax, times_pointer_size, 0), kScratchRegister);
+ __ leal(rax, Operand(rax, 1));
+ __ j(greater, &loop);
+ }
+
+ // Adjust effective number of arguments (rax contains the number of
+ // arguments from the call plus return address plus the number of
+ // [[BoundArguments]]), so we need to subtract one for the return address.
+ __ decl(rax);
}
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdi : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(rdi);
+
+ // Patch the receiver to [[BoundThis]].
+ StackArgumentsAccessor args(rsp, rax);
+ __ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+ __ movp(args.GetReceiverOperand(), rbx);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Load(rcx,
+ ExternalReference(Builtins::kCall_ReceiverIsAny, masm->isolate()));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
}
@@ -1848,14 +2251,22 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ movp(rdi, FieldOperand(rdi, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(rdi);
- __ jmp(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(rdi);
+ __ PushReturnAddressFrom(kScratchRegister);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ addp(rax, Immediate(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1867,7 +2278,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// Overwrite the original receiver with the (original) target.
__ movp(args.GetReceiverOperand(), rdi);
// Let the "call_as_function_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(
ConvertReceiverMode::kNotNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -1877,7 +2288,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rdi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1886,10 +2297,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (checked to be a JSFunction)
+ // -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(rdx);
__ AssertFunction(rdi);
// Calling convention for function specific ConstructStubs require
@@ -1906,17 +2316,53 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (checked to be a constructor)
+ // -- rdi : the constructor to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(rdi);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ cmpp(rdi, rdx);
+ __ j(not_equal, &done, Label::kNear);
+ __ movp(rdx,
+ FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ movp(rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Load(rcx, ExternalReference(Builtins::kConstruct, masm->isolate()));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (either the same as the constructor or
+ // -- rdi : the constructor to call (checked to be a JSProxy)
+ // -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
- // -- rdi : the constructor to call (checked to be a JSFunctionProxy)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ movp(rdi, FieldOperand(rdi, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(rdi);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(kScratchRegister);
+ // Include the pushed new_target, constructor and the receiver.
+ __ addp(rax, Immediate(3));
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1924,25 +2370,34 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
- // -- rdx : the original constructor (either the same as the constructor or
+ // -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
// -----------------------------------
StackArgumentsAccessor args(rsp, rax);
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(rdi, &non_constructor, Label::kNear);
- __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+
+ // Dispatch based on instance type.
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET);
+
+ // Check if target has a [[Construct]] internal method.
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsConstructor));
__ j(zero, &non_constructor, Label::kNear);
- // Dispatch based on instance type.
- __ CmpInstanceType(rcx, JS_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(equal, masm->isolate()->builtins()->ConstructProxy(),
RelocInfo::CODE_TARGET);
@@ -1951,7 +2406,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Overwrite the original receiver with the (original) target.
__ movp(args.GetReceiverOperand(), rdi);
// Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
+ __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(),
RelocInfo::CODE_TARGET);
}
@@ -1959,10 +2414,120 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Register scratch2,
+ Label* receiver_check_failed) {
+ Register signature = scratch0;
+ Register map = scratch1;
+ Register constructor = scratch2;
+
+ // If there is no signature, return the holder.
+ __ movp(signature, FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ CompareRoot(signature, Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // Walk the prototype chain.
+ __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(constructor, map, kScratchRegister);
+ __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
+ Label next_prototype;
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Get the constructor's signature.
+ Register type = constructor;
+ __ movp(type,
+ FieldOperand(constructor, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(type, FieldOperand(type, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmpp(signature, type);
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(type, &next_prototype, Label::kNear);
+ __ CmpObjectType(type, FUNCTION_TEMPLATE_INFO_TYPE, kScratchRegister);
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Otherwise load the parent function template and iterate.
+ __ movp(type,
+ FieldOperand(type, FunctionTemplateInfo::kParentTemplateOffset));
+ __ jmp(&function_template_loop, Label::kNear);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, receiver_check_failed);
+ __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ testq(FieldOperand(map, Map::kBitField3Offset),
+ Immediate(Map::IsHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+ // Iterate.
+ __ jmp(&prototype_loop_start, Label::kNear);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments (not including the receiver)
+ // -- rdi : callee
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[rax * 8] : first argument
+ // -- rsp[(rax + 1) * 8] : receiver
+ // -----------------------------------
+
+ StackArgumentsAccessor args(rsp, rax);
+
+ // Load the FunctionTemplateInfo.
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ movp(rcx, args.GetReceiverOperand());
+ CompatibleReceiverCheck(masm, rcx, rbx, rdx, r8, r9, &receiver_check_failed);
+
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ movp(rdx, FieldOperand(rbx, FunctionTemplateInfo::kCallCodeOffset));
+ __ movp(rdx, FieldOperand(rdx, CallHandlerInfo::kFastHandlerOffset));
+ __ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rdx);
+
+ // Compatible receiver check failed: pop return address, arguments and
+ // receiver and throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ __ PopReturnAddressTo(rbx);
+ __ leap(rax, Operand(rax, times_pointer_size, 1 * kPointerSize));
+ __ addp(rsp, rax);
+ __ PushReturnAddressFrom(rbx);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
}
@@ -1974,7 +2539,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ Push(rax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
Label skip;
@@ -2010,7 +2575,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index b7fb099512..1e14f83d9b 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -491,7 +491,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in rax.
@@ -591,7 +591,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ PopReturnAddressTo(rbx);
__ Push(rdx);
__ PushReturnAddressFrom(rbx);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -663,8 +663,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r11 = argument count (untagged)
// Get the arguments map from the current native context into r9.
Label has_mapped_parameters, instantiate;
- __ movp(r9, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(r9, FieldOperand(r9, JSGlobalObject::kNativeContextOffset));
+ __ movp(r9, NativeContextOperand());
__ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -804,7 +803,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Push(rdx); // Push parameters pointer.
__ Push(r11); // Push parameter count.
__ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -838,7 +837,37 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ Push(rdx); // Push parameters pointer.
__ Push(rcx); // Push parameter count.
__ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // rcx : number of parameters (tagged)
+ // rdx : parameters pointer
+ // rbx : rest parameter index (tagged)
+ // rsp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ movp(r8, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(r8, StandardFrameConstants::kContextOffset));
+ __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(rcx, Operand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger64(rax, rcx);
+ __ leap(rdx, Operand(r8, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ PopReturnAddressTo(rax);
+ __ Push(rcx); // Push number of parameters.
+ __ Push(rdx); // Push parameters pointer.
+ __ Push(rbx); // Push rest parameter index.
+ __ PushReturnAddressFrom(rax);
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -862,7 +891,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ PushReturnAddressFrom(scratch);
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -945,10 +974,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
- __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rdi, FieldOperand(rdi, JSGlobalObject::kNativeContextOffset));
- const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
- __ movp(rdi, Operand(rdi, offset));
+ __ movp(rdi, NativeContextOperand());
+ __ movp(rdi, ContextOperand(rdi, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
@@ -998,7 +1025,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Push(rdx); // Push parameters pointer.
__ Push(rcx); // Push parameter count.
__ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -1007,7 +1034,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1390,11 +1417,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(equal, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1541,7 +1568,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
// Call runtime on identical objects. Otherwise return equal.
- __ cmpb(rcx, Immediate(static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE)));
+ __ cmpb(rcx, Immediate(static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE)));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
@@ -1608,9 +1635,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// There is no test for undetectability in strict equality.
// If the first object is a JS object, we have done pointer comparison.
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Label first_non_object;
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (rax (not rax) is not zero)
Label return_not_equal;
@@ -1623,7 +1650,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpInstanceType(rcx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1710,9 +1737,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(below, &runtime_call, Label::kNear);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -1738,14 +1765,12 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
if (cc == equal) {
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
__ Push(Smi::FromInt(NegativeComparisonResult(cc)));
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -1753,11 +1778,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// rax : number of arguments to the construct function
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1769,15 +1792,9 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
__ Push(rbx);
- if (is_super) {
- __ Push(rcx);
- }
__ CallStub(stub);
- if (is_super) {
- __ Pop(rcx);
- }
__ Pop(rbx);
__ Pop(rdx);
__ Pop(rdi);
@@ -1786,13 +1803,12 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
@@ -1832,7 +1848,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ j(not_equal, &miss);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
__ jmp(&done);
@@ -1855,17 +1871,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ bind(&initialize);
// Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &not_array_function);
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done_no_smi_convert);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ jmp(&done_no_smi_convert);
__ bind(&done);
@@ -1878,8 +1894,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
- // rdx : slot in feedback vector (Smi, for RecordCallTarget)
+ // rdx : slot in feedback vector (Smi)
// rdi : constructor function
Label non_function;
@@ -1889,28 +1904,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
__ j(not_equal, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- __ SmiToInteger32(rdx, rdx);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into rbx, or undefined.
- __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(equal, &feedback_register_initialized);
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
+ __ SmiToInteger32(rdx, rdx);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx,
+ FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized, Label::kNear);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(rbx);
- }
+ __ AssertUndefinedOrAllocationSite(rbx);
- // Pass original constructor to construct stub.
- if (IsSuperConstructorCall()) {
- __ movp(rdx, rcx);
- } else {
- __ movp(rdx, rdi);
- }
+ // Pass new target to construct stub.
+ __ movp(rdx, rdi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1930,7 +1939,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// rdx - slot id
// rbx - vector
// rcx - allocation site (loaded from vector[slot]).
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmpp(rdi, r8);
__ j(not_equal, miss);
@@ -1955,11 +1964,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// -- rbx - vector
// -----------------------------------
Isolate* isolate = masm->isolate();
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
StackArgumentsAccessor args(rsp, argc);
ParameterCount actual(argc);
@@ -1995,9 +2000,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Smi::FromInt(CallICNexus::kCallCountIncrement));
- __ bind(&call);
+ __ bind(&call_function);
__ Set(rax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2031,10 +2037,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
__ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
TypeFeedbackVector::MegamorphicSentinel(isolate));
- // We have to update statistics for runtime profiling.
- __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
- __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
- __ jmp(&call);
+
+ __ bind(&call);
+ __ Set(rax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2047,12 +2054,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rcx);
__ cmpp(rdi, rcx);
__ j(equal, &miss);
- // Update stats.
- __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
+ // Make sure the function belongs to the same native context.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rcx, ContextOperand(rcx, Context::NATIVE_CONTEXT_INDEX));
+ __ cmpp(rcx, NativeContextOperand());
+ __ j(not_equal, &miss);
// Initialize the call counter.
__ Move(FieldOperand(rbx, rdx, times_pointer_size,
@@ -2073,7 +2083,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(rdi);
}
- __ jmp(&call);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -2097,7 +2107,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rdx);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ movp(rdi, rax);
@@ -2501,15 +2511,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
Immediate(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = kScratchRegister;
- __ movp(shared_info,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ TestBitSharedFunctionInfoSpecialField(
- shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- __ j(not_zero, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ movp(function_prototype,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2535,28 +2536,45 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
- Label done, loop;
+ Label done, loop, fast_runtime_fallback;
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
__ bind(&loop);
- __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmpp(object_prototype, function_prototype);
+
+ __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ __ j(equal, &fast_runtime_fallback, Label::kNear);
+
+ __ movp(object, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmpp(object, function_prototype);
__ j(equal, &done, Label::kNear);
- __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
- __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ CompareRoot(object, Heap::kNullValueRootIndex);
+ __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
__ j(not_equal, &loop);
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
__ bind(&done);
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(0);
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(object);
+ __ Push(function_prototype);
+ __ PushReturnAddressFrom(kScratchRegister);
+ // Invalidate the instanceof cache.
+ __ Move(rax, Smi::FromInt(0));
+ __ StoreRoot(rax, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
__ PopReturnAddressTo(kScratchRegister);
__ Push(object);
__ Push(function);
__ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -2615,11 +2633,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_);
__ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
@@ -2648,7 +2666,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Push(object_);
__ Integer32ToSmi(index_, index_);
__ Push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -2686,7 +2704,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ Push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -2933,7 +2951,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// rax: string
@@ -2979,7 +2997,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -2992,7 +3010,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -3011,7 +3029,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -3046,7 +3064,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -3221,7 +3239,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Push(rdx);
__ Push(rax);
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3266,14 +3284,16 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
__ AssertSmi(rax);
__ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
__ AssertSmi(rdx);
- __ xchgp(rax, rdx);
+ __ pushq(rax);
+ __ movq(rax, rdx);
+ __ popq(rdx);
}
__ subp(rax, rdx);
__ Ret();
@@ -3548,9 +3568,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3558,18 +3578,19 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
- __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(below, &miss, Label::kNear);
+ __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(below, &miss, Label::kNear);
- DCHECK(GetCondition() == equal);
+ DCHECK_EQ(equal, GetCondition());
__ subp(rax, rdx);
__ ret(0);
@@ -3578,7 +3599,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
@@ -3594,14 +3615,14 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ subp(rax, rdx);
__ ret(0);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(rax);
__ Push(Smi::FromInt(NegativeComparisonResult(GetCondition())));
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3618,7 +3639,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rdx);
__ Push(rax);
__ Push(Smi::FromInt(op()));
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -4003,11 +4024,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
__ Push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object, Label::kNear);
__ Pop(regs_.object());
regs_.Restore(masm);
@@ -4027,85 +4047,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : element value to store
- // -- rcx : element index as smi
- // -- rsp[0] : return address
- // -- rsp[8] : array literal index in function
- // -- rsp[16] : array literal
- // clobbers rbx, rdx, rdi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rdx, args.GetArgumentOperand(1));
- __ movp(rbx, args.GetArgumentOperand(0));
- __ movp(rdi, FieldOperand(rbx, JSObject::kMapOffset));
-
- __ CheckFastElements(rdi, &double_elements);
-
- // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
- __ JumpIfSmi(rax, &smi_element);
- __ CheckFastSmiElements(rdi, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ PopReturnAddressTo(rdi);
- __ Push(rbx);
- __ Push(rcx);
- __ Push(rax);
- __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(rdx);
- __ PushReturnAddressFrom(rdi);
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ movp(Operand(rcx, 0), rax);
- // Update the write barrier for the array store.
- __ RecordWrite(rbx, rcx, rax,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
- // FAST_*_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ SmiToInteger32(kScratchRegister, rcx);
- __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ movp(FieldOperand(rbx, kScratchRegister, times_pointer_size,
- FixedArrayBase::kHeaderSize), rax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ movp(r9, FieldOperand(rbx, JSObject::kElementsOffset));
- __ SmiToInteger32(r11, rcx);
- __ StoreNumberToDoubleElements(rax,
- r9,
- r11,
- xmm0,
- &slow_elements);
- __ ret(0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4763,7 +4704,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- rax : argc
// -- rbx : AllocationSite or undefined
// -- rdi : constructor
- // -- rdx : original constructor
+ // -- rdx : new target
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
@@ -4784,6 +4725,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(rbx);
}
+ // Enter the context of the Array function.
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
Label subclassing;
__ cmpp(rdi, rdx);
__ j(not_equal, &subclassing);
@@ -4806,28 +4750,32 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing
__ bind(&subclassing);
- __ Pop(rcx); // return address.
- __ Push(rdi);
- __ Push(rdx);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
- case MORE_THAN_ONE:
- __ addp(rax, Immediate(2));
+ case MORE_THAN_ONE: {
+ StackArgumentsAccessor args(rsp, rax);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ addp(rax, Immediate(3));
break;
- case NONE:
- __ movp(rax, Immediate(2));
+ }
+ case NONE: {
+ StackArgumentsAccessor args(rsp, 0);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ Set(rax, 3);
break;
- case ONE:
- __ movp(rax, Immediate(3));
+ }
+ case ONE: {
+ StackArgumentsAccessor args(rsp, 1);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ Set(rax, 4);
break;
+ }
}
-
- __ Push(rcx);
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()),
- 1);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ Push(rbx);
+ __ PushReturnAddressFrom(rcx);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4946,7 +4894,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(kScratchRegister);
__ Push(slot_reg);
__ Push(kScratchRegister);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5069,8 +5017,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(kScratchRegister);
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5210,7 +5157,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -5244,7 +5191,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 5c297f1a07..81c1a69aa8 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -32,15 +32,15 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::exp;
+ if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
@@ -58,20 +58,21 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// xmm0: raw double input.
// Move double input into registers.
__ Sqrtsd(xmm0, xmm0);
@@ -81,101 +82,11 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
-
-#ifdef _WIN64
-typedef double (*ModuloFunction)(double, double);
-// Define custom fmod implementation.
-ModuloFunction CreateModuloFunction() {
- size_t actual_size;
- byte* buffer = static_cast<byte*>(
- base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable, buffer, and not into
- // the V8 heap. We can't, and don't, refer to any relocatable addresses
- // (e.g. the JavaScript nan-object).
-
- // Windows 64 ABI passes double arguments in xmm0, xmm1 and
- // returns result in xmm0.
- // Argument backing space is allocated on the stack above
- // the return address.
-
- // Compute x mod y.
- // Load y and x (use argument backing store as temporary storage).
- __ Movsd(Operand(rsp, kRegisterSize * 2), xmm1);
- __ Movsd(Operand(rsp, kRegisterSize), xmm0);
- __ fld_d(Operand(rsp, kRegisterSize * 2));
- __ fld_d(Operand(rsp, kRegisterSize));
-
- // Clear exception flags before operation.
- {
- Label no_exceptions;
- __ fwait();
- __ fnstsw_ax();
- // Clear if Illegal Operand or Zero Division exceptions are set.
- __ testb(rax, Immediate(5));
- __ j(zero, &no_exceptions);
- __ fnclex();
- __ bind(&no_exceptions);
- }
-
- // Compute st(0) % st(1)
- {
- Label partial_remainder_loop;
- __ bind(&partial_remainder_loop);
- __ fprem();
- __ fwait();
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x400 /* C2 */));
- // If C2 is set, computation only has partial result. Loop to
- // continue computation.
- __ j(not_zero, &partial_remainder_loop);
- }
-
- Label valid_result;
- Label return_result;
- // If Invalid Operand or Zero Division exceptions are set,
- // return NaN.
- __ testb(rax, Immediate(5));
- __ j(zero, &valid_result);
- __ fstp(0); // Drop result in st(0).
- int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue);
- __ movq(Operand(rsp, kRegisterSize), rcx);
- __ Movsd(xmm0, Operand(rsp, kRegisterSize));
- __ jmp(&return_result);
-
- // If result is valid, return that.
- __ bind(&valid_result);
- __ fstp_d(Operand(rsp, kRegisterSize));
- __ Movsd(xmm0, Operand(rsp, kRegisterSize));
-
- // Clean up FPU stack and exceptions and return xmm0
- __ bind(&return_result);
- __ fstp(0); // Unload y.
-
- Label clear_exceptions;
- __ testb(rax, Immediate(0x3f /* Any Exception*/));
- __ j(not_zero, &clear_exceptions);
- __ ret(0);
- __ bind(&clear_exceptions);
- __ fnclex();
- __ ret(0);
-
- CodeDesc desc;
- masm.GetCode(&desc);
- base::OS::ProtectCode(buffer, actual_size);
- // Call the function from C++ through this pointer.
- return FUNCTION_CAST<ModuloFunction>(buffer);
-}
-
-#endif
-
#undef __
// -------------------------------------------------------------------------
@@ -642,12 +553,14 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ CodePatcher patcher(isolate, young_sequence_.start(),
+ young_sequence_.length());
patcher.masm()->pushq(rbp);
patcher.masm()->movp(rbp, rsp);
patcher.masm()->Push(rsi);
@@ -694,7 +607,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length);
+ CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start());
patcher.masm()->Nop(
kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 09af38ddea..1403781c67 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -5,7 +5,7 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 620f614aa5..c2fd970c67 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -41,14 +41,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->int3();
}
}
@@ -74,7 +75,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address call_address = instruction_start + deopt_data->Pc(i)->value();
// There is room enough to write a long call instruction because we pad
// LLazyBailout instructions with nops if necessary.
- CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
+ CodePatcher patcher(isolate, call_address, Assembler::kCallSequenceLength);
patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
Assembler::RelocInfoNone());
DCHECK(prev_call_address == NULL ||
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index d6cf513392..05b199d558 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -989,6 +989,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightOperand(current);
break;
+ case 0x2c:
+ AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
+ NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index e69d38d1f3..79315c70a0 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -63,6 +63,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return rcx; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return rdx; }
+const Register RestParamAccessDescriptor::parameter_count() { return rcx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return rdx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return rbx; }
+
+
const Register ApiGetterDescriptor::function_address() { return r8; }
@@ -126,6 +131,13 @@ void NumberToStringDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi, rax, rcx, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rax, rbx, rcx};
@@ -186,12 +198,11 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// rax : number of arguments
// rbx : feedback vector
- // rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
- Register registers[] = {rax, rdi, rcx, rbx};
+ Register registers[] = {rax, rdi, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -205,6 +216,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rdx : the new target
+ // rdi : the target to call
+ // rbx : allocation site or undefined
+ Register registers[] = {rdi, rdx, rax, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rdx : the new target
+ // rdi : the target to call
+ Register registers[] = {rdi, rdx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rcx, rbx, rax};
@@ -342,6 +374,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rdi, // JSFunction
+ rdx, // the new target
rax, // actual number of arguments
rbx, // expected number of arguments
};
@@ -374,27 +407,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rdi, // math rounding function
- rdx, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rdi, // math rounding function
- rdx, // vector slot id
- rbx // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -410,7 +422,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
rax, // argument count (not including receiver)
- rdx, // original constructor
+ rdx, // new target
rdi, // constructor
rbx, // address of first argument
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 3c8cab2d83..9952eb3b65 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -17,12 +17,13 @@
namespace v8 {
namespace internal {
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false),
root_array_available_(true) {
- if (isolate() != NULL) {
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -661,39 +662,30 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : argument num_arguments - 1
// ...
// -- rsp[8 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- rax : number of arguments
// -----------------------------------
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Set(rax, num_arguments);
- JumpToExternalReference(ext, result_size);
-}
-
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ Set(rax, function->nargs);
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
- int result_size) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(isolate(), result_size);
+ CEntryStub ces(isolate(), 1);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -703,30 +695,10 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinEntry(rdx, native_context_index);
- InvokeCode(rdx, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the builtins object into target register.
- movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
- movp(target, ContextOperand(target, native_context_index));
-}
-
-
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(rdi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, native_context_index);
- movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ LoadNativeContextSlot(native_context_index, rdi);
+ InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
}
@@ -899,6 +871,43 @@ void MacroAssembler::Cvtqsi2sd(XMMRegister dst, const Operand& src) {
}
+void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ testq(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ Cvtqsi2ss(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ movq(tmp, src);
+ shrq(src, Immediate(1));
+ // Recover the least significant bit to avoid rounding errors.
+ andq(tmp, Immediate(1));
+ orq(src, tmp);
+ Cvtqsi2ss(dst, src);
+ addss(dst, dst);
+ bind(&jmp_return);
+}
+
+
+void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ testq(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ Cvtqsi2sd(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ movq(tmp, src);
+ shrq(src, Immediate(1));
+ andq(tmp, Immediate(1));
+ orq(src, tmp);
+ Cvtqsi2sd(dst, src);
+ addsd(dst, dst);
+ bind(&jmp_return);
+}
+
+
void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -929,6 +938,26 @@ void MacroAssembler::Cvttsd2si(Register dst, const Operand& src) {
}
+void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2siq(dst, src);
+ } else {
+ cvttss2siq(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttss2siq(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2siq(dst, src);
+ } else {
+ cvttss2siq(dst, src);
+ }
+}
+
+
void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -1062,6 +1091,7 @@ void MacroAssembler::SafePush(Smi* src) {
Register MacroAssembler::GetSmiConstant(Smi* source) {
+ STATIC_ASSERT(kSmiTag == 0);
int value = source->value();
if (value == 0) {
xorl(kScratchRegister, kScratchRegister);
@@ -1073,9 +1103,13 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- // Special-casing 0 here to use xorl seems to make things slower, so we don't
- // do it.
- Move(dst, source, Assembler::RelocInfoNone());
+ STATIC_ASSERT(kSmiTag == 0);
+ int value = source->value();
+ if (value == 0) {
+ xorl(dst, dst);
+ } else {
+ Move(dst, source, Assembler::RelocInfoNone());
+ }
}
@@ -2727,6 +2761,17 @@ void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
}
+void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
+ RoundingMode mode) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vroundss(dst, dst, src, mode);
+ } else {
+ roundss(dst, src, mode);
+ }
+}
+
+
void MacroAssembler::Roundsd(XMMRegister dst, XMMRegister src,
RoundingMode mode) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -3808,6 +3853,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -3939,15 +3996,16 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
- movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ movp(rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
LoadSharedFunctionInfoSpecialField(
- rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+ rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
ParameterCount expected(rbx);
- InvokeFunction(function, expected, actual, flag, call_wrapper);
+ InvokeFunction(function, new_target, expected, actual, flag, call_wrapper);
}
@@ -3957,44 +4015,55 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
Move(rdi, function);
- InvokeFunction(rdi, expected, actual, flag, call_wrapper);
+ InvokeFunction(rdi, no_reg, expected, actual, flag, call_wrapper);
}
void MacroAssembler::InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
DCHECK(function.is(rdi));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- InvokeCode(rdx, expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(rdi, new_target, expected, actual, flag, call_wrapper);
}
-void MacroAssembler::InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(rdi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ }
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected,
actual,
- Handle<Code>::null(),
- code,
&done,
&definitely_mismatches,
flag,
Label::kNear,
call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -4010,8 +4079,6 @@ void MacroAssembler::InvokeCode(Register code,
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -4061,13 +4128,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_register.is(rdx)) {
- movp(rdx, code_register);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor));
Call(adaptor, RelocInfo::CODE_TARGET);
@@ -4083,6 +4143,49 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
+ cmpb(step_in_enabled_operand, Immediate(0));
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ Integer32ToSmi(expected.reg(), expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ Integer32ToSmi(actual.reg(), actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiToInteger64(actual.reg(), actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiToInteger64(expected.reg(), expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
void MacroAssembler::StubPrologue() {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
@@ -4307,10 +4410,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- movp(scratch, FieldOperand(scratch, offset));
- movp(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -4840,6 +4940,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ movp(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ movp(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ movp(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ movp(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Destination is incremented by length, source, length and scratch are
@@ -4926,16 +5047,16 @@ void MacroAssembler::CopyBytes(Register destination,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- movp(Operand(start_offset, 0), filler);
- addp(start_offset, Immediate(kPointerSize));
+ movp(Operand(current_address, 0), filler);
+ addp(current_address, Immediate(kPointerSize));
bind(&entry);
- cmpp(start_offset, end_offset);
+ cmpp(current_address, end_address);
j(below, &loop);
}
@@ -4966,36 +5087,24 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
-void MacroAssembler::LoadGlobalProxy(Register dst) {
- movp(dst, GlobalObjectOperand());
- movp(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
-}
-
-
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- movp(scratch,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- movp(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- int offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmpp(map_in_out, FieldOperand(scratch, offset));
+ movp(scratch, NativeContextOperand());
+ cmpp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- movp(map_in_out, FieldOperand(scratch, offset));
+ movp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
@@ -5005,14 +5114,10 @@ static const int kRegisterPassedArguments = 4;
static const int kRegisterPassedArguments = 6;
#endif
-void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- movp(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
- // Load the function from the native context.
- movp(function, Operand(function, Context::SlotOffset(index)));
+
+void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
+ movp(dst, NativeContextOperand());
+ movp(dst, ContextOperand(dst, index));
}
@@ -5151,10 +5256,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -5164,7 +5269,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -5202,45 +5307,19 @@ void MacroAssembler::JumpIfBlack(Register object,
Label* on_black,
Label::Distance on_black_distance) {
DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+
GetMarkBits(object, bitmap_scratch, mask_scratch);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
- // and a 0 at all other positions, including the position of the second bit.
+ // and a 1 at a position of the second bit. All other positions are zero.
movp(rcx, mask_scratch);
- // Make rcx into a mask that covers both marking bits using the operation
- // rcx = mask | (mask << 1).
- leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
- // Note that we are using a 4-byte aligned 8-byte load.
andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
-// Detect some, but not all, common pointer-free objects. This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
- Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance) {
- Label is_data_object;
- movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- j(equal, &is_data_object, Label::kNear);
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, not_data_object, not_data_object_distance);
- bind(&is_data_object);
-}
-
-
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
@@ -5260,104 +5339,27 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
- movl(mask_reg, Immediate(1));
+ movl(mask_reg, Immediate(3));
shlp_cl(mask_reg);
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- Push(mask_scratch);
- // shl. May overflow making the check conservative.
- addp(mask_scratch, mask_scratch);
- testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- Pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = rcx; // Holds map while checking type.
- Register length = rcx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- movp(map, FieldOperand(value, HeapObject::kMapOffset));
- CompareRoot(map, Heap::kHeapNumberMapRootIndex);
- j(not_equal, &not_heap_number, Label::kNear);
- movp(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = rcx;
- movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- testb(instance_type, Immediate(kExternalStringTag));
- j(zero, &not_external, Label::kNear);
- movp(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- andp(length, Immediate(kStringEncodingMask));
- xorp(length, Immediate(kStringEncodingMask));
- addp(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
- imulp(length, FieldOperand(value, String::kLengthOffset));
- shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- andp(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
- bind(&done);
+ j(zero, value_is_white, distance);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index c7f7f40778..1aa2c74f22 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -26,6 +26,7 @@ const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_rdx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
@@ -84,11 +85,8 @@ struct SmiIndex {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Prevent the use of the RootArray during the lifetime of this
// scope object.
@@ -151,7 +149,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kNear) {
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
@@ -159,7 +163,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kNear) {
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
@@ -220,29 +230,14 @@ class MacroAssembler: public Assembler {
}
// Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
-
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ void JumpIfBlack(Register object, Register bitmap_scratch,
+ Register mask_scratch, Label* on_black,
+ Label::Distance on_black_distance);
+
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -370,20 +365,25 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -399,13 +399,6 @@ class MacroAssembler: public Assembler {
void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, int native_context_index);
-
-
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
@@ -824,10 +817,15 @@ class MacroAssembler: public Assembler {
void Cvtqsi2sd(XMMRegister dst, Register src);
void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
+ void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
+
void Cvtsd2si(Register dst, XMMRegister src);
void Cvttsd2si(Register dst, XMMRegister src);
void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttss2siq(Register dst, XMMRegister src);
+ void Cvttss2siq(Register dst, const Operand& src);
void Cvttsd2siq(Register dst, XMMRegister src);
void Cvttsd2siq(Register dst, const Operand& src);
@@ -963,6 +961,7 @@ class MacroAssembler: public Assembler {
void Movapd(XMMRegister dst, XMMRegister src);
void Movmskpd(Register dst, XMMRegister src);
+ void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Sqrtsd(XMMRegister dst, XMMRegister src);
void Sqrtsd(XMMRegister dst, const Operand& src);
@@ -1201,6 +1200,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -1316,6 +1319,11 @@ class MacroAssembler: public Assembler {
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -1353,8 +1361,15 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
// Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst);
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
@@ -1367,8 +1382,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
+ // Load the native context slot with the current index.
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1392,36 +1407,33 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ // Convenience function: tail call a runtime routine (jump)
+ void TailCallRuntime(Runtime::FunctionId fid);
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext, int result_size);
+ // Jump to a runtime routines
+ void JumpToExternalReference(const ExternalReference& ext);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -1470,12 +1482,11 @@ class MacroAssembler: public Assembler {
int min_length = 0,
Register scratch = kScratchRegister);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// Emit code for a truncating division by a constant. The dividend register is
@@ -1582,13 +1593,11 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ Label::Distance near_jump,
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue(bool save_rax);
@@ -1651,7 +1660,7 @@ class MacroAssembler: public Assembler {
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
+ CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
@@ -1692,8 +1701,8 @@ inline Operand ContextOperand(Register context, Register index) {
}
-inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand NativeContextOperand() {
+ return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
}
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 99649ec018..f1351c88cf 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -13,7 +13,7 @@ namespace internal {
// Since there is no simulator for the x64 architecture the only thing we can
// do is to call the entry directly.
// TODO(X64): Don't pass p0, since it isn't used?
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*regexp_matcher)(String*, int, const byte*,
@@ -21,7 +21,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
@@ -34,11 +35,13 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
} // namespace internal
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index ef8876c15a..0e529c7ab6 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -104,8 +104,9 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
- Assembler::set_target_address_at(pc_, host_, target);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
+ Assembler::set_target_address_at(isolate_, pc_, host_, target);
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
@@ -134,7 +135,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -200,7 +201,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -230,8 +231,8 @@ void RelocInfo::set_code_age_stub(Code* stub,
ICacheFlushMode icache_flush_mode) {
DCHECK(*pc_ == kCallOpcode);
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
- icache_flush_mode);
+ Assembler::set_target_address_at(
+ isolate_, pc_ + 1, host_, stub->instruction_start(), icache_flush_mode);
}
@@ -245,7 +246,7 @@ Address RelocInfo::debug_call_address() {
void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Address location = pc_ + Assembler::kPatchDebugBreakSlotAddressOffset;
- Assembler::set_target_address_at(location, host_, target);
+ Assembler::set_target_address_at(isolate_, location, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -260,7 +261,8 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -281,7 +283,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -306,7 +308,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
+ Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
@@ -454,13 +456,13 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
-void Assembler::set_target_address_at(Address pc, Address constant_pool,
- Address target,
+void Assembler::set_target_address_at(Isolate* isolate, Address pc,
+ Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode) {
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICacheWithoutIsolate(p, sizeof(int32_t));
+ Assembler::FlushICache(isolate, p, sizeof(int32_t));
}
}
@@ -500,7 +502,7 @@ void Assembler::emit_near_disp(Label* L) {
void Assembler::deserialization_set_target_internal_reference_at(
- Address pc, Address target, RelocInfo::Mode mode) {
+ Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index baadd87206..53919486d6 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -102,37 +102,6 @@ bool RelocInfo::IsInConstantPool() {
}
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Call instruction takes up 5 bytes and int3 takes up one byte.
- static const int kCallCodeSize = 5;
- int code_size = kCallCodeSize + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
-// Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE32);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(kCallCodeSize,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- DCHECK_GE(guard_bytes, 0);
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -253,6 +222,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->constant_pool_size = 0;
}
@@ -1975,6 +1945,7 @@ void Assembler::GrowBuffer() {
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2110,7 +2081,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index aa5195c951..668dc7bb40 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -483,19 +483,17 @@ class Assembler : public AssemblerBase {
// Read/Modify the code target in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
- Address pc, Address constant_pool, Address target,
+ Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
- static inline void set_target_address_at(Address pc,
- Code* code,
- Address target,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED) {
+ static inline void set_target_address_at(
+ Isolate* isolate, Address pc, Code* code, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
Address constant_pool = code ? code->constant_pool() : NULL;
- set_target_address_at(pc, constant_pool, target);
+ set_target_address_at(isolate, pc, constant_pool, target);
}
// Return the code target address at a call site from the return address
@@ -505,13 +503,14 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Code* code, Address target) {
- set_target_address_at(instruction_payload, code, target);
+ Isolate* isolate, Address instruction_payload, Code* code,
+ Address target) {
+ set_target_address_at(isolate, instruction_payload, code, target);
}
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
- Address pc, Address target,
+ Isolate* isolate, Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
static const int kSpecialTargetSize = kPointerSize;
@@ -928,7 +927,7 @@ class Assembler : public AssemblerBase {
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
- void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);
+ void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 12b41084b4..55ec55fc6f 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -22,9 +22,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
BuiltinExtraArguments extra_args) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
- // (only guaranteed when the called function
- // is not marked as DontAdaptArguments)
- // -- edi : called function
+ // -- edi : target
+ // -- edx : new.target
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
@@ -37,37 +36,26 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- // TODO(bmeurer): Can we make this more robust?
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
- if (extra_args == NEEDS_CALLED_FUNCTION) {
- num_extra_args = 1;
- Register scratch = ebx;
- __ pop(scratch); // Save return address.
- __ push(edi);
- __ push(scratch); // Restore return address.
- } else {
- DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+ if (extra_args != BuiltinExtraArguments::kNone) {
+ __ PopReturnAddressTo(ecx);
+ if (extra_args & BuiltinExtraArguments::kTarget) {
+ ++num_extra_args;
+ __ Push(edi);
+ }
+ if (extra_args & BuiltinExtraArguments::kNewTarget) {
+ ++num_extra_args;
+ __ Push(edx);
+ }
+ __ PushReturnAddressFrom(ecx);
}
// JumpToExternalReference expects eax to contain the number of arguments
- // including the receiver and the extra arguments. But eax is only valid
- // if the called function is marked as DontAdaptArguments, otherwise we
- // need to load the argument count from the SharedFunctionInfo.
- Label argc, done_argc;
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(ebx);
- __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
- __ j(equal, &argc, Label::kNear);
- __ lea(eax, Operand(ebx, num_extra_args + 1));
- __ jmp(&done_argc, Label::kNear);
- __ bind(&argc);
+ // including the receiver and the extra arguments.
__ add(eax, Immediate(num_extra_args + 1));
- __ bind(&done_argc);
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@@ -75,14 +63,21 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
+ // Push a copy of the target function and the new target.
__ push(edi);
+ __ push(edx);
// Function is also the parameter to the runtime call.
__ push(edi);
__ CallRuntime(function_id, 1);
- // Restore receiver.
+ // Restore target function and new target.
+ __ pop(edx);
__ pop(edi);
}
@@ -122,12 +117,13 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function) {
+ bool is_api_function,
+ bool create_implicit_receiver) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
// -- ebx: allocation site or undefined
- // -- edx: original constructor
+ // -- edx: new target
// -----------------------------------
// Enter a construct frame.
@@ -139,177 +135,166 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
__ SmiTag(eax);
__ push(eax);
- __ push(edi);
- __ push(edx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(not_equal, &rt_call);
-
- // Verify that the original constructor is a JSFunction.
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // edx: original constructor
- __ mov(eax, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- if (!is_api_function) {
- Label allocate;
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
- // Check if slack tracking is enabled.
- __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
- __ shr(esi, Map::Counter::kShift);
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &allocate);
- // Decrease generous allocation count.
- __ sub(FieldOperand(eax, Map::kBitField3Offset),
- Immediate(1 << Map::Counter::kShift));
-
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(not_equal, &allocate);
-
- __ push(eax);
- __ push(edx);
- __ push(edi);
-
- __ push(eax); // initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
- __ pop(edi);
- __ pop(edx);
- __ pop(eax);
- __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
-
- __ bind(&allocate);
- }
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
-
- __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
- Factory* factory = masm->isolate()->factory();
-
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- // Set extra fields in the newly allocated object.
- // eax: initial map
- // ebx: JSObject
- // edi: start of next object
- // esi: slack tracking counter (non-API function case)
- __ mov(edx, factory->undefined_value());
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(less, &no_inobject_slack_tracking);
-
- // Allocate object with a slack.
- __ movzx_b(
- esi,
- FieldOperand(
- eax, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ sub(esi, eax);
- __ lea(esi,
- Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(esi, edi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
+ if (create_implicit_receiver) {
+ __ push(edi);
+ __ push(edx);
+
+ // Try to allocate the object without transitioning into C code. If any of
+ // the preconditions is not met, the code bails out to the runtime call.
+ Label rt_call, allocated;
+ if (FLAG_inline_new) {
+ // Verify that the new target is a JSFunction.
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Load the initial map and verify that it is in fact a map.
+ // edx: new target
+ __ mov(eax,
+ FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+ // Will both indicate a NULL and a Smi
+ __ JumpIfSmi(eax, &rt_call);
+ // edi: constructor
+ // eax: initial map (if proven valid below)
+ __ CmpObjectType(eax, MAP_TYPE, ebx);
+ __ j(not_equal, &rt_call);
+
+ // Fall back to runtime if the expected base constructor and base
+ // constructor differ.
+ __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see
+ // comments in Runtime_NewObject in runtime.cc). In which case the
+ // initial map's instance type would be JS_FUNCTION_TYPE.
+ // edi: constructor
+ // eax: initial map
+ __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ // edi: constructor
+ // eax: initial map
+ __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
+
+ __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+
+ Factory* factory = masm->isolate()->factory();
+
+ // Allocated the JSObject, now initialize the fields.
+ // eax: initial map
+ // ebx: JSObject (not HeapObject tagged - the actual address).
+ // edi: start of next object
+ __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+ __ mov(ecx, factory->empty_fixed_array());
+ __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+ __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+ __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+
+ // Add the object tag to make the JSObject real, so that we can continue
+ // and jump into the continuation code at any time from now on.
+ __ or_(ebx, Immediate(kHeapObjectTag));
+
+ // Fill all the in-object properties with the appropriate filler.
+ // ebx: JSObject (tagged)
+ // ecx: First in-object property of JSObject (not tagged)
+ __ mov(edx, factory->undefined_value());
+
+ if (!is_api_function) {
+ Label no_inobject_slack_tracking;
+
+ // The code below relies on these assumptions.
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ // Check if slack tracking is enabled.
+ __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
+ __ shr(esi, Map::ConstructionCounter::kShift);
+ __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
+ __ push(esi); // Save allocation count value.
+ // Decrease generous allocation count.
+ __ sub(FieldOperand(eax, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Allocate object with a slack.
+ __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ neg(esi);
+ __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
+ // esi: offset of first field after pre-allocated fields
+ if (FLAG_debug_code) {
+ __ cmp(ecx, esi);
+ __ Assert(less_equal,
+ kUnexpectedNumberOfPreAllocatedPropertyFields);
+ }
+ __ InitializeFieldsWithFiller(ecx, esi, edx);
+
+ // To allow truncation fill the remaining fields with one pointer
+ // filler map.
+ __ mov(edx, factory->one_pointer_filler_map());
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+ __ pop(esi); // Restore allocation count value before decreasing.
+ __ cmp(esi, Map::kSlackTrackingCounterEnd);
+ __ j(not_equal, &allocated);
+
+ // Push the object to the stack, and then the initial map as
+ // an argument to the runtime call.
+ __ push(ebx);
+ __ push(eax); // initial map
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ pop(ebx);
+
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+
+ __ bind(&no_inobject_slack_tracking);
}
- __ InitializeFieldsWithFiller(ecx, esi, edx);
- __ mov(edx, factory->one_pointer_filler_map());
- // Fill the remaining fields with one pointer filler map.
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(ecx, edi, edx);
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- // ebx: JSObject (untagged)
- __ or_(ebx, Immediate(kHeapObjectTag));
+ // Continue with JSObject being successfully allocated
+ // ebx: JSObject (tagged)
+ __ jmp(&allocated);
+ }
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
+ // Allocate the new receiver object using the runtime call.
+ // edx: new target
+ __ bind(&rt_call);
+ int offset = kPointerSize;
+
+ // Must restore esi (context) and edi (constructor) before calling
+ // runtime.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(edi, Operand(esp, offset));
+ __ push(edi); // constructor function
+ __ push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ mov(ebx, eax); // store result in ebx
+
+ // New object allocated.
+ // ebx: newly allocated object
+ __ bind(&allocated);
+
+ // Restore the parameters.
+ __ pop(edx); // new.target
+ __ pop(edi); // Constructor function.
+
+ // Retrieve smi-tagged arguments count from the stack.
+ __ mov(eax, Operand(esp, 0));
}
- // Allocate the new receiver object using the runtime call.
- // edx: original constructor
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi); // constructor function
- __ push(edx); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
-
- // Restore the parameters.
- __ pop(edx); // new.target
- __ pop(edi); // Constructor function.
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
__ SmiUntag(eax);
- // Push new.target onto the construct frame. This is stored just below the
- // receiver on the stack.
- __ push(edx);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
+ if (create_implicit_receiver) {
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ push(ebx);
+ __ push(ebx);
+ } else {
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ }
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -332,40 +317,44 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ call(code, RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper());
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
// Store offset of return address for deoptimizer.
- if (!is_api_function) {
+ if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The arguments
- // count is stored below the reciever and the new.target.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ if (create_implicit_receiver) {
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0));
+
+ // Restore the arguments count and leave the construct frame. The
+ // arguments count is stored below the receiver.
+ __ bind(&exit);
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ } else {
+ __ mov(ebx, Operand(esp, 0));
+ }
// Leave construct frame.
}
@@ -375,91 +364,32 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ if (create_implicit_receiver) {
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ }
__ ret(0);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false);
+ Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true);
+ Generate_JSConstructStubHelper(masm, true, true);
}
-void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax: number of arguments
- // -- edi: constructor function
- // -- ebx: allocation site or undefined
- // -- edx: original constructor
- // -----------------------------------
-
- {
- FrameScope frame_scope(masm, StackFrame::CONSTRUCT);
-
- // Preserve allocation site.
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(ebx);
-
- // Preserve actual arguments count.
- __ SmiTag(eax);
- __ push(eax);
- __ SmiUntag(eax);
-
- // Push new.target.
- __ push(edx);
-
- // receiver is the hole.
- __ push(Immediate(masm->isolate()->factory()->the_hole_value()));
-
- // Set up pointer to last argument.
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
- // Copy arguments and receiver to the expression stack.
- Label loop, entry;
- __ mov(ecx, eax);
- __ jmp(&entry);
- __ bind(&loop);
- __ push(Operand(ebx, ecx, times_4, 0));
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
-
- // Handle step in.
- Label skip_step_in;
- ExternalReference debug_step_in_fp =
- ExternalReference::debug_step_in_fp_address(masm->isolate());
- __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
- __ j(equal, &skip_step_in);
-
- __ push(eax);
- __ push(edi);
- __ push(edi);
- __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
- __ pop(edi);
- __ pop(eax);
-
- __ bind(&skip_step_in);
-
- // Invoke function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
- // Restore context from the frame.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false);
+}
- // Get arguments count, skipping over new.target.
- __ mov(ebx, Operand(esp, kPointerSize));
- }
- __ pop(ecx); // Return address.
- __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
@@ -492,7 +422,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&okay);
}
@@ -578,6 +508,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
//
// The live registers are:
// o edi: the JS function object being called
+// o edx: the new target
// o esi: our context
// o ebp: the caller's frame pointer
// o esp: stack pointer (pointing to return address)
@@ -595,6 +526,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(ebp, esp);
__ push(esi); // Callee's context.
__ push(edi); // Callee's JS function.
+ __ push(edx); // Callee's new target.
+
+ // Push zero for bytecode array offset.
+ __ push(Immediate(0));
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
@@ -624,7 +559,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -656,7 +591,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
__ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
__ pop(kInterpreterBytecodeArrayRegister);
__ bind(&ok);
}
@@ -665,9 +600,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ mov(kInterpreterRegisterFileRegister, ebp);
- __ sub(
- kInterpreterRegisterFileRegister,
- Immediate(kPointerSize + StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
// Since the dispatch table root might be set after builtins are generated,
@@ -675,7 +609,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
__ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // Push context as a stack located parameter to the bytecode handler.
+ // Push dispatch table as a stack located parameter to the bytecode handler.
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
__ push(ebx);
@@ -691,6 +625,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ebx);
+ __ nop(); // Ensure that return address still counts as interpreter entry
+ // trampoline.
}
@@ -766,7 +702,7 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor
+ // -- edx : the new target
// -- edi : the constructor
// -- ebx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
@@ -799,39 +735,108 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Push(ecx);
// Call the constructor with unmodified eax, edi, ebi values.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+
+ __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
+ // Tear down internal frame.
+ }
+
+ // Initialize register file register.
+ __ mov(kInterpreterRegisterFileRegister, ebp);
+ __ add(kInterpreterRegisterFileRegister,
+ Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+
+ // Get the bytecode array pointer from the frame.
+ __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
+ __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
+ ebx);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ mov(
+ kInterpreterBytecodeOffsetRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Push dispatch table as a stack located parameter to the bytecode handler -
+ // overwrite the state slot (we don't use these for interpreter deopts).
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
+ __ mov(Operand(esp, kPointerSize), ebx);
+
+ // Dispatch to the target bytecode.
+ __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
+
+ // Get the context from the frame.
+ // TODO(rmcilroy): Update interpreter frame to expect current context at the
+ // context slot instead of the function context.
+ __ mov(kContextRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kContextFromRegisterPointer));
+
+ // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
+ // and header removal.
+ __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(ebx);
}
-static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function.
- __ push(edi);
- // Function is also the parameter to the runtime call.
- __ push(edi);
- // Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
- __ CallRuntime(Runtime::kCompileOptimized, 2);
- // Restore receiver.
- __ pop(edi);
+
+void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
+ Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+ GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallCompileOptimized(masm, false);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
GenerateTailCallToReturnedCode(masm);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallCompileOptimized(masm, true);
+ CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
GenerateTailCallToReturnedCode(masm);
}
@@ -927,7 +932,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kNotifyStubFailure, save_doubles);
__ popad();
// Tear down internal frame.
}
@@ -954,7 +959,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
// Tear down internal frame.
}
@@ -996,7 +1001,136 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
// static
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into eax and check that it's actually a JSDate object.
+ Label receiver_not_date;
+ {
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ JumpIfSmi(eax, &receiver_not_date);
+ __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
+ __ j(not_equal, &receiver_not_date);
+ }
+
+ // 2. Load the specified date field, falling back to the runtime as necessary.
+ if (field_index == JSDate::kDateValue) {
+ __ mov(eax, FieldOperand(eax, JSDate::kValueOffset));
+ } else {
+ if (field_index < JSDate::kFirstUncachedField) {
+ Label stamp_mismatch;
+ __ mov(edx, Operand::StaticVariable(
+ ExternalReference::date_cache_stamp(masm->isolate())));
+ __ cmp(edx, FieldOperand(eax, JSDate::kCacheStampOffset));
+ __ j(not_equal, &stamp_mismatch, Label::kNear);
+ __ mov(eax, FieldOperand(
+ eax, JSDate::kValueOffset + field_index * kPointerSize));
+ __ ret(1 * kPointerSize);
+ __ bind(&stamp_mismatch);
+ }
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ PrepareCallCFunction(2, ebx);
+ __ mov(Operand(esp, 0), eax);
+ __ mov(Operand(esp, 1 * kPointerSize),
+ Immediate(Smi::FromInt(field_index)));
+ __ CallCFunction(
+ ExternalReference::get_date_field_function(masm->isolate()), 2);
+ }
+ __ ret(1 * kPointerSize);
+
+ // 3. Raise a TypeError if the receiver is not a date.
+ __ bind(&receiver_not_date);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowNotDateError);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argArray
+ // -- esp[8] : thisArg
+ // -- esp[12] : receiver
+ // -----------------------------------
+
+ // 1. Load receiver into edi, argArray into eax (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+ Label no_arg_array, no_this_arg;
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ mov(ebx, edx);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ {
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 0));
+ __ cmp(eax, Immediate(1));
+ __ j(equal, &no_arg_array, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -kPointerSize));
+ __ bind(&no_arg_array);
+ }
+ __ bind(&no_this_arg);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
+
+ // ----------- S t a t e -------------
+ // -- eax : argArray
+ // -- edi : receiver
+ // -- esp[0] : return address
+ // -- esp[4] : thisArg
+ // -----------------------------------
+
+ // 2. Make sure the receiver is actually callable.
+ Label receiver_not_callable;
+ __ JumpIfSmi(edi, &receiver_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &receiver_not_callable, Label::kNear);
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(eax, Heap::kNullValueRootIndex, &no_arguments, Label::kNear);
+ __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &no_arguments,
+ Label::kNear);
+
+ // 4a. Apply the receiver to the given argArray (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ Set(eax, 0);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+
+ // 4c. The receiver is not callable, throw an appropriate TypeError.
+ __ bind(&receiver_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
+ }
+}
+
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
// esp[0] : Return address
// esp[8] : Argument n
@@ -1042,201 +1176,142 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-static void Generate_PushAppliedArguments(MacroAssembler* masm,
- const int vectorOffset,
- const int argumentsOffset,
- const int indexOffset,
- const int limitOffset) {
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- __ mov(key, Operand(ebp, indexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- int slot_index = TypeFeedbackVector::PushAppliedArgumentsIndex();
- __ mov(slot, Immediate(Smi::FromInt(slot_index)));
- __ mov(vector, Operand(ebp, vectorOffset));
- Handle<Code> ic =
- KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register key.
- __ mov(key, Operand(ebp, indexOffset));
- __ add(key, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, indexOffset), key);
-
- __ bind(&entry);
- __ cmp(key, Operand(ebp, limitOffset));
- __ j(not_equal, &loop);
-
- // On exit, the pushed arguments count is in eax, untagged
- __ Move(eax, key);
- __ SmiUntag(eax);
-}
-
-
-// Used by FunctionApply and ReflectApply
-static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
- const int kFormalParameters = targetIsArgument ? 3 : 2;
- const int kStackSize = kFormalParameters + 1;
-
- // Stack at entry:
- // esp : return address
- // esp[4] : arguments
- // esp[8] : receiver ("this")
- // esp[12] : function
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : function arguments
- // ebp[12] : receiver
- // ebp[16] : function
- static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
- static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- __ push(Operand(ebp, kFunctionOffset)); // push this
- __ push(Operand(ebp, kArgumentsOffset)); // push arguments
- if (targetIsArgument) {
- __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
- }
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : argumentsList
+ // -- esp[8] : thisArgument
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ j(equal, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
+ }
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ Push(Immediate(0)); // index
- __ Push(Operand(ebp, kReceiverOffset)); // receiver
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : thisArgument
+ // -----------------------------------
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
+ // 2. Make sure the target is actually callable.
+ Label target_not_callable;
+ __ JumpIfSmi(edi, &target_not_callable, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &target_not_callable, Label::kNear);
- // Call the callable.
- // TODO(bmeurer): This should be a tail call according to ES6.
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // 3a. Apply the target to the given argumentsList (passing undefined for
+ // new.target).
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
- // Leave internal frame.
+ // 3b. The target is not callable, throw an appropriate TypeError.
+ __ bind(&target_not_callable);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
}
-// Used by ReflectConstruct
-static void Generate_ConstructHelper(MacroAssembler* masm) {
- const int kFormalParameters = 3;
- const int kStackSize = kFormalParameters + 1;
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : new.target (optional)
+ // -- esp[8] : argumentsList
+ // -- esp[12] : target
+ // -- esp[16] : receiver
+ // -----------------------------------
- // Stack at entry:
- // esp : return address
- // esp[4] : original constructor (new.target)
- // esp[8] : arguments
- // esp[16] : constructor
+ // 1. Load target into edi (if present), argumentsList into eax (if present),
+ // new.target into edx (if present, otherwise use target), remove all
+ // arguments from the stack (including the receiver), and push undefined
+ // as the receiver instead.
{
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Stack frame:
- // ebp : Old base pointer
- // ebp[4] : return address
- // ebp[8] : original constructor (new.target)
- // ebp[12] : arguments
- // ebp[16] : constructor
- static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
- static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
- static const int kVectorOffset =
- InternalFrameConstants::kCodeOffset - 1 * kPointerSize;
-
- // Push the vector.
- __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
- __ push(edi);
-
- // If newTarget is not supplied, set it to constructor
- Label validate_arguments;
- __ mov(eax, Operand(ebp, kNewTargetOffset));
- __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &validate_arguments, Label::kNear);
- __ mov(eax, Operand(ebp, kFunctionOffset));
- __ mov(Operand(ebp, kNewTargetOffset), eax);
-
- // Validate arguments
- __ bind(&validate_arguments);
- __ push(Operand(ebp, kFunctionOffset));
- __ push(Operand(ebp, kArgumentsOffset));
- __ push(Operand(ebp, kNewTargetOffset));
- __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
- CALL_FUNCTION);
-
- Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
-
- // Push current index and limit.
- const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
- const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(eax); // limit
- __ push(Immediate(0)); // index
- // Push the constructor function as callee.
- __ push(Operand(ebp, kFunctionOffset));
-
- // Loop over the arguments array, pushing each value to the stack
- Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
- kIndexOffset, kLimitOffset);
-
- // Use undefined feedback vector
- __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ mov(ecx, Operand(ebp, kNewTargetOffset));
-
- // Call the function.
- CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- // Leave internal frame.
+ Label done;
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ mov(edx, edi);
+ __ mov(ebx, edi);
+ __ cmp(eax, Immediate(1));
+ __ j(below, &done, Label::kNear);
+ __ mov(edi, Operand(esp, eax, times_pointer_size, -0 * kPointerSize));
+ __ mov(edx, edi);
+ __ j(equal, &done, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, -1 * kPointerSize));
+ __ cmp(eax, Immediate(3));
+ __ j(below, &done, Label::kNear);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, -2 * kPointerSize));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(eax, ebx);
}
- // remove this, target, arguments, and newTarget
- __ ret(kStackSize * kPointerSize);
-}
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edx : new.target
+ // -- edi : target
+ // -- esp[0] : return address
+ // -- esp[4] : receiver (undefined)
+ // -----------------------------------
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, false);
-}
+ // 2. Make sure the target is actually a constructor.
+ Label target_not_constructor;
+ __ JumpIfSmi(edi, &target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &target_not_constructor, Label::kNear);
+ // 3. Make sure the new.target is actually a constructor.
+ Label new_target_not_constructor;
+ __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &new_target_not_constructor, Label::kNear);
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
- Generate_ApplyHelper(masm, true);
-}
+ // 4a. Construct the target with the given new.target and argumentsList.
+ __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
+ // 4b. The target is not a constructor, throw an appropriate TypeError.
+ __ bind(&target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edi);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
- Generate_ConstructHelper(masm);
+ // 4c. The new.target is not a constructor, throw an appropriate TypeError.
+ __ bind(&new_target_not_constructor);
+ {
+ __ mov(Operand(esp, kPointerSize), edx);
+ __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+ }
}
@@ -1300,6 +1375,113 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Load the first argument into eax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, ebx);
+ }
+
+ // 2a. Convert the first argument to a number.
+ ToNumberStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+
+ // 2b. No arguments, return +0 (already in eax).
+ __ bind(&no_arguments);
+ __ ret(1 * kPointerSize);
+}
+
+
+// static
+void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- edx : new target
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ Move(ebx, Smi::FromInt(0));
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ }
+
+ // 3. Make sure ebx is a number.
+ {
+ Label done_convert;
+ __ JumpIfSmi(ebx, &done_convert);
+ __ CompareRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &done_convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Move(eax, ebx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Move(ebx, eax);
+ __ Pop(edx);
+ __ Pop(edi);
+ }
+ __ bind(&done_convert);
+ }
+
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
+
+ // 5. Allocate a JSValue wrapper for the number.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
+
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ }
+ __ Ret();
+}
+
+
+// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
@@ -1354,7 +1536,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
}
@@ -1364,13 +1546,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
- // -- edx : original constructor
+ // -- edx : new target
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into ebx and get rid of the rest (including the
+ // 1. Make sure we operate in the context of the called function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // 2. Load the first argument into ebx and get rid of the rest (including the
// receiver).
{
Label no_arguments, done;
@@ -1386,7 +1571,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ PushReturnAddressFrom(ecx);
}
- // 2. Make sure ebx is a string.
+ // 3. Make sure ebx is a string.
{
Label convert, done_convert;
__ JumpIfSmi(ebx, &convert, Label::kNear);
@@ -1407,62 +1592,26 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&done_convert);
}
- // 3. Allocate a JSValue wrapper for the string.
- {
- // ----------- S t a t e -------------
- // -- ebx : the first argument
- // -- edi : constructor function
- // -- edx : original constructor
- // -----------------------------------
-
- Label allocate, done_allocate, rt_call;
+ // 4. Check if new target and constructor differ.
+ Label new_object;
+ __ cmp(edx, edi);
+ __ j(not_equal, &new_object);
- // Fall back to runtime if the original constructor and constructor differ.
- __ cmp(edx, edi);
- __ j(not_equal, &rt_call);
+ // 5. Allocate a JSValue wrapper for the string.
+ __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
+ __ Ret();
- __ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
- __ bind(&done_allocate);
-
- // Initialize the JSValue in eax.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- __ Ret();
-
- // Fallback to the runtime to allocate in new space.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edi);
- __ Push(Smi::FromInt(JSValue::kSize));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Pop(edi);
- __ Pop(ebx);
- }
- __ jmp(&done_allocate);
-
- // Fallback to the runtime to create new object.
- __ bind(&rt_call);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx);
- __ Push(edi);
- __ Push(edi); // constructor function
- __ Push(edx); // original constructor
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Pop(edi);
- __ Pop(ebx);
- }
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
- __ Ret();
+ // 6. Fallback to the runtime to create new object.
+ __ bind(&new_object);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx); // the first argument
+ __ Push(edi); // constructor function
+ __ Push(edx); // new target
+ __ CallRuntime(Runtime::kNewObject);
+ __ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
+ __ Ret();
}
@@ -1471,24 +1620,24 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
- // -- edi : function (passed through to callee)
+ // -- edx : new target (passed through to callee)
// -----------------------------------
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edx, Operand::StaticVariable(real_stack_limit));
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
__ mov(ecx, esp);
- __ sub(ecx, edx);
- // Make edx the space we need for the array when it is unrolled onto the
+ __ sub(ecx, edi);
+ // Make edi the space we need for the array when it is unrolled onto the
// stack.
- __ mov(edx, ebx);
- __ shl(edx, kPointerSizeLog2);
+ __ mov(edi, ebx);
+ __ shl(edi, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
+ __ cmp(ecx, edi);
__ j(less_equal, stack_overflow); // Signed comparison.
}
@@ -1528,6 +1677,146 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// static
+void Builtins::Generate_Apply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argumentsList
+ // -- edi : target
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Create the list of arguments from the array-like argumentsList.
+ {
+ Label create_arguments, create_array, create_runtime, done_create;
+ __ JumpIfSmi(eax, &create_runtime);
+
+ // Load the map of argumentsList into ecx.
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+
+ // Load native context into ebx.
+ __ mov(ebx, NativeContextOperand());
+
+ // Check if argumentsList is an (unmodified) arguments object.
+ __ cmp(ecx, ContextOperand(ebx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+ __ cmp(ecx, ContextOperand(ebx, Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ j(equal, &create_arguments);
+
+ // Check if argumentsList is a fast JSArray.
+ __ CmpInstanceType(ecx, JS_ARRAY_TYPE);
+ __ j(equal, &create_array);
+
+ // Ask the runtime to create the list (actually a FixedArray).
+ __ bind(&create_runtime);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ Push(edx);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kCreateListFromArrayLike);
+ __ Pop(edx);
+ __ Pop(edi);
+ __ mov(ebx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ }
+ __ jmp(&done_create);
+
+ // Try to create the list from an arguments object.
+ __ bind(&create_arguments);
+ __ mov(ebx,
+ FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
+ __ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
+ __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ j(not_equal, &create_runtime);
+ __ SmiUntag(ebx);
+ __ mov(eax, ecx);
+ __ jmp(&done_create);
+
+ // Try to create the list from a JSArray object.
+ __ bind(&create_array);
+ __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(ecx);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ __ cmp(ecx, Immediate(FAST_ELEMENTS));
+ __ j(above, &create_runtime);
+ __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
+ __ j(equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+
+ __ bind(&done_create);
+ }
+
+ // Check for stack overflow.
+ {
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label done;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(ecx, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ neg(ecx);
+ __ add(ecx, esp);
+ __ sar(ecx, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, ebx);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&done);
+ }
+
+ // ----------- S t a t e -------------
+ // -- edi : target
+ // -- eax : args (a FixedArray built from argumentsList)
+ // -- ebx : len (number of elements to push from args)
+ // -- edx : new.target (checked to be constructor or undefined)
+ // -- esp[0] : return address.
+ // -- esp[4] : thisArgument
+ // -----------------------------------
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ __ push(edx);
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+
+ __ PopReturnAddressTo(edx);
+ __ Move(ecx, Immediate(0));
+ Label done, loop;
+ __ bind(&loop);
+ __ cmp(ecx, ebx);
+ __ j(equal, &done, Label::kNear);
+ __ Push(
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ inc(ecx);
+ __ jmp(&loop);
+ __ bind(&done);
+ __ PushReturnAddressFrom(edx);
+
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ fstp_s(MemOperand(esp, 0));
+ __ pop(edx);
+
+ __ Move(eax, ebx);
+ }
+
+ // Dispatch to Call or Construct depending on whether new.target is undefined.
+ {
+ __ CompareRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ }
+}
+
+
+// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1623,18 +1912,129 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiUntag(ebx);
ParameterCount actual(eax);
ParameterCount expected(ebx);
- __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
- actual, JUMP_FUNCTION, NullCallWrapper());
-
+ __ InvokeFunctionCode(edi, no_reg, expected, actual, JUMP_FUNCTION,
+ CheckDebugStepCallWrapper());
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowConstructorNonCallableError, 0);
+ __ push(edi);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
}
}
+namespace {
+
+void Generate_PushBoundArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -----------------------------------
+
+ // Load [[BoundArguments]] into ecx and length of that into ebx.
+ Label no_bound_arguments;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ test(ebx, ebx);
+ __ j(zero, &no_bound_arguments);
+ {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : new.target (only in case of [[Construct]])
+ // -- edi : target (checked to be a JSBoundFunction)
+ // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
+ // -- ebx : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ lea(ecx, Operand(ebx, times_pointer_size, 0));
+ __ sub(esp, ecx);
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack
+ // limit".
+ __ CompareRoot(esp, ecx, Heap::kRealStackLimitRootIndex);
+ __ j(greater, &done, Label::kNear); // Signed comparison.
+ // Restore the stack pointer.
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 0));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Adjust effective number of arguments to include return address.
+ __ inc(eax);
+
+ // Relocate arguments and return address down the stack.
+ {
+ Label loop;
+ __ Set(ecx, 0);
+ __ lea(ebx, Operand(esp, ebx, times_pointer_size, 0));
+ __ bind(&loop);
+ __ fld_s(Operand(ebx, ecx, times_pointer_size, 0));
+ __ fstp_s(Operand(esp, ecx, times_pointer_size, 0));
+ __ inc(ecx);
+ __ cmp(ecx, eax);
+ __ j(less, &loop);
+ }
+
+ // Copy [[BoundArguments]] to the stack (below the arguments).
+ {
+ Label loop;
+ __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
+ __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ bind(&loop);
+ __ dec(ebx);
+ __ fld_s(
+ FieldOperand(ecx, ebx, times_pointer_size, FixedArray::kHeaderSize));
+ __ fstp_s(Operand(esp, eax, times_pointer_size, 0));
+ __ lea(eax, Operand(eax, 1));
+ __ j(greater, &loop);
+ }
+
+ // Adjust effective number of arguments (eax contains the number of
+ // arguments from the call plus return address plus the number of
+ // [[BoundArguments]]), so we need to subtract one for the return address.
+ __ dec(eax);
+ }
+ __ bind(&no_bound_arguments);
+}
+
+} // namespace
+
+
+// static
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Patch the receiver to [[BoundThis]].
+ __ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(ExternalReference(
+ Builtins::kCall_ReceiverIsAny, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
+
+
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
@@ -1648,14 +2048,22 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(equal, masm->isolate()->builtins()->CallFunction(mode),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
- // 1. Call to function proxy.
- // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kCallTrapOffset));
- __ AssertNotSmi(edi);
- __ jmp(&non_smi);
+ // 1. Runtime fallback for Proxy [[Call]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ PushReturnAddressFrom(ecx);
+ // Increase the arguments size to include the pushed function and the
+ // existing receiver on the stack.
+ __ add(eax, Immediate(2));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyCall, masm->isolate()));
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
@@ -1676,7 +2084,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -1685,10 +2093,9 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (checked to be a JSFunction)
+ // -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
- __ AssertFunction(edx);
__ AssertFunction(edi);
// Calling convention for function specific ConstructStubs require
@@ -1705,17 +2112,54 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the new target (checked to be a constructor)
+ // -- edi : the constructor to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(edi);
+
+ // Push the [[BoundArguments]] onto the stack.
+ Generate_PushBoundArguments(masm);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label done;
+ __ cmp(edi, edx);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&done);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ mov(ecx, Operand::StaticVariable(
+ ExternalReference(Builtins::kConstruct, masm->isolate())));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
+
+
+// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edi : the constructor to call (checked to be a JSProxy)
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
- // -- edi : the constructor to call (checked to be a JSFunctionProxy)
// -----------------------------------
- // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
- __ mov(edi, FieldOperand(edi, JSFunctionProxy::kConstructTrapOffset));
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Call into the Runtime for Proxy [[Construct]].
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ // Include the pushed new_target, constructor and the receiver.
+ __ add(eax, Immediate(3));
+ // Tail-call to the runtime.
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
@@ -1723,23 +2167,32 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
- // -- edx : the original constructor (either the same as the constructor or
+ // -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
// -----------------------------------
- // Check if target has a [[Construct]] internal method.
+ // Check if target is a Smi.
Label non_constructor;
__ JumpIfSmi(edi, &non_constructor, Label::kNear);
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
- __ j(zero, &non_constructor, Label::kNear);
// Dispatch based on instance type.
- __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(equal, masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+
+ // Check if target has a [[Construct]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructBoundFunction(),
+ RelocInfo::CODE_TARGET);
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(equal, masm->isolate()->builtins()->ConstructProxy(),
RelocInfo::CODE_TARGET);
@@ -1756,11 +2209,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an Object that doesn't have a [[Construct]] internal
// method.
__ bind(&non_constructor);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
- __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
- }
+ __ Jump(masm->isolate()->builtins()->ConstructedNonConstructable(),
+ RelocInfo::CODE_TARGET);
}
@@ -1768,17 +2218,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
// -- ebx : expected number of arguments
+ // -- edx : new target (passed through to callee)
// -- edi : function (passed through to callee)
// -----------------------------------
- Label invoke, dont_adapt_arguments;
+ Label invoke, dont_adapt_arguments, stack_overflow;
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
- Label stack_overflow;
- ArgumentsAdaptorStackCheck(masm, &stack_overflow);
-
Label enough, too_few;
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
@@ -1787,6 +2234,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -1823,11 +2271,12 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+ __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ ArgumentsAdaptorStackCheck(masm, &stack_overflow);
// Remember expected arguments in ecx.
__ mov(ecx, ebx);
@@ -1866,8 +2315,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
// eax : expected number of arguments
+ // edx : new target (passed through to callee)
// edi : function (passed through to callee)
- __ call(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ call(ecx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
@@ -1880,18 +2331,128 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(edx);
+ __ mov(ecx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ __ jmp(ecx);
__ bind(&stack_overflow);
{
FrameScope frame(masm, StackFrame::MANUAL);
- EnterArgumentsAdaptorFrame(masm);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
__ int3();
}
}
+static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
+ Register function_template_info,
+ Register scratch0, Register scratch1,
+ Label* receiver_check_failed) {
+ // If there is no signature, return the holder.
+ __ CompareRoot(FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset),
+ Heap::kUndefinedValueRootIndex);
+ Label receiver_check_passed;
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // Walk the prototype chain.
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ Label prototype_loop_start;
+ __ bind(&prototype_loop_start);
+
+ // Get the constructor, if any.
+ __ GetMapConstructor(scratch0, scratch0, scratch1);
+ __ CmpInstanceType(scratch1, JS_FUNCTION_TYPE);
+ Label next_prototype;
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Get the constructor's signature.
+ __ mov(scratch0,
+ FieldOperand(scratch0, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(scratch0,
+ FieldOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Loop through the chain of inheriting function templates.
+ Label function_template_loop;
+ __ bind(&function_template_loop);
+
+ // If the signatures match, we have a compatible receiver.
+ __ cmp(scratch0, FieldOperand(function_template_info,
+ FunctionTemplateInfo::kSignatureOffset));
+ __ j(equal, &receiver_check_passed, Label::kNear);
+
+ // If the current type is not a FunctionTemplateInfo, load the next prototype
+ // in the chain.
+ __ JumpIfSmi(scratch0, &next_prototype, Label::kNear);
+ __ CmpObjectType(scratch0, FUNCTION_TEMPLATE_INFO_TYPE, scratch1);
+ __ j(not_equal, &next_prototype, Label::kNear);
+
+ // Otherwise load the parent function template and iterate.
+ __ mov(scratch0,
+ FieldOperand(scratch0, FunctionTemplateInfo::kParentTemplateOffset));
+ __ jmp(&function_template_loop, Label::kNear);
+
+ // Load the next prototype.
+ __ bind(&next_prototype);
+ __ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
+ // End if the prototype is null or not hidden.
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, receiver_check_failed);
+ __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ test(FieldOperand(scratch0, Map::kBitField3Offset),
+ Immediate(Map::IsHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+ // Iterate.
+ __ jmp(&prototype_loop_start, Label::kNear);
+
+ __ bind(&receiver_check_passed);
+}
+
+
+void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments (not including the receiver)
+ // -- edi : callee
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[eax * 4] : first argument
+ // -- esp[(eax + 1) * 4] : receiver
+ // -----------------------------------
+
+ // Load the FunctionTemplateInfo.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+
+ // Do the compatible receiver check.
+ Label receiver_check_failed;
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPCOnStackSize));
+ __ Push(eax);
+ CompatibleReceiverCheck(masm, ecx, ebx, edx, eax, &receiver_check_failed);
+ __ Pop(eax);
+ // Get the callback offset from the FunctionTemplateInfo, and jump to the
+ // beginning of the code.
+ __ mov(edx, FieldOperand(ebx, FunctionTemplateInfo::kCallCodeOffset));
+ __ mov(edx, FieldOperand(edx, CallHandlerInfo::kFastHandlerOffset));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(edx);
+
+ // Compatible receiver check failed: pop return address, arguments and
+ // receiver and throw an Illegal Invocation exception.
+ __ bind(&receiver_check_failed);
+ __ Pop(eax);
+ __ PopReturnAddressTo(ebx);
+ __ lea(eax, Operand(eax, times_pointer_size, 1 * kPointerSize));
+ __ add(esp, eax);
+ __ PushReturnAddressFrom(ebx);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ TailCallRuntime(Runtime::kThrowIllegalInvocation);
+ }
+}
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1899,7 +2460,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(eax);
- __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
Label skip;
@@ -1938,7 +2499,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kStackGuard);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 9d066483cf..1da5f41a88 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -334,7 +334,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+ __ TailCallRuntime(Runtime::kMathPowRT);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
@@ -388,7 +388,7 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
__ push(scratch); // return address
// Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadElementWithInterceptor, 2, 1);
+ __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
@@ -488,7 +488,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ pop(ebx); // Return address.
__ push(edx);
__ push(ebx);
- __ TailCallRuntime(Runtime::kArguments, 1, 1);
+ __ TailCallRuntime(Runtime::kArguments);
}
@@ -520,7 +520,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -593,8 +593,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// esp[8] = parameter count (tagged)
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
+ __ mov(edi, NativeContextOperand());
__ mov(ebx, Operand(esp, 0 * kPointerSize));
__ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
@@ -756,7 +755,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
}
@@ -797,10 +796,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
- __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edi, FieldOperand(edi, JSGlobalObject::kNativeContextOffset));
- const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
- __ mov(edi, Operand(edi, offset));
+ __ mov(edi, NativeContextOperand());
+ __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
__ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -852,7 +849,35 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ push(edx); // Push parameters pointer.
__ push(ecx); // Push parameter count.
__ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
+}
+
+
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // ebx : rest parameter index (tagged)
+ // esp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+ __ bind(&runtime);
+ __ pop(eax); // Save return address.
+ __ push(ecx); // Push number of parameters.
+ __ push(edx); // Push parameters pointer.
+ __ push(ebx); // Push rest parameter index.
+ __ push(eax); // Push return address.
+ __ TailCallRuntime(Runtime::kNewRestParam);
}
@@ -861,7 +886,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1139,7 +1164,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(equal, &runtime);
// For exception, throw the exception again.
- __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow);
__ bind(&failure);
// For failure to match, return null.
@@ -1225,7 +1250,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1380,7 +1405,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Call runtime on identical JSObjects. Otherwise return equal.
- __ cmpb(ecx, static_cast<uint8_t>(FIRST_SPEC_OBJECT_TYPE));
+ __ cmpb(ecx, static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE));
__ j(above_equal, &runtime_call, Label::kFar);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SYMBOL_TYPE));
@@ -1448,8 +1473,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
@@ -1463,7 +1488,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1559,9 +1584,9 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
__ j(below, &runtime_call, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
@@ -1590,8 +1615,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
if (cc == equal) {
__ push(ecx);
- __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
- 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
} else {
__ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
@@ -1600,9 +1624,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(
- is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
- 1);
+ __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
+ : Runtime::kCompare);
}
__ bind(&miss);
@@ -1610,16 +1633,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
}
-static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
- bool is_super) {
+static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// eax : number of arguments to the construct function
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
- if (is_super) {
- __ pop(ecx);
- }
{
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1630,29 +1648,19 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
__ push(edi);
__ push(edx);
__ push(ebx);
- if (is_super) {
- __ push(ecx);
- }
__ CallStub(stub);
- if (is_super) {
- __ pop(ecx);
- }
__ pop(ebx);
__ pop(edx);
__ pop(edi);
__ pop(eax);
__ SmiUntag(eax);
}
-
- if (is_super) {
- __ push(ecx);
- }
}
-static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
@@ -1660,7 +1668,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// ebx : feedback vector
// edx : slot in feedback vector (Smi)
// edi : the function to call
- // esp[0]: original receiver (for IsSuperConstructorCall)
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
@@ -1726,12 +1733,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CallStubInRecordCallTarget(masm, &create_stub);
__ jmp(&done);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
- CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub);
__ bind(&done);
}
@@ -1739,14 +1746,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
- if (IsSuperConstructorCall()) {
- __ push(ecx);
- }
-
Label non_function;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function);
@@ -1754,29 +1756,22 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
- if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm, IsSuperConstructorCall());
+ GenerateRecordCallTarget(masm);
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map =
- isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
- __ AssertUndefinedOrAllocationSite(ebx);
- }
+ __ AssertUndefinedOrAllocationSite(ebx);
- if (IsSuperConstructorCall()) {
- __ pop(edx);
- } else {
- // Pass original constructor to construct stub.
- __ mov(edx, edi);
- }
+ // Pass new target to construct stub.
+ __ mov(edx, edi);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
@@ -1786,7 +1781,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ jmp(ecx);
__ bind(&non_function);
- if (IsSuperConstructorCall()) __ Drop(1);
__ mov(edx, edi);
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -1824,11 +1818,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
- const int with_types_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
- const int generic_offset =
- FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
- Label extra_checks_or_miss, call;
+ Label extra_checks_or_miss, call, call_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -1862,9 +1852,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + kPointerSize),
Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
- __ bind(&call);
+ __ bind(&call_function);
__ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -1899,10 +1890,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ mov(
FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
- // We have to update statistics for runtime profiling.
- __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
- __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
- __ jmp(&call);
+
+ __ bind(&call);
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -1919,8 +1911,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ cmp(edi, ecx);
__ j(equal, &miss);
- // Update stats.
- __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+ // Make sure the function belongs to the same native context.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
+ __ cmp(ecx, NativeContextOperand());
+ __ j(not_equal, &miss);
// Initialize the call counter.
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1939,7 +1934,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
}
- __ jmp(&call);
+ __ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
@@ -1962,7 +1957,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- __ CallRuntime(Runtime::kCallIC_Miss, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2274,14 +2269,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
__ j(not_zero, &slow_case);
- // Ensure that {function} is not bound.
- Register const shared_info = scratch;
- __ mov(shared_info,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ BooleanBitTest(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- __ j(not_zero, &slow_case);
-
// Get the "prototype" (or initial map) of the {function}.
__ mov(function_prototype,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2307,28 +2294,48 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the {function} prototype.
// Assume true, and change to false if not found.
- Register const object_prototype = object_map;
- Label done, loop;
+ Label done, loop, fast_runtime_fallback;
__ mov(eax, isolate()->factory()->true_value());
__ bind(&loop);
- __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, function_prototype);
+
+ // Check if the object needs to be access checked.
+ __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &fast_runtime_fallback, Label::kNear);
+ // Check if the current object is a Proxy.
+ __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+ __ j(equal, &fast_runtime_fallback, Label::kNear);
+
+ __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object, function_prototype);
__ j(equal, &done, Label::kNear);
- __ cmp(object_prototype, isolate()->factory()->null_value());
- __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ cmp(object, isolate()->factory()->null_value());
__ j(not_equal, &loop);
__ mov(eax, isolate()->factory()->false_value());
+
__ bind(&done);
__ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(0);
- // Slow-case: Call the runtime function.
+ // Found Proxy or access check needed: Call the runtime.
+ __ bind(&fast_runtime_fallback);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function_prototype);
+ __ PushReturnAddressFrom(scratch);
+ // Invalidate the instanceof cache.
+ __ Move(eax, Immediate(Smi::FromInt(0)));
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ TailCallRuntime(Runtime::kHasInPrototypeChain);
+
+ // Slow-case: Call the %InstanceOf runtime function.
__ bind(&slow_case);
- __ pop(scratch); // Pop return address.
- __ push(object); // Push {object}.
- __ push(function); // Push {function}.
- __ push(scratch); // Push return address.
- __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
+ __ PopReturnAddressTo(scratch);
+ __ Push(object);
+ __ Push(function);
+ __ PushReturnAddressFrom(scratch);
+ __ TailCallRuntime(Runtime::kInstanceOf);
}
@@ -2387,11 +2394,11 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
} else {
DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kNumberToSmi);
}
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
@@ -2421,7 +2428,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ push(object_);
__ SmiTag(index_);
__ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+ __ CallRuntime(Runtime::kStringCharCodeAtRT);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2467,7 +2474,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
- __ CallRuntime(Runtime::kStringCharFromCode, 1);
+ __ CallRuntime(Runtime::kStringCharFromCode);
if (!result_.is(eax)) {
__ mov(result_, eax);
}
@@ -2717,7 +2724,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubString);
__ bind(&single_char);
// eax: string
@@ -2762,7 +2769,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kStringToNumber);
__ bind(&not_string);
Label not_oddball;
@@ -2775,7 +2782,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+ __ TailCallRuntime(Runtime::kToNumber);
}
@@ -2794,7 +2801,7 @@ void ToLengthStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToLength, 1, 1);
+ __ TailCallRuntime(Runtime::kToLength);
}
@@ -2829,7 +2836,7 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToString, 1, 1);
+ __ TailCallRuntime(Runtime::kToString);
}
@@ -2992,7 +2999,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ Push(edx);
__ Push(eax);
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
@@ -3037,7 +3044,7 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (!Token::IsEqualityOp(op())) {
__ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
@@ -3305,9 +3312,9 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
__ push(right);
__ push(tmp1);
if (equality) {
- __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+ __ TailCallRuntime(Runtime::kStringEquals);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompare);
}
__ bind(&miss);
@@ -3315,19 +3322,20 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
}
-void CompareICStub::GenerateObjects(MacroAssembler* masm) {
- DCHECK(state() == CompareICState::OBJECT);
+void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::RECEIVER, state());
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
- __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
- __ j(not_equal, &miss, Label::kNear);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
+ __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(below, &miss, Label::kNear);
- DCHECK(GetCondition() == equal);
+ DCHECK_EQ(equal, GetCondition());
__ sub(eax, edx);
__ ret(0);
@@ -3336,7 +3344,7 @@ void CompareICStub::GenerateObjects(MacroAssembler* masm) {
}
-void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
Label miss;
Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
__ mov(ecx, edx);
@@ -3353,14 +3361,14 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ sub(eax, edx);
__ ret(0);
} else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(eax);
__ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
__ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -3377,7 +3385,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op())));
- __ CallRuntime(Runtime::kCompareIC_Miss, 3);
+ __ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ lea(edi, FieldOperand(eax, Code::kHeaderSize));
__ pop(eax);
@@ -3768,11 +3776,10 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
__ push(regs_.object());
- __ EnsureNotWhite(regs_.scratch0(), // The value.
- regs_.scratch1(), // Scratch.
- regs_.object(), // Scratch.
- &need_incremental_pop_object,
- Label::kNear);
+ __ JumpIfWhite(regs_.scratch0(), // The value.
+ regs_.scratch1(), // Scratch.
+ regs_.object(), // Scratch.
+ &need_incremental_pop_object, Label::kNear);
__ pop(regs_.object());
regs_.Restore(masm);
@@ -3792,89 +3799,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : element value to store
- // -- ecx : element index as smi
- // -- esp[0] : return address
- // -- esp[4] : array literal index in function
- // -- esp[8] : array literal
- // clobbers ebx, edx, edi
- // -----------------------------------
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label slow_elements_from_double;
- Label fast_elements;
-
- // Get array literal index, array literal and its map.
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-
- __ CheckFastElements(edi, &double_elements);
-
- // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
- __ JumpIfSmi(eax, &smi_element);
- __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
-
- __ bind(&slow_elements);
- __ pop(edi); // Pop return address and remember to put back later for tail
- // call.
- __ push(ebx);
- __ push(ecx);
- __ push(eax);
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(edx);
- __ push(edi); // Return return address so that tail call returns to right
- // place.
- __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
- __ bind(&slow_elements_from_double);
- __ pop(edx);
- __ jmp(&slow_elements);
-
- // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize));
- __ mov(Operand(ecx, 0), eax);
- // Update the write barrier for the array store.
- __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
- // and value is Smi.
- __ bind(&smi_element);
- __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
- FixedArrayBase::kHeaderSize), eax);
- __ ret(0);
-
- // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
-
- __ push(edx);
- __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(eax,
- edx,
- ecx,
- edi,
- &slow_elements_from_double,
- false);
- __ pop(edx);
- __ ret(0);
-}
-
-
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ call(ces.GetCode(), RelocInfo::CODE_TARGET);
@@ -4756,6 +4680,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label subclassing;
+ // Enter the context of the Array function.
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
__ cmp(edx, edi);
__ j(not_equal, &subclassing);
@@ -4777,27 +4704,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- __ pop(ecx); // return address.
- __ push(edi);
- __ push(edx);
-
- // Adjust argc.
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ add(eax, Immediate(2));
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ add(eax, Immediate(3));
break;
case NONE:
- __ mov(eax, Immediate(2));
+ __ mov(Operand(esp, 1 * kPointerSize), edi);
+ __ mov(eax, Immediate(3));
break;
case ONE:
- __ mov(eax, Immediate(3));
+ __ mov(Operand(esp, 2 * kPointerSize), edi);
+ __ mov(eax, Immediate(4));
break;
}
-
- __ push(ecx);
- __ JumpToExternalReference(
- ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(ebx);
+ __ PushReturnAddressFrom(ecx);
+ __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4914,7 +4840,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Pop(result_reg); // Pop return address.
__ Push(slot_reg);
__ Push(result_reg); // Push return address.
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 1, 1);
+ __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}
@@ -5037,8 +4963,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
__ Push(cell_reg); // Push return address.
__ TailCallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy,
- 2, 1);
+ : Runtime::kStoreGlobalViaContext_Sloppy);
}
@@ -5175,7 +5100,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
- __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
+ __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, &ok, Label::kNear);
__ cmp(map, isolate->factory()->heap_number_map());
@@ -5209,7 +5134,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index 7f99fe332b..c66166f7f0 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -33,21 +33,20 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-
-UnaryMathFunction CreateExpFunction() {
- // No SSE2 support
- return &std::exp;
+UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
+ return nullptr;
}
-UnaryMathFunction CreateSqrtFunction() {
+UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &std::sqrt;
+ if (buffer == nullptr) return nullptr;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
// Load double input into registers.
__ fld_d(MemOperand(esp, 4));
__ X87SetFPUCW(0x027F);
@@ -59,9 +58,9 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
+ return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
@@ -95,13 +94,14 @@ class LabelConverter {
};
-MemMoveFunction CreateMemMoveFunction() {
+MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return NULL;
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ if (buffer == nullptr) return nullptr;
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ CodeObjectRequired::kNo);
LabelConverter conv(buffer);
// Generated code is put into a fixed, unmovable buffer, and not into
@@ -201,7 +201,7 @@ MemMoveFunction CreateMemMoveFunction() {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
@@ -590,9 +590,11 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
#undef __
-CodeAgingHelper::CodeAgingHelper() {
+CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
+ USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
- CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
+ CodePatcher patcher(isolate, young_sequence_.start(),
+ young_sequence_.length());
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
@@ -639,7 +641,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(sequence, young_length);
+ CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
diff --git a/deps/v8/src/x87/codegen-x87.h b/deps/v8/src/x87/codegen-x87.h
index e786b84f04..170b40397a 100644
--- a/deps/v8/src/x87/codegen-x87.h
+++ b/deps/v8/src/x87/codegen-x87.h
@@ -5,7 +5,7 @@
#ifndef V8_X87_CODEGEN_X87_H_
#define V8_X87_CODEGEN_X87_H_
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 6352cf8045..5a1951a0ed 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -75,7 +75,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
- RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+ RelocInfo rinfo(isolate, 0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@@ -101,14 +101,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
} else {
pointer = code->instruction_start();
}
- CodePatcher patcher(pointer, 1);
+ CodePatcher patcher(isolate, pointer, 1);
patcher.masm()->int3();
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
int osr_offset = data->OsrPcOffset()->value();
if (osr_offset > 0) {
- CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+ 1);
osr_patcher.masm()->int3();
}
}
@@ -137,14 +138,13 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- CodePatcher patcher(call_address, patch_size());
+ CodePatcher patcher(isolate, call_address, patch_size());
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
- RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
+ RelocInfo rinfo(isolate, call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(deopt_entry),
- NULL);
+ reinterpret_cast<intptr_t>(deopt_entry), NULL);
reloc_info_writer.Write(&rinfo);
DCHECK_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@@ -157,18 +157,15 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
// Move the relocation info to the beginning of the byte array.
- int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
-
- // The relocation info is in place, update the size.
- reloc_info->set_length(new_reloc_size);
-
- // Handle the junk part after the new relocation info. We will create
- // a non-live object in the extra space at the end of the former reloc info.
- Address junk_address = reloc_info->address() + reloc_info->Size();
- DCHECK(junk_address <= reloc_end_address);
- isolate->heap()->CreateFillerObjectAt(junk_address,
- reloc_end_address - junk_address);
+ const int new_reloc_length = reloc_end_address - reloc_info_writer.pos();
+ MemMove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_length);
+
+ // Right trim the relocation info to free up remaining space.
+ const int delta = reloc_info->length() - new_reloc_length;
+ if (delta > 0) {
+ isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ reloc_info, delta);
+ }
}
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 9f37b85c87..5bd84fc298 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -68,6 +68,11 @@ const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
+const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
+const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
+
+
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -131,6 +136,13 @@ void TypeofDescriptor::InitializePlatformSpecific(
}
+void FastCloneRegExpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi, eax, ecx, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx, ecx};
@@ -191,7 +203,7 @@ void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// eax : number of arguments
// ebx : feedback vector
- // ecx : original constructor (for IsSuperConstructorCall)
+ // ecx : new target (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
// TODO(turbofan): So far we don't gather type feedback and hence skip the
@@ -210,6 +222,27 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
}
+void ConstructStubDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ // ebx : allocation site or undefined
+ Register registers[] = {edi, edx, eax, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ConstructTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ Register registers[] = {edi, edx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, ebx, eax};
@@ -348,6 +381,7 @@ void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
edi, // JSFunction
+ edx, // the new target
eax, // actual number of arguments
ebx, // expected number of arguments
};
@@ -380,27 +414,6 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
}
-void MathRoundVariantCallFromUnoptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void MathRoundVariantCallFromOptimizedCodeDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edi, // math rounding function
- edx, // vector slot id
- ebx // type vector
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -416,7 +429,7 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
eax, // argument count (not including receiver)
- edx, // original constructor
+ edx, // new target
edi, // constructor
ebx, // address of first argument
};
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index f5ecf5f677..7a0beb57bc 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -19,12 +19,12 @@ namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
-MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
- if (isolate() != NULL) {
- // TODO(titzer): should we just use a null handle here instead?
+ if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
}
@@ -793,6 +793,18 @@ void MacroAssembler::AssertFunction(Register object) {
}
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotABoundFunction);
+ Push(object);
+ CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotABoundFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -1035,10 +1047,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
- int offset =
- Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch1, FieldOperand(scratch1, offset));
- mov(scratch1, FieldOperand(scratch1, JSGlobalObject::kNativeContextOffset));
+ mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
// Check the context is a native context.
if (emit_debug_code()) {
@@ -1633,6 +1642,27 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
}
+void MacroAssembler::AllocateJSValue(Register result, Register constructor,
+ Register value, Register scratch,
+ Label* gc_required) {
+ DCHECK(!result.is(constructor));
+ DCHECK(!result.is(scratch));
+ DCHECK(!result.is(value));
+
+ // Allocate JSValue in new space.
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+
+ // Initialize the JSValue.
+ LoadGlobalFunctionInitialMap(constructor, scratch);
+ mov(FieldOperand(result, HeapObject::kMapOffset), scratch);
+ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
+ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
+ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
+ mov(FieldOperand(result, JSValue::kValueOffset), value);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+}
+
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
@@ -1700,16 +1730,16 @@ void MacroAssembler::CopyBytes(Register source,
}
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
+void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
+ Register end_address,
Register filler) {
Label loop, entry;
jmp(&entry);
bind(&loop);
- mov(Operand(start_offset, 0), filler);
- add(start_offset, Immediate(kPointerSize));
+ mov(Operand(current_address, 0), filler);
+ add(current_address, Immediate(kPointerSize));
bind(&entry);
- cmp(start_offset, end_offset);
+ cmp(current_address, end_address);
j(below, &loop);
}
@@ -1856,24 +1886,27 @@ void MacroAssembler::CallExternalReference(ExternalReference ref,
}
-void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size) {
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- Move(eax, Immediate(num_arguments));
- JumpToExternalReference(ext);
-}
-
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[8] : argument num_arguments - 1
+ // ...
+ // -- esp[8 * num_arguments] : argument 0 (receiver)
+ //
+ // For runtime functions with variable arguments:
+ // -- eax : number of arguments
+ // -----------------------------------
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size) {
- TailCallExternalReference(ExternalReference(fid, isolate()),
- num_arguments,
- result_size);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(eax, Immediate(function->nargs));
+ }
+ JumpToExternalReference(ExternalReference(fid, isolate()));
}
@@ -1887,8 +1920,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
@@ -1939,13 +1970,6 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (!definitely_matches) {
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
- if (!code_constant.is_null()) {
- mov(edx, Immediate(code_constant));
- add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
- } else if (!code_operand.is_reg(edx)) {
- mov(edx, code_operand);
- }
-
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call(adaptor, RelocInfo::CODE_TARGET);
@@ -1961,20 +1985,76 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
-void MacroAssembler::InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
+void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_flooding;
+ ExternalReference step_in_enabled =
+ ExternalReference::debug_step_in_enabled_address(isolate());
+ cmpb(Operand::StaticVariable(step_in_enabled), 0);
+ j(equal, &skip_flooding);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ if (expected.is_reg()) {
+ SmiTag(expected.reg());
+ Push(expected.reg());
+ }
+ if (actual.is_reg()) {
+ SmiTag(actual.reg());
+ Push(actual.reg());
+ }
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ Push(fun);
+ Push(fun);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+ if (actual.is_reg()) {
+ Pop(actual.reg());
+ SmiUntag(actual.reg());
+ }
+ if (expected.is_reg()) {
+ Pop(expected.reg());
+ SmiUntag(expected.reg());
+ }
+ }
+ bind(&skip_flooding);
+}
+
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(function.is(edi));
+ DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
+
+ if (call_wrapper.NeedsDebugStepCheck()) {
+ FloodFunctionIfStepping(function, new_target, expected, actual);
+ }
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ mov(edx, isolate()->factory()->undefined_value());
+ }
Label done;
bool definitely_mismatches = false;
- InvokePrologue(expected, actual, Handle<Code>::null(), code,
- &done, &definitely_mismatches, flag, Label::kNear,
- call_wrapper);
+ InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
+ Label::kNear, call_wrapper);
if (!definitely_mismatches) {
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Operand code = FieldOperand(function, JSFunction::kCodeEntryOffset);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
call(code);
@@ -1988,7 +2068,7 @@ void MacroAssembler::InvokeCode(const Operand& code,
}
-void MacroAssembler::InvokeFunction(Register fun,
+void MacroAssembler::InvokeFunction(Register fun, Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
@@ -1996,14 +2076,13 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(fun.is(edi));
- mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
ParameterCount expected(ebx);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, new_target, expected, actual, flag, call_wrapper);
}
@@ -2018,8 +2097,7 @@ void MacroAssembler::InvokeFunction(Register fun,
DCHECK(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, actual, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, actual, flag, call_wrapper);
}
@@ -2038,35 +2116,21 @@ void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- // Rely on the assertion to check that the number of provided
- // arguments match the expected number of arguments. Fake a
- // parameter count to avoid emitting code to do the check.
+ // Fake a parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinFunction(edi, native_context_index);
- InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
- expected, expected, flag, call_wrapper);
+ InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, JSGlobalObject::kNativeContextOffset));
+ mov(target, NativeContextOperand());
mov(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target,
- int native_context_index) {
- DCHECK(!target.is(edi));
- // Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, native_context_index);
- // Load the code entry point from the function into the target register.
- mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -2094,8 +2158,8 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
void MacroAssembler::LoadGlobalProxy(Register dst) {
- mov(dst, GlobalObjectOperand());
- mov(dst, FieldOperand(dst, JSGlobalObject::kGlobalProxyOffset));
+ mov(dst, NativeContextOperand());
+ mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}
@@ -2105,34 +2169,26 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
Register map_in_out,
Register scratch,
Label* no_map_match) {
- // Load the global or builtins object from the current context.
- mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(scratch, FieldOperand(scratch, JSGlobalObject::kNativeContextOffset));
+ DCHECK(IsFastElementsKind(expected_kind));
+ DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
- mov(scratch, Operand(scratch,
- Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
- size_t offset = expected_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- cmp(map_in_out, FieldOperand(scratch, offset));
+ mov(scratch, NativeContextOperand());
+ cmp(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
- offset = transitioned_kind * kPointerSize +
- FixedArrayBase::kHeaderSize;
- mov(map_in_out, FieldOperand(scratch, offset));
+ mov(map_in_out,
+ ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
- // Load the global or builtins object from the current context.
- mov(function,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- // Load the native context from the global or builtins object.
- mov(function, FieldOperand(function, JSGlobalObject::kNativeContextOffset));
+ // Load the native context from the current context.
+ mov(function, NativeContextOperand());
// Load the function from the native context.
- mov(function, Operand(function, Context::SlotOffset(index)));
+ mov(function, ContextOperand(function, index));
}
@@ -2664,10 +2720,10 @@ bool AreAliased(Register reg1,
#endif
-CodePatcher::CodePatcher(byte* address, int size)
+CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size)
: address_(address),
size_(size),
- masm_(NULL, address, size + Assembler::kGap) {
+ masm_(isolate, address, size + Assembler::kGap, CodeObjectRequired::kNo) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
@@ -2677,7 +2733,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- Assembler::FlushICacheWithoutIsolate(address_, size_);
+ Assembler::FlushICache(masm_.isolate(), address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
@@ -2737,10 +2793,9 @@ void MacroAssembler::JumpIfBlack(Register object,
Register scratch1,
Label* on_black,
Label::Distance on_black_near) {
- HasColor(object, scratch0, scratch1,
- on_black, on_black_near,
- 1, 0); // kBlackBitPattern.
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ HasColor(object, scratch0, scratch1, on_black, on_black_near, 1,
+ 1); // kBlackBitPattern.
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
@@ -2794,110 +2849,22 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
}
-void MacroAssembler::EnsureNotWhite(
- Register value,
- Register bitmap_scratch,
- Register mask_scratch,
- Label* value_is_white_and_not_data,
- Label::Distance distance) {
+void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
+ Register mask_scratch, Label* value_is_white,
+ Label::Distance distance) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
- Label done;
-
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(not_zero, &done, Label::kNear);
-
- if (emit_debug_code()) {
- // Check for impossible bit pattern.
- Label ok;
- push(mask_scratch);
- // shl. May overflow making the check conservative.
- add(mask_scratch, mask_scratch);
- test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- j(zero, &ok, Label::kNear);
- int3();
- bind(&ok);
- pop(mask_scratch);
- }
-
- // Value is white. We check whether it is data that doesn't need scanning.
- // Currently only checks for HeapNumber and non-cons strings.
- Register map = ecx; // Holds map while checking type.
- Register length = ecx; // Holds length of object after checking type.
- Label not_heap_number;
- Label is_data_object;
-
- // Check for heap-number
- mov(map, FieldOperand(value, HeapObject::kMapOffset));
- cmp(map, isolate()->factory()->heap_number_map());
- j(not_equal, &not_heap_number, Label::kNear);
- mov(length, Immediate(HeapNumber::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_heap_number);
- // Check for strings.
- DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
- // If it's a string and it's not a cons string then it's an object containing
- // no GC pointers.
- Register instance_type = ecx;
- movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
- j(not_zero, value_is_white_and_not_data);
- // It's a non-indirect (non-cons and non-slice) string.
- // If it's external, the length is just ExternalString::kSize.
- // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
- Label not_external;
- // External strings are the only ones with the kExternalStringTag bit
- // set.
- DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
- DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(instance_type, kExternalStringTag);
- j(zero, &not_external, Label::kNear);
- mov(length, Immediate(ExternalString::kSize));
- jmp(&is_data_object, Label::kNear);
-
- bind(&not_external);
- // Sequential string, either Latin1 or UC16.
- DCHECK(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- add(length, Immediate(0x04));
- // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
- // by 2. If we multiply the string length as smi by this, it still
- // won't overflow a 32-bit value.
- DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- DCHECK(SeqOneByteString::kMaxSize <=
- static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
- imul(length, FieldOperand(value, String::kLengthOffset));
- shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
-
- bind(&is_data_object);
- // Value is a data object, and it is white. Mark it black. Since we know
- // that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
- add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
- length);
- if (emit_debug_code()) {
- mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
- cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
- Check(less_equal, kLiveBytesCountOverflowChunkSize);
- }
-
- bind(&done);
+ j(zero, value_is_white, Label::kNear);
}
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 4535f8f9f4..9b6c5e8a0a 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -23,6 +23,7 @@ const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
@@ -40,33 +41,20 @@ enum PointersToHereCheck {
kPointersToHereAreAlwaysInteresting
};
-
-enum RegisterValueType {
- REGISTER_VALUE_IS_SMI,
- REGISTER_VALUE_IS_INT32
-};
-
+enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };
#ifdef DEBUG
-bool AreAliased(Register reg1,
- Register reg2,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg,
- Register reg7 = no_reg,
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+ Register reg4 = no_reg, Register reg5 = no_reg,
+ Register reg6 = no_reg, Register reg7 = no_reg,
Register reg8 = no_reg);
#endif
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
@@ -93,7 +81,13 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
- Label::Distance if_equal_distance = Label::kNear) {
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(equal, if_equal, if_equal_distance);
}
@@ -101,17 +95,20 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal,
- Label::Distance if_not_equal_distance = Label::kNear) {
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
CompareRoot(with, index);
j(not_equal, if_not_equal, if_not_equal_distance);
}
// ---------------------------------------------------------------------------
// GC Support
- enum RememberedSetFinalAction {
- kReturnAtEnd,
- kFallThroughAtEnd
- };
+ enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
@@ -121,63 +118,41 @@ class MacroAssembler: public Assembler {
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
- void CheckPageFlag(Register object,
- Register scratch,
- int mask,
- Condition cc,
+ void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
void CheckPageFlagForMap(
- Handle<Map> map,
- int mask,
- Condition cc,
- Label* condition_met,
+ Handle<Map> map, int mask, Condition cc, Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
- void JumpIfNotInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
- void JumpIfInNewSpace(Register object,
- Register scratch,
- Label* branch,
+ void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has a given incremental marking color. Also uses ecx!
- void HasColor(Register object,
- Register scratch0,
- Register scratch1,
- Label* has_color,
- Label::Distance has_color_distance,
- int first_bit,
- int second_bit);
-
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
+ void HasColor(Register object, Register scratch0, Register scratch1,
+ Label* has_color, Label::Distance has_color_distance,
+ int first_bit, int second_bit);
+
+ void JumpIfBlack(Register object, Register scratch0, Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -273,12 +248,11 @@ class MacroAssembler: public Assembler {
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
+ void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -326,37 +300,29 @@ class MacroAssembler: public Assembler {
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- InvokeCode(Operand(code), expected, actual, flag, call_wrapper);
- }
- void InvokeCode(const Operand& code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
- void InvokeFunction(Register function,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, Register new_target,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void InvokeFunction(Register function,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ void InvokeFunction(Register function, const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
+ const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// Invoke specified builtin JavaScript function.
@@ -366,8 +332,6 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, int native_context_index);
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, int native_context_index);
// Expression support
// Support for constant splitting.
@@ -384,30 +348,24 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
+ void CheckFastElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
+ void CheckFastObjectElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
+ void CheckFastSmiElements(Register map, Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register key,
- Register scratch,
- Label* fail,
+ void StoreNumberToDoubleElements(Register maybe_number, Register elements,
+ Register key, Register scratch, Label* fail,
int offset = 0);
// Compare an object's map with the specified map.
@@ -417,9 +375,7 @@ class MacroAssembler: public Assembler {
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specified map.
- void CheckMap(Register obj,
- Handle<Map> map,
- Label* fail,
+ void CheckMap(Register obj, Handle<Map> map, Label* fail,
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified weak map and branch
@@ -434,8 +390,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectStringType(Register heap_object,
- Register map,
+ Condition IsObjectStringType(Register heap_object, Register map,
Register instance_type);
// Check if the object in register heap_object is a name. Afterwards the
@@ -443,8 +398,7 @@ class MacroAssembler: public Assembler {
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
- Condition IsObjectNameType(Register heap_object,
- Register map,
+ Condition IsObjectNameType(Register heap_object, Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
@@ -493,22 +447,19 @@ class MacroAssembler: public Assembler {
void LoadUint32NoSSE2(const Operand& src);
// Jump the register contains a smi.
- inline void JumpIfSmi(Register value,
- Label* smi_label,
+ inline void JumpIfSmi(Register value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if the operand is a smi.
- inline void JumpIfSmi(Operand value,
- Label* smi_label,
+ inline void JumpIfSmi(Operand value, Label* smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(zero, smi_label, distance);
}
// Jump if register contain a non-smi.
- inline void JumpIfNotSmi(Register value,
- Label* not_smi_label,
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
j(not_zero, not_smi_label, distance);
@@ -562,6 +513,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -581,22 +536,15 @@ class MacroAssembler: public Assembler {
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss);
+ void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
+ Register scratch2, Label* miss);
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
+ void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+ Register r0, Register r1, Register r2,
Register result);
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -610,48 +558,29 @@ class MacroAssembler: public Assembler {
// result is known to be the allocation top on entry (could be result_end
// from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void Allocate(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(int header_size,
- ScaleFactor element_size,
- Register element_count,
- RegisterValueType element_count_type,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
-
- void Allocate(Register object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
+
+ void Allocate(int header_size, ScaleFactor element_size,
+ Register element_count, RegisterValueType element_count_type,
+ Register result, Register result_end, Register scratch,
+ Label* gc_required, AllocationFlags flags);
+
+ void Allocate(Register object_size, Register result, Register result_end,
+ Register scratch, Label* gc_required, AllocationFlags flags);
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
// jumps to gc_required if new space is full.
- void AllocateHeapNumber(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+ Label* gc_required, MutableMode mode = IMMUTABLE);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
+ void AllocateTwoByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3, Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -660,36 +589,34 @@ class MacroAssembler: public Assembler {
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteConsString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteConsString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
// Allocate a raw sliced string object. Only the map field of the result is
// initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
+ void AllocateTwoByteSlicedString(Register result, Register scratch1,
+ Register scratch2, Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of index and scratch are destroyed.
- void CopyBytes(Register source,
- Register destination,
- Register length,
+ void CopyBytes(Register source, Register destination, Register length,
Register scratch);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@@ -738,31 +665,29 @@ class MacroAssembler: public Assembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id, int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(ExternalReference ref, int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
-
// Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
+ void TailCallRuntime(Runtime::FunctionId fid);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -804,6 +729,7 @@ class MacroAssembler: public Assembler {
void Push(const Operand& src) { push(src); }
void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
+ void Pop(const Operand& dst) { pop(dst); }
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
@@ -829,9 +755,11 @@ class MacroAssembler: public Assembler {
void Move(Register dst, const Immediate& x);
void Move(const Operand& dst, const Immediate& x);
+ void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
+
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+ void Push(Smi* smi) { Push(Immediate(smi)); }
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -854,7 +782,6 @@ class MacroAssembler: public Assembler {
void IncrementCounter(Condition cc, StatsCounter* counter, int value);
void DecrementCounter(Condition cc, StatsCounter* counter, int value);
-
// ---------------------------------------------------------------------------
// Debugging
@@ -905,10 +832,8 @@ class MacroAssembler: public Assembler {
void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
Label::Distance distance = Label::kFar);
- void EmitSeqStringSetCharCheck(Register string,
- Register index,
- Register value,
- uint32_t encoding_mask);
+ void EmitSeqStringSetCharCheck(Register string, Register index,
+ Register value, uint32_t encoding_mask);
static int SafepointRegisterStackIndex(Register reg) {
return SafepointRegisterStackIndex(reg.code());
@@ -958,14 +883,10 @@ class MacroAssembler: public Assembler {
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
- const ParameterCount& actual,
- Handle<Code> code_constant,
- const Operand& code_operand,
- Label* done,
- bool* definitely_mismatches,
- InvokeFlag flag,
+ const ParameterCount& actual, Label* done,
+ bool* definitely_mismatches, InvokeFlag flag,
Label::Distance done_distance,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@@ -973,18 +894,14 @@ class MacroAssembler: public Assembler {
void LeaveExitFrameEpilogue(bool restore_context);
// Allocation support helpers.
- void LoadAllocationTopHelper(Register result,
- Register scratch,
+ void LoadAllocationTopHelper(Register result, Register scratch,
AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end,
- Register scratch,
+ void UpdateAllocationTopHelper(Register result_end, Register scratch,
AllocationFlags flags);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cc,
+ void InNewSpace(Register object, Register scratch, Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
@@ -992,8 +909,7 @@ class MacroAssembler: public Assembler {
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
// unchanged.
- inline void GetMarkBits(Register addr_reg,
- Register bitmap_reg,
+ inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
@@ -1005,7 +921,6 @@ class MacroAssembler: public Assembler {
friend class StandardFrame;
};
-
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. Is not legal to emit
@@ -1013,19 +928,18 @@ class MacroAssembler: public Assembler {
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
+ CodePatcher(Isolate* isolate, byte* address, int size);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
- byte* address_; // The address of the code being patched.
- int size_; // Number of bytes of the expected patch size.
+ byte* address_; // The address of the code being patched.
+ int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
-
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1034,39 +948,30 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
-
// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
+inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-
-inline Operand FixedArrayElementOperand(Register array,
- Register index_as_smi,
+inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
-
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}
-
-inline Operand GlobalObjectOperand() {
- return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand NativeContextOperand() {
+ return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
-
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
@@ -1088,7 +993,6 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/simulator-x87.h b/deps/v8/src/x87/simulator-x87.h
index 3071842f20..667f0fd6d7 100644
--- a/deps/v8/src/x87/simulator-x87.h
+++ b/deps/v8/src/x87/simulator-x87.h
@@ -12,7 +12,7 @@ namespace internal {
// Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
@@ -21,7 +21,8 @@ typedef int (*regexp_matcher)(String*, int, const byte*,
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
+ p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
@@ -36,11 +37,13 @@ class SimulatorStack : public v8::internal::AllStatic {
return c_limit;
}
- static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+ static inline uintptr_t RegisterCTryCatch(Isolate* isolate,
+ uintptr_t try_catch_address) {
+ USE(isolate);
return try_catch_address;
}
- static inline void UnregisterCTryCatch() { }
+ static inline void UnregisterCTryCatch(Isolate* isolate) { USE(isolate); }
};
} // namespace internal
diff --git a/deps/v8/src/zone-containers.h b/deps/v8/src/zone-containers.h
index 8daf0dd657..79b168c37e 100644
--- a/deps/v8/src/zone-containers.h
+++ b/deps/v8/src/zone-containers.h
@@ -114,12 +114,12 @@ class ZoneSet : public std::set<K, Compare, zone_allocator<K>> {
// a zone allocator.
template <typename K, typename V, typename Compare = std::less<K>>
class ZoneMap
- : public std::map<K, V, Compare, zone_allocator<std::pair<K, V>>> {
+ : public std::map<K, V, Compare, zone_allocator<std::pair<const K, V>>> {
public:
// Constructs an empty map.
explicit ZoneMap(Zone* zone)
- : std::map<K, V, Compare, zone_allocator<std::pair<K, V>>>(
- Compare(), zone_allocator<std::pair<K, V>>(zone)) {}
+ : std::map<K, V, Compare, zone_allocator<std::pair<const K, V>>>(
+ Compare(), zone_allocator<std::pair<const K, V>>(zone)) {}
};
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index ea8a397300..7fae8f355a 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -3,6 +3,7 @@ per-file *-mips*=gergely.kis@imgtec.com
per-file *-mips*=akos.palfi@imgtec.com
per-file *-mips*=balazs.kilvady@imgtec.com
per-file *-mips*=dusan.milosavljevic@imgtec.com
+per-file *-mips*=ivica.bogosavljevic@imgtec.com
per-file *-ppc*=dstence@us.ibm.com
per-file *-ppc*=joransiu@ca.ibm.com
per-file *-ppc*=jyan@ca.ibm.com
@@ -10,3 +11,7 @@ per-file *-ppc*=mbrandy@us.ibm.com
per-file *-ppc*=michael_dawson@ca.ibm.com
per-file *-x87*=chunyang.dai@intel.com
per-file *-x87*=weiliang.lin@intel.com
+per-file expression-type-collector*=aseemgarg@chromium.org
+per-file expression-type-collector*=bradnelson@chromium.org
+per-file test-asm-validator.cc=aseemgarg@chromium.org
+per-file test-asm-validator.cc=bradnelson@chromium.org
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 8b32b63160..9ef2d9bfb2 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -52,6 +52,7 @@
'compiler/test-basic-block-profiler.cc',
'compiler/test-branch-combine.cc',
'compiler/test-changes-lowering.cc',
+ 'compiler/test-code-stub-assembler.cc',
'compiler/test-gap-resolver.cc',
'compiler/test-graph-visualizer.cc',
'compiler/test-instruction.cc',
@@ -93,17 +94,23 @@
'gay-fixed.cc',
'gay-precision.cc',
'gay-shortest.cc',
- 'heap-tester.h',
+ 'heap/heap-tester.h',
+ 'heap/test-alloc.cc',
+ 'heap/test-compaction.cc',
+ 'heap/test-heap.cc',
+ 'heap/test-incremental-marking.cc',
+ 'heap/test-lab.cc',
+ 'heap/test-mark-compact.cc',
+ 'heap/test-spaces.cc',
+ 'heap/utils-inl.h',
'print-extension.cc',
'profiler-extension.cc',
'test-accessors.cc',
- 'test-alloc.cc',
'test-api.cc',
'test-api.h',
- # TODO(epertoso): re-enable the following test after the API change is
- # checked in.
- # 'test-api-accessors.cc',
+ 'test-api-accessors.cc',
'test-api-interceptors.cc',
+ 'test-api-fast-accessor-builder.cc',
'test-array-list.cc',
'test-ast.cc',
'test-ast-expression-visitor.cc',
@@ -137,23 +144,22 @@
'test-global-object.cc',
'test-hashing.cc',
'test-hashmap.cc',
- 'test-heap.cc',
'test-heap-profiler.cc',
'test-hydrogen-types.cc',
'test-identity-map.cc',
- 'test-incremental-marking.cc',
+ 'test-inobject-slack-tracking.cc',
'test-list.cc',
'test-liveedit.cc',
'test-lockers.cc',
'test-log.cc',
'test-microtask-delivery.cc',
- 'test-mark-compact.cc',
'test-mementos.cc',
'test-object-observe.cc',
'test-parsing.cc',
'test-platform.cc',
'test-profile-generator.cc',
'test-random-number-generator.cc',
+ 'test-receiver-check-hidden-prototype.cc',
'test-regexp.cc',
'test-reloc-info.cc',
'test-representation.cc',
@@ -161,12 +167,12 @@
'test-serialize.cc',
'test-simd.cc',
'test-slots-buffer.cc',
- 'test-spaces.cc',
'test-strings.cc',
'test-symbols.cc',
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
+ 'test-trace-event.cc',
'test-transitions.cc',
'test-typedarrays.cc',
'test-types.cc',
@@ -180,6 +186,11 @@
'test-weakmaps.cc',
'test-weaksets.cc',
'trace-extension.cc',
+ 'wasm/test-run-wasm.cc',
+ 'wasm/test-run-wasm-js.cc',
+ 'wasm/test-run-wasm-module.cc',
+ 'wasm/test-signatures.h',
+ 'wasm/wasm-run-utils.h',
],
'conditions': [
['v8_target_arch=="ia32"', {
@@ -296,11 +307,6 @@
}, {
'dependencies': ['../../tools/gyp/v8.gyp:v8'],
}],
- ['v8_wasm!=0', {
- 'dependencies': [
- '../../third_party/wasm/test/cctest/wasm/wasm.gyp:wasm_cctest'
- ],
- }],
],
},
{
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 4cfb8b0d70..fe9ae6e38d 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -341,6 +341,13 @@ static inline v8::Local<v8::String> v8_str(const char* x) {
}
+static inline v8::Local<v8::String> v8_str(v8::Isolate* isolate,
+ const char* x) {
+ return v8::String::NewFromUtf8(isolate, x, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
+
+
static inline v8::Local<v8::Symbol> v8_symbol(const char* name) {
return v8::Symbol::New(v8::Isolate::GetCurrent(), v8_str(name));
}
@@ -361,6 +368,12 @@ static inline v8::Local<v8::Script> v8_compile(const char* x) {
}
+static inline int32_t v8_run_int32value(v8::Local<v8::Script> script) {
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ return script->Run(context).ToLocalChecked()->Int32Value(context).FromJust();
+}
+
+
static inline v8::Local<v8::Script> CompileWithOrigin(
v8::Local<v8::String> source, v8::Local<v8::String> origin_url) {
v8::ScriptOrigin origin(origin_url);
@@ -392,6 +405,18 @@ static inline v8::MaybeLocal<v8::Value> CompileRun(
}
+static inline v8::Local<v8::Value> CompileRunChecked(v8::Isolate* isolate,
+ const char* source) {
+ v8::Local<v8::String> source_string =
+ v8::String::NewFromUtf8(isolate, source, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(context, source_string).ToLocalChecked();
+ return script->Run(context).ToLocalChecked();
+}
+
+
static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
v8::Local<v8::Value> result;
if (v8_compile(source)
@@ -525,6 +550,12 @@ static inline void ExpectUndefined(const char* code) {
}
+static inline void ExpectNull(const char* code) {
+ v8::Local<v8::Value> result = CompileRun(code);
+ CHECK(result->IsNull());
+}
+
+
static inline void CheckDoubleEquals(double expected, double actual) {
const double kEpsilon = 1e-10;
CHECK_LE(expected, actual + kEpsilon);
@@ -532,134 +563,18 @@ static inline void CheckDoubleEquals(double expected, double actual) {
}
-static int LenFromSize(int size) {
- return (size - i::FixedArray::kHeaderSize) / i::kPointerSize;
-}
-
-
-static inline void CreatePadding(i::Heap* heap, int padding_size,
- i::PretenureFlag tenure) {
- const int max_number_of_objects = 20;
- v8::internal::Handle<v8::internal::FixedArray>
- big_objects[max_number_of_objects];
- i::Isolate* isolate = heap->isolate();
- int allocate_memory;
- int length;
- int free_memory = padding_size;
- if (tenure == i::TENURED) {
- int current_free_memory =
- static_cast<int>(*heap->old_space()->allocation_limit_address() -
- *heap->old_space()->allocation_top_address());
- CHECK(padding_size <= current_free_memory || current_free_memory == 0);
- } else {
- heap->new_space()->DisableInlineAllocationSteps();
- int current_free_memory =
- static_cast<int>(*heap->new_space()->allocation_limit_address() -
- *heap->new_space()->allocation_top_address());
- CHECK(padding_size <= current_free_memory || current_free_memory == 0);
- }
- for (int i = 0; i < max_number_of_objects && free_memory > 0; i++) {
- if (free_memory > i::Page::kMaxRegularHeapObjectSize) {
- allocate_memory = i::Page::kMaxRegularHeapObjectSize;
- length = LenFromSize(allocate_memory);
- } else {
- allocate_memory = free_memory;
- length = LenFromSize(allocate_memory);
- if (length <= 0) {
- // Not enough room to create another fixed array. Let's create a filler.
- heap->CreateFillerObjectAt(*heap->old_space()->allocation_top_address(),
- free_memory);
- break;
- }
- }
- big_objects[i] = isolate->factory()->NewFixedArray(length, tenure);
- CHECK((tenure == i::NOT_TENURED && heap->InNewSpace(*big_objects[i])) ||
- (tenure == i::TENURED && heap->InOldSpace(*big_objects[i])));
- free_memory -= allocate_memory;
- }
-}
-
-
-// Helper function that simulates a full new-space in the heap.
-static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
- space->DisableInlineAllocationSteps();
- int space_remaining = static_cast<int>(*space->allocation_limit_address() -
- *space->allocation_top_address());
- if (space_remaining == 0) return false;
- CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
- return true;
-}
-
-
-// Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
- int extra_bytes) {
- space->DisableInlineAllocationSteps();
- int space_remaining = static_cast<int>(*space->allocation_limit_address() -
- *space->allocation_top_address());
- CHECK(space_remaining >= extra_bytes);
- int new_linear_size = space_remaining - extra_bytes;
- if (new_linear_size == 0) return;
- CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
-}
-
-
-static inline void FillCurrentPage(v8::internal::NewSpace* space) {
- AllocateAllButNBytes(space, 0);
-}
-
-
-static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
- FillCurrentPage(space);
- while (FillUpOnePage(space)) {
- }
-}
-
-
-// Helper function that simulates a full old-space in the heap.
-static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
- space->EmptyAllocationInfo();
- space->ResetFreeList();
- space->ClearStats();
-}
-
-
-// Helper function that simulates many incremental marking steps until
-// marking is completed.
-static inline void SimulateIncrementalMarking(i::Heap* heap,
- bool force_completion = true) {
- i::MarkCompactCollector* collector = heap->mark_compact_collector();
- i::IncrementalMarking* marking = heap->incremental_marking();
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
- }
- CHECK(marking->IsMarking() || marking->IsStopped());
- if (marking->IsStopped()) {
- heap->StartIncrementalMarking();
- }
- CHECK(marking->IsMarking());
- if (!force_completion) return;
-
- while (!marking->IsComplete()) {
- marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- if (marking->IsReadyToOverApproximateWeakClosure()) {
- marking->FinalizeIncrementally();
- }
- }
- CHECK(marking->IsComplete());
-}
-
-
static void DummyDebugEventListener(
const v8::Debug::EventDetails& event_details) {}
-static inline void EnableDebugger() {
- v8::Debug::SetDebugEventListener(&DummyDebugEventListener);
+static inline void EnableDebugger(v8::Isolate* isolate) {
+ v8::Debug::SetDebugEventListener(isolate, &DummyDebugEventListener);
}
-static inline void DisableDebugger() { v8::Debug::SetDebugEventListener(NULL); }
+static inline void DisableDebugger(v8::Isolate* isolate) {
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+}
static inline void EmptyMessageQueues(v8::Isolate* isolate) {
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index d01a2474d4..80837534ce 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -33,12 +33,6 @@
##############################################################################
- # TODO(danno): These tests fail because the incoming descriptor for JavaScript
- # calls has nothing to do with the interface descriptors of some code stubs,
- # and they cannot be used interchangably.
- 'test-run-stubs/RunOptimizedMathFloorStub': [SKIP],
- 'test-run-stubs/RunStringAddTFStub': [SKIP],
-
# BUG(382): Weird test. Can't guarantee that it never times out.
'test-api/ApplyInterruption': [PASS, TIMEOUT],
@@ -96,8 +90,8 @@
'test-debug/RecursiveBreakpoints': [PASS, FLAKY],
'test-debug/RecursiveBreakpointsGlobal': [PASS, FLAKY],
- # Fails sometimes.
- 'test-debug/ProcessDebugMessagesThreaded': [PASS, FLAKY],
+ # BUG(v8:4358). Hangs flakily.
+ 'test-debug/ProcessDebugMessagesThreaded': [SKIP],
# BUG(2340). Preprocessing stack traces is disabled at the moment.
'test-heap/PreprocessStackTrace': [FAIL],
@@ -121,10 +115,13 @@
# optimized and hence scripts don't "return" the correct value. Fix this.
'test-compiler/CompileFunctionInContext*': [PASS, NO_VARIANTS],
+ # TODO(bmeurer): TurboFan embeds strong references to all kinds of objects
+ # via deoptimization data (Crankshaft also does this, but lack proper test
+ # coverage).
+ 'test-heap/ObjectsInOptimizedCodeAreWeak': [PASS, NO_VARIANTS],
+
# TurboFan doesn't support allocation sites currently.
- 'test-heap/CellsInOptimizedCodeAreWeak': [PASS, NO_VARIANTS],
'test-heap/EnsureAllocationSiteDependentCodesProcessed': [PASS, NO_VARIANTS],
- 'test-heap/ObjectsInOptimizedCodeAreWeak': [PASS, NO_VARIANTS],
'test-heap/OptimizedPretenuringAllocationFolding': [PASS, NO_VARIANTS],
'test-heap/OptimizedPretenuringdoubleArrayLiterals': [PASS, NO_VARIANTS],
'test-heap/OptimizedPretenuringDoubleArrayProperties': [PASS, NO_VARIANTS],
@@ -240,6 +237,11 @@
# BUG(3331). Fails on windows.
'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
+ # BUG(v8:4573).
+ 'test-api/MultipleIsolatesOnIndividualThreads': [PASS, NO_VARIANTS],
+
+ # BUG(v8:4642).
+ 'test-lockers/LockAndUnlockDifferentIsolates': [PASS, NO_VARIANTS],
}], # 'system == windows'
##############################################################################
@@ -252,6 +254,26 @@
}], # 'system == macos'
##############################################################################
+['byteorder == big', {
+ # TODO(mips-team): Fix Wasm for big-endian.
+ 'test-run-wasm-module/Run_WasmModule_CallAdd_rev': [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_ReadLoadedDataSegment': [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_CheckMemoryIsZero': [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_Global': [SKIP],
+ 'test-run-wasm/Run_WasmInt32*': [SKIP],
+ 'test-run-wasm/Run_Wasm_TableSwitch*': [SKIP],
+ 'test-run-wasm/Run_Wasm_StoreMemI32_offset': [SKIP],
+ 'test-run-wasm/Run_Wasm_Int32LoadInt16_*': [SKIP],
+ 'test-run-wasm/Run_WasmMixedGlobals': [SKIP],
+ 'test-run-wasm/Run_WasmCall*': [SKIP],
+ 'test-run-wasm/Run_WasmMixedCall_*': [SKIP],
+ 'test-run-wasm/Run_WasmInt64*': [SKIP],
+ 'test-run-wasm/Run_Wasm_LoadStoreI64_sx': [SKIP],
+ 'test-run-wasm/Run_WASM_Int64DivS_byzero_const': [SKIP],
+ 'test-run-wasm/Run_TestI64WasmRunner': [SKIP],
+}], # 'byteorder == big'
+
+##############################################################################
['arch == arm', {
'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
@@ -308,6 +330,9 @@
# BUG(v8:3154).
'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
+ # TODO(mips-team): Improve code-size on large RegExp's.
+ 'test-heap/TestSizeOfRegExpCode': [SKIP],
+
# BUG(1075): Unresolved crashes on MIPS also.
'test-serialize/Deserialize': [SKIP],
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
@@ -453,8 +478,11 @@
##############################################################################
['system == aix or (arch == ppc64 and byteorder == big)', {
- # Test currently broken for platforms with function desciptors
+ # TODO(ppc): Fix for platforms with function desciptors.
'test-run-machops/RunComputedCodeObject' : [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_Return114' : [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_CallAdd' : [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_CallMain_recursive' : [SKIP],
}], # 'system == aix or (arch == ppc64 and byteorder == big)'
@@ -467,4 +495,95 @@
'test-api/ExternalArrays': [PASS, SLOW],
}], # 'arch == ppc64 and simulator_run == True'
+
+['ignition == True', {
+ 'test-api/*' : [SKIP],
+ 'test-cpu-profiler/*' : [SKIP],
+ 'test-debug/*' : [SKIP],
+ 'test-func-name-inference/*' : [SKIP],
+ 'test-inobject-slack-tracking/*' : [SKIP],
+ 'test-run-jsexceptions/*' : [SKIP],
+ 'test-serialize/*' : [SKIP],
+
+ 'test-api-interceptors/InterceptorCallICInvalidatedConstantFunctionViaGlobal': [SKIP],
+ 'test-api-interceptors/InterceptorLoadICInvalidatedCallbackViaGlobal': [SKIP],
+ 'test-api-interceptors/InterceptorLoadICInvalidatedFieldViaGlobal': [SKIP],
+ 'test-bytecode-generator/TryCatch': [SKIP],
+ 'test-bytecode-generator/TryFinally': [SKIP],
+ 'test-compiler/C2JSFrames': [SKIP],
+ 'test-compiler/FeedbackVectorPreservedAcrossRecompiles': [SKIP],
+ 'test-compiler/FeedbackVectorUnaffectedByScopeChanges': [SKIP],
+ 'test-compiler/OptimizedCodeSharing2': [SKIP],
+ 'test-compiler/OptimizedCodeSharing3': [SKIP],
+ 'test-compiler/Print': [SKIP],
+ 'test-compiler/UncaughtThrow': [SKIP],
+ 'test-decls/CrossScriptDynamicLookup': [SKIP],
+ 'test-decls/Regress425510': [SKIP],
+ 'test-feedback-vector/VectorCallICStates': [SKIP],
+ 'test-heap/AddInstructionChangesNewSpacePromotion': [SKIP],
+ 'test-heap/ArrayShiftSweeping': [SKIP],
+ 'test-heap/CanonicalSharedFunctionInfo': [SKIP],
+ 'test-heap/CellsInOptimizedCodeAreWeak': [SKIP],
+ 'test-heap/CompilationCacheCachingBehavior': [SKIP],
+ 'test-heap/CountForcedGC': [SKIP],
+ 'test-heap/IncrementalMarkingClearsMonomorphicConstructor': [SKIP],
+ 'test-heap/IncrementalMarkingPreservesMonomorphicCallIC': [SKIP],
+ 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [SKIP],
+ 'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
+ 'test-heap-profiler/HeapSnapshotCollection': [SKIP],
+ 'test-heap-profiler/HeapSnapshotSimd': [SKIP],
+ 'test-heap-profiler/HeapSnapshotWeakCollection': [SKIP],
+ 'test-heap/OptimizedAllocationAlwaysInNewSpace': [SKIP],
+ 'test-heap/PromotionQueue': [SKIP],
+ 'test-heap/Regress169209': [SKIP],
+ 'test-heap/Regress1878': [SKIP],
+ 'test-heap/Regress357137': [SKIP],
+ 'test-heap/Regress3631': [SKIP],
+ 'test-heap/Regress388880': [SKIP],
+ 'test-heap/TestCodeFlushingIncrementalAbort': [SKIP],
+ 'test-heap/TestCodeFlushingIncrementalScavenge': [SKIP],
+ 'test-heap/TestCodeFlushingIncremental': [SKIP],
+ 'test-heap/TestCodeFlushingPreAged': [SKIP],
+ 'test-heap/TestCodeFlushing': [SKIP],
+ 'test-heap/WeakFunctionInConstructor': [SKIP],
+ 'test-log-stack-tracer/CFromJSStackTrace': [SKIP],
+ 'test-log-stack-tracer/JsEntrySp': [SKIP],
+ 'test-log-stack-tracer/PureCStackTrace': [SKIP],
+ 'test-log-stack-tracer/PureJSStackTrace': [SKIP],
+ 'test-parsing/DestructuringNegativeTests': [SKIP],
+ 'test-parsing/StrongModeFreeVariablesDeclaredByLanguage': [SKIP],
+ 'test-parsing/StrongModeFreeVariablesDeclaredByPreviousScript': [SKIP],
+ 'test-parsing/StrongModeFreeVariablesDeclaredInGlobalPrototype': [SKIP],
+ 'test-pipeline/PipelineGeneric': [SKIP],
+ 'test-pipeline/PipelineTyped': [SKIP],
+ 'test-profile-generator/BailoutReason': [SKIP],
+ 'test-profile-generator/LineNumber': [SKIP],
+ 'test-profile-generator/ProfileNodeScriptId': [SKIP],
+ 'test-profile-generator/RecordStackTraceAtStartProfiling': [SKIP],
+ 'test-run-inlining/InlineTwice': [SKIP],
+ 'test-run-jsbranches/ForOfContinueStatement': [SKIP],
+ 'test-run-jscalls/LookupCall': [SKIP],
+ 'test-run-jsobjects/ArgumentsRest': [SKIP],
+ 'test-run-jsops/ClassLiteral': [SKIP],
+ 'test-run-jsops/LookupLoad': [SKIP],
+ 'test-run-jsops/LookupStore': [SKIP],
+ 'test-run-variables/ContextInitializeVariables': [SKIP],
+ 'test-run-variables/ContextLoadVariables': [SKIP],
+ 'test-run-variables/ContextStoreVariables': [SKIP],
+ 'test-run-variables/StackInitializeVariables': [SKIP],
+ 'test-run-variables/StackLoadVariables': [SKIP],
+ 'test-run-variables/StackStoreVariables': [SKIP],
+ 'test-sampler-api/StackFramesConsistent': [SKIP],
+ 'test-thread-termination/TerminateCancelTerminateFromThreadItself': [SKIP],
+ 'test-thread-termination/TerminateFromOtherThreadWhileMicrotaskRunning': [SKIP],
+ 'test-thread-termination/TerminateOnlyV8ThreadFromThreadItselfNoLoop': [SKIP],
+ 'test-thread-termination/TerminationInInnerTryCall': [SKIP],
+ 'test-unscopables-hidden-prototype/Unscopables': [SKIP],
+}], # ignition == True
+
+['ignition == True and arch == arm64', {
+ 'test-js-arm64-variables/lookup_slots': [SKIP],
+ 'test-spaces/SizeOfFirstPageIsLargeEnough': [SKIP],
+}], # ignition == True and arch == arm64
+
]
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index 5a72c551ab..13ef38aaed 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -5,27 +5,27 @@
#ifndef V8_COMPILER_C_SIGNATURE_H_
#define V8_COMPILER_C_SIGNATURE_H_
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
namespace compiler {
#define FOREACH_CTYPE_MACHINE_TYPE_MAPPING(V) \
- V(void, kMachNone) \
- V(bool, kMachUint8) \
- V(int8_t, kMachInt8) \
- V(uint8_t, kMachUint8) \
- V(int16_t, kMachInt16) \
- V(uint16_t, kMachUint16) \
- V(int32_t, kMachInt32) \
- V(uint32_t, kMachUint32) \
- V(int64_t, kMachInt64) \
- V(uint64_t, kMachUint64) \
- V(float, kMachFloat32) \
- V(double, kMachFloat64) \
- V(void*, kMachPtr) \
- V(int*, kMachPtr)
+ V(void, MachineType::None()) \
+ V(bool, MachineType::Uint8()) \
+ V(int8_t, MachineType::Int8()) \
+ V(uint8_t, MachineType::Uint8()) \
+ V(int16_t, MachineType::Int16()) \
+ V(uint16_t, MachineType::Uint16()) \
+ V(int32_t, MachineType::Int32()) \
+ V(uint32_t, MachineType::Uint32()) \
+ V(int64_t, MachineType::Int64()) \
+ V(uint64_t, MachineType::Uint64()) \
+ V(float, MachineType::Float32()) \
+ V(double, MachineType::Float64()) \
+ V(void*, MachineType::Pointer()) \
+ V(int*, MachineType::Pointer())
template <typename T>
inline MachineType MachineTypeForC() {
@@ -33,7 +33,7 @@ inline MachineType MachineTypeForC() {
// All other types T must be assignable to Object*
*(static_cast<Object* volatile*>(0)) = static_cast<T>(0);
}
- return kMachAnyTagged;
+ return MachineType::AnyTagged();
}
#define DECLARE_TEMPLATE_SPECIALIZATION(ctype, mtype) \
@@ -64,7 +64,7 @@ class CSignature : public MachineSignature {
if (p < static_cast<int>(parameter_count())) {
CHECK_EQ(GetParam(p), params[p]);
} else {
- CHECK_EQ(kMachNone, params[p]);
+ CHECK_EQ(MachineType::None(), params[p]);
}
}
}
@@ -74,13 +74,15 @@ class CSignature : public MachineSignature {
}
static CSignature* New(Zone* zone, MachineType ret,
- MachineType p1 = kMachNone, MachineType p2 = kMachNone,
- MachineType p3 = kMachNone, MachineType p4 = kMachNone,
- MachineType p5 = kMachNone) {
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None(),
+ MachineType p4 = MachineType::None(),
+ MachineType p5 = MachineType::None()) {
MachineType* buffer = zone->NewArray<MachineType>(6);
int pos = 0;
size_t return_count = 0;
- if (ret != kMachNone) {
+ if (ret != MachineType::None()) {
buffer[pos++] = ret;
return_count++;
}
@@ -90,14 +92,15 @@ class CSignature : public MachineSignature {
buffer[pos++] = p4;
buffer[pos++] = p5;
size_t param_count = 5;
- if (p5 == kMachNone) param_count--;
- if (p4 == kMachNone) param_count--;
- if (p3 == kMachNone) param_count--;
- if (p2 == kMachNone) param_count--;
- if (p1 == kMachNone) param_count--;
+ if (p5 == MachineType::None()) param_count--;
+ if (p4 == MachineType::None()) param_count--;
+ if (p3 == MachineType::None()) param_count--;
+ if (p2 == MachineType::None()) param_count--;
+ if (p1 == MachineType::None()) param_count--;
for (size_t i = 0; i < param_count; i++) {
- // Check that there are no kMachNone's in the middle of parameters.
- CHECK_NE(kMachNone, buffer[return_count + i]);
+ // Check that there are no MachineType::None()'s in the middle of
+ // parameters.
+ CHECK_NE(MachineType::None(), buffer[return_count + i]);
}
return new (zone) CSignature(return_count, param_count, buffer);
}
@@ -110,12 +113,13 @@ class CSignatureOf : public CSignature {
MachineType storage_[1 + kParamCount];
CSignatureOf()
- : CSignature(MachineTypeForC<Ret>() != kMachNone ? 1 : 0, kParamCount,
- reinterpret_cast<MachineType*>(&storage_)) {
+ : CSignature(MachineTypeForC<Ret>() != MachineType::None() ? 1 : 0,
+ kParamCount, reinterpret_cast<MachineType*>(&storage_)) {
if (return_count_ == 1) storage_[0] = MachineTypeForC<Ret>();
}
void Set(int index, MachineType type) {
- DCHECK(index >= 0 && index < kParamCount);
+ CHECK_LE(0, index);
+ CHECK_LT(index, kParamCount);
reps_[return_count_ + index] = type;
}
};
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index ba73822d32..fc0956fb50 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -291,7 +291,8 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
for (int i = -2; i < num_int_inputs; i++) { // for all left shapes
for (int j = -2; j < num_int_inputs; j++) { // for all right shapes
if (i >= 0 && j >= 0) break; // No constant/constant combos
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
Node* n0;
@@ -301,7 +302,7 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
if (i == -2) {
n0 = p0;
} else if (i == -1) {
- n0 = m.LoadFromPointer(&input_a, kMachInt32);
+ n0 = m.LoadFromPointer(&input_a, MachineType::Int32());
} else {
n0 = m.Int32Constant(inputs[i]);
}
@@ -310,7 +311,7 @@ void Int32BinopInputShapeTester::TestAllInputShapes() {
if (j == -2) {
n1 = p1;
} else if (j == -1) {
- n1 = m.LoadFromPointer(&input_b, kMachInt32);
+ n1 = m.LoadFromPointer(&input_b, MachineType::Int32());
} else {
n1 = m.Int32Constant(inputs[j]);
}
@@ -368,7 +369,8 @@ void Int32BinopInputShapeTester::RunRight(
TEST(ParametersEqual) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
Node* p1 = m.Parameter(1);
CHECK(p1);
Node* p0 = m.Parameter(0);
@@ -482,7 +484,7 @@ TEST(RunHeapNumberConstant) {
TEST(RunParam1) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Parameter(0));
FOR_INT32_INPUTS(i) {
@@ -493,7 +495,8 @@ TEST(RunParam1) {
TEST(RunParam2_1) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
m.Return(p0);
@@ -507,7 +510,8 @@ TEST(RunParam2_1) {
TEST(RunParam2_2) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
m.Return(p1);
@@ -522,7 +526,8 @@ TEST(RunParam2_2) {
TEST(RunParam3) {
for (int i = 0; i < 3; i++) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
Node* nodes[] = {m.Parameter(0), m.Parameter(1), m.Parameter(2)};
m.Return(nodes[i]);
@@ -580,12 +585,13 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
CHECK_EQ(0x12500000000, m.Call());
}
{
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Parameter(0));
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(*i, m.Call(*i)); }
}
{
- BufferedRawMachineAssemblerTester<int64_t> m(kMachInt64, kMachInt64);
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Int64(),
+ MachineType::Int64());
m.Return(m.Int64Add(m.Parameter(0), m.Parameter(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -595,8 +601,8 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
}
}
{
- BufferedRawMachineAssemblerTester<int64_t> m(kMachInt64, kMachInt64,
- kMachInt64);
+ BufferedRawMachineAssemblerTester<int64_t> m(
+ MachineType::Int64(), MachineType::Int64(), MachineType::Int64());
m.Return(
m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
FOR_INT64_INPUTS(i) {
@@ -608,8 +614,9 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
}
}
{
- BufferedRawMachineAssemblerTester<int64_t> m(kMachInt64, kMachInt64,
- kMachInt64, kMachInt64);
+ BufferedRawMachineAssemblerTester<int64_t> m(
+ MachineType::Int64(), MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(m.Int64Add(
m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2)),
m.Parameter(3)));
@@ -625,17 +632,18 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
{
BufferedRawMachineAssemblerTester<void> m;
int64_t result;
- m.Store(MachineTypeForC<int64_t>(), m.PointerConstant(&result),
- m.Int64Constant(0x12500000000), kNoWriteBarrier);
+ m.Store(MachineTypeForC<int64_t>().representation(),
+ m.PointerConstant(&result), m.Int64Constant(0x12500000000),
+ kNoWriteBarrier);
m.Return(m.Int32Constant(0));
m.Call();
CHECK_EQ(0x12500000000, result);
}
{
- BufferedRawMachineAssemblerTester<void> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<void> m(MachineType::Float64());
double result;
- m.Store(MachineTypeForC<double>(), m.PointerConstant(&result),
- m.Parameter(0), kNoWriteBarrier);
+ m.Store(MachineTypeForC<double>().representation(),
+ m.PointerConstant(&result), m.Parameter(0), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
FOR_FLOAT64_INPUTS(i) {
m.Call(*i);
@@ -643,9 +651,11 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
}
}
{
- BufferedRawMachineAssemblerTester<void> m(kMachInt64, kMachInt64);
+ BufferedRawMachineAssemblerTester<void> m(MachineType::Int64(),
+ MachineType::Int64());
int64_t result;
- m.Store(MachineTypeForC<int64_t>(), m.PointerConstant(&result),
+ m.Store(MachineTypeForC<int64_t>().representation(),
+ m.PointerConstant(&result),
m.Int64Add(m.Parameter(0), m.Parameter(1)), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
FOR_INT64_INPUTS(i) {
@@ -659,11 +669,11 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
}
}
{
- BufferedRawMachineAssemblerTester<void> m(kMachInt64, kMachInt64,
- kMachInt64);
+ BufferedRawMachineAssemblerTester<void> m(
+ MachineType::Int64(), MachineType::Int64(), MachineType::Int64());
int64_t result;
m.Store(
- MachineTypeForC<int64_t>(), m.PointerConstant(&result),
+ MachineTypeForC<int64_t>().representation(), m.PointerConstant(&result),
m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)), m.Parameter(2)),
kNoWriteBarrier);
m.Return(m.Int32Constant(0));
@@ -681,10 +691,12 @@ TEST(RunBufferedRawMachineAssemblerTesterTester) {
}
}
{
- BufferedRawMachineAssemblerTester<void> m(kMachInt64, kMachInt64,
- kMachInt64, kMachInt64);
+ BufferedRawMachineAssemblerTester<void> m(
+ MachineType::Int64(), MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
int64_t result;
- m.Store(MachineTypeForC<int64_t>(), m.PointerConstant(&result),
+ m.Store(MachineTypeForC<int64_t>().representation(),
+ m.PointerConstant(&result),
m.Int64Add(m.Int64Add(m.Int64Add(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)),
m.Parameter(3)),
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 55dbecdb9e..56e90c65b7 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -20,11 +20,11 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
public CallHelper<ReturnType>,
public RawMachineAssembler {
public:
- RawMachineAssemblerTester(MachineType p0 = kMachNone,
- MachineType p1 = kMachNone,
- MachineType p2 = kMachNone,
- MachineType p3 = kMachNone,
- MachineType p4 = kMachNone)
+ RawMachineAssemblerTester(MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None(),
+ MachineType p4 = MachineType::None())
: HandleAndZoneScope(),
CallHelper<ReturnType>(
main_isolate(),
@@ -36,7 +36,8 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
main_zone(),
CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0,
p1, p2, p3, p4)),
- kMachPtr, InstructionSelector::SupportedMachineOperatorFlags()) {}
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags()) {}
void CheckNumber(double expected, Object* number) {
CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
@@ -77,10 +78,10 @@ template <typename ReturnType>
class BufferedRawMachineAssemblerTester
: public RawMachineAssemblerTester<int32_t> {
public:
- BufferedRawMachineAssemblerTester(MachineType p0 = kMachNone,
- MachineType p1 = kMachNone,
- MachineType p2 = kMachNone,
- MachineType p3 = kMachNone)
+ BufferedRawMachineAssemblerTester(MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None())
: BufferedRawMachineAssemblerTester(ComputeParameterCount(p0, p1, p2, p3),
p0, p1, p2, p3) {}
@@ -101,7 +102,7 @@ class BufferedRawMachineAssemblerTester
// Store node is provided as a parameter. By storing the return value in
// memory it is possible to return 64 bit values.
void Return(Node* input) {
- Store(MachineTypeForC<ReturnType>(),
+ Store(MachineTypeForC<ReturnType>().representation(),
RawMachineAssembler::Parameter(return_parameter_index_), input,
kNoWriteBarrier);
RawMachineAssembler::Return(Int32Constant(1234));
@@ -159,36 +160,45 @@ class BufferedRawMachineAssemblerTester
MachineType p0, MachineType p1,
MachineType p2, MachineType p3)
: RawMachineAssemblerTester<int32_t>(
- kMachPtr, p0 == kMachNone ? kMachNone : kMachPtr,
- p1 == kMachNone ? kMachNone : kMachPtr,
- p2 == kMachNone ? kMachNone : kMachPtr,
- p3 == kMachNone ? kMachNone : kMachPtr),
+ MachineType::Pointer(),
+ p0 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer(),
+ p1 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer(),
+ p2 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer(),
+ p3 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer()),
test_graph_signature_(
- CSignature::New(main_zone(), kMachInt32, p0, p1, p2, p3)),
+ CSignature::New(main_zone(), MachineType::Int32(), p0, p1, p2, p3)),
return_parameter_index_(return_parameter_index) {
- parameter_nodes_[0] =
- p0 == kMachNone ? nullptr : Load(p0, RawMachineAssembler::Parameter(0));
- parameter_nodes_[1] =
- p1 == kMachNone ? nullptr : Load(p1, RawMachineAssembler::Parameter(1));
- parameter_nodes_[2] =
- p2 == kMachNone ? nullptr : Load(p2, RawMachineAssembler::Parameter(2));
- parameter_nodes_[3] =
- p3 == kMachNone ? nullptr : Load(p3, RawMachineAssembler::Parameter(3));
+ parameter_nodes_[0] = p0 == MachineType::None()
+ ? nullptr
+ : Load(p0, RawMachineAssembler::Parameter(0));
+ parameter_nodes_[1] = p1 == MachineType::None()
+ ? nullptr
+ : Load(p1, RawMachineAssembler::Parameter(1));
+ parameter_nodes_[2] = p2 == MachineType::None()
+ ? nullptr
+ : Load(p2, RawMachineAssembler::Parameter(2));
+ parameter_nodes_[3] = p3 == MachineType::None()
+ ? nullptr
+ : Load(p3, RawMachineAssembler::Parameter(3));
}
static uint32_t ComputeParameterCount(MachineType p0, MachineType p1,
MachineType p2, MachineType p3) {
- if (p0 == kMachNone) {
+ if (p0 == MachineType::None()) {
return 0;
}
- if (p1 == kMachNone) {
+ if (p1 == MachineType::None()) {
return 1;
}
- if (p2 == kMachNone) {
+ if (p2 == MachineType::None()) {
return 2;
}
- if (p3 == kMachNone) {
+ if (p3 == MachineType::None()) {
return 3;
}
return 4;
@@ -205,25 +215,34 @@ template <>
class BufferedRawMachineAssemblerTester<void>
: public RawMachineAssemblerTester<void> {
public:
- BufferedRawMachineAssemblerTester(MachineType p0 = kMachNone,
- MachineType p1 = kMachNone,
- MachineType p2 = kMachNone,
- MachineType p3 = kMachNone)
- : RawMachineAssemblerTester<void>(p0 == kMachNone ? kMachNone : kMachPtr,
- p1 == kMachNone ? kMachNone : kMachPtr,
- p2 == kMachNone ? kMachNone : kMachPtr,
- p3 == kMachNone ? kMachNone : kMachPtr),
+ BufferedRawMachineAssemblerTester(MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None())
+ : RawMachineAssemblerTester<void>(
+ p0 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer(),
+ p1 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer(),
+ p2 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer(),
+ p3 == MachineType::None() ? MachineType::None()
+ : MachineType::Pointer()),
test_graph_signature_(
CSignature::New(RawMachineAssemblerTester<void>::main_zone(),
- kMachNone, p0, p1, p2, p3)) {
- parameter_nodes_[0] =
- p0 == kMachNone ? nullptr : Load(p0, RawMachineAssembler::Parameter(0));
- parameter_nodes_[1] =
- p1 == kMachNone ? nullptr : Load(p1, RawMachineAssembler::Parameter(1));
- parameter_nodes_[2] =
- p2 == kMachNone ? nullptr : Load(p2, RawMachineAssembler::Parameter(2));
- parameter_nodes_[3] =
- p3 == kMachNone ? nullptr : Load(p3, RawMachineAssembler::Parameter(3));
+ MachineType::None(), p0, p1, p2, p3)) {
+ parameter_nodes_[0] = p0 == MachineType::None()
+ ? nullptr
+ : Load(p0, RawMachineAssembler::Parameter(0));
+ parameter_nodes_[1] = p1 == MachineType::None()
+ ? nullptr
+ : Load(p1, RawMachineAssembler::Parameter(1));
+ parameter_nodes_[2] = p2 == MachineType::None()
+ ? nullptr
+ : Load(p2, RawMachineAssembler::Parameter(2));
+ parameter_nodes_[3] = p3 == MachineType::None()
+ ? nullptr
+ : Load(p3, RawMachineAssembler::Parameter(3));
}
@@ -283,13 +302,15 @@ static const int32_t CHECK_VALUE = 0x99BEEDCE;
// TODO(titzer): use the C-style calling convention, or any register-based
// calling convention for binop tests.
-template <typename CType, MachineType rep, bool use_result_buffer>
+template <typename CType, bool use_result_buffer>
class BinopTester {
public:
- explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+ explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester,
+ MachineType rep)
: T(tester),
param0(T->LoadFromPointer(&p0, rep)),
param1(T->LoadFromPointer(&p1, rep)),
+ rep(rep),
p0(static_cast<CType>(0)),
p1(static_cast<CType>(0)),
result(static_cast<CType>(0)) {}
@@ -311,8 +332,8 @@ class BinopTester {
void AddReturn(Node* val) {
if (use_result_buffer) {
- T->Store(rep, T->PointerConstant(&result), T->Int32Constant(0), val,
- kNoWriteBarrier);
+ T->Store(rep.representation(), T->PointerConstant(&result),
+ T->Int32Constant(0), val, kNoWriteBarrier);
T->Return(T->Int32Constant(CHECK_VALUE));
} else {
T->Return(val);
@@ -331,6 +352,7 @@ class BinopTester {
}
protected:
+ MachineType rep;
CType p0;
CType p1;
CType result;
@@ -339,21 +361,31 @@ class BinopTester {
// A helper class for testing code sequences that take two int parameters and
// return an int value.
-class Int32BinopTester
- : public BinopTester<int32_t, kMachInt32, USE_RETURN_REGISTER> {
+class Int32BinopTester : public BinopTester<int32_t, USE_RETURN_REGISTER> {
public:
explicit Int32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<int32_t, kMachInt32, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<int32_t, USE_RETURN_REGISTER>(tester,
+ MachineType::Int32()) {}
+};
+
+
+// A helper class for testing code sequences that take two int parameters and
+// return an int value.
+class Int64BinopTester : public BinopTester<int64_t, USE_RETURN_REGISTER> {
+ public:
+ explicit Int64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+ : BinopTester<int64_t, USE_RETURN_REGISTER>(tester,
+ MachineType::Int64()) {}
};
// A helper class for testing code sequences that take two uint parameters and
// return an uint value.
-class Uint32BinopTester
- : public BinopTester<uint32_t, kMachUint32, USE_RETURN_REGISTER> {
+class Uint32BinopTester : public BinopTester<uint32_t, USE_RETURN_REGISTER> {
public:
explicit Uint32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<uint32_t, kMachUint32, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<uint32_t, USE_RETURN_REGISTER>(tester,
+ MachineType::Uint32()) {}
uint32_t call(uint32_t a0, uint32_t a1) {
p0 = a0;
@@ -366,22 +398,21 @@ class Uint32BinopTester
// A helper class for testing code sequences that take two float parameters and
// return a float value.
// TODO(titzer): figure out how to return floats correctly on ia32.
-class Float32BinopTester
- : public BinopTester<float, kMachFloat32, USE_RESULT_BUFFER> {
+class Float32BinopTester : public BinopTester<float, USE_RESULT_BUFFER> {
public:
explicit Float32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<float, kMachFloat32, USE_RESULT_BUFFER>(tester) {}
+ : BinopTester<float, USE_RESULT_BUFFER>(tester, MachineType::Float32()) {}
};
// A helper class for testing code sequences that take two double parameters and
// return a double value.
// TODO(titzer): figure out how to return doubles correctly on ia32.
-class Float64BinopTester
- : public BinopTester<double, kMachFloat64, USE_RESULT_BUFFER> {
+class Float64BinopTester : public BinopTester<double, USE_RESULT_BUFFER> {
public:
explicit Float64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<double, kMachFloat64, USE_RESULT_BUFFER>(tester) {}
+ : BinopTester<double, USE_RESULT_BUFFER>(tester, MachineType::Float64()) {
+ }
};
@@ -389,22 +420,22 @@ class Float64BinopTester
// and return a pointer value.
// TODO(titzer): pick word size of pointers based on V8_TARGET.
template <typename Type>
-class PointerBinopTester
- : public BinopTester<Type*, kMachPtr, USE_RETURN_REGISTER> {
+class PointerBinopTester : public BinopTester<Type*, USE_RETURN_REGISTER> {
public:
explicit PointerBinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<Type*, kMachPtr, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<Type*, USE_RETURN_REGISTER>(tester,
+ MachineType::Pointer()) {}
};
// A helper class for testing code sequences that take two tagged parameters and
// return a tagged value.
template <typename Type>
-class TaggedBinopTester
- : public BinopTester<Type*, kMachAnyTagged, USE_RETURN_REGISTER> {
+class TaggedBinopTester : public BinopTester<Type*, USE_RETURN_REGISTER> {
public:
explicit TaggedBinopTester(RawMachineAssemblerTester<int32_t>* tester)
- : BinopTester<Type*, kMachAnyTagged, USE_RETURN_REGISTER>(tester) {}
+ : BinopTester<Type*, USE_RETURN_REGISTER>(tester,
+ MachineType::AnyTagged()) {}
};
// A helper class for testing compares. Wraps a machine opcode and provides
@@ -512,7 +543,7 @@ static inline void CheckFloatEq(volatile float x, volatile float y) {
if (std::isnan(x)) {
CHECK(std::isnan(y));
} else {
- CHECK(x == y);
+ CHECK_EQ(x, y);
}
}
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 8741808d82..2fcd35398c 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -5,7 +5,8 @@
#ifndef V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
-#include "src/ast-numbering.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -13,9 +14,8 @@
#include "src/full-codegen/full-codegen.h"
#include "src/handles.h"
#include "src/objects-inl.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -35,18 +35,30 @@ class FunctionTester : public InitializedHandleScope {
CHECK_EQ(0u, flags_ & ~supported_flags);
}
- // TODO(turbofan): generalize FunctionTester to work with N arguments. Now, it
- // can handle up to four.
- explicit FunctionTester(Graph* graph)
+ FunctionTester(Graph* graph, int param_count)
: isolate(main_isolate()),
- function(NewFunction("(function(a,b,c,d){})")),
+ function(NewFunction(BuildFunction(param_count).c_str())),
flags_(0) {
CompileGraph(graph);
}
+ FunctionTester(const CallInterfaceDescriptor& descriptor, Handle<Code> code)
+ : isolate(main_isolate()),
+ function(
+ (FLAG_allow_natives_syntax = true,
+ NewFunction(BuildFunctionFromDescriptor(descriptor).c_str()))),
+ flags_(0) {
+ Compile(function);
+ function->ReplaceCode(*code);
+ }
+
Isolate* isolate;
Handle<JSFunction> function;
+ MaybeHandle<Object> Call() {
+ return Execution::Call(isolate, function, undefined(), 0, nullptr);
+ }
+
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
Handle<Object> args[] = {a, b};
return Execution::Call(isolate, function, undefined(), 2, args);
@@ -124,8 +136,8 @@ class FunctionTester : public InitializedHandleScope {
}
Handle<JSObject> NewObject(const char* source) {
- return v8::Utils::OpenHandle(
- *v8::Local<v8::Object>::Cast(CompileRun(source)));
+ return Handle<JSObject>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Object>::Cast(CompileRun(source))));
}
Handle<String> Val(const char* string) {
@@ -180,10 +192,10 @@ class FunctionTester : public InitializedHandleScope {
return function;
}
- static Handle<JSFunction> ForMachineGraph(Graph* graph) {
+ static Handle<JSFunction> ForMachineGraph(Graph* graph, int param_count) {
JSFunction* p = NULL;
{ // because of the implicit handle scope of FunctionTester.
- FunctionTester f(graph);
+ FunctionTester f(graph, param_count);
p = *f.function;
}
return Handle<JSFunction>(p); // allocated in outer handle scope.
@@ -192,6 +204,25 @@ class FunctionTester : public InitializedHandleScope {
private:
uint32_t flags_;
+ std::string BuildFunction(int param_count) {
+ std::string function_string = "(function(";
+ if (param_count > 0) {
+ char next = 'a';
+ function_string += next;
+ while (param_count-- > 0) {
+ function_string += ',';
+ function_string += ++next;
+ }
+ }
+ function_string += "){})";
+ return function_string;
+ }
+
+ std::string BuildFunctionFromDescriptor(
+ const CallInterfaceDescriptor& descriptor) {
+ return BuildFunction(descriptor.GetParameterCount());
+ }
+
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index ea2c3ad139..de2713a5ac 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -24,7 +24,7 @@ class GraphAndBuilders {
explicit GraphAndBuilders(Zone* zone)
: main_graph_(new (zone) Graph(zone)),
main_common_(zone),
- main_machine_(zone, kMachPtr,
+ main_machine_(zone, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags()),
main_simplified_(zone) {}
@@ -48,11 +48,11 @@ class GraphBuilderTester : public HandleAndZoneScope,
public GraphAndBuilders,
public CallHelper<ReturnType> {
public:
- explicit GraphBuilderTester(MachineType p0 = kMachNone,
- MachineType p1 = kMachNone,
- MachineType p2 = kMachNone,
- MachineType p3 = kMachNone,
- MachineType p4 = kMachNone)
+ explicit GraphBuilderTester(MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None(),
+ MachineType p4 = MachineType::None())
: GraphAndBuilders(main_zone()),
CallHelper<ReturnType>(
main_isolate(),
@@ -68,7 +68,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
void GenerateCode() { Generate(); }
Node* Parameter(size_t index) {
- DCHECK(index < parameter_count());
+ CHECK_LT(index, parameter_count());
return parameters_[index];
}
@@ -77,7 +77,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
// Initialize graph and builder.
void Begin(int num_parameters) {
- DCHECK(graph()->start() == NULL);
+ CHECK_NULL(graph()->start());
Node* start = graph()->NewNode(common()->Start(num_parameters + 3));
graph()->SetStart(start);
effect_ = start;
@@ -235,15 +235,15 @@ class GraphBuilderTester : public HandleAndZoneScope,
protected:
Node* MakeNode(const Operator* op, int value_input_count,
Node** value_inputs) {
- DCHECK(op->ValueInputCount() == value_input_count);
+ CHECK_EQ(op->ValueInputCount(), value_input_count);
- DCHECK(!OperatorProperties::HasContextInput(op));
- DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
+ CHECK(!OperatorProperties::HasContextInput(op));
+ CHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
bool has_control = op->ControlInputCount() == 1;
bool has_effect = op->EffectInputCount() == 1;
- DCHECK(op->ControlInputCount() < 2);
- DCHECK(op->EffectInputCount() < 2);
+ CHECK_LT(op->ControlInputCount(), 2);
+ CHECK_LT(op->EffectInputCount(), 2);
Node* result = NULL;
if (!has_control && !has_effect) {
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index 1f4d87f18d..17400abe53 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/basic-block-profiler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
@@ -13,11 +10,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-typedef RawMachineAssembler::Label MLabel;
-
class BasicBlockProfilerTest : public RawMachineAssemblerTester<int32_t> {
public:
- BasicBlockProfilerTest() : RawMachineAssemblerTester<int32_t>(kMachInt32) {
+ BasicBlockProfilerTest()
+ : RawMachineAssemblerTester<int32_t>(MachineType::Int32()) {
FLAG_turbo_profiling = true;
}
@@ -41,7 +37,7 @@ class BasicBlockProfilerTest : public RawMachineAssemblerTester<int32_t> {
TEST(ProfileDiamond) {
BasicBlockProfilerTest m;
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
m.Branch(m.Parameter(0), &blocka, &blockb);
m.Bind(&blocka);
m.Goto(&end);
@@ -81,12 +77,12 @@ TEST(ProfileDiamond) {
TEST(ProfileLoop) {
BasicBlockProfilerTest m;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* one = m.Int32Constant(1);
m.Goto(&header);
m.Bind(&header);
- Node* count = m.Phi(kMachInt32, m.Parameter(0), one);
+ Node* count = m.Phi(MachineRepresentation::kWord32, m.Parameter(0), one);
m.Branch(count, &body, &end);
m.Bind(&body);
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index 984c7130ba..c3b4308a93 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -13,8 +10,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-typedef RawMachineAssembler::Label MLabel;
-
static IrOpcode::Value int32cmp_opcodes[] = {
IrOpcode::kWord32Equal, IrOpcode::kInt32LessThan,
IrOpcode::kInt32LessThanOrEqual, IrOpcode::kUint32LessThan,
@@ -23,12 +18,12 @@ static IrOpcode::Value int32cmp_opcodes[] = {
TEST(BranchCombineWord32EqualZero_1) {
// Test combining a branch with x == 0
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t eq_constant = -1033;
int32_t ne_constant = 825118;
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -49,9 +44,9 @@ TEST(BranchCombineWord32EqualZero_chain) {
int32_t ne_constant = 815118;
for (int k = 0; k < 6; k++) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
Node* cond = p0;
for (int j = 0; j < k; j++) {
cond = m.Word32Equal(cond, m.Int32Constant(0));
@@ -74,12 +69,12 @@ TEST(BranchCombineWord32EqualZero_chain) {
TEST(BranchCombineInt32LessThanZero_1) {
// Test combining a branch with x < 0
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t eq_constant = -1433;
int32_t ne_constant = 845118;
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Int32LessThan(p0, m.Int32Constant(0)), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -96,12 +91,12 @@ TEST(BranchCombineInt32LessThanZero_1) {
TEST(BranchCombineUint32LessThan100_1) {
// Test combining a branch with x < 100
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
int32_t eq_constant = 1471;
int32_t ne_constant = 88845718;
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Uint32LessThan(p0, m.Int32Constant(100)), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -118,12 +113,12 @@ TEST(BranchCombineUint32LessThan100_1) {
TEST(BranchCombineUint32LessThanOrEqual100_1) {
// Test combining a branch with x <= 100
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
int32_t eq_constant = 1479;
int32_t ne_constant = 77845719;
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Uint32LessThanOrEqual(p0, m.Int32Constant(100)), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -140,12 +135,12 @@ TEST(BranchCombineUint32LessThanOrEqual100_1) {
TEST(BranchCombineZeroLessThanInt32_1) {
// Test combining a branch with 0 < x
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t eq_constant = -2033;
int32_t ne_constant = 225118;
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Int32LessThan(m.Int32Constant(0), p0), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -162,12 +157,12 @@ TEST(BranchCombineZeroLessThanInt32_1) {
TEST(BranchCombineInt32GreaterThanZero_1) {
// Test combining a branch with x > 0
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t eq_constant = -1073;
int32_t ne_constant = 825178;
Node* p0 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Int32GreaterThan(p0, m.Int32Constant(0)), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -184,13 +179,14 @@ TEST(BranchCombineInt32GreaterThanZero_1) {
TEST(BranchCombineWord32EqualP) {
// Test combining a branch with an Word32Equal.
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
int32_t eq_constant = -1035;
int32_t ne_constant = 825018;
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(eq_constant));
@@ -214,13 +210,13 @@ TEST(BranchCombineWord32EqualI) {
for (int left = 0; left < 2; left++) {
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t a = *i;
Node* p0 = m.Int32Constant(a);
Node* p1 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
if (left == 1) m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
if (left == 0) m.Branch(m.Word32Equal(p1, p0), &blocka, &blockb);
m.Bind(&blocka);
@@ -243,11 +239,12 @@ TEST(BranchCombineInt32CmpP) {
int32_t ne_constant = 725018;
for (int op = 0; op < 2; op++) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
m.Bind(&blocka);
@@ -275,12 +272,12 @@ TEST(BranchCombineInt32CmpI) {
for (int op = 0; op < 2; op++) {
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t a = *i;
Node* p0 = m.Int32Constant(a);
Node* p1 = m.Parameter(0);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
m.Bind(&blocka);
@@ -336,7 +333,7 @@ class CmpBranchGen : public BinopGen<int32_t> {
: w(opcode), invert(i), true_first(t), eq_constant(eq), ne_constant(ne) {}
virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
Node* cond = w.MakeNode(m, a, b);
if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
m->Branch(cond, &blocka, &blockb);
@@ -432,10 +429,10 @@ TEST(BranchCombineFloat64Compares) {
CompareWrapper cmp = cmps[c];
for (int invert = 0; invert < 2; invert++) {
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+ Node* a = m.LoadFromPointer(&input_a, MachineType::Float64());
+ Node* b = m.LoadFromPointer(&input_b, MachineType::Float64());
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
Node* cond = cmp.MakeNode(&m, a, b);
if (invert) cond = m.Word32Equal(cond, m.Int32Constant(0));
m.Branch(cond, &blocka, &blockb);
diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
index 028ac4b7c8..e850da7735 100644
--- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <limits>
+#include "src/ast/scopes.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/js-graph.h"
@@ -18,9 +16,8 @@
#include "src/compiler/verifier.h"
#include "src/execution.h"
#include "src/globals.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -34,7 +31,7 @@ namespace compiler {
template <typename ReturnType>
class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
public:
- explicit ChangesLoweringTester(MachineType p0 = kMachNone)
+ explicit ChangesLoweringTester(MachineType p0 = MachineType::None())
: GraphBuilderTester<ReturnType>(p0),
javascript(this->zone()),
jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
@@ -63,22 +60,22 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
void StoreFloat64(Node* node, double* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- this->Store(kMachFloat64, ptr_node, node);
+ this->Store(MachineType::Float64(), ptr_node, node);
}
Node* LoadInt32(int32_t* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(kMachInt32, ptr_node);
+ return this->Load(MachineType::Int32(), ptr_node);
}
Node* LoadUint32(uint32_t* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(kMachUint32, ptr_node);
+ return this->Load(MachineType::Uint32(), ptr_node);
}
Node* LoadFloat64(double* ptr) {
Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(kMachFloat64, ptr_node);
+ return this->Load(MachineType::Float64(), ptr_node);
}
void CheckNumber(double expected, Object* number) {
@@ -148,7 +145,7 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
TEST(RunChangeTaggedToInt32) {
// Build and lower a graph by hand.
- ChangesLoweringTester<int32_t> t(kMachAnyTagged);
+ ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
FOR_INT32_INPUTS(i) {
@@ -176,7 +173,7 @@ TEST(RunChangeTaggedToInt32) {
TEST(RunChangeTaggedToUint32) {
// Build and lower a graph by hand.
- ChangesLoweringTester<uint32_t> t(kMachAnyTagged);
+ ChangesLoweringTester<uint32_t> t(MachineType::AnyTagged());
t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
FOR_UINT32_INPUTS(i) {
@@ -203,13 +200,13 @@ TEST(RunChangeTaggedToUint32) {
TEST(RunChangeTaggedToFloat64) {
- ChangesLoweringTester<int32_t> t(kMachAnyTagged);
+ ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
double result;
- t.BuildStoreAndLower(
- t.simplified()->ChangeTaggedToFloat64(),
- t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
- &result);
+ t.BuildStoreAndLower(t.simplified()->ChangeTaggedToFloat64(),
+ t.machine()->Store(StoreRepresentation(
+ MachineRepresentation::kFloat64, kNoWriteBarrier)),
+ &result);
{
FOR_INT32_INPUTS(i) {
@@ -254,7 +251,7 @@ TEST(RunChangeTaggedToFloat64) {
TEST(RunChangeBoolToBit) {
- ChangesLoweringTester<int32_t> t(kMachAnyTagged);
+ ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
t.BuildAndLower(t.simplified()->ChangeBoolToBit());
{
@@ -272,7 +269,7 @@ TEST(RunChangeBoolToBit) {
TEST(RunChangeBitToBool) {
- ChangesLoweringTester<Object*> t(kMachInt32);
+ ChangesLoweringTester<Object*> t(MachineType::Int32());
t.BuildAndLower(t.simplified()->ChangeBitToBool());
{
diff --git a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc b/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
new file mode 100644
index 0000000000..d7a7a8198a
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+class CodeStubAssemblerTester : public CodeStubAssembler {
+ public:
+ CodeStubAssemblerTester(Isolate* isolate,
+ const CallInterfaceDescriptor& descriptor)
+ : CodeStubAssembler(isolate, isolate->runtime_zone(), descriptor,
+ Code::STUB, "test"),
+ scope_(isolate) {}
+
+ private:
+ HandleScope scope_;
+ LocalContext context_;
+};
+
+
+TEST(SimpleSmiReturn) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ m.Return(m.SmiTag(m.Int32Constant(37)));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(37, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+
+TEST(SimpleIntPtrReturn) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ int test;
+ m.Return(m.IntPtrConstant(reinterpret_cast<intptr_t>(&test)));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(reinterpret_cast<intptr_t>(&test),
+ reinterpret_cast<intptr_t>(*result.ToHandleChecked()));
+}
+
+
+TEST(SimpleDoubleReturn) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ m.Return(m.NumberConstant(0.5));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(0.5, Handle<HeapNumber>::cast(result.ToHandleChecked())->value());
+}
+
+
+TEST(SimpleCallRuntime1Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ Node* b = m.SmiTag(m.Int32Constant(256));
+ m.Return(m.CallRuntime(Runtime::kMathSqrt, context, b));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+
+TEST(SimpleTailCallRuntime1Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ Node* b = m.SmiTag(m.Int32Constant(256));
+ m.TailCallRuntime(Runtime::kMathSqrt, context, b);
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+
+TEST(SimpleCallRuntime2Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ Node* a = m.SmiTag(m.Int32Constant(2));
+ Node* b = m.SmiTag(m.Int32Constant(4));
+ m.Return(m.CallRuntime(Runtime::kMathPow, context, a, b));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+
+TEST(SimpleTailCallRuntime2Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ Node* a = m.SmiTag(m.Int32Constant(2));
+ Node* b = m.SmiTag(m.Int32Constant(4));
+ m.TailCallRuntime(Runtime::kMathPow, context, a, b);
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index a0e1d6023b..7f85088809 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler/gap-resolver.h"
#include "src/base/utils/random-number-generator.h"
@@ -89,7 +86,7 @@ class InterpreterState {
} else {
index = LocationOperand::cast(op).index();
}
- is_float = IsFloatingPoint(LocationOperand::cast(op).machine_type());
+ is_float = IsFloatingPoint(LocationOperand::cast(op).representation());
kind = LocationOperand::cast(op).location_kind();
} else {
index = ConstantOperand::cast(op).virtual_register();
@@ -181,24 +178,24 @@ class ParallelMoveCreator : public HandleAndZoneScope {
}
private:
- MachineType RandomType() {
+ MachineRepresentation RandomRepresentation() {
int index = rng_->NextInt(3);
switch (index) {
case 0:
- return kRepWord32;
+ return MachineRepresentation::kWord32;
case 1:
- return kRepWord64;
+ return MachineRepresentation::kWord64;
case 2:
- return kRepTagged;
+ return MachineRepresentation::kTagged;
}
UNREACHABLE();
- return kMachNone;
+ return MachineRepresentation::kNone;
}
- MachineType RandomDoubleType() {
+ MachineRepresentation RandomDoubleRepresentation() {
int index = rng_->NextInt(2);
- if (index == 0) return kRepFloat64;
- return kRepFloat32;
+ if (index == 0) return MachineRepresentation::kFloat64;
+ return MachineRepresentation::kFloat32;
}
InstructionOperand CreateRandomOperand(bool is_source) {
@@ -206,24 +203,25 @@ class ParallelMoveCreator : public HandleAndZoneScope {
// destination can't be Constant.
switch (rng_->NextInt(is_source ? 7 : 6)) {
case 0:
- return AllocatedOperand(LocationOperand::STACK_SLOT, RandomType(),
- index);
+ return AllocatedOperand(LocationOperand::STACK_SLOT,
+ RandomRepresentation(), index);
case 1:
- return AllocatedOperand(LocationOperand::STACK_SLOT, RandomDoubleType(),
- index);
+ return AllocatedOperand(LocationOperand::STACK_SLOT,
+ RandomDoubleRepresentation(), index);
case 2:
- return AllocatedOperand(LocationOperand::REGISTER, RandomType(), index);
+ return AllocatedOperand(LocationOperand::REGISTER,
+ RandomRepresentation(), index);
case 3:
- return AllocatedOperand(LocationOperand::REGISTER, RandomDoubleType(),
- index);
+ return AllocatedOperand(LocationOperand::REGISTER,
+ RandomDoubleRepresentation(), index);
case 4:
return ExplicitOperand(
- LocationOperand::REGISTER, RandomType(),
+ LocationOperand::REGISTER, RandomRepresentation(),
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->GetAllocatableGeneralCode(1));
case 5:
return ExplicitOperand(
- LocationOperand::STACK_SLOT, RandomType(),
+ LocationOperand::STACK_SLOT, RandomRepresentation(),
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->GetAllocatableGeneralCode(index));
case 6:
diff --git a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
index 9ce34cd64a..48be46ce5f 100644
--- a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
+++ b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
@@ -36,12 +33,12 @@ TEST(NodeWithNullInputReachableFromEnd) {
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
Node* k = graph.NewNode(common.Int32Constant(0));
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 1), k, start);
+ Node* phi =
+ graph.NewNode(common.Phi(MachineRepresentation::kTagged, 1), k, start);
phi->ReplaceInput(0, NULL);
graph.SetEnd(phi);
OFStream os(stdout);
- os << AsDOT(graph);
SourcePositionTable table(&graph);
os << AsJSON(graph, &table);
}
@@ -55,12 +52,12 @@ TEST(NodeWithNullControlReachableFromEnd) {
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
Node* k = graph.NewNode(common.Int32Constant(0));
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 1), k, start);
+ Node* phi =
+ graph.NewNode(common.Phi(MachineRepresentation::kTagged, 1), k, start);
phi->ReplaceInput(1, NULL);
graph.SetEnd(phi);
OFStream os(stdout);
- os << AsDOT(graph);
SourcePositionTable table(&graph);
os << AsJSON(graph, &table);
}
@@ -74,12 +71,12 @@ TEST(NodeWithNullInputReachableFromStart) {
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
Node* k = graph.NewNode(common.Int32Constant(0));
- Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 1), k, start);
+ Node* phi =
+ graph.NewNode(common.Phi(MachineRepresentation::kTagged, 1), k, start);
phi->ReplaceInput(0, NULL);
graph.SetEnd(start);
OFStream os(stdout);
- os << AsDOT(graph);
SourcePositionTable table(&graph);
os << AsJSON(graph, &table);
}
@@ -97,7 +94,6 @@ TEST(NodeWithNullControlReachableFromStart) {
graph.SetEnd(merge);
OFStream os(stdout);
- os << AsDOT(graph);
SourcePositionTable table(&graph);
os << AsJSON(graph, &table);
}
@@ -125,7 +121,6 @@ TEST(NodeNetworkOfDummiesReachableFromEnd) {
graph.SetEnd(end);
OFStream os(stdout);
- os << AsDOT(graph);
SourcePositionTable table(&graph);
os << AsJSON(graph, &table);
}
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 53562953ee..4de3373dad 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
@@ -46,7 +43,7 @@ class InstructionTester : public HandleAndZoneScope {
if (schedule.rpo_order()->size() == 0) {
// Compute the RPO order.
Scheduler::ComputeSpecialRPO(main_zone(), &schedule);
- DCHECK(schedule.rpo_order()->size() > 0);
+ CHECK_NE(0u, schedule.rpo_order()->size());
}
InstructionBlocks* instruction_blocks =
TestInstrSeq::InstructionBlocksFor(main_zone(), &schedule);
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 7559ecd16b..06169f3ba6 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index e2612e9ea8..43b7665459 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
@@ -65,7 +62,7 @@ TEST(ReduceJSLoadContext) {
subcontext2->set_previous(*subcontext1);
subcontext1->set_previous(*native);
Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
- const int slot = Context::GLOBAL_OBJECT_INDEX;
+ const int slot = Context::NATIVE_CONTEXT_INDEX;
native->set(slot, *expected);
Node* const_context = t.jsgraph()->Constant(native);
@@ -136,7 +133,7 @@ TEST(ReduceJSStoreContext) {
subcontext2->set_previous(*subcontext1);
subcontext1->set_previous(*native);
Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
- const int slot = Context::GLOBAL_OBJECT_INDEX;
+ const int slot = Context::NATIVE_CONTEXT_INDEX;
native->set(slot, *expected);
Node* const_context = t.jsgraph()->Constant(native);
@@ -204,7 +201,7 @@ TEST(SpecializeToContext) {
// Make a context and initialize it a bit for this test.
Handle<Context> native = t.factory()->NewNativeContext();
Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
- const int slot = Context::GLOBAL_OBJECT_INDEX;
+ const int slot = Context::NATIVE_CONTEXT_INDEX;
native->set(slot, *expected);
Node* const_context = t.jsgraph()->Constant(native);
@@ -228,8 +225,8 @@ TEST(SpecializeToContext) {
t.graph()->NewNode(t.simplified()->ChangeTaggedToInt32(), other_load);
Node* add = t.graph()->NewNode(
- t.javascript()->Add(LanguageMode::SLOPPY), value_use, other_use,
- param_context, t.jsgraph()->EmptyFrameState(),
+ t.javascript()->Add(LanguageMode::SLOPPY, BinaryOperationHints::Any()),
+ value_use, other_use, param_context, t.jsgraph()->EmptyFrameState(),
t.jsgraph()->EmptyFrameState(), other_load, start);
Node* ret =
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index b22785207d..c8b7734eb2 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
@@ -63,6 +60,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Graph graph;
Typer typer;
Node* context_node;
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* Parameter(Type* t, int32_t index = 0) {
Node* n = graph.NewNode(common.Parameter(index), graph.start());
@@ -156,7 +154,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* Unop(const Operator* op, Node* input) {
// JS unops also require context, effect, and control
if (OperatorProperties::GetFrameStateInputCount(op) > 0) {
- DCHECK(OperatorProperties::GetFrameStateInputCount(op) == 1);
+ CHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(op));
return graph.NewNode(op, input, context(), EmptyFrameState(context()),
start(), control());
} else {
@@ -168,8 +166,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
// TODO(titzer): use EffectPhi after fixing EffectCount
if (OperatorProperties::GetFrameStateInputCount(javascript.ToNumber()) >
0) {
- DCHECK(OperatorProperties::GetFrameStateInputCount(
- javascript.ToNumber()) == 1);
+ CHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(
+ javascript.ToNumber()));
return graph.NewNode(javascript.ToNumber(), node, context(),
EmptyFrameState(context()), node, control());
} else {
@@ -268,7 +266,8 @@ TEST_WITH_STRONG(AddNumber1) {
for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
Node* p0 = R.Parameter(kNumberTypes[i], 0);
Node* p1 = R.Parameter(kNumberTypes[i], 1);
- Node* add = R.Binop(R.javascript.Add(language_mode), p0, p1);
+ Node* add = R.Binop(
+ R.javascript.Add(language_mode, BinaryOperationHints::Any()), p0, p1);
Node* r = R.reduce(add);
R.CheckBinop(IrOpcode::kNumberAdd, r);
@@ -281,11 +280,16 @@ TEST_WITH_STRONG(AddNumber1) {
TEST_WITH_STRONG(NumberBinops) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Add(language_mode), R.simplified.NumberAdd(),
- R.javascript.Subtract(language_mode), R.simplified.NumberSubtract(),
- R.javascript.Multiply(language_mode), R.simplified.NumberMultiply(),
- R.javascript.Divide(language_mode), R.simplified.NumberDivide(),
- R.javascript.Modulus(language_mode), R.simplified.NumberModulus(),
+ R.javascript.Add(language_mode, R.hints),
+ R.simplified.NumberAdd(),
+ R.javascript.Subtract(language_mode, R.hints),
+ R.simplified.NumberSubtract(),
+ R.javascript.Multiply(language_mode, R.hints),
+ R.simplified.NumberMultiply(),
+ R.javascript.Divide(language_mode, R.hints),
+ R.simplified.NumberDivide(),
+ R.javascript.Modulus(language_mode, R.hints),
+ R.simplified.NumberModulus(),
};
for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
@@ -328,11 +332,11 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
explicit JSBitwiseShiftTypedLoweringTester(LanguageMode language_mode)
: JSTypedLoweringTester(), language_mode_(language_mode) {
int i = 0;
- set(i++, javascript.ShiftLeft(language_mode_), true);
+ set(i++, javascript.ShiftLeft(language_mode_, hints), true);
set(i++, simplified.NumberShiftLeft(), false);
- set(i++, javascript.ShiftRight(language_mode_), true);
+ set(i++, javascript.ShiftRight(language_mode_, hints), true);
set(i++, simplified.NumberShiftRight(), false);
- set(i++, javascript.ShiftRightLogical(language_mode_), false);
+ set(i++, javascript.ShiftRightLogical(language_mode_, hints), false);
set(i++, simplified.NumberShiftRightLogical(), false);
}
static const int kNumberOps = 6;
@@ -386,11 +390,11 @@ class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
explicit JSBitwiseTypedLoweringTester(LanguageMode language_mode)
: JSTypedLoweringTester(), language_mode_(language_mode) {
int i = 0;
- set(i++, javascript.BitwiseOr(language_mode_), true);
+ set(i++, javascript.BitwiseOr(language_mode_, hints), true);
set(i++, simplified.NumberBitwiseOr(), true);
- set(i++, javascript.BitwiseXor(language_mode_), true);
+ set(i++, javascript.BitwiseXor(language_mode_, hints), true);
set(i++, simplified.NumberBitwiseXor(), true);
- set(i++, javascript.BitwiseAnd(language_mode_), true);
+ set(i++, javascript.BitwiseAnd(language_mode_, hints), true);
set(i++, simplified.NumberBitwiseAnd(), true);
}
static const int kNumberOps = 6;
@@ -726,28 +730,28 @@ TEST_WITH_STRONG(RemoveToNumberEffects) {
switch (i) {
case 0:
- DCHECK(OperatorProperties::GetFrameStateInputCount(
- R.javascript.ToNumber()) == 1);
+ CHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(
+ R.javascript.ToNumber()));
effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
frame_state, ton, R.start());
break;
case 1:
- DCHECK(OperatorProperties::GetFrameStateInputCount(
- R.javascript.ToNumber()) == 1);
+ CHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(
+ R.javascript.ToNumber()));
effect_use = R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
frame_state, ton, R.start());
break;
case 2:
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
case 3:
- effect_use = R.graph.NewNode(R.javascript.Add(language_mode), ton, ton,
- R.context(), frame_state, frame_state, ton,
- R.start());
+ effect_use = R.graph.NewNode(R.javascript.Add(language_mode, R.hints),
+ ton, ton, R.context(), frame_state,
+ frame_state, ton, R.start());
break;
case 4:
- effect_use = R.graph.NewNode(R.javascript.Add(language_mode), p0, p0,
- R.context(), frame_state, frame_state, ton,
- R.start());
+ effect_use = R.graph.NewNode(R.javascript.Add(language_mode, R.hints),
+ p0, p0, R.context(), frame_state,
+ frame_state, ton, R.start());
break;
case 5:
effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
@@ -910,13 +914,20 @@ TEST_WITH_STRONG(RemovePureNumberBinopEffects) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Equal(), R.simplified.NumberEqual(),
- R.javascript.Add(language_mode), R.simplified.NumberAdd(),
- R.javascript.Subtract(language_mode), R.simplified.NumberSubtract(),
- R.javascript.Multiply(language_mode), R.simplified.NumberMultiply(),
- R.javascript.Divide(language_mode), R.simplified.NumberDivide(),
- R.javascript.Modulus(language_mode), R.simplified.NumberModulus(),
- R.javascript.LessThan(language_mode), R.simplified.NumberLessThan(),
+ R.javascript.Equal(),
+ R.simplified.NumberEqual(),
+ R.javascript.Add(language_mode, R.hints),
+ R.simplified.NumberAdd(),
+ R.javascript.Subtract(language_mode, R.hints),
+ R.simplified.NumberSubtract(),
+ R.javascript.Multiply(language_mode, R.hints),
+ R.simplified.NumberMultiply(),
+ R.javascript.Divide(language_mode, R.hints),
+ R.simplified.NumberDivide(),
+ R.javascript.Modulus(language_mode, R.hints),
+ R.simplified.NumberModulus(),
+ R.javascript.LessThan(language_mode),
+ R.simplified.NumberLessThan(),
R.javascript.LessThanOrEqual(language_mode),
R.simplified.NumberLessThanOrEqual(),
};
@@ -939,11 +950,11 @@ TEST(OrderNumberBinopEffects1) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Subtract(LanguageMode::SLOPPY),
+ R.javascript.Subtract(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberSubtract(),
- R.javascript.Multiply(LanguageMode::SLOPPY),
+ R.javascript.Multiply(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberMultiply(),
- R.javascript.Divide(LanguageMode::SLOPPY),
+ R.javascript.Divide(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberDivide(),
};
@@ -967,13 +978,13 @@ TEST(OrderNumberBinopEffects2) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Add(LanguageMode::SLOPPY),
+ R.javascript.Add(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberAdd(),
- R.javascript.Subtract(LanguageMode::SLOPPY),
+ R.javascript.Subtract(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberSubtract(),
- R.javascript.Multiply(LanguageMode::SLOPPY),
+ R.javascript.Multiply(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberMultiply(),
- R.javascript.Divide(LanguageMode::SLOPPY),
+ R.javascript.Divide(LanguageMode::SLOPPY, R.hints),
R.simplified.NumberDivide(),
};
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 00baca2c5b..8c02012e0a 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/jump-threading.h"
@@ -60,16 +57,18 @@ class TestCode : public HandleAndZoneScope {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
- AddGapMove(index,
- AllocatedOperand(LocationOperand::REGISTER, kRepWord32, 13),
- AllocatedOperand(LocationOperand::REGISTER, kRepWord32, 13));
+ AddGapMove(index, AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, 13),
+ AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, 13));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ConstantOperand(11),
- AllocatedOperand(LocationOperand::REGISTER, kRepWord32, 11));
+ AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, 11));
}
void Other() {
Start();
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 2aaf8c766d..939b144731 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -2,12 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/code-stubs.h"
#include "src/compiler.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/zone.h"
#include "src/compiler/common-operator.h"
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 3862a647de..68bfc2858f 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
@@ -114,7 +111,8 @@ class LoopFinderTester : HandleAndZoneScope {
}
const Operator* op(int count, bool effect) {
- return effect ? common.EffectPhi(count) : common.Phi(kMachAnyTagged, count);
+ return effect ? common.EffectPhi(count)
+ : common.Phi(MachineRepresentation::kTagged, count);
}
Node* Return(Node* val, Node* effect, Node* control) {
@@ -269,8 +267,8 @@ TEST(LaLoop1phi) {
// One loop with a simple phi.
LoopFinderTester t;
While w(t, t.p0);
- Node* phi =
- t.graph.NewNode(t.common.Phi(kMachAnyTagged, 2), t.zero, t.one, w.loop);
+ Node* phi = t.graph.NewNode(t.common.Phi(MachineRepresentation::kTagged, 2),
+ t.zero, t.one, w.loop);
t.Return(phi, t.start, w.exit);
Node* chain[] = {w.loop};
@@ -478,7 +476,7 @@ TEST(LaNestedLoop1x) {
While w2(t, t.p0);
w2.nest(w1);
- const Operator* op = t.common.Phi(kMachInt32, 2);
+ const Operator* op = t.common.Phi(MachineRepresentation::kWord32, 2);
Node* p1a = t.graph.NewNode(op, t.p0, t.p0, w1.loop);
Node* p1b = t.graph.NewNode(op, t.p0, t.p0, w1.loop);
Node* p2a = t.graph.NewNode(op, p1a, t.p0, w2.loop);
@@ -691,8 +689,8 @@ TEST(LaEdgeMatrix1) {
Node* p3 = t.jsgraph.Int32Constant(33);
Node* loop = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
- Node* phi =
- t.graph.NewNode(t.common.Phi(kMachInt32, 2), t.one, p1, loop);
+ Node* phi = t.graph.NewNode(
+ t.common.Phi(MachineRepresentation::kWord32, 2), t.one, p1, loop);
Node* cond = t.graph.NewNode(&kIntAdd, phi, p2);
Node* branch = t.graph.NewNode(t.common.Branch(), cond, loop);
Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
@@ -716,7 +714,7 @@ TEST(LaEdgeMatrix1) {
void RunEdgeMatrix2(int i) {
- DCHECK(i >= 0 && i < 5);
+ CHECK(i >= 0 && i < 5);
for (int j = 0; j < 5; j++) {
for (int k = 0; k < 5; k++) {
LoopFinderTester t;
@@ -727,8 +725,8 @@ void RunEdgeMatrix2(int i) {
// outer loop.
Node* loop1 = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
- Node* phi1 =
- t.graph.NewNode(t.common.Phi(kMachInt32, 2), t.one, p1, loop1);
+ Node* phi1 = t.graph.NewNode(
+ t.common.Phi(MachineRepresentation::kWord32, 2), t.one, p1, loop1);
Node* cond1 = t.graph.NewNode(&kIntAdd, phi1, t.one);
Node* branch1 = t.graph.NewNode(t.common.Branch(), cond1, loop1);
Node* if_true1 = t.graph.NewNode(t.common.IfTrue(), branch1);
@@ -736,8 +734,8 @@ void RunEdgeMatrix2(int i) {
// inner loop.
Node* loop2 = t.graph.NewNode(t.common.Loop(2), if_true1, t.start);
- Node* phi2 =
- t.graph.NewNode(t.common.Phi(kMachInt32, 2), t.one, p2, loop2);
+ Node* phi2 = t.graph.NewNode(
+ t.common.Phi(MachineRepresentation::kWord32, 2), t.one, p2, loop2);
Node* cond2 = t.graph.NewNode(&kIntAdd, phi2, p3);
Node* branch2 = t.graph.NewNode(t.common.Branch(), cond2, loop2);
Node* if_true2 = t.graph.NewNode(t.common.IfTrue(), branch2);
@@ -803,7 +801,8 @@ void RunEdgeMatrix3(int c1a, int c1b, int c1c, // line break
// L1 depth = 0
Node* loop1 = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
- Node* phi1 = t.graph.NewNode(t.common.Phi(kMachInt32, 2), p1a, p1c, loop1);
+ Node* phi1 = t.graph.NewNode(t.common.Phi(MachineRepresentation::kWord32, 2),
+ p1a, p1c, loop1);
Node* cond1 = t.graph.NewNode(&kIntAdd, phi1, p1b);
Node* branch1 = t.graph.NewNode(t.common.Branch(), cond1, loop1);
Node* if_true1 = t.graph.NewNode(t.common.IfTrue(), branch1);
@@ -811,7 +810,8 @@ void RunEdgeMatrix3(int c1a, int c1b, int c1c, // line break
// L2 depth = 1
Node* loop2 = t.graph.NewNode(t.common.Loop(2), if_true1, t.start);
- Node* phi2 = t.graph.NewNode(t.common.Phi(kMachInt32, 2), p2a, p2c, loop2);
+ Node* phi2 = t.graph.NewNode(t.common.Phi(MachineRepresentation::kWord32, 2),
+ p2a, p2c, loop2);
Node* cond2 = t.graph.NewNode(&kIntAdd, phi2, p2b);
Node* branch2 = t.graph.NewNode(t.common.Branch(), cond2, loop2);
Node* if_true2 = t.graph.NewNode(t.common.IfTrue(), branch2);
@@ -819,7 +819,8 @@ void RunEdgeMatrix3(int c1a, int c1b, int c1c, // line break
// L3 depth = 2
Node* loop3 = t.graph.NewNode(t.common.Loop(2), if_true2, t.start);
- Node* phi3 = t.graph.NewNode(t.common.Phi(kMachInt32, 2), p3a, p3c, loop3);
+ Node* phi3 = t.graph.NewNode(t.common.Phi(MachineRepresentation::kWord32, 2),
+ p3a, p3c, loop3);
Node* cond3 = t.graph.NewNode(&kIntAdd, phi3, p3b);
Node* branch3 = t.graph.NewNode(t.common.Branch(), cond3, loop3);
Node* if_true3 = t.graph.NewNode(t.common.IfTrue(), branch3);
@@ -927,7 +928,8 @@ static void RunManyChainedLoops_i(int count) {
// Build loops.
for (int i = 0; i < count; i++) {
Node* loop = t.graph.NewNode(t.common.Loop(2), last, t.start);
- Node* phi = t.graph.NewNode(t.common.Phi(kMachInt32, 2), k11, k12, loop);
+ Node* phi = t.graph.NewNode(t.common.Phi(MachineRepresentation::kWord32, 2),
+ k11, k12, loop);
Node* branch = t.graph.NewNode(t.common.Branch(), phi, loop);
Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
@@ -962,7 +964,8 @@ static void RunManyNestedLoops_i(int count) {
// Build loops.
for (int i = 0; i < count; i++) {
Node* loop = t.graph.NewNode(t.common.Loop(2), entry, t.start);
- Node* phi = t.graph.NewNode(t.common.Phi(kMachInt32, 2), k11, k12, loop);
+ Node* phi = t.graph.NewNode(t.common.Phi(MachineRepresentation::kWord32, 2),
+ k11, k12, loop);
Node* branch = t.graph.NewNode(t.common.Branch(), phi, loop);
Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index e1af1626e1..69f5e157ad 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -2,13 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
+#include "src/ast/scopes.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index ca49369592..86888e96f5 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
#include "src/compiler/js-graph.h"
@@ -59,7 +56,7 @@ class ReducerTester : public HandleAndZoneScope {
: isolate(main_isolate()),
binop(NULL),
unop(NULL),
- machine(main_zone(), kMachPtr, flags),
+ machine(main_zone(), MachineType::PointerRepresentation(), flags),
common(main_zone()),
graph(main_zone()),
javascript(main_zone()),
@@ -362,7 +359,7 @@ TEST(ReduceWord32Sar) {
static void CheckJsShift(ReducerTester* R) {
- DCHECK(R->machine.Word32ShiftIsSafe());
+ CHECK(R->machine.Word32ShiftIsSafe());
Node* x = R->Parameter(0);
Node* y = R->Parameter(1);
@@ -706,8 +703,8 @@ TEST(ReduceLoadStore) {
Node* base = R.Constant<int32_t>(11);
Node* index = R.Constant<int32_t>(4);
- Node* load = R.graph.NewNode(R.machine.Load(kMachInt32), base, index,
- R.graph.start(), R.graph.start());
+ Node* load = R.graph.NewNode(R.machine.Load(MachineType::Int32()), base,
+ index, R.graph.start(), R.graph.start());
{
MachineOperatorReducer reducer(&R.jsgraph);
@@ -716,9 +713,10 @@ TEST(ReduceLoadStore) {
}
{
- Node* store = R.graph.NewNode(
- R.machine.Store(StoreRepresentation(kMachInt32, kNoWriteBarrier)), base,
- index, load, load, R.graph.start());
+ Node* store =
+ R.graph.NewNode(R.machine.Store(StoreRepresentation(
+ MachineRepresentation::kWord32, kNoWriteBarrier)),
+ base, index, load, load, R.graph.start());
MachineOperatorReducer reducer(&R.jsgraph);
Reduction reduction = reducer.Reduce(store);
CHECK(!reduction.Changed()); // stores should not be reduced.
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 00fcc5f814..7c08238411 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <cmath>
#include <functional>
#include <limits>
@@ -34,17 +31,17 @@ CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
// Add return location(s).
- DCHECK(return_count <= config->num_allocatable_general_registers());
+ CHECK(return_count <= config->num_allocatable_general_registers());
for (int i = 0; i < return_count; i++) {
- msig.AddReturn(compiler::kMachInt32);
+ msig.AddReturn(MachineType::Int32());
locations.AddReturn(
LinkageLocation::ForRegister(config->allocatable_general_codes()[i]));
}
// Add register and/or stack parameter(s).
- DCHECK(param_count <= config->num_allocatable_general_registers());
+ CHECK(param_count <= config->num_allocatable_general_registers());
for (int i = 0; i < param_count; i++) {
- msig.AddParam(compiler::kMachInt32);
+ msig.AddParam(MachineType::Int32());
locations.AddParam(
LinkageLocation::ForRegister(config->allocatable_general_codes()[i]));
}
@@ -53,7 +50,7 @@ CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
const RegList kCalleeSaveFPRegisters = 0;
// The target for WASM calls is always a code object.
- MachineType target_type = compiler::kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
@@ -77,7 +74,7 @@ TEST(ReturnThreeValues) {
HandleAndZoneScope handles;
RawMachineAssembler m(handles.main_isolate(),
new (handles.main_zone()) Graph(handles.main_zone()),
- desc, kMachPtr,
+ desc, MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags());
Node* p0 = m.Parameter(0);
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index fcd6b74c4b..de1c2c02a2 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <functional>
#include "src/compiler/graph.h"
diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc
index 7d2dfdd469..eecf46a054 100644
--- a/deps/v8/test/cctest/compiler/test-operator.cc
+++ b/deps/v8/test/cctest/compiler/test-operator.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <sstream>
#include "src/compiler/operator.h"
diff --git a/deps/v8/test/cctest/compiler/test-osr.cc b/deps/v8/test/cctest/compiler/test-osr.cc
index 7cc8fa6338..f0640c2e0a 100644
--- a/deps/v8/test/cctest/compiler/test-osr.cc
+++ b/deps/v8/test/cctest/compiler/test-osr.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/codegen.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
@@ -94,7 +91,8 @@ class OsrDeconstructorTester : public HandleAndZoneScope {
if (count > 3) inputs[3] = back2;
if (count > 4) inputs[4] = back3;
inputs[count] = loop;
- return graph.NewNode(common.Phi(kMachAnyTagged, count), count + 1, inputs);
+ return graph.NewNode(common.Phi(MachineRepresentation::kTagged, count),
+ count + 1, inputs);
}
Node* NewLoop(bool is_osr, int num_backedges, Node* entry = nullptr) {
@@ -317,9 +315,11 @@ struct While {
Node* Phi(Node* i1, Node* i2, Node* i3) {
if (loop->InputCount() == 2) {
- return t.graph.NewNode(t.common.Phi(kMachAnyTagged, 2), i1, i2, loop);
+ return t.graph.NewNode(t.common.Phi(MachineRepresentation::kTagged, 2),
+ i1, i2, loop);
} else {
- return t.graph.NewNode(t.common.Phi(kMachAnyTagged, 3), i1, i2, i3, loop);
+ return t.graph.NewNode(t.common.Phi(MachineRepresentation::kTagged, 3),
+ i1, i2, i3, loop);
}
}
};
@@ -476,7 +476,8 @@ Node* MakeCounter(JSGraph* jsgraph, Node* start, Node* loop) {
tmp_inputs.push_back(loop);
Node* phi = jsgraph->graph()->NewNode(
- jsgraph->common()->Phi(kMachInt32, count), count + 1, &tmp_inputs[0]);
+ jsgraph->common()->Phi(MachineRepresentation::kWord32, count), count + 1,
+ &tmp_inputs[0]);
Node* inc = jsgraph->graph()->NewNode(&kIntAdd, phi, jsgraph->OneConstant());
for (int i = 1; i < count; i++) {
@@ -496,8 +497,9 @@ TEST(Deconstruct_osr_nested3) {
// middle loop.
Node* loop1 = T.graph.NewNode(T.common.Loop(1), loop0.if_true);
- Node* loop1_phi = T.graph.NewNode(T.common.Phi(kMachAnyTagged, 2), loop0_cntr,
- loop0_cntr, loop1);
+ Node* loop1_phi =
+ T.graph.NewNode(T.common.Phi(MachineRepresentation::kTagged, 2),
+ loop0_cntr, loop0_cntr, loop1);
// innermost (OSR) loop.
While loop2(T, T.p0, true, 1);
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
index c00fa6a331..f4ffd02296 100644
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ b/deps/v8/test/cctest/compiler/test-pipeline.cc
@@ -2,13 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/compiler.h"
#include "src/compiler/pipeline.h"
#include "src/handles.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 616c3736e1..7353e167d9 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <limits>
#include "test/cctest/cctest.h"
@@ -64,7 +61,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckFloat32Constant(Node* n, float expected) {
CHECK_EQ(IrOpcode::kFloat32Constant, n->opcode());
float fval = OpParameter<float>(n->op());
- CHECK_EQ(expected, fval);
+ CheckDoubleEq(expected, fval);
}
void CheckHeapConstant(Node* n, HeapObject* expected) {
@@ -81,41 +78,50 @@ class RepresentationChangerTester : public HandleAndZoneScope,
}
Node* Parameter(int index = 0) {
- return graph()->NewNode(common()->Parameter(index), graph()->start());
+ Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+ NodeProperties::SetType(n, Type::Any());
+ return n;
}
- void CheckTypeError(MachineTypeUnion from, MachineTypeUnion to) {
+ void CheckTypeError(MachineRepresentation from, Type* from_type,
+ MachineRepresentation to) {
changer()->testing_type_errors_ = true;
changer()->type_error_ = false;
Node* n = Parameter(0);
- Node* c = changer()->GetRepresentationFor(n, from, to);
+ Node* c = changer()->GetRepresentationFor(n, from, from_type, to);
CHECK(changer()->type_error_);
CHECK_EQ(n, c);
}
- void CheckNop(MachineTypeUnion from, MachineTypeUnion to) {
+ void CheckNop(MachineRepresentation from, Type* from_type,
+ MachineRepresentation to) {
Node* n = Parameter(0);
- Node* c = changer()->GetRepresentationFor(n, from, to);
+ Node* c = changer()->GetRepresentationFor(n, from, from_type, to);
CHECK_EQ(n, c);
}
};
-static const MachineType all_reps[] = {kRepBit, kRepWord32, kRepWord64,
- kRepFloat32, kRepFloat64, kRepTagged};
+const MachineType kMachineTypes[] = {
+ MachineType::Float32(), MachineType::Float64(), MachineType::Int8(),
+ MachineType::Uint8(), MachineType::Int16(), MachineType::Uint16(),
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Int64(),
+ MachineType::Uint64(), MachineType::AnyTagged()};
TEST(BoolToBit_constant) {
RepresentationChangerTester r;
Node* true_node = r.jsgraph()->TrueConstant();
- Node* true_bit =
- r.changer()->GetRepresentationFor(true_node, kRepTagged, kRepBit);
+ Node* true_bit = r.changer()->GetRepresentationFor(
+ true_node, MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kBit);
r.CheckInt32Constant(true_bit, 1);
Node* false_node = r.jsgraph()->FalseConstant();
- Node* false_bit =
- r.changer()->GetRepresentationFor(false_node, kRepTagged, kRepBit);
+ Node* false_bit = r.changer()->GetRepresentationFor(
+ false_node, MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kBit);
r.CheckInt32Constant(false_bit, 0);
}
@@ -125,7 +131,9 @@ TEST(BitToBool_constant) {
for (int i = -5; i < 5; i++) {
Node* node = r.jsgraph()->Int32Constant(i);
- Node* val = r.changer()->GetRepresentationFor(node, kRepBit, kRepTagged);
+ Node* val = r.changer()->GetRepresentationFor(
+ node, MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kTagged);
r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
: r.isolate()->heap()->true_value());
}
@@ -138,7 +146,9 @@ TEST(ToTagged_constant) {
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepTagged);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kTagged);
r.CheckNumberConstant(c, *i);
}
}
@@ -146,7 +156,9 @@ TEST(ToTagged_constant) {
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepTagged);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kTagged);
r.CheckNumberConstant(c, *i);
}
}
@@ -154,7 +166,9 @@ TEST(ToTagged_constant) {
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32, kRepTagged);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kTagged);
r.CheckNumberConstant(c, *i);
}
}
@@ -162,8 +176,9 @@ TEST(ToTagged_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
- kRepTagged);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kTagged);
r.CheckNumberConstant(c, *i);
}
}
@@ -171,8 +186,9 @@ TEST(ToTagged_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
- kRepTagged);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(),
+ MachineRepresentation::kTagged);
r.CheckNumberConstant(c, *i);
}
}
@@ -185,7 +201,9 @@ TEST(ToFloat64_constant) {
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepFloat64);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kFloat64);
CHECK_EQ(n, c);
}
}
@@ -193,7 +211,9 @@ TEST(ToFloat64_constant) {
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepTagged, kRepFloat64);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kFloat64);
r.CheckFloat64Constant(c, *i);
}
}
@@ -201,7 +221,9 @@ TEST(ToFloat64_constant) {
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32, kRepFloat64);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kFloat64);
r.CheckFloat64Constant(c, *i);
}
}
@@ -209,8 +231,9 @@ TEST(ToFloat64_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
- kRepFloat64);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kFloat64);
r.CheckFloat64Constant(c, *i);
}
}
@@ -218,8 +241,9 @@ TEST(ToFloat64_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
- kRepFloat64);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(),
+ MachineRepresentation::kFloat64);
r.CheckFloat64Constant(c, *i);
}
}
@@ -240,7 +264,9 @@ TEST(ToFloat32_constant) {
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32, kRepFloat32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kFloat32);
CHECK_EQ(n, c);
}
}
@@ -248,7 +274,9 @@ TEST(ToFloat32_constant) {
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepTagged, kRepFloat32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kFloat32);
r.CheckFloat32Constant(c, *i);
}
}
@@ -256,7 +284,9 @@ TEST(ToFloat32_constant) {
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepFloat32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kFloat32);
r.CheckFloat32Constant(c, *i);
}
}
@@ -265,8 +295,9 @@ TEST(ToFloat32_constant) {
FOR_INT32_INPUTS(i) {
if (!IsFloat32Int32(*i)) continue;
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
- kRepFloat32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kFloat32);
r.CheckFloat32Constant(c, static_cast<float>(*i));
}
}
@@ -275,8 +306,9 @@ TEST(ToFloat32_constant) {
FOR_UINT32_INPUTS(i) {
if (!IsFloat32Uint32(*i)) continue;
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
- kRepFloat32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(),
+ MachineRepresentation::kFloat32);
r.CheckFloat32Constant(c, static_cast<float>(*i));
}
}
@@ -289,8 +321,9 @@ TEST(ToInt32_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kWord32);
r.CheckInt32Constant(c, *i);
}
}
@@ -299,8 +332,9 @@ TEST(ToInt32_constant) {
FOR_INT32_INPUTS(i) {
if (!IsFloat32Int32(*i)) continue;
Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32 | kTypeInt32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::Signed32(),
+ MachineRepresentation::kWord32);
r.CheckInt32Constant(c, *i);
}
}
@@ -308,8 +342,9 @@ TEST(ToInt32_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64 | kTypeInt32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::Signed32(),
+ MachineRepresentation::kWord32);
r.CheckInt32Constant(c, *i);
}
}
@@ -317,8 +352,9 @@ TEST(ToInt32_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepTagged | kTypeInt32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::Signed32(),
+ MachineRepresentation::kWord32);
r.CheckInt32Constant(c, *i);
}
}
@@ -331,8 +367,9 @@ TEST(ToUint32_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(),
+ MachineRepresentation::kWord32);
r.CheckUint32Constant(c, *i);
}
}
@@ -341,8 +378,9 @@ TEST(ToUint32_constant) {
FOR_UINT32_INPUTS(i) {
if (!IsFloat32Uint32(*i)) continue;
Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat32 | kTypeUint32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::Unsigned32(),
+ MachineRepresentation::kWord32);
r.CheckUint32Constant(c, *i);
}
}
@@ -350,8 +388,9 @@ TEST(ToUint32_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64 | kTypeUint32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::Unsigned32(),
+ MachineRepresentation::kWord32);
r.CheckUint32Constant(c, *i);
}
}
@@ -359,20 +398,21 @@ TEST(ToUint32_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(static_cast<double>(*i));
- Node* c = r.changer()->GetRepresentationFor(n, kRepTagged | kTypeUint32,
- kRepWord32);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::Unsigned32(),
+ MachineRepresentation::kWord32);
r.CheckUint32Constant(c, *i);
}
}
}
-static void CheckChange(IrOpcode::Value expected, MachineTypeUnion from,
- MachineTypeUnion to) {
+static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
+ Type* from_type, MachineRepresentation to) {
RepresentationChangerTester r;
Node* n = r.Parameter();
- Node* c = r.changer()->GetRepresentationFor(n, from, to);
+ Node* c = r.changer()->GetRepresentationFor(n, from, from_type, to);
CHECK_NE(c, n);
CHECK_EQ(expected, c->opcode());
@@ -381,12 +421,13 @@ static void CheckChange(IrOpcode::Value expected, MachineTypeUnion from,
static void CheckTwoChanges(IrOpcode::Value expected2,
- IrOpcode::Value expected1, MachineTypeUnion from,
- MachineTypeUnion to) {
+ IrOpcode::Value expected1,
+ MachineRepresentation from, Type* from_type,
+ MachineRepresentation to) {
RepresentationChangerTester r;
Node* n = r.Parameter();
- Node* c1 = r.changer()->GetRepresentationFor(n, from, to);
+ Node* c1 = r.changer()->GetRepresentationFor(n, from, from_type, to);
CHECK_NE(c1, n);
CHECK_EQ(expected1, c1->opcode());
@@ -398,70 +439,92 @@ static void CheckTwoChanges(IrOpcode::Value expected2,
TEST(SingleChanges) {
- CheckChange(IrOpcode::kChangeBoolToBit, kRepTagged, kRepBit);
- CheckChange(IrOpcode::kChangeBitToBool, kRepBit, kRepTagged);
-
- CheckChange(IrOpcode::kChangeInt32ToTagged, kRepWord32 | kTypeInt32,
- kRepTagged);
- CheckChange(IrOpcode::kChangeUint32ToTagged, kRepWord32 | kTypeUint32,
- kRepTagged);
- CheckChange(IrOpcode::kChangeFloat64ToTagged, kRepFloat64, kRepTagged);
-
- CheckChange(IrOpcode::kChangeTaggedToInt32, kRepTagged | kTypeInt32,
- kRepWord32);
- CheckChange(IrOpcode::kChangeTaggedToUint32, kRepTagged | kTypeUint32,
- kRepWord32);
- CheckChange(IrOpcode::kChangeTaggedToFloat64, kRepTagged, kRepFloat64);
+ CheckChange(IrOpcode::kChangeBoolToBit, MachineRepresentation::kTagged,
+ Type::None(), MachineRepresentation::kBit);
+ CheckChange(IrOpcode::kChangeBitToBool, MachineRepresentation::kBit,
+ Type::None(), MachineRepresentation::kTagged);
+
+ CheckChange(IrOpcode::kChangeInt32ToTagged, MachineRepresentation::kWord32,
+ Type::Signed32(), MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeUint32ToTagged, MachineRepresentation::kWord32,
+ Type::Unsigned32(), MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeFloat64ToTagged, MachineRepresentation::kFloat64,
+ Type::None(), MachineRepresentation::kTagged);
+
+ CheckChange(IrOpcode::kChangeTaggedToInt32, MachineRepresentation::kTagged,
+ Type::Signed32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kChangeTaggedToUint32, MachineRepresentation::kTagged,
+ Type::Unsigned32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
+ Type::None(), MachineRepresentation::kFloat64);
// Int32,Uint32 <-> Float64 are actually machine conversions.
- CheckChange(IrOpcode::kChangeInt32ToFloat64, kRepWord32 | kTypeInt32,
- kRepFloat64);
- CheckChange(IrOpcode::kChangeUint32ToFloat64, kRepWord32 | kTypeUint32,
- kRepFloat64);
- CheckChange(IrOpcode::kChangeFloat64ToInt32, kRepFloat64 | kTypeInt32,
- kRepWord32);
- CheckChange(IrOpcode::kChangeFloat64ToUint32, kRepFloat64 | kTypeUint32,
- kRepWord32);
-
- CheckChange(IrOpcode::kTruncateFloat64ToFloat32, kRepFloat64, kRepFloat32);
+ CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
+ Type::Signed32(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeUint32ToFloat64, MachineRepresentation::kWord32,
+ Type::Unsigned32(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt32, MachineRepresentation::kFloat64,
+ Type::Signed32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kChangeFloat64ToUint32, MachineRepresentation::kFloat64,
+ Type::Unsigned32(), MachineRepresentation::kWord32);
+
+ CheckChange(IrOpcode::kTruncateFloat64ToFloat32,
+ MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kFloat32);
// Int32,Uint32 <-> Float32 require two changes.
CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
- IrOpcode::kTruncateFloat64ToFloat32, kRepWord32 | kTypeInt32,
- kRepFloat32);
+ IrOpcode::kTruncateFloat64ToFloat32,
+ MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kFloat32);
CheckTwoChanges(IrOpcode::kChangeUint32ToFloat64,
- IrOpcode::kTruncateFloat64ToFloat32, kRepWord32 | kTypeUint32,
- kRepFloat32);
+ IrOpcode::kTruncateFloat64ToFloat32,
+ MachineRepresentation::kWord32, Type::Unsigned32(),
+ MachineRepresentation::kFloat32);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
- IrOpcode::kChangeFloat64ToInt32, kRepFloat32 | kTypeInt32,
- kRepWord32);
+ IrOpcode::kChangeFloat64ToInt32,
+ MachineRepresentation::kFloat32, Type::Signed32(),
+ MachineRepresentation::kWord32);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
- IrOpcode::kChangeFloat64ToUint32, kRepFloat32 | kTypeUint32,
- kRepWord32);
+ IrOpcode::kChangeFloat64ToUint32,
+ MachineRepresentation::kFloat32, Type::Unsigned32(),
+ MachineRepresentation::kWord32);
// Float32 <-> Tagged require two changes.
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
- IrOpcode::kChangeFloat64ToTagged, kRepFloat32, kRepTagged);
+ IrOpcode::kChangeFloat64ToTagged,
+ MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kTagged);
CheckTwoChanges(IrOpcode::kChangeTaggedToFloat64,
- IrOpcode::kTruncateFloat64ToFloat32, kRepTagged, kRepFloat32);
+ IrOpcode::kTruncateFloat64ToFloat32,
+ MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kFloat32);
}
TEST(SignednessInWord32) {
RepresentationChangerTester r;
- // TODO(titzer): assume that uses of a word32 without a sign mean kTypeInt32.
- CheckChange(IrOpcode::kChangeTaggedToInt32, kRepTagged,
- kRepWord32 | kTypeInt32);
- CheckChange(IrOpcode::kChangeTaggedToUint32, kRepTagged,
- kRepWord32 | kTypeUint32);
- CheckChange(IrOpcode::kChangeInt32ToFloat64, kRepWord32, kRepFloat64);
- CheckChange(IrOpcode::kChangeFloat64ToInt32, kRepFloat64, kRepWord32);
+ CheckChange(IrOpcode::kChangeTaggedToInt32, MachineRepresentation::kTagged,
+ Type::Signed32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kChangeTaggedToUint32, MachineRepresentation::kTagged,
+ Type::Unsigned32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
+ Type::None(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt32, MachineRepresentation::kFloat64,
+ Type::Signed32(), MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kTruncateFloat64ToInt32,
+ MachineRepresentation::kFloat64, Type::Number(),
+ MachineRepresentation::kWord32);
CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
- IrOpcode::kTruncateFloat64ToFloat32, kRepWord32, kRepFloat32);
+ IrOpcode::kTruncateFloat64ToFloat32,
+ MachineRepresentation::kWord32, Type::None(),
+ MachineRepresentation::kFloat32);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
- IrOpcode::kChangeFloat64ToInt32, kRepFloat32, kRepWord32);
+ IrOpcode::kTruncateFloat64ToInt32,
+ MachineRepresentation::kFloat32, Type::Number(),
+ MachineRepresentation::kWord32);
}
@@ -469,33 +532,48 @@ TEST(Nops) {
RepresentationChangerTester r;
// X -> X is always a nop for any single representation X.
- for (size_t i = 0; i < arraysize(all_reps); i++) {
- r.CheckNop(all_reps[i], all_reps[i]);
+ for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
+ r.CheckNop(kMachineTypes[i].representation(), Type::None(),
+ kMachineTypes[i].representation());
}
// 32-bit floats.
- r.CheckNop(kRepFloat32, kRepFloat32);
- r.CheckNop(kRepFloat32 | kTypeNumber, kRepFloat32);
- r.CheckNop(kRepFloat32, kRepFloat32 | kTypeNumber);
+ r.CheckNop(MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kFloat32);
+ r.CheckNop(MachineRepresentation::kFloat32, Type::Number(),
+ MachineRepresentation::kFloat32);
// 32-bit words can be used as smaller word sizes and vice versa, because
// loads from memory implicitly sign or zero extend the value to the
// full machine word size, and stores implicitly truncate.
- r.CheckNop(kRepWord32, kRepWord8);
- r.CheckNop(kRepWord32, kRepWord16);
- r.CheckNop(kRepWord32, kRepWord32);
- r.CheckNop(kRepWord8, kRepWord32);
- r.CheckNop(kRepWord16, kRepWord32);
+ r.CheckNop(MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kWord8);
+ r.CheckNop(MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kWord16);
+ r.CheckNop(MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kWord32);
+ r.CheckNop(MachineRepresentation::kWord8, Type::Signed32(),
+ MachineRepresentation::kWord32);
+ r.CheckNop(MachineRepresentation::kWord16, Type::Signed32(),
+ MachineRepresentation::kWord32);
// kRepBit (result of comparison) is implicitly a wordish thing.
- r.CheckNop(kRepBit, kRepWord8);
- r.CheckNop(kRepBit | kTypeBool, kRepWord8);
- r.CheckNop(kRepBit, kRepWord16);
- r.CheckNop(kRepBit | kTypeBool, kRepWord16);
- r.CheckNop(kRepBit, kRepWord32);
- r.CheckNop(kRepBit | kTypeBool, kRepWord32);
- r.CheckNop(kRepBit, kRepWord64);
- r.CheckNop(kRepBit | kTypeBool, kRepWord64);
+ r.CheckNop(MachineRepresentation::kBit, Type::None(),
+ MachineRepresentation::kWord8);
+ r.CheckNop(MachineRepresentation::kBit, Type::None(),
+ MachineRepresentation::kWord16);
+ r.CheckNop(MachineRepresentation::kBit, Type::None(),
+ MachineRepresentation::kWord32);
+ r.CheckNop(MachineRepresentation::kBit, Type::None(),
+ MachineRepresentation::kWord64);
+ r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kWord8);
+ r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kWord16);
+ r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kWord32);
+ r.CheckNop(MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kWord64);
}
@@ -503,49 +581,48 @@ TEST(TypeErrors) {
RepresentationChangerTester r;
// Wordish cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(kRepWord8, kRepBit);
- r.CheckTypeError(kRepWord8, kRepBit | kTypeBool);
- r.CheckTypeError(kRepWord16, kRepBit);
- r.CheckTypeError(kRepWord16, kRepBit | kTypeBool);
- r.CheckTypeError(kRepWord32, kRepBit);
- r.CheckTypeError(kRepWord32, kRepBit | kTypeBool);
- r.CheckTypeError(kRepWord64, kRepBit);
- r.CheckTypeError(kRepWord64, kRepBit | kTypeBool);
+ r.CheckTypeError(MachineRepresentation::kWord8, Type::None(),
+ MachineRepresentation::kBit);
+ r.CheckTypeError(MachineRepresentation::kWord16, Type::None(),
+ MachineRepresentation::kBit);
+ r.CheckTypeError(MachineRepresentation::kWord32, Type::None(),
+ MachineRepresentation::kBit);
+ r.CheckTypeError(MachineRepresentation::kWord64, Type::None(),
+ MachineRepresentation::kBit);
// Floats cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(kRepFloat64, kRepBit);
- r.CheckTypeError(kRepFloat64, kRepBit | kTypeBool);
- r.CheckTypeError(kRepBit, kRepFloat64);
- r.CheckTypeError(kRepBit | kTypeBool, kRepFloat64);
+ r.CheckTypeError(MachineRepresentation::kFloat64, Type::None(),
+ MachineRepresentation::kBit);
+ r.CheckTypeError(MachineRepresentation::kBit, Type::None(),
+ MachineRepresentation::kFloat64);
+ r.CheckTypeError(MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kFloat64);
// Floats cannot be implicitly converted to/from comparison conditions.
- r.CheckTypeError(kRepFloat32, kRepBit);
- r.CheckTypeError(kRepFloat32, kRepBit | kTypeBool);
- r.CheckTypeError(kRepBit, kRepFloat32);
- r.CheckTypeError(kRepBit | kTypeBool, kRepFloat32);
+ r.CheckTypeError(MachineRepresentation::kFloat32, Type::None(),
+ MachineRepresentation::kBit);
+ r.CheckTypeError(MachineRepresentation::kBit, Type::None(),
+ MachineRepresentation::kFloat32);
+ r.CheckTypeError(MachineRepresentation::kBit, Type::Boolean(),
+ MachineRepresentation::kFloat32);
// Word64 is internal and shouldn't be implicitly converted.
- r.CheckTypeError(kRepWord64, kRepTagged | kTypeBool);
- r.CheckTypeError(kRepWord64, kRepTagged);
- r.CheckTypeError(kRepWord64, kRepTagged | kTypeBool);
- r.CheckTypeError(kRepTagged, kRepWord64);
- r.CheckTypeError(kRepTagged | kTypeBool, kRepWord64);
+ r.CheckTypeError(MachineRepresentation::kWord64, Type::None(),
+ MachineRepresentation::kTagged);
+ r.CheckTypeError(MachineRepresentation::kTagged, Type::None(),
+ MachineRepresentation::kWord64);
+ r.CheckTypeError(MachineRepresentation::kTagged, Type::Boolean(),
+ MachineRepresentation::kWord64);
// Word64 / Word32 shouldn't be implicitly converted.
- r.CheckTypeError(kRepWord64, kRepWord32);
- r.CheckTypeError(kRepWord32, kRepWord64);
- r.CheckTypeError(kRepWord64, kRepWord32 | kTypeInt32);
- r.CheckTypeError(kRepWord32 | kTypeInt32, kRepWord64);
- r.CheckTypeError(kRepWord64, kRepWord32 | kTypeUint32);
- r.CheckTypeError(kRepWord32 | kTypeUint32, kRepWord64);
-
- for (size_t i = 0; i < arraysize(all_reps); i++) {
- for (size_t j = 0; j < arraysize(all_reps); j++) {
- if (i == j) continue;
- // Only a single from representation is allowed.
- r.CheckTypeError(all_reps[i] | all_reps[j], kRepTagged);
- }
- }
+ r.CheckTypeError(MachineRepresentation::kWord64, Type::None(),
+ MachineRepresentation::kWord32);
+ r.CheckTypeError(MachineRepresentation::kWord32, Type::None(),
+ MachineRepresentation::kWord64);
+ r.CheckTypeError(MachineRepresentation::kWord32, Type::Signed32(),
+ MachineRepresentation::kWord64);
+ r.CheckTypeError(MachineRepresentation::kWord32, Type::Unsigned32(),
+ MachineRepresentation::kWord64);
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index cadd436385..88555b7d57 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <utility>
#include "src/compiler/pipeline.h"
@@ -12,7 +9,7 @@
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/interpreter.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -22,6 +19,13 @@ namespace compiler {
static const char kFunctionName[] = "f";
+static const Token::Value kCompareOperators[] = {
+ Token::Value::EQ, Token::Value::NE, Token::Value::EQ_STRICT,
+ Token::Value::NE_STRICT, Token::Value::LT, Token::Value::LTE,
+ Token::Value::GT, Token::Value::GTE};
+
+static const int SMI_MAX = (1 << 30) - 1;
+static const int SMI_MIN = -(1 << 30);
static MaybeHandle<Object> CallFunction(Isolate* isolate,
Handle<JSFunction> function) {
@@ -60,15 +64,17 @@ class BytecodeGraphCallable {
class BytecodeGraphTester {
public:
- BytecodeGraphTester(Isolate* isolate, Zone* zone, const char* script)
+ BytecodeGraphTester(Isolate* isolate, Zone* zone, const char* script,
+ const char* filter = kFunctionName)
: isolate_(isolate), zone_(zone), script_(script) {
i::FLAG_ignition = true;
i::FLAG_always_opt = false;
- i::FLAG_vector_stores = true;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_ignition_fallback_on_eval_and_catch = false;
// Set ignition filter flag via SetFlagsFromString to avoid double-free
// (or potential leak with StrDup() based on ownership confusion).
ScopedVector<char> ignition_filter(64);
- SNPrintF(ignition_filter, "--ignition-filter=%s", kFunctionName);
+ SNPrintF(ignition_filter, "--ignition-filter=%s", filter);
FlagList::SetFlagsFromString(ignition_filter.start(),
ignition_filter.length());
// Ensure handler table is generated.
@@ -77,8 +83,25 @@ class BytecodeGraphTester {
virtual ~BytecodeGraphTester() {}
template <class... A>
- BytecodeGraphCallable<A...> GetCallable() {
- return BytecodeGraphCallable<A...>(isolate_, GetFunction());
+ BytecodeGraphCallable<A...> GetCallable(
+ const char* functionName = kFunctionName) {
+ return BytecodeGraphCallable<A...>(isolate_, GetFunction(functionName));
+ }
+
+ Local<Message> CheckThrowsReturnMessage() {
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate_));
+ auto callable = GetCallable<>();
+ MaybeHandle<Object> no_result = callable();
+ CHECK(isolate_->has_pending_exception());
+ CHECK(try_catch.HasCaught());
+ CHECK(no_result.is_null());
+ isolate_->OptionalRescheduleException(true);
+ CHECK(!try_catch.Message().IsEmpty());
+ return try_catch.Message();
+ }
+
+ static Handle<Object> NewObject(const char* script) {
+ return v8::Utils::OpenHandle(*CompileRun(script));
}
private:
@@ -86,11 +109,11 @@ class BytecodeGraphTester {
Zone* zone_;
const char* script_;
- Handle<JSFunction> GetFunction() {
+ Handle<JSFunction> GetFunction(const char* functionName) {
CompileRun(script_);
Local<Function> api_function = Local<Function>::Cast(
CcTest::global()
- ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(kFunctionName))
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(functionName))
.ToLocalChecked());
Handle<JSFunction> function =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
@@ -100,8 +123,9 @@ class BytecodeGraphTester {
CompilationInfo compilation_info(&parse_info);
compilation_info.SetOptimizing(BailoutId::None(), Handle<Code>());
- Parser parser(&parse_info);
- CHECK(parser.Parse(&parse_info));
+ compilation_info.MarkAsDeoptimizationEnabled();
+ // TODO(mythria): Remove this step once parse_info is not needed.
+ CHECK(Compiler::ParseAndAnalyze(&parse_info));
compiler::Pipeline pipeline(&compilation_info);
Handle<Code> code = pipeline.GenerateCode();
function->ReplaceCode(*code);
@@ -113,16 +137,46 @@ class BytecodeGraphTester {
};
-template <int N>
+#define SPACE()
+
+#define REPEAT_2(SEP, ...) __VA_ARGS__ SEP() __VA_ARGS__
+#define REPEAT_4(SEP, ...) \
+ REPEAT_2(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__)
+#define REPEAT_8(SEP, ...) \
+ REPEAT_4(SEP, __VA_ARGS__) SEP() REPEAT_4(SEP, __VA_ARGS__)
+#define REPEAT_16(SEP, ...) \
+ REPEAT_8(SEP, __VA_ARGS__) SEP() REPEAT_8(SEP, __VA_ARGS__)
+#define REPEAT_32(SEP, ...) \
+ REPEAT_16(SEP, __VA_ARGS__) SEP() REPEAT_16(SEP, __VA_ARGS__)
+#define REPEAT_64(SEP, ...) \
+ REPEAT_32(SEP, __VA_ARGS__) SEP() REPEAT_32(SEP, __VA_ARGS__)
+#define REPEAT_128(SEP, ...) \
+ REPEAT_64(SEP, __VA_ARGS__) SEP() REPEAT_64(SEP, __VA_ARGS__)
+#define REPEAT_256(SEP, ...) \
+ REPEAT_128(SEP, __VA_ARGS__) SEP() REPEAT_128(SEP, __VA_ARGS__)
+
+#define REPEAT_127(SEP, ...) \
+ REPEAT_64(SEP, __VA_ARGS__) \
+ SEP() \
+ REPEAT_32(SEP, __VA_ARGS__) \
+ SEP() \
+ REPEAT_16(SEP, __VA_ARGS__) \
+ SEP() \
+ REPEAT_8(SEP, __VA_ARGS__) \
+ SEP() \
+ REPEAT_4(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__) SEP() __VA_ARGS__
+
+
+template <int N, typename T = Handle<Object>>
struct ExpectedSnippet {
const char* code_snippet;
- Handle<Object> return_value_and_parameters[N + 1];
+ T return_value_and_parameters[N + 1];
- inline Handle<Object> return_value() const {
- return return_value_and_parameters[0];
- }
+ inline T return_value() const { return return_value_and_parameters[0]; }
- inline Handle<Object> parameter(int i) const {
+ inline T parameter(int i) const {
+ CHECK_GE(i, 0);
+ CHECK_LT(i, N);
return return_value_and_parameters[1 + i];
}
};
@@ -148,9 +202,8 @@ TEST(BytecodeGraphBuilderReturnStatements) {
{"return 3.7e-60;", {factory->NewNumber(3.7e-60)}},
{"return -3.7e60;", {factory->NewNumber(-3.7e60)}},
{"return '';", {factory->NewStringFromStaticChars("")}},
- {"return 'catfood';", {factory->NewStringFromStaticChars("catfood")}}
- // TODO(oth): {"return NaN;", {factory->NewNumber(NAN)}}
- };
+ {"return 'catfood';", {factory->NewStringFromStaticChars("catfood")}},
+ {"return NaN;", {factory->nan_value()}}};
size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
for (size_t i = 0; i < num_snippets; i++) {
@@ -254,6 +307,2033 @@ TEST(BytecodeGraphBuilderTwoParameterTests) {
}
}
+
+TEST(BytecodeGraphBuilderNamedLoad) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"return p1.val;",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"return p1[\"name\"];",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({name : 'abc'})")}},
+ {"'use strict'; return p1.val;",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10 })")}},
+ {"'use strict'; return p1[\"val\"];",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10, name : 'abc'})")}},
+ {"var b;\n" REPEAT_127(SPACE, " b = p1.name; ") " return p1.name;\n",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({name : 'abc'})")}},
+ {"'use strict'; var b;\n"
+ REPEAT_127(SPACE, " b = p1.name; ")
+ "return p1.name;\n",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({ name : 'abc'})")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderKeyedLoad) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<2> snippets[] = {
+ {"return p1[p2];",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10})"),
+ factory->NewStringFromStaticChars("val")}},
+ {"return p1[100];",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(0)}},
+ {"var b = 100; return p1[b];",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(0)}},
+ {"'use strict'; return p1[p2];",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10 })"),
+ factory->NewStringFromStaticChars("val")}},
+ {"'use strict'; return p1[100];",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({100 : 10})"),
+ factory->NewNumberFromInt(0)}},
+ {"'use strict'; var b = p2; return p1[b];",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(100)}},
+ {"var b;\n" REPEAT_127(SPACE, " b = p1[p2]; ") " return p1[p2];\n",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(100)}},
+ {"'use strict'; var b;\n" REPEAT_127(SPACE,
+ " b = p1[p2]; ") "return p1[p2];\n",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({ 100 : 'abc'})"),
+ factory->NewNumberFromInt(100)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1, p2) { %s };\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0), snippets[i].parameter(1))
+ .ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderNamedStore) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"return p1.val = 20;",
+ {factory->NewNumberFromInt(20),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"p1.type = 'int'; return p1.type;",
+ {factory->NewStringFromStaticChars("int"),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"p1.name = 'def'; return p1[\"name\"];",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({name : 'abc'})")}},
+ {"'use strict'; p1.val = 20; return p1.val;",
+ {factory->NewNumberFromInt(20),
+ BytecodeGraphTester::NewObject("({val : 10 })")}},
+ {"'use strict'; return p1.type = 'int';",
+ {factory->NewStringFromStaticChars("int"),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"'use strict'; p1.val = 20; return p1[\"val\"];",
+ {factory->NewNumberFromInt(20),
+ BytecodeGraphTester::NewObject("({val : 10, name : 'abc'})")}},
+ {"var b = 'abc';\n" REPEAT_127(
+ SPACE, " p1.name = b; ") " p1.name = 'def'; return p1.name;\n",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({name : 'abc'})")}},
+ {"'use strict'; var b = 'def';\n" REPEAT_127(
+ SPACE, " p1.name = 'abc'; ") "p1.name = b; return p1.name;\n",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({ name : 'abc'})")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(3072);
+ SNPrintF(script, "function %s(p1) { %s };\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderKeyedStore) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<2> snippets[] = {
+ {"p1[p2] = 20; return p1[p2];",
+ {factory->NewNumberFromInt(20),
+ BytecodeGraphTester::NewObject("({val : 10})"),
+ factory->NewStringFromStaticChars("val")}},
+ {"return p1[100] = 'def';",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(0)}},
+ {"var b = 100; p1[b] = 'def'; return p1[b];",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(0)}},
+ {"'use strict'; p1[p2] = 20; return p1[p2];",
+ {factory->NewNumberFromInt(20),
+ BytecodeGraphTester::NewObject("({val : 10 })"),
+ factory->NewStringFromStaticChars("val")}},
+ {"'use strict'; return p1[100] = 20;",
+ {factory->NewNumberFromInt(20),
+ BytecodeGraphTester::NewObject("({100 : 10})"),
+ factory->NewNumberFromInt(0)}},
+ {"'use strict'; var b = p2; p1[b] = 'def'; return p1[b];",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(100)}},
+ {"var b;\n" REPEAT_127(
+ SPACE, " b = p1[p2]; ") " p1[p2] = 'def'; return p1[p2];\n",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({100 : 'abc'})"),
+ factory->NewNumberFromInt(100)}},
+ {"'use strict'; var b;\n" REPEAT_127(
+ SPACE, " b = p1[p2]; ") " p1[p2] = 'def'; return p1[p2];\n",
+ {factory->NewStringFromStaticChars("def"),
+ BytecodeGraphTester::NewObject("({ 100 : 'abc'})"),
+ factory->NewNumberFromInt(100)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1, p2) { %s };\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderPropertyCall) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"return p1.func();",
+ {factory->NewNumberFromInt(25),
+ BytecodeGraphTester::NewObject("({func() { return 25; }})")}},
+ {"return p1.func('abc');",
+ {factory->NewStringFromStaticChars("abc"),
+ BytecodeGraphTester::NewObject("({func(a) { return a; }})")}},
+ {"return p1.func(1, 2, 3, 4, 5, 6, 7, 8);",
+ {factory->NewNumberFromInt(36),
+ BytecodeGraphTester::NewObject(
+ "({func(a, b, c, d, e, f, g, h) {\n"
+ " return a + b + c + d + e + f + g + h;}})")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1) { %s };\n%s({func() {}});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderCallNew) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"function counter() { this.count = 20; }\n"
+ "function f() {\n"
+ " var c = new counter();\n"
+ " return c.count;\n"
+ "}; f()",
+ {factory->NewNumberFromInt(20)}},
+ {"function counter(arg0) { this.count = 17; this.x = arg0; }\n"
+ "function f() {\n"
+ " var c = new counter(6);\n"
+ " return c.count + c.x;\n"
+ "}; f()",
+ {factory->NewNumberFromInt(23)}},
+ {"function counter(arg0, arg1) {\n"
+ " this.count = 17; this.x = arg0; this.y = arg1;\n"
+ "}\n"
+ "function f() {\n"
+ " var c = new counter(3, 5);\n"
+ " return c.count + c.x + c.y;\n"
+ "}; f()",
+ {factory->NewNumberFromInt(25)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderCreateClosure) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"function f() {\n"
+ " function counter() { this.count = 20; }\n"
+ " var c = new counter();\n"
+ " return c.count;\n"
+ "}; f()",
+ {factory->NewNumberFromInt(20)}},
+ {"function f() {\n"
+ " function counter(arg0) { this.count = 17; this.x = arg0; }\n"
+ " var c = new counter(6);\n"
+ " return c.count + c.x;\n"
+ "}; f()",
+ {factory->NewNumberFromInt(23)}},
+ {"function f() {\n"
+ " function counter(arg0, arg1) {\n"
+ " this.count = 17; this.x = arg0; this.y = arg1;\n"
+ " }\n"
+ " var c = new counter(3, 5);\n"
+ " return c.count + c.x + c.y;\n"
+ "}; f()",
+ {factory->NewNumberFromInt(25)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderCallRuntime) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"function f(arg0) { return %MaxSmi(); }\nf()",
+ {factory->NewNumberFromInt(Smi::kMaxValue), factory->undefined_value()}},
+ {"function f(arg0) { return %IsArray(arg0) }\nf(undefined)",
+ {factory->true_value(), BytecodeGraphTester::NewObject("[1, 2, 3]")}},
+ {"function f(arg0) { return %Add(arg0, 2) }\nf(1)",
+ {factory->NewNumberFromInt(5), factory->NewNumberFromInt(3)}},
+ {"function f(arg0) { return %spread_arguments(arg0).length }\nf([])",
+ {factory->NewNumberFromInt(3),
+ BytecodeGraphTester::NewObject("[1, 2, 3]")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderGlobals) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var global = 321;\n function f() { return global; };\n f();",
+ {factory->NewNumberFromInt(321)}},
+ {"var global = 321;\n"
+ "function f() { global = 123; return global };\n f();",
+ {factory->NewNumberFromInt(123)}},
+ {"var global = function() { return 'abc'};\n"
+ "function f() { return global(); };\n f();",
+ {factory->NewStringFromStaticChars("abc")}},
+ {"var global = 456;\n"
+ "function f() { 'use strict'; return global; };\n f();",
+ {factory->NewNumberFromInt(456)}},
+ {"var global = 987;\n"
+ "function f() { 'use strict'; global = 789; return global };\n f();",
+ {factory->NewNumberFromInt(789)}},
+ {"var global = function() { return 'xyz'};\n"
+ "function f() { 'use strict'; return global(); };\n f();",
+ {factory->NewStringFromStaticChars("xyz")}},
+ {"var global = 'abc'; var global_obj = {val:123};\n"
+ "function f() {\n" REPEAT_127(
+ SPACE, " var b = global_obj.name;\n") "return global; };\n f();\n",
+ {factory->NewStringFromStaticChars("abc")}},
+ {"var global = 'abc'; var global_obj = {val:123};\n"
+ "function f() { 'use strict';\n" REPEAT_127(
+ SPACE, " var b = global_obj.name;\n") "global = 'xyz'; return "
+ "global };\n f();\n",
+ {factory->NewStringFromStaticChars("xyz")}},
+ {"function f() { return typeof(undeclared_var); }\n; f();\n",
+ {factory->NewStringFromStaticChars("undefined")}},
+ {"var defined_var = 10; function f() { return typeof(defined_var); }\n; "
+ "f();\n",
+ {factory->NewStringFromStaticChars("number")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderToObject) {
+ // TODO(mythria): tests for ToObject. Needs ForIn.
+}
+
+
+TEST(BytecodeGraphBuilderToName) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var a = 'val'; var obj = {[a] : 10}; return obj.val;",
+ {factory->NewNumberFromInt(10)}},
+ {"var a = 20; var obj = {[a] : 10}; return obj['20'];",
+ {factory->NewNumberFromInt(10)}},
+ {"var a = 20; var obj = {[a] : 10}; return obj[20];",
+ {factory->NewNumberFromInt(10)}},
+ {"var a = {val:23}; var obj = {[a] : 10}; return obj[a];",
+ {factory->NewNumberFromInt(10)}},
+ {"var a = {val:23}; var obj = {[a] : 10}; return obj['[object Object]'];",
+ {factory->NewNumberFromInt(10)}},
+ {"var a = {toString : function() { return 'x'}};\n"
+ "var obj = {[a] : 10};\n"
+ "return obj.x;",
+ {factory->NewNumberFromInt(10)}},
+ {"var a = {valueOf : function() { return 'x'}};\n"
+ "var obj = {[a] : 10};\n"
+ "return obj.x;",
+ {factory->undefined_value()}},
+ {"var a = {[Symbol.toPrimitive] : function() { return 'x'}};\n"
+ "var obj = {[a] : 10};\n"
+ "return obj.x;",
+ {factory->NewNumberFromInt(10)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderLogicalNot) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"return !p1;",
+ {factory->false_value(),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"return !p1;", {factory->true_value(), factory->NewNumberFromInt(0)}},
+ {"return !p1;", {factory->true_value(), factory->undefined_value()}},
+ {"return !p1;", {factory->false_value(), factory->NewNumberFromInt(10)}},
+ {"return !p1;", {factory->false_value(), factory->true_value()}},
+ {"return !p1;",
+ {factory->false_value(), factory->NewStringFromStaticChars("abc")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderTypeOf) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"return typeof p1;",
+ {factory->NewStringFromStaticChars("object"),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"return typeof p1;",
+ {factory->NewStringFromStaticChars("undefined"),
+ factory->undefined_value()}},
+ {"return typeof p1;",
+ {factory->NewStringFromStaticChars("number"),
+ factory->NewNumberFromInt(10)}},
+ {"return typeof p1;",
+ {factory->NewStringFromStaticChars("boolean"), factory->true_value()}},
+ {"return typeof p1;",
+ {factory->NewStringFromStaticChars("string"),
+ factory->NewStringFromStaticChars("abc")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Exercises pre/post increment and decrement (++/--) on Smi and heap-number
+// parameters, on named and keyed properties, with the operand captured by an
+// inner closure, and on a non-numeric (string) operand which produces NaN.
+TEST(BytecodeGraphBuilderCountOperation) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Each entry pairs a snippet with {expected return value, parameter p1}.
+ ExpectedSnippet<1> snippets[] = {
+ {"return ++p1;",
+ {factory->NewNumberFromInt(11), factory->NewNumberFromInt(10)}},
+ {"return p1++;",
+ {factory->NewNumberFromInt(10), factory->NewNumberFromInt(10)}},
+ {"return p1++ + 10;",
+ {factory->NewHeapNumber(15.23), factory->NewHeapNumber(5.23)}},
+ {"return 20 + ++p1;",
+ {factory->NewHeapNumber(27.23), factory->NewHeapNumber(6.23)}},
+ {"return --p1;",
+ {factory->NewHeapNumber(9.8), factory->NewHeapNumber(10.8)}},
+ {"return p1--;",
+ {factory->NewHeapNumber(10.8), factory->NewHeapNumber(10.8)}},
+ {"return p1-- + 10;",
+ {factory->NewNumberFromInt(20), factory->NewNumberFromInt(10)}},
+ {"return 20 + --p1;",
+ {factory->NewNumberFromInt(29), factory->NewNumberFromInt(10)}},
+ {"return p1.val--;",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"return ++p1['val'];",
+ {factory->NewNumberFromInt(11),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"return ++p1[1];",
+ {factory->NewNumberFromInt(11),
+ BytecodeGraphTester::NewObject("({1 : 10})")}},
+ // The inner function forces p1 into a context slot.
+ {" function inner() { return p1 } return --p1;",
+ {factory->NewNumberFromInt(9), factory->NewNumberFromInt(10)}},
+ {" function inner() { return p1 } return p1--;",
+ {factory->NewNumberFromInt(10), factory->NewNumberFromInt(10)}},
+ // ++ on a non-numeric string converts to NaN.
+ {"return ++p1;",
+ {factory->nan_value(), factory->NewStringFromStaticChars("String")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks 'delete' of named properties on a parameter object, in both sloppy
+// and strict mode: delete returns true, the deleted property reads back as
+// undefined, and deleting one property leaves siblings intact.
+TEST(BytecodeGraphBuilderDelete) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Each entry pairs a snippet with {expected return value, parameter p1}.
+ ExpectedSnippet<1> snippets[] = {
+ {"return delete p1.val;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"delete p1.val; return p1.val;",
+ {factory->undefined_value(),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"delete p1.name; return p1.val;",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10, name:'abc'})")}},
+ {"'use strict'; return delete p1.val;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"'use strict'; delete p1.val; return p1.val;",
+ {factory->undefined_value(),
+ BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"'use strict'; delete p1.name; return p1.val;",
+ {factory->NewNumberFromInt(10),
+ BytecodeGraphTester::NewObject("({val : 10, name:'abc'})")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks 'delete' applied to globals: deleting a var-declared global fails
+// (returns false), 'delete this' returns true, and deleting named/keyed
+// properties of a global object behaves as in the parameter-object tests,
+// including when an inner closure also references the object.
+TEST(BytecodeGraphBuilderDeleteGlobal) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Snippets declare their own function f; each entry gives the expected
+ // return value of f().
+ ExpectedSnippet<0> snippets[] = {
+ {"var obj = {val : 10, type : 'int'};"
+ "function f() {return delete obj;};",
+ {factory->false_value()}},
+ {"function f() {return delete this;};", {factory->true_value()}},
+ {"var obj = {val : 10, type : 'int'};"
+ "function f() {return delete obj.val;};",
+ {factory->true_value()}},
+ {"var obj = {val : 10, type : 'int'};"
+ "function f() {'use strict'; return delete obj.val;};",
+ {factory->true_value()}},
+ {"var obj = {val : 10, type : 'int'};"
+ "function f() {delete obj.val; return obj.val;};",
+ {factory->undefined_value()}},
+ {"var obj = {val : 10, type : 'int'};"
+ "function f() {'use strict'; delete obj.val; return obj.val;};",
+ {factory->undefined_value()}},
+ {"var obj = {1 : 10, 2 : 20};"
+ "function f() { return delete obj[1]; };",
+ {factory->true_value()}},
+ {"var obj = {1 : 10, 2 : 20};"
+ "function f() { 'use strict'; return delete obj[1];};",
+ {factory->true_value()}},
+ {"obj = {1 : 10, 2 : 20};"
+ "function f() { delete obj[1]; return obj[2];};",
+ {factory->NewNumberFromInt(20)}},
+ {"function f() {"
+ " var obj = {1 : 10, 2 : 20};"
+ " function inner() { return obj[1]; };"
+ " return delete obj[1];"
+ "}",
+ {factory->true_value()}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s %s({});", snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks 'delete' on lookup slots inside an eval'd function: deleting the
+// implicit global y succeeds, deleting the var-declared z fails. The tested
+// function t is defined inside eval so its variable references are lookups.
+TEST(BytecodeGraphBuilderDeleteLookupSlot) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // TODO(mythria): Add more tests when we have support for LdaLookupSlot.
+ // Prologue/epilogue build: function t (inside eval) is assigned to f and
+ // compiled; the snippet forms t's body.
+ const char* function_prologue = "var f;"
+ "var x = 1;"
+ "y = 10;"
+ "var obj = {val:10};"
+ "var z = 30;"
+ "function f1() {"
+ " var z = 20;"
+ " eval(\"function t() {";
+ const char* function_epilogue = " }; f = t; t();\");"
+ "}"
+ "f1();";
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return delete y;", {factory->true_value()}},
+ {"return delete z;", {factory->false_value()}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
+ function_epilogue);
+
+ // "t" names the eval-defined function to compile and call.
+ BytecodeGraphTester tester(isolate, zone, script.start(), "t");
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks loads, stores and typeof through lookup slots: the tested function
+// t is defined inside eval, so references to x, obj and undeclared names
+// resolve dynamically at runtime.
+TEST(BytecodeGraphBuilderLookupSlot) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Prologue/epilogue build: function t (inside eval) is assigned to f and
+ // compiled; the snippet forms t's body.
+ const char* function_prologue = "var f;"
+ "var x = 12;"
+ "y = 10;"
+ "var obj = {val:3.1414};"
+ "var z = 30;"
+ "function f1() {"
+ " var z = 20;"
+ " eval(\"function t() {";
+ const char* function_epilogue = " }; f = t; t();\");"
+ "}"
+ "f1();";
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return x;", {factory->NewNumber(12)}},
+ {"return obj.val;", {factory->NewNumber(3.1414)}},
+ {"return typeof x;", {factory->NewStringFromStaticChars("number")}},
+ {"return typeof dummy;",
+ {factory->NewStringFromStaticChars("undefined")}},
+ {"x = 23; return x;", {factory->NewNumber(23)}},
+ {"'use strict'; obj.val = 23.456; return obj.val;",
+ {factory->NewNumber(23.456)}}};
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
+ function_epilogue);
+
+ // "t" names the eval-defined function to compile and call.
+ BytecodeGraphTester tester(isolate, zone, script.start(), "t");
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Same lookup-slot scenarios as above, but each snippet is padded with 256
+// repeated statements (REPEAT_256) so the lookup bytecodes need wide
+// (16-bit) operands; the script buffer is enlarged to 3072 bytes to fit.
+TEST(BytecodeGraphBuilderLookupSlotWide) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Prologue/epilogue build: function t (inside eval) is assigned to f and
+ // compiled; the snippet forms t's body.
+ const char* function_prologue =
+ "var f;"
+ "var x = 12;"
+ "y = 10;"
+ "var obj = {val:3.1414};"
+ "var z = 30;"
+ "function f1() {"
+ " var z = 20;"
+ " eval(\"function t() {";
+ const char* function_epilogue =
+ " }; f = t; t();\");"
+ "}"
+ "f1();";
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var y = 2.3;" REPEAT_256(SPACE, "y = 2.3;") "return x;",
+ {factory->NewNumber(12)}},
+ {"var y = 2.3;" REPEAT_256(SPACE, "y = 2.3;") "return typeof x;",
+ {factory->NewStringFromStaticChars("number")}},
+ {"var y = 2.3;" REPEAT_256(SPACE, "y = 2.3;") "return x = 23;",
+ {factory->NewNumber(23)}},
+ {"'use strict';" REPEAT_256(SPACE, "y = 2.3;") "return obj.val = 23.456;",
+ {factory->NewNumber(23.456)}}};
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(3072);
+ SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
+ function_epilogue);
+
+ // "t" names the eval-defined function to compile and call.
+ BytecodeGraphTester tester(isolate, zone, script.start(), "t");
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks calls through lookup slots: the presence of eval in the function
+// forces callee resolution at runtime, so reassignments of g made by eval
+// must be observed by subsequent calls.
+TEST(BytecodeGraphBuilderCallLookupSlot) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"g = function(){ return 2 }; eval(''); return g();",
+ {handle(Smi::FromInt(2), isolate)}},
+ {"g = function(){ return 2 }; eval('g = function() {return 3}');\n"
+ "return g();",
+ {handle(Smi::FromInt(3), isolate)}},
+ // Also checks the receiver: g.x must see the y of the object eval
+ // rebound g to.
+ {"g = { x: function(){ return this.y }, y: 20 };\n"
+ "eval('g = { x: g.x, y: 30 }');\n"
+ "return g.x();",
+ {handle(Smi::FromInt(30), isolate)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks direct eval semantics through the bytecode graph builder: return
+// values of eval'd expressions, variable introduction and shadowing in
+// sloppy vs. strict callers, and strict-mode eval getting its own scope.
+TEST(BytecodeGraphBuilderEval) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return eval('1;');", {handle(Smi::FromInt(1), isolate)}},
+ {"return eval('100 * 20;');", {handle(Smi::FromInt(2000), isolate)}},
+ {"var x = 10; return eval('x + 20;');",
+ {handle(Smi::FromInt(30), isolate)}},
+ {"var x = 10; eval('x = 33;'); return x;",
+ {handle(Smi::FromInt(33), isolate)}},
+ {"'use strict'; var x = 20; var z = 0;\n"
+ "eval('var x = 33; z = x;'); return x + z;",
+ {handle(Smi::FromInt(53), isolate)}},
+ {"eval('var x = 33;'); eval('var y = x + 20'); return x + y;",
+ {handle(Smi::FromInt(86), isolate)}},
+ {"var x = 1; eval('for(i = 0; i < 10; i++) x = x + 1;'); return x",
+ {handle(Smi::FromInt(11), isolate)}},
+ {"var x = 10; eval('var x = 20;'); return x;",
+ {handle(Smi::FromInt(20), isolate)}},
+ // A strict-mode eval does not leak its var into the caller's scope.
+ {"var x = 1; eval('\"use strict\"; var x = 2;'); return x;",
+ {handle(Smi::FromInt(1), isolate)}},
+ {"'use strict'; var x = 1; eval('var x = 2;'); return x;",
+ {handle(Smi::FromInt(1), isolate)}},
+ {"var x = 10; eval('x + 20;'); return typeof x;",
+ {factory->NewStringFromStaticChars("number")}},
+ {"eval('var y = 10;'); return typeof unallocated;",
+ {factory->NewStringFromStaticChars("undefined")}},
+ {"'use strict'; eval('var y = 10;'); return typeof unallocated;",
+ {factory->NewStringFromStaticChars("undefined")}},
+ {"eval('var x = 10;'); return typeof x;",
+ {factory->NewStringFromStaticChars("number")}},
+ {"var x = {}; eval('var x = 10;'); return typeof x;",
+ {factory->NewStringFromStaticChars("number")}},
+ {"'use strict'; var x = {}; eval('var x = 10;'); return typeof x;",
+ {factory->NewStringFromStaticChars("object")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks that eval'd code can read and write the enclosing function's
+// parameter p1, including from a nested inner function's eval.
+TEST(BytecodeGraphBuilderEvalParams) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ // Each entry pairs a snippet with {expected return value, parameter p1}.
+ ExpectedSnippet<1> snippets[] = {
+ {"var x = 10; return eval('x + p1;');",
+ {handle(Smi::FromInt(30), isolate), handle(Smi::FromInt(20), isolate)}},
+ {"var x = 10; eval('p1 = x;'); return p1;",
+ {handle(Smi::FromInt(10), isolate), handle(Smi::FromInt(20), isolate)}},
+ {"var a = 10;"
+ "function inner() { return eval('a + p1;');}"
+ "return inner();",
+ {handle(Smi::FromInt(30), isolate), handle(Smi::FromInt(20), isolate)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s }\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks globals created by eval'd code: a sloppy-mode eval'd function can
+// introduce a global via implicit assignment, while in strict mode the
+// assignment throws and the global is never created.
+TEST(BytecodeGraphBuilderEvalGlobal) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // These snippets are complete scripts (no wrapper is added below).
+ ExpectedSnippet<0> snippets[] = {
+ {"function add_global() { eval('function f() { z = 33; }; f()'); };"
+ "function f() { add_global(); return z; }; f();",
+ {handle(Smi::FromInt(33), isolate)}},
+ {"function add_global() {\n"
+ " eval('\"use strict\"; function f() { y = 33; };"
+ " try { f() } catch(e) {}');\n"
+ "}\n"
+ "function f() { add_global(); return typeof y; } f();",
+ {factory->NewStringFromStaticChars("undefined")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Computes the reference result for a comparison: applies the comparison
+// denoted by |opcode| to the two values using the runtime's own Object
+// comparison helpers, so the compiled snippet can be checked against it.
+bool get_compare_result(Token::Value opcode, Handle<Object> lhs_value,
+ Handle<Object> rhs_value) {
+ switch (opcode) {
+ case Token::Value::EQ:
+ return Object::Equals(lhs_value, rhs_value).FromJust();
+ case Token::Value::NE:
+ return !Object::Equals(lhs_value, rhs_value).FromJust();
+ case Token::Value::EQ_STRICT:
+ return lhs_value->StrictEquals(*rhs_value);
+ case Token::Value::NE_STRICT:
+ return !lhs_value->StrictEquals(*rhs_value);
+ case Token::Value::LT:
+ return Object::LessThan(lhs_value, rhs_value).FromJust();
+ case Token::Value::LTE:
+ return Object::LessThanOrEqual(lhs_value, rhs_value).FromJust();
+ case Token::Value::GT:
+ return Object::GreaterThan(lhs_value, rhs_value).FromJust();
+ case Token::Value::GTE:
+ return Object::GreaterThanOrEqual(lhs_value, rhs_value).FromJust();
+ default:
+ // Only comparison tokens are expected here.
+ UNREACHABLE();
+ return false;
+ }
+}
+
+
+// Maps a comparison token to a two-parameter JS snippet that applies the
+// corresponding operator to p1 and p2. Counterpart of get_compare_result.
+const char* get_code_snippet(Token::Value opcode) {
+ switch (opcode) {
+ case Token::Value::EQ:
+ return "return p1 == p2;";
+ case Token::Value::NE:
+ return "return p1 != p2;";
+ case Token::Value::EQ_STRICT:
+ return "return p1 === p2;";
+ case Token::Value::NE_STRICT:
+ return "return p1 !== p2;";
+ case Token::Value::LT:
+ return "return p1 < p2;";
+ case Token::Value::LTE:
+ return "return p1 <= p2;";
+ case Token::Value::GT:
+ return "return p1 > p2;";
+ case Token::Value::GTE:
+ return "return p1 >= p2;";
+ default:
+ // Only comparison tokens are expected here.
+ UNREACHABLE();
+ return "";
+ }
+}
+
+
+// Cross-product comparison test: for every operator in kCompareOperators
+// (presumably declared earlier in this file — not visible here), compiles
+// the matching snippet once and checks every lhs/rhs value pair against the
+// reference result from get_compare_result.
+TEST(BytecodeGraphBuilderCompare) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+ // Operand sets mix Smis, heap numbers, strings and Smi range extremes.
+ Handle<Object> lhs_values[] = {
+ factory->NewNumberFromInt(10), factory->NewHeapNumber(3.45),
+ factory->NewStringFromStaticChars("abc"),
+ factory->NewNumberFromInt(SMI_MAX), factory->NewNumberFromInt(SMI_MIN)};
+ Handle<Object> rhs_values[] = {factory->NewNumberFromInt(10),
+ factory->NewStringFromStaticChars("10"),
+ factory->NewNumberFromInt(20),
+ factory->NewStringFromStaticChars("abc"),
+ factory->NewHeapNumber(3.45),
+ factory->NewNumberFromInt(SMI_MAX),
+ factory->NewNumberFromInt(SMI_MIN)};
+
+ for (size_t i = 0; i < arraysize(kCompareOperators); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1, p2) { %s }\n%s({}, {});", kFunctionName,
+ get_code_snippet(kCompareOperators[i]), kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
+ for (size_t j = 0; j < arraysize(lhs_values); j++) {
+ for (size_t k = 0; k < arraysize(rhs_values); k++) {
+ Handle<Object> return_value =
+ callable(lhs_values[j], rhs_values[k]).ToHandleChecked();
+ bool result = get_compare_result(kCompareOperators[i], lhs_values[j],
+ rhs_values[k]);
+ CHECK(return_value->SameValue(*factory->ToBoolean(result)));
+ }
+ }
+ }
+}
+
+
+// Checks the 'in' operator: own properties, inherited properties (length,
+// toString), absent keys, and numeric keys, with p1 the object and p2 the
+// property key.
+TEST(BytecodeGraphBuilderTestIn) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Each entry pairs a snippet with {expected value, p1 object, p2 key}.
+ ExpectedSnippet<2> snippets[] = {
+ {"return p2 in p1;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("({val : 10})"),
+ factory->NewStringFromStaticChars("val")}},
+ {"return p2 in p1;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("[]"),
+ factory->NewStringFromStaticChars("length")}},
+ {"return p2 in p1;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("[]"),
+ factory->NewStringFromStaticChars("toString")}},
+ {"return p2 in p1;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("({val : 10})"),
+ factory->NewStringFromStaticChars("toString")}},
+ {"return p2 in p1;",
+ {factory->false_value(), BytecodeGraphTester::NewObject("({val : 10})"),
+ factory->NewStringFromStaticChars("abc")}},
+ {"return p2 in p1;",
+ {factory->false_value(), BytecodeGraphTester::NewObject("({val : 10})"),
+ factory->NewNumberFromInt(10)}},
+ {"return p2 in p1;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("({10 : 'val'})"),
+ factory->NewNumberFromInt(10)}},
+ {"return p2 in p1;",
+ {factory->false_value(),
+ BytecodeGraphTester::NewObject("({10 : 'val'})"),
+ factory->NewNumberFromInt(1)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1, p2) { %s }\n%s({}, {});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0), snippets[i].parameter(1))
+ .ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks 'instanceof': a plain object is an Object instance, a string
+// primitive is not a String instance, and an object built from a local
+// constructor is an instance of that constructor.
+TEST(BytecodeGraphBuilderTestInstanceOf) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Each entry pairs a snippet with {expected return value, parameter p1}.
+ ExpectedSnippet<1> snippets[] = {
+ {"return p1 instanceof Object;",
+ {factory->true_value(), BytecodeGraphTester::NewObject("({val : 10})")}},
+ {"return p1 instanceof String;",
+ {factory->false_value(), factory->NewStringFromStaticChars("string")}},
+ {"var cons = function() {};"
+ "var obj = new cons();"
+ "return obj instanceof cons;",
+ {factory->true_value(), factory->undefined_value()}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks 'throw': each snippet must raise an uncaught exception whose
+// message text matches the expected string; the second value of a double
+// throw is never reached.
+TEST(BytecodeGraphBuilderThrow) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ // TODO(mythria): Add more tests when real try-catch and deoptimization
+ // information are supported.
+ // The expected value here is the message text, not a JS value.
+ ExpectedSnippet<0, const char*> snippets[] = {
+ {"throw undefined;", {"Uncaught undefined"}},
+ {"throw 1;", {"Uncaught 1"}},
+ {"throw 'Error';", {"Uncaught Error"}},
+ {"throw 'Error1'; throw 'Error2'", {"Uncaught Error1"}},
+ // TODO(mythria): Enable these tests when JumpIfTrue is supported.
+ // {"var a = true; if (a) { throw 'Error'; }", {"Error"}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ // CheckThrowsReturnMessage asserts the call throws and hands back the
+ // v8::Message carrying the exception text.
+ v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
+ v8::Local<v8::String> expected_string = v8_str(snippets[i].return_value());
+ CHECK(
+ message->Equals(CcTest::isolate()->GetCurrentContext(), expected_string)
+ .FromJust());
+ }
+}
+
+
+// Checks block-scoped (let) context handling: inner lets must not leak to
+// the outer scope, closures must capture the correct context slot, and
+// later writes to a captured let must be visible through the closure.
+TEST(BytecodeGraphBuilderContext) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Snippets are complete scripts defining f; f's return value is checked.
+ ExpectedSnippet<0> snippets[] = {
+ {"var x = 'outer';"
+ "function f() {"
+ " 'use strict';"
+ " {"
+ " let x = 'inner';"
+ " (function() {x});"
+ " }"
+ "return(x);"
+ "}"
+ "f();",
+ {factory->NewStringFromStaticChars("outer")}},
+ {"var x = 'outer';"
+ "function f() {"
+ " 'use strict';"
+ " {"
+ " let x = 'inner ';"
+ " var innerFunc = function() {return x};"
+ " }"
+ "return(innerFunc() + x);"
+ "}"
+ "f();",
+ {factory->NewStringFromStaticChars("inner outer")}},
+ {"var x = 'outer';"
+ "function f() {"
+ " 'use strict';"
+ " {"
+ " let x = 'inner ';"
+ " var innerFunc = function() {return x;};"
+ " {"
+ " let x = 'innermost ';"
+ " var innerMostFunc = function() {return x + innerFunc();};"
+ " }"
+ " x = 'inner_changed ';"
+ " }"
+ " return(innerMostFunc() + x);"
+ "}"
+ "f();",
+ {factory->NewStringFromStaticChars("innermost inner_changed outer")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s", snippets[i].code_snippet);
+
+ // Compile and call the script-defined "f" rather than a wrapper.
+ BytecodeGraphTester tester(isolate, zone, script.start(), "f");
+ auto callable = tester.GetCallable<>("f");
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks loads and stores through outer-function context chains: closures
+// returned from nested constructors must read and mutate variables captured
+// from enclosing scopes one or two levels up.
+TEST(BytecodeGraphBuilderLoadContext) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Each entry pairs a script with {expected value, argument passed to f}.
+ ExpectedSnippet<1> snippets[] = {
+ {"function Outer() {"
+ " var outerVar = 2;"
+ " function Inner(innerArg) {"
+ " this.innerFunc = function () {"
+ " return outerVar * innerArg;"
+ " };"
+ " };"
+ " this.getInnerFunc = function GetInner() {"
+ " return new Inner(3).innerFunc;"
+ " }"
+ "}"
+ "var f = new Outer().getInnerFunc();"
+ "f();",
+ {factory->NewNumberFromInt(6), factory->undefined_value()}},
+ {"function Outer() {"
+ " var outerVar = 2;"
+ " function Inner(innerArg) {"
+ " this.innerFunc = function () {"
+ " outerVar = innerArg; return outerVar;"
+ " };"
+ " };"
+ " this.getInnerFunc = function GetInner() {"
+ " return new Inner(10).innerFunc;"
+ " }"
+ "}"
+ "var f = new Outer().getInnerFunc();"
+ "f();",
+ {factory->NewNumberFromInt(10), factory->undefined_value()}},
+ {"function testOuter(outerArg) {"
+ " this.testinnerFunc = function testInner(innerArg) {"
+ " return innerArg + outerArg;"
+ " }"
+ "}"
+ "var f = new testOuter(10).testinnerFunc;"
+ "f(0);",
+ {factory->NewNumberFromInt(14), factory->NewNumberFromInt(4)}},
+ {"function testOuter(outerArg) {"
+ " var outerVar = outerArg * 2;"
+ " this.testinnerFunc = function testInner(innerArg) {"
+ " outerVar = outerVar + innerArg; return outerVar;"
+ " }"
+ "}"
+ "var f = new testOuter(10).testinnerFunc;"
+ "f(0);",
+ {factory->NewNumberFromInt(24), factory->NewNumberFromInt(4)}}};
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s", snippets[i].code_snippet);
+
+ // "*" filter compiles all functions; the variable "f" is then called
+ // with the snippet's parameter.
+ BytecodeGraphTester tester(isolate, zone, script.start(), "*");
+ auto callable = tester.GetCallable<Handle<Object>>("f");
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks the arguments object when the function is called with no actual
+// arguments: arguments[0] must be undefined regardless of declared formals
+// or strict mode.
+TEST(BytecodeGraphBuilderCreateArgumentsNoParameters) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"function f() {return arguments[0];}", {factory->undefined_value()}},
+ {"function f(a) {return arguments[0];}", {factory->undefined_value()}},
+ {"function f() {'use strict'; return arguments[0];}",
+ {factory->undefined_value()}},
+ {"function f(a) {'use strict'; return arguments[0];}",
+ {factory->undefined_value()}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks the arguments object with three passed arguments: in-range and
+// out-of-range indices, and the sloppy/strict difference in whether writes
+// to a formal parameter alias through to the arguments object.
+TEST(BytecodeGraphBuilderCreateArguments) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ // Each entry: {expected value, then the three arguments passed to f}.
+ ExpectedSnippet<3> snippets[] = {
+ {"function f(a, b, c) {return arguments[0];}",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, b, c) {return arguments[3];}",
+ {factory->undefined_value(), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ // Sloppy mode: writing formal b is visible through arguments[1].
+ {"function f(a, b, c) { b = c; return arguments[1];}",
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, b, c) {'use strict'; return arguments[0];}",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, b, c) {'use strict'; return arguments[3];}",
+ {factory->undefined_value(), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ // Strict mode: arguments does not alias formals, so [1] stays 2.
+ {"function f(a, b, c) {'use strict'; b = c; return arguments[1];}",
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function inline_func(a, b) { return arguments[0] }"
+ "function f(a, b, c) {return inline_func(b, c) + arguments[0];}",
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(30)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable =
+ tester.GetCallable<Handle<Object>, Handle<Object>, Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0), snippets[i].parameter(1),
+ snippets[i].parameter(2))
+ .ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks regexp literals: failed and successful exec, capture groups, flags
+// (i, gi), String.prototype.match, and a REPEAT_256-padded variant that
+// forces wide literal-index operands.
+TEST(BytecodeGraphBuilderRegExpLiterals) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return /abd/.exec('cccabbdd');", {factory->null_value()}},
+ {"return /ab+d/.exec('cccabbdd')[0];",
+ {factory->NewStringFromStaticChars("abbd")}},
+ {"var a = 3.1414;"
+ REPEAT_256(SPACE, "a = 3.1414;")
+ "return /ab+d/.exec('cccabbdd')[0];",
+ {factory->NewStringFromStaticChars("abbd")}},
+ {"return /ab+d/.exec('cccabbdd')[1];", {factory->undefined_value()}},
+ {"return /AbC/i.exec('ssaBC')[0];",
+ {factory->NewStringFromStaticChars("aBC")}},
+ {"return 'ssaBC'.match(/AbC/i)[0];",
+ {factory->NewStringFromStaticChars("aBC")}},
+ {"return 'ssaBCtAbC'.match(/(AbC)/gi)[1];",
+ {factory->NewStringFromStaticChars("AbC")}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ // 4096 bytes: the REPEAT_256 snippet exceeds the usual 1024 buffer.
+ ScopedVector<char> script(4096);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks array literals: empty and populated literals, element expressions
+// with side effects (a++/++a), nested literals, string concatenation in
+// elements, and REPEAT_256-padded variants needing wide operands.
+TEST(BytecodeGraphBuilderArrayLiterals) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return [][0];", {factory->undefined_value()}},
+ {"return [1, 3, 2][1];", {factory->NewNumberFromInt(3)}},
+ {"var a;" REPEAT_256(SPACE, "a = 9.87;") "return [1, 3, 2][1];",
+ {factory->NewNumberFromInt(3)}},
+ {"return ['a', 'b', 'c'][2];", {factory->NewStringFromStaticChars("c")}},
+ {"var a = 100; return [a, a++, a + 2, a + 3][2];",
+ {factory->NewNumberFromInt(103)}},
+ {"var a = 100; return [a, ++a, a + 2, a + 3][1];",
+ {factory->NewNumberFromInt(101)}},
+ {"var a = 9.2;"
+ REPEAT_256(SPACE, "a = 9.34;")
+ "return [a, ++a, a + 2, a + 3][2];",
+ {factory->NewHeapNumber(12.34)}},
+ {"return [[1, 2, 3], ['a', 'b', 'c']][1][0];",
+ {factory->NewStringFromStaticChars("a")}},
+ {"var t = 't'; return [[t, t + 'est'], [1 + t]][0][1];",
+ {factory->NewStringFromStaticChars("test")}},
+ {"var t = 't'; return [[t, t + 'est'], [1 + t]][1][0];",
+ {factory->NewStringFromStaticChars("1t")}}};
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ // 4096 bytes: the REPEAT_256 snippets exceed the usual 1024 buffer.
+ ScopedVector<char> script(4096);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+// Checks object literals: named and keyed access, duplicate keys (last one
+// wins), function-valued properties and shorthand methods, getters/setters,
+// numeric keys, __proto__, computed property names, and REPEAT_256-padded
+// variants needing wide operands.
+TEST(BytecodeGraphBuilderObjectLiterals) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return { }.name;", {factory->undefined_value()}},
+ {"return { name: 'string', val: 9.2 }.name;",
+ {factory->NewStringFromStaticChars("string")}},
+ {"var a;\n"
+ REPEAT_256(SPACE, "a = 1.23;\n")
+ "return { name: 'string', val: 9.2 }.name;",
+ {factory->NewStringFromStaticChars("string")}},
+ {"return { name: 'string', val: 9.2 }['name'];",
+ {factory->NewStringFromStaticChars("string")}},
+ {"var a = 15; return { name: 'string', val: a }.val;",
+ {factory->NewNumberFromInt(15)}},
+ {"var a;"
+ REPEAT_256(SPACE, "a = 1.23;")
+ "return { name: 'string', val: a }.val;",
+ {factory->NewHeapNumber(1.23)}},
+ {"var a = 15; var b = 'val'; return { name: 'string', val: a }[b];",
+ {factory->NewNumberFromInt(15)}},
+ // Duplicate property: the later definition wins.
+ {"var a = 5; return { val: a, val: a + 1 }.val;",
+ {factory->NewNumberFromInt(6)}},
+ {"return { func: function() { return 'test' } }.func();",
+ {factory->NewStringFromStaticChars("test")}},
+ {"return { func(a) { return a + 'st'; } }.func('te');",
+ {factory->NewStringFromStaticChars("test")}},
+ {"return { get a() { return 22; } }.a;", {factory->NewNumberFromInt(22)}},
+ {"var a = { get b() { return this.x + 't'; },\n"
+ " set b(val) { this.x = val + 's' } };\n"
+ "a.b = 'te';\n"
+ "return a.b;",
+ {factory->NewStringFromStaticChars("test")}},
+ {"var a = 123; return { 1: a }[1];", {factory->NewNumberFromInt(123)}},
+ {"return Object.getPrototypeOf({ __proto__: null });",
+ {factory->null_value()}},
+ {"var a = 'test'; return { [a]: 1 }.test;",
+ {factory->NewNumberFromInt(1)}},
+ {"var a = 'test'; return { b: a, [a]: a + 'ing' }['test']",
+ {factory->NewStringFromStaticChars("testing")}},
+ {"var a = 'proto_str';\n"
+ "var b = { [a]: 1, __proto__: { var : a } };\n"
+ "return Object.getPrototypeOf(b).var",
+ {factory->NewStringFromStaticChars("proto_str")}},
+ {"var n = 'name';\n"
+ "return { [n]: 'val', get a() { return 987 } }['a'];",
+ {factory->NewNumberFromInt(987)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ // 4096 bytes: the REPEAT_256 snippets exceed the usual 1024 buffer.
+ ScopedVector<char> script(4096);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderIf) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"if (p1 > 1) return 1;\n"
+ "return -1;",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(2)}},
+ {"if (p1 > 1) return 1;\n"
+ "return -1;",
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(1)}},
+ {"if (p1 > 1) { return 1; } else { return -1; }",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(2)}},
+ {"if (p1 > 1) { return 1; } else { return -1; }",
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(1)}},
+ {"if (p1 > 50) {\n"
+ " return 1;\n"
+ "} else if (p1 < 10) {\n"
+ " return 10;\n"
+ "} else {\n"
+ " return -10;\n"
+ "}",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(51)}},
+ {"if (p1 > 50) {\n"
+ " return 1;\n"
+ "} else if (p1 < 10) {\n"
+ " return 10;\n"
+ "} else {\n"
+ " return 100;\n"
+ "}",
+ {factory->NewNumberFromInt(10), factory->NewNumberFromInt(9)}},
+ {"if (p1 > 50) {\n"
+ " return 1;\n"
+ "} else if (p1 < 10) {\n"
+ " return 10;\n"
+ "} else {\n"
+ " return 100;\n"
+ "}",
+ {factory->NewNumberFromInt(100), factory->NewNumberFromInt(10)}},
+ {"if (p1 >= 0) {\n"
+ " if (p1 > 10) { return 2; } else { return 1; }\n"
+ "} else {\n"
+ " if (p1 < -10) { return -2; } else { return -1; }\n"
+ "}",
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(100)}},
+ {"if (p1 >= 0) {\n"
+ " if (p1 > 10) { return 2; } else { return 1; }\n"
+ "} else {\n"
+ " if (p1 < -10) { return -2; } else { return -1; }\n"
+ "}",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(10)}},
+ {"if (p1 >= 0) {\n"
+ " if (p1 > 10) { return 2; } else { return 1; }\n"
+ "} else {\n"
+ " if (p1 < -10) { return -2; } else { return -1; }\n"
+ "}",
+ {factory->NewNumberFromInt(-2), factory->NewNumberFromInt(-11)}},
+ {"if (p1 >= 0) {\n"
+ " if (p1 > 10) { return 2; } else { return 1; }\n"
+ "} else {\n"
+ " if (p1 < -10) { return -2; } else { return -1; }\n"
+ "}",
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(-10)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderConditionalOperator) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<1> snippets[] = {
+ {"return (p1 > 1) ? 1 : -1;",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(2)}},
+ {"return (p1 > 1) ? 1 : -1;",
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(0)}},
+ {"return (p1 > 50) ? 1 : ((p1 < 10) ? 10 : -10);",
+ {factory->NewNumberFromInt(10), factory->NewNumberFromInt(2)}},
+ {"return (p1 > 50) ? 1 : ((p1 < 10) ? 10 : -10);",
+ {factory->NewNumberFromInt(-10), factory->NewNumberFromInt(20)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderSwitch) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ const char* switch_code =
+ "switch (p1) {\n"
+ " case 1: return 0;\n"
+ " case 2: return 1;\n"
+ " case 3:\n"
+ " case 4: return 2;\n"
+ " case 9: break;\n"
+ " default: return 3;\n"
+ "}\n"
+ "return 9;";
+
+ ExpectedSnippet<1> snippets[] = {
+ {switch_code,
+ {factory->NewNumberFromInt(0), factory->NewNumberFromInt(1)}},
+ {switch_code,
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(2)}},
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(4)}},
+ {switch_code,
+ {factory->NewNumberFromInt(9), factory->NewNumberFromInt(9)}},
+ {switch_code,
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(5)}},
+ {switch_code,
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(6)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderNestedSwitch) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ const char* switch_code =
+ "switch (p1) {\n"
+ " case 0: {"
+ " switch (p2) { case 0: return 0; case 1: return 1; case 2: break; }\n"
+ " return -1;"
+ " }\n"
+ " case 1: {"
+ " switch (p2) { case 0: return 2; case 1: return 3; }\n"
+ " }\n"
+ " case 2: break;"
+ " }\n"
+ "return -2;";
+
+ ExpectedSnippet<2> snippets[] = {
+ {switch_code,
+ {factory->NewNumberFromInt(0), factory->NewNumberFromInt(0),
+ factory->NewNumberFromInt(0)}},
+ {switch_code,
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(0),
+ factory->NewNumberFromInt(1)}},
+ {switch_code,
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(0),
+ factory->NewNumberFromInt(2)}},
+ {switch_code,
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(0),
+ factory->NewNumberFromInt(3)}},
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(0)}},
+ {switch_code,
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(1)}},
+ {switch_code,
+ {factory->NewNumberFromInt(-2), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2)}},
+ {switch_code,
+ {factory->NewNumberFromInt(-2), factory->NewNumberFromInt(2),
+ factory->NewNumberFromInt(0)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1, p2) { %s };\n%s(0, 0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0), snippets[i].parameter(1))
+ .ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderBreakableBlocks) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var x = 0;\n"
+ "my_heart: {\n"
+ " x = x + 1;\n"
+ " break my_heart;\n"
+ " x = x + 2;\n"
+ "}\n"
+ "return x;\n",
+ {factory->NewNumberFromInt(1)}},
+ {"var sum = 0;\n"
+ "outta_here: {\n"
+ " for (var x = 0; x < 10; ++x) {\n"
+ " for (var y = 0; y < 3; ++y) {\n"
+ " ++sum;\n"
+ " if (x + y == 12) { break outta_here; }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumber(30)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderWhile) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var x = 1; while (x < 1) { x *= 100; } return x;",
+ {factory->NewNumberFromInt(1)}},
+ {"var x = 1, y = 0; while (x < 7) { y += x * x; x += 1; } return y;",
+ {factory->NewNumberFromInt(91)}},
+ {"var x = 1; while (true) { x += 1; if (x == 10) break; } return x;",
+ {factory->NewNumberFromInt(10)}},
+ {"var x = 1; while (false) { x += 1; } return x;",
+ {factory->NewNumberFromInt(1)}},
+ {"var x = 0;\n"
+ "while (true) {\n"
+ " while (x < 10) {\n"
+ " x = x * x + 1;\n"
+ " }"
+ " x += 1;\n"
+ " break;\n"
+ "}\n"
+ "return x;",
+ {factory->NewNumberFromInt(27)}},
+ {"var x = 1, y = 0;\n"
+ "while (x < 7) {\n"
+ " x += 1;\n"
+ " if (x == 2) continue;\n"
+ " if (x == 3) continue;\n"
+ " y += x * x;\n"
+ " if (x == 4) break;\n"
+ "}\n"
+ "return y;",
+ {factory->NewNumberFromInt(16)}}};
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderDo) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var x = 1; do { x *= 100; } while (x < 100); return x;",
+ {factory->NewNumberFromInt(100)}},
+ {"var x = 1; do { x = x * x + 1; } while (x < 7) return x;",
+ {factory->NewNumberFromInt(26)}},
+ {"var x = 1; do { x += 1; } while (false); return x;",
+ {factory->NewNumberFromInt(2)}},
+ {"var x = 1, y = 0;\n"
+ "do {\n"
+ " x += 1;\n"
+ " if (x == 2) continue;\n"
+ " if (x == 3) continue;\n"
+ " y += x * x;\n"
+ " if (x == 4) break;\n"
+ "} while (x < 7);\n"
+ "return y;",
+ {factory->NewNumberFromInt(16)}}};
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderFor) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"for (var x = 0;; x = 2 * x + 1) { if (x > 10) return x; }",
+ {factory->NewNumberFromInt(15)}},
+ {"for (var x = 0; true; x = 2 * x + 1) { if (x > 100) return x; }",
+ {factory->NewNumberFromInt(127)}},
+ {"for (var x = 0; false; x = 2 * x + 1) { if (x > 100) return x; } "
+ "return 0;",
+ {factory->NewNumberFromInt(0)}},
+ {"for (var x = 0; x < 200; x = 2 * x + 1) { x = x; } return x;",
+ {factory->NewNumberFromInt(255)}},
+ {"for (var x = 0; x < 200; x = 2 * x + 1) {} return x;",
+ {factory->NewNumberFromInt(255)}},
+ {"var sum = 0;\n"
+ "for (var x = 0; x < 200; x += 1) {\n"
+ " if (x % 2) continue;\n"
+ " if (sum > 10) break;\n"
+ " sum += x;\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(12)}},
+ {"var sum = 0;\n"
+ "for (var w = 0; w < 2; w++) {\n"
+ " for (var x = 0; x < 200; x += 1) {\n"
+ " if (x % 2) continue;\n"
+ " if (x > 4) break;\n"
+ " sum += x + w;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(15)}},
+ {"var sum = 0;\n"
+ "for (var w = 0; w < 2; w++) {\n"
+ " if (w == 1) break;\n"
+ " for (var x = 0; x < 200; x += 1) {\n"
+ " if (x % 2) continue;\n"
+ " if (x > 4) break;\n"
+ " sum += x + w;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(6)}},
+ {"var sum = 0;\n"
+ "for (var w = 0; w < 3; w++) {\n"
+ " if (w == 1) continue;\n"
+ " for (var x = 0; x < 200; x += 1) {\n"
+ " if (x % 2) continue;\n"
+ " if (x > 4) break;\n"
+ " sum += x + w;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(18)}},
+ {"var sum = 0;\n"
+ "for (var x = 1; x < 10; x += 2) {\n"
+ " for (var y = x; y < x + 2; y++) {\n"
+ " sum += y * y;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(385)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderForIn) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+ ExpectedSnippet<0> snippets[] = {
+ {"var sum = 0;\n"
+ "var empty = null;\n"
+ "for (var x in empty) { sum++; }\n"
+ "return sum;",
+ {factory->NewNumberFromInt(0)}},
+ {"var sum = 100;\n"
+ "var empty = 1;\n"
+ "for (var x in empty) { sum++; }\n"
+ "return sum;",
+ {factory->NewNumberFromInt(100)}},
+ {"for (var x in [ 10, 20, 30 ]) {}\n"
+ "return 2;",
+ {factory->NewNumberFromInt(2)}},
+ {"var last = 0;\n"
+ "for (var x in [ 10, 20, 30 ]) {\n"
+ " last = x;\n"
+ "}\n"
+ "return +last;",
+ {factory->NewNumberFromInt(2)}},
+ {"var first = -1;\n"
+ "for (var x in [ 10, 20, 30 ]) {\n"
+ " first = +x;\n"
+ " if (first > 0) break;\n"
+ "}\n"
+ "return first;",
+ {factory->NewNumberFromInt(1)}},
+ {"var first = -1;\n"
+ "for (var x in [ 10, 20, 30 ]) {\n"
+ " if (first >= 0) continue;\n"
+ " first = x;\n"
+ "}\n"
+ "return +first;",
+ {factory->NewNumberFromInt(0)}},
+ {"var sum = 0;\n"
+ "for (var x in [ 10, 20, 30 ]) {\n"
+ " for (var y in [ 11, 22, 33, 44, 55, 66, 77 ]) {\n"
+ " sum += 1;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(21)}},
+ {"var sum = 0;\n"
+ "for (var x in [ 10, 20, 30 ]) {\n"
+ " for (var y in [ 11, 22, 33, 44, 55, 66, 77 ]) {\n"
+ " if (sum == 7) break;\n"
+ " if (sum == 6) continue;\n"
+ " sum += 1;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(6)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(JumpWithConstantsAndWideConstants) {
+ HandleAndZoneScope scope;
+ auto isolate = scope.main_isolate();
+ const int kStep = 19;
+ int start = 7;
+ for (int constants = start; constants < 256 + 3 * kStep; constants += kStep) {
+ std::stringstream filler_os;
+ // Generate a string that consumes constant pool entries and
+ // spread out branch distances in script below.
+ for (int i = 0; i < constants; i++) {
+ filler_os << "var x_ = 'x_" << i << "';\n";
+ }
+ std::string filler(filler_os.str());
+
+ std::stringstream script_os;
+ script_os << "function " << kFunctionName << "(a) {\n";
+ script_os << " " << filler;
+ script_os << " for (var i = a; i < 2; i++) {\n";
+ script_os << " " << filler;
+ script_os << " if (i == 0) { " << filler << "i = 10; continue; }\n";
+ script_os << " else if (i == a) { " << filler << "i = 12; break; }\n";
+ script_os << " else { " << filler << " }\n";
+ script_os << " }\n";
+ script_os << " return i;\n";
+ script_os << "}\n";
+ script_os << kFunctionName << "(0);\n";
+ std::string script(script_os.str());
+ auto factory = isolate->factory();
+ auto zone = scope.main_zone();
+ for (int a = 0; a < 3; a++) {
+ BytecodeGraphTester tester(isolate, zone, script.c_str());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_val =
+ callable(factory->NewNumberFromInt(a)).ToHandleChecked();
+ static const int results[] = {11, 12, 2};
+ CHECK_EQ(Handle<Smi>::cast(return_val)->value(), results[a]);
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index 458fcbb423..8b4c9dccb1 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/frames-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index 9e82d550b1..f332d7499b 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/frames-inl.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index fd2b3e6d16..b2017114b4 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -37,17 +34,6 @@ TEST(ClassOf) {
}
-TEST(HeapObjectGetMap) {
- FunctionTester T("(function(a) { return %_HeapObjectGetMap(a); })", flags);
-
- Factory* factory = T.main_isolate()->factory();
- T.CheckCall(factory->null_map(), T.null());
- T.CheckCall(factory->undefined_map(), T.undefined());
- T.CheckCall(factory->heap_number_map(), T.Val(3.1415));
- T.CheckCall(factory->symbol_map(), factory->NewSymbol());
-}
-
-
#define COUNTER_NAME "hurz"
static int* LookupCounter(const char* name) {
@@ -162,19 +148,6 @@ TEST(IsSmi) {
}
-TEST(MapGetInstanceType) {
- FunctionTester T(
- "(function(a) { return %_MapGetInstanceType(%_HeapObjectGetMap(a)); })",
- flags);
-
- Factory* factory = T.main_isolate()->factory();
- T.CheckCall(T.Val(ODDBALL_TYPE), T.null());
- T.CheckCall(T.Val(ODDBALL_TYPE), T.undefined());
- T.CheckCall(T.Val(HEAP_NUMBER_TYPE), T.Val(3.1415));
- T.CheckCall(T.Val(SYMBOL_TYPE), factory->NewSymbol());
-}
-
-
TEST(ObjectEquals) {
FunctionTester T("(function(a,b) { return %_ObjectEquals(a,b); })", flags);
CompileRun("var o = {}");
diff --git a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
index 577a6d1dad..613528d7a0 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 2688c622e1..474453da7d 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -145,7 +142,7 @@ TEST(RuntimeCallCPP2) {
TEST(RuntimeCallInline) {
FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a) { return %_IsSpecObject(a); })");
+ FunctionTester T("(function(a) { return %_IsJSReceiver(a); })");
T.CheckCall(T.false_value(), T.Val(23), T.undefined());
T.CheckCall(T.false_value(), T.Val(4.2), T.undefined());
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index 5c56b036ef..37b2a2d243 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
index 5b0fd39283..4bf10ca8fe 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -38,7 +35,6 @@ TEST(ArgumentsUnmapped) {
TEST(ArgumentsRest) {
- FLAG_harmony_rest_parameters = true;
FunctionTester T("(function(a, ...args) { return args; })");
Handle<Object> arguments;
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index ff18613b10..9a2c4679a0 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -398,6 +395,7 @@ TEST(GlobalLoad) {
TEST(GlobalStoreSloppy) {
+ FLAG_legacy_const = true;
FunctionTester T("(function(a,b) { g = a + b; return g; })");
T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 038fe241eb..11a3582cbb 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -2,9 +2,6 @@
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <cmath>
#include <functional>
#include <limits>
@@ -23,8 +20,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-typedef RawMachineAssembler::Label MLabel;
-
TEST(RunInt32Add) {
RawMachineAssemblerTester<int32_t> m;
@@ -35,7 +30,7 @@ TEST(RunInt32Add) {
TEST(RunWord32Ctz) {
- BufferedRawMachineAssemblerTester<int32_t> m(kMachUint32);
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
if (!m.machine()->Word32Ctz().IsSupported()) {
// We can only test the operator if it exists on the testing platform.
return;
@@ -79,7 +74,7 @@ TEST(RunWord32Ctz) {
TEST(RunWord32Clz) {
- BufferedRawMachineAssemblerTester<int32_t> m(kMachUint32);
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
m.Return(m.Word32Clz(m.Parameter(0)));
CHECK_EQ(0, m.Call(uint32_t(0x80001000)));
@@ -119,7 +114,7 @@ TEST(RunWord32Clz) {
TEST(RunWord32Popcnt) {
- BufferedRawMachineAssemblerTester<int32_t> m(kMachUint32);
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
if (!m.machine()->Word32Popcnt().IsSupported()) {
// We can only test the operator if it exists on the testing platform.
return;
@@ -139,7 +134,7 @@ TEST(RunWord32Popcnt) {
#if V8_TARGET_ARCH_64_BIT
TEST(RunWord64Clz) {
- BufferedRawMachineAssemblerTester<int32_t> m(kMachUint64);
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint64());
m.Return(m.Word64Clz(m.Parameter(0)));
CHECK_EQ(0, m.Call(uint64_t(0x8000100000000000)));
@@ -211,7 +206,7 @@ TEST(RunWord64Clz) {
TEST(RunWord64Ctz) {
- RawMachineAssemblerTester<int32_t> m(kMachUint64);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint64());
if (!m.machine()->Word64Ctz().IsSupported()) {
return;
}
@@ -287,7 +282,7 @@ TEST(RunWord64Ctz) {
TEST(RunWord64Popcnt) {
- BufferedRawMachineAssemblerTester<int32_t> m(kMachUint64);
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint64());
if (!m.machine()->Word64Popcnt().IsSupported()) {
return;
}
@@ -323,7 +318,7 @@ static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
case 6:
return m->Int32Constant(0x01234567);
case 7:
- return m->Load(kMachInt32, m->PointerConstant(NULL));
+ return m->Load(MachineType::Int32(), m->PointerConstant(NULL));
default:
return NULL;
}
@@ -348,7 +343,8 @@ TEST(CodeGenInt32Binop) {
for (size_t i = 0; i < arraysize(kOps); ++i) {
for (int j = 0; j < 8; j++) {
for (int k = 0; k < 8; k++) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
Node* a = Int32Input(&m, j);
Node* b = Int32Input(&m, k);
m.Return(m.AddNode(kOps[i], a, b));
@@ -384,7 +380,7 @@ static Node* Int64Input(RawMachineAssemblerTester<int64_t>* m, int index) {
case 6:
return m->Int64Constant(0x0123456789abcdefLL);
case 7:
- return m->Load(kMachInt64, m->PointerConstant(NULL));
+ return m->Load(MachineType::Int64(), m->PointerConstant(NULL));
default:
return NULL;
}
@@ -408,7 +404,8 @@ TEST(CodeGenInt64Binop) {
for (size_t i = 0; i < arraysize(kOps); ++i) {
for (int j = 0; j < 8; j++) {
for (int k = 0; k < 8; k++) {
- RawMachineAssemblerTester<int64_t> m(kMachInt64, kMachInt64);
+ RawMachineAssemblerTester<int64_t> m(MachineType::Int64(),
+ MachineType::Int64());
Node* a = Int64Input(&m, j);
Node* b = Int64Input(&m, k);
m.Return(m.AddNode(kOps[i], a, b));
@@ -419,6 +416,190 @@ TEST(CodeGenInt64Binop) {
}
+TEST(RunInt64AddWithOverflowP) {
+ int64_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int64BinopTester bt(&m);
+ Node* add = m.Int64AddWithOverflow(bt.param0, bt.param1);
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ bt.AddReturn(ovf);
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int64_t expected_val;
+ int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt64AddWithOverflowImm) {
+ int64_t actual_val = -1, expected_val = 0;
+ FOR_INT64_INPUTS(i) {
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
+ Node* add = m.Int64AddWithOverflow(m.Int64Constant(*i), m.Parameter(0));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ FOR_INT64_INPUTS(j) {
+ int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
+ Node* add = m.Int64AddWithOverflow(m.Parameter(0), m.Int64Constant(*i));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ FOR_INT64_INPUTS(j) {
+ int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ FOR_INT64_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* add =
+ m.Int64AddWithOverflow(m.Int64Constant(*i), m.Int64Constant(*j));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ int expected_ovf = bits::SignedAddOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt64AddWithOverflowInBranchP) {
+ int constant = 911777;
+ RawMachineLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int64BinopTester bt(&m);
+ Node* add = m.Int64AddWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, add);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Int64Constant(constant));
+ m.Bind(&blockb);
+ Node* val = m.Projection(0, add);
+ Node* truncated = m.TruncateInt64ToInt32(val);
+ bt.AddReturn(truncated);
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int32_t expected = constant;
+ int64_t result;
+ if (!bits::SignedAddOverflow64(*i, *j, &result)) {
+ expected = static_cast<int32_t>(result);
+ }
+ CHECK_EQ(expected, bt.call(*i, *j));
+ }
+ }
+}
+
+
+TEST(RunInt64SubWithOverflowP) {
+ int64_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int64BinopTester bt(&m);
+ Node* add = m.Int64SubWithOverflow(bt.param0, bt.param1);
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ bt.AddReturn(ovf);
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int64_t expected_val;
+ int expected_ovf = bits::SignedSubOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt64SubWithOverflowImm) {
+ int64_t actual_val = -1, expected_val = 0;
+ FOR_INT64_INPUTS(i) {
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
+ Node* add = m.Int64SubWithOverflow(m.Int64Constant(*i), m.Parameter(0));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ FOR_INT64_INPUTS(j) {
+ int expected_ovf = bits::SignedSubOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int64());
+ Node* add = m.Int64SubWithOverflow(m.Parameter(0), m.Int64Constant(*i));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ FOR_INT64_INPUTS(j) {
+ int expected_ovf = bits::SignedSubOverflow64(*j, *i, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ FOR_INT64_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* add =
+ m.Int64SubWithOverflow(m.Int64Constant(*i), m.Int64Constant(*j));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord64, val);
+ m.Return(ovf);
+ int expected_ovf = bits::SignedSubOverflow64(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt64SubWithOverflowInBranchP) {
+ int constant = 911999;
+ RawMachineLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int64BinopTester bt(&m);
+ Node* sub = m.Int64SubWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, sub);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Int64Constant(constant));
+ m.Bind(&blockb);
+ Node* val = m.Projection(0, sub);
+ Node* truncated = m.TruncateInt64ToInt32(val);
+ bt.AddReturn(truncated);
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int32_t expected = constant;
+ int64_t result;
+ if (!bits::SignedSubOverflow64(*i, *j, &result)) {
+ expected = static_cast<int32_t>(result);
+ }
+ CHECK_EQ(expected, static_cast<int32_t>(bt.call(*i, *j)));
+ }
+ }
+}
+
+
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TARGET_ARCH_64_BIT
@@ -427,7 +608,7 @@ TEST(RunGoto) {
RawMachineAssemblerTester<int32_t> m;
int constant = 99999;
- MLabel next;
+ RawMachineLabel next;
m.Goto(&next);
m.Bind(&next);
m.Return(m.Int32Constant(constant));
@@ -440,7 +621,7 @@ TEST(RunGotoMultiple) {
RawMachineAssemblerTester<int32_t> m;
int constant = 9999977;
- MLabel labels[10];
+ RawMachineLabel labels[10];
for (size_t i = 0; i < arraysize(labels); i++) {
m.Goto(&labels[i]);
m.Bind(&labels[i]);
@@ -455,7 +636,7 @@ TEST(RunBranch) {
RawMachineAssemblerTester<int32_t> m;
int constant = 999777;
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(m.Int32Constant(0), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(0 - constant));
@@ -471,7 +652,7 @@ TEST(RunDiamond2) {
int constant = 995666;
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
m.Branch(m.Int32Constant(0), &blocka, &blockb);
m.Bind(&blocka);
m.Goto(&end);
@@ -488,7 +669,7 @@ TEST(RunLoop) {
RawMachineAssemblerTester<int32_t> m;
int constant = 999555;
- MLabel header, body, exit;
+ RawMachineLabel header, body, exit;
m.Goto(&header);
m.Bind(&header);
m.Branch(m.Int32Constant(0), &body, &exit);
@@ -503,9 +684,9 @@ TEST(RunLoop) {
template <typename R>
static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
- MachineType type, Node* true_node,
+ MachineRepresentation rep, Node* true_node,
Node* false_node) {
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
m->Branch(cond_node, &blocka, &blockb);
m->Bind(&blocka);
m->Goto(&end);
@@ -513,51 +694,55 @@ static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
m->Goto(&end);
m->Bind(&end);
- Node* phi = m->Phi(type, true_node, false_node);
+ Node* phi = m->Phi(rep, true_node, false_node);
m->Return(phi);
}
TEST(RunDiamondPhiConst) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int false_val = 0xFF666;
int true_val = 0x00DDD;
Node* true_node = m.Int32Constant(true_val);
Node* false_node = m.Int32Constant(false_val);
- BuildDiamondPhi(&m, m.Parameter(0), kMachInt32, true_node, false_node);
+ BuildDiamondPhi(&m, m.Parameter(0), MachineRepresentation::kWord32, true_node,
+ false_node);
CHECK_EQ(false_val, m.Call(0));
CHECK_EQ(true_val, m.Call(1));
}
TEST(RunDiamondPhiNumber) {
- RawMachineAssemblerTester<Object*> m(kMachInt32);
+ RawMachineAssemblerTester<Object*> m(MachineType::Int32());
double false_val = -11.1;
double true_val = 200.1;
Node* true_node = m.NumberConstant(true_val);
Node* false_node = m.NumberConstant(false_val);
- BuildDiamondPhi(&m, m.Parameter(0), kMachAnyTagged, true_node, false_node);
+ BuildDiamondPhi(&m, m.Parameter(0), MachineRepresentation::kTagged, true_node,
+ false_node);
m.CheckNumber(false_val, m.Call(0));
m.CheckNumber(true_val, m.Call(1));
}
TEST(RunDiamondPhiString) {
- RawMachineAssemblerTester<Object*> m(kMachInt32);
+ RawMachineAssemblerTester<Object*> m(MachineType::Int32());
const char* false_val = "false";
const char* true_val = "true";
Node* true_node = m.StringConstant(true_val);
Node* false_node = m.StringConstant(false_val);
- BuildDiamondPhi(&m, m.Parameter(0), kMachAnyTagged, true_node, false_node);
+ BuildDiamondPhi(&m, m.Parameter(0), MachineRepresentation::kTagged, true_node,
+ false_node);
m.CheckString(false_val, m.Call(0));
m.CheckString(true_val, m.Call(1));
}
TEST(RunDiamondPhiParam) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
- BuildDiamondPhi(&m, m.Parameter(0), kMachInt32, m.Parameter(1),
- m.Parameter(2));
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
+ BuildDiamondPhi(&m, m.Parameter(0), MachineRepresentation::kWord32,
+ m.Parameter(1), m.Parameter(2));
int32_t c1 = 0x260cb75a;
int32_t c2 = 0xcd3e9c8b;
int result = m.Call(0, c1, c2);
@@ -577,11 +762,11 @@ TEST(RunLoopPhiConst) {
Node* false_node = m.Int32Constant(false_val);
// x = false_val; while(false) { x = true_val; } return x;
- MLabel body, header, end;
+ RawMachineLabel body, header, end;
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, false_node, true_node);
+ Node* phi = m.Phi(MachineRepresentation::kWord32, false_node, true_node);
m.Branch(cond_node, &body, &end);
m.Bind(&body);
m.Goto(&header);
@@ -593,15 +778,18 @@ TEST(RunLoopPhiConst) {
TEST(RunLoopPhiParam) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
m.Goto(&blocka);
m.Bind(&blocka);
- Node* phi = m.Phi(kMachInt32, m.Parameter(1), m.Parameter(2));
- Node* cond = m.Phi(kMachInt32, m.Parameter(0), m.Int32Constant(0));
+ Node* phi =
+ m.Phi(MachineRepresentation::kWord32, m.Parameter(1), m.Parameter(2));
+ Node* cond =
+ m.Phi(MachineRepresentation::kWord32, m.Parameter(0), m.Int32Constant(0));
m.Branch(cond, &blockb, &end);
m.Bind(&blockb);
@@ -625,13 +813,13 @@ TEST(RunLoopPhiInduction) {
int false_val = 0x10777;
// x = false_val; while(false) { x++; } return x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* false_node = m.Int32Constant(false_val);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, false_node, false_node);
+ Node* phi = m.Phi(MachineRepresentation::kWord32, false_node, false_node);
m.Branch(m.Int32Constant(0), &body, &end);
m.Bind(&body);
@@ -651,13 +839,13 @@ TEST(RunLoopIncrement) {
Int32BinopTester bt(&m);
// x = 0; while(x ^ param) { x++; } return x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* zero = m.Int32Constant(0);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, zero, zero);
+ Node* phi = m.Phi(MachineRepresentation::kWord32, zero, zero);
m.Branch(m.WordXor(phi, bt.param0), &body, &end);
m.Bind(&body);
@@ -678,13 +866,13 @@ TEST(RunLoopIncrement2) {
Int32BinopTester bt(&m);
// x = 0; while(x < param) { x++; } return x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* zero = m.Int32Constant(0);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, zero, zero);
+ Node* phi = m.Phi(MachineRepresentation::kWord32, zero, zero);
m.Branch(m.Int32LessThan(phi, bt.param0), &body, &end);
m.Bind(&body);
@@ -706,13 +894,13 @@ TEST(RunLoopIncrement3) {
Int32BinopTester bt(&m);
// x = 0; while(x < param) { x++; } return x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* zero = m.Int32Constant(0);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, zero, zero);
+ Node* phi = m.Phi(MachineRepresentation::kWord32, zero, zero);
m.Branch(m.Uint32LessThan(phi, bt.param0), &body, &end);
m.Bind(&body);
@@ -734,12 +922,13 @@ TEST(RunLoopDecrement) {
Int32BinopTester bt(&m);
// x = param; while(x) { x--; } return x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, bt.param0, m.Int32Constant(0));
+ Node* phi =
+ m.Phi(MachineRepresentation::kWord32, bt.param0, m.Int32Constant(0));
m.Branch(phi, &body, &end);
m.Bind(&body);
@@ -759,14 +948,14 @@ TEST(RunLoopIncrementFloat32) {
RawMachineAssemblerTester<int32_t> m;
// x = -3.0f; while(x < 10f) { x = x + 0.5f; } return (int) (double) x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* minus_3 = m.Float32Constant(-3.0f);
Node* ten = m.Float32Constant(10.0f);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachFloat32, minus_3, ten);
+ Node* phi = m.Phi(MachineRepresentation::kFloat32, minus_3, ten);
m.Branch(m.Float32LessThan(phi, ten), &body, &end);
m.Bind(&body);
@@ -784,14 +973,14 @@ TEST(RunLoopIncrementFloat64) {
RawMachineAssemblerTester<int32_t> m;
// x = -3.0; while(x < 10) { x = x + 0.5; } return (int) x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* minus_3 = m.Float64Constant(-3.0);
Node* ten = m.Float64Constant(10.0);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachFloat64, minus_3, ten);
+ Node* phi = m.Phi(MachineRepresentation::kFloat64, minus_3, ten);
m.Branch(m.Float64LessThan(phi, ten), &body, &end);
m.Bind(&body);
@@ -810,8 +999,8 @@ TEST(RunSwitch1) {
int constant = 11223344;
- MLabel block0, block1, def, end;
- MLabel* case_labels[] = {&block0, &block1};
+ RawMachineLabel block0, block1, def, end;
+ RawMachineLabel* case_labels[] = {&block0, &block1};
int32_t case_values[] = {0, 1};
m.Switch(m.Int32Constant(0), &def, case_values, case_labels,
arraysize(case_labels));
@@ -829,10 +1018,10 @@ TEST(RunSwitch1) {
TEST(RunSwitch2) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- MLabel blocka, blockb, blockc;
- MLabel* case_labels[] = {&blocka, &blockb};
+ RawMachineLabel blocka, blockb, blockc;
+ RawMachineLabel* case_labels[] = {&blocka, &blockb};
int32_t case_values[] = {std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max()};
m.Switch(m.Parameter(0), &blockc, case_values, case_labels,
@@ -853,10 +1042,10 @@ TEST(RunSwitch2) {
TEST(RunSwitch3) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- MLabel blocka, blockb, blockc;
- MLabel* case_labels[] = {&blocka, &blockb};
+ RawMachineLabel blocka, blockb, blockc;
+ RawMachineLabel* case_labels[] = {&blocka, &blockb};
int32_t case_values[] = {std::numeric_limits<int32_t>::min() + 0,
std::numeric_limits<int32_t>::min() + 1};
m.Switch(m.Parameter(0), &blockc, case_values, case_labels,
@@ -877,20 +1066,21 @@ TEST(RunSwitch3) {
TEST(RunSwitch4) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
const size_t kNumCases = 512;
const size_t kNumValues = kNumCases + 1;
int32_t values[kNumValues];
m.main_isolate()->random_number_generator()->NextBytes(values,
sizeof(values));
- MLabel end, def;
+ RawMachineLabel end, def;
int32_t case_values[kNumCases];
- MLabel* case_labels[kNumCases];
+ RawMachineLabel* case_labels[kNumCases];
Node* results[kNumValues];
for (size_t i = 0; i < kNumCases; ++i) {
case_values[i] = static_cast<int32_t>(i);
- case_labels[i] = new (m.main_zone()->New(sizeof(MLabel))) MLabel;
+ case_labels[i] =
+ new (m.main_zone()->New(sizeof(RawMachineLabel))) RawMachineLabel;
}
m.Switch(m.Parameter(0), &def, case_values, case_labels,
arraysize(case_labels));
@@ -905,7 +1095,8 @@ TEST(RunSwitch4) {
m.Bind(&end);
const int num_results = static_cast<int>(arraysize(results));
Node* phi =
- m.AddNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
+ m.AddNode(m.common()->Phi(MachineRepresentation::kWord32, num_results),
+ num_results, results);
m.Return(phi);
for (size_t i = 0; i < kNumValues; ++i) {
@@ -918,7 +1109,7 @@ TEST(RunLoadInt32) {
RawMachineAssemblerTester<int32_t> m;
int32_t p1 = 0; // loads directly from this location.
- m.Return(m.LoadFromPointer(&p1, kMachInt32));
+ m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
FOR_INT32_INPUTS(i) {
p1 = *i;
@@ -938,7 +1129,7 @@ TEST(RunLoadInt32Offset) {
int32_t offset = offsets[i];
byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
// generate load [#base + #index]
- m.Return(m.LoadFromPointer(pointer, kMachInt32, offset));
+ m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
FOR_INT32_INPUTS(j) {
p1 = *j;
@@ -959,10 +1150,10 @@ TEST(RunLoadStoreFloat32Offset) {
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
- Node* load =
- m.Load(kMachFloat32, m.PointerConstant(from), m.IntPtrConstant(offset));
- m.Store(kMachFloat32, m.PointerConstant(to), m.IntPtrConstant(offset), load,
- kNoWriteBarrier);
+ Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
FOR_FLOAT32_INPUTS(j) {
@@ -986,10 +1177,10 @@ TEST(RunLoadStoreFloat64Offset) {
byte* from = reinterpret_cast<byte*>(&p1) - offset;
byte* to = reinterpret_cast<byte*>(&p2) - offset;
// generate load [#base + #index]
- Node* load =
- m.Load(kMachFloat64, m.PointerConstant(from), m.IntPtrConstant(offset));
- m.Store(kMachFloat64, m.PointerConstant(to), m.IntPtrConstant(offset), load,
- kNoWriteBarrier);
+ Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
FOR_FLOAT64_INPUTS(j) {
@@ -1020,7 +1211,8 @@ TEST(RunInt32AddP) {
TEST(RunInt32AddAndWord32EqualP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Equal(m.Parameter(1), m.Parameter(2))));
FOR_INT32_INPUTS(i) {
@@ -1035,7 +1227,8 @@ TEST(RunInt32AddAndWord32EqualP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Word32Equal(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1055,7 +1248,8 @@ TEST(RunInt32AddAndWord32EqualP) {
TEST(RunInt32AddAndWord32EqualImm) {
{
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(*i),
m.Word32Equal(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(j) {
@@ -1070,7 +1264,8 @@ TEST(RunInt32AddAndWord32EqualImm) {
}
{
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Word32Equal(m.Int32Constant(*i), m.Parameter(0)),
m.Parameter(1)));
FOR_INT32_INPUTS(j) {
@@ -1088,7 +1283,8 @@ TEST(RunInt32AddAndWord32EqualImm) {
TEST(RunInt32AddAndWord32NotEqualP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Parameter(0),
m.Word32NotEqual(m.Parameter(1), m.Parameter(2))));
FOR_INT32_INPUTS(i) {
@@ -1103,7 +1299,8 @@ TEST(RunInt32AddAndWord32NotEqualP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Word32NotEqual(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1123,7 +1320,8 @@ TEST(RunInt32AddAndWord32NotEqualP) {
TEST(RunInt32AddAndWord32NotEqualImm) {
{
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(*i),
m.Word32NotEqual(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(j) {
@@ -1138,7 +1336,8 @@ TEST(RunInt32AddAndWord32NotEqualImm) {
}
{
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Word32NotEqual(m.Int32Constant(*i), m.Parameter(0)),
m.Parameter(1)));
FOR_INT32_INPUTS(j) {
@@ -1156,7 +1355,8 @@ TEST(RunInt32AddAndWord32NotEqualImm) {
TEST(RunInt32AddAndWord32SarP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Sar(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1170,7 +1370,8 @@ TEST(RunInt32AddAndWord32SarP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1188,7 +1389,8 @@ TEST(RunInt32AddAndWord32SarP) {
TEST(RunInt32AddAndWord32ShlP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1202,7 +1404,8 @@ TEST(RunInt32AddAndWord32ShlP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1220,7 +1423,8 @@ TEST(RunInt32AddAndWord32ShlP) {
TEST(RunInt32AddAndWord32ShrP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Parameter(0),
m.Word32Shr(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1234,7 +1438,8 @@ TEST(RunInt32AddAndWord32ShrP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Add(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
@@ -1255,7 +1460,7 @@ TEST(RunInt32AddInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -1273,7 +1478,7 @@ TEST(RunInt32AddInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -1290,8 +1495,8 @@ TEST(RunInt32AddInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -1307,8 +1512,8 @@ TEST(RunInt32AddInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -1328,9 +1533,9 @@ TEST(RunInt32AddInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
@@ -1394,7 +1599,7 @@ TEST(RunInt32AddInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -1405,7 +1610,7 @@ TEST(RunInt32AddInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -1420,8 +1625,8 @@ TEST(RunInt32AddInComparison) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Word32Equal(
m.Int32Add(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1), m.Parameter(2))),
@@ -1471,7 +1676,7 @@ TEST(RunInt32SubP) {
TEST(RunInt32SubImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i - *j;
@@ -1481,7 +1686,7 @@ TEST(RunInt32SubImm) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j - *i;
@@ -1494,7 +1699,8 @@ TEST(RunInt32SubImm) {
TEST(RunInt32SubAndWord32SarP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Sar(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1507,7 +1713,8 @@ TEST(RunInt32SubAndWord32SarP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1524,7 +1731,8 @@ TEST(RunInt32SubAndWord32SarP) {
TEST(RunInt32SubAndWord32ShlP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1537,7 +1745,8 @@ TEST(RunInt32SubAndWord32ShlP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1555,8 +1764,8 @@ TEST(RunInt32SubAndWord32ShlP) {
TEST(RunInt32SubAndWord32ShrP) {
{
- RawMachineAssemblerTester<uint32_t> m(kMachUint32, kMachUint32,
- kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Parameter(0),
m.Word32Shr(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -1570,8 +1779,8 @@ TEST(RunInt32SubAndWord32ShrP) {
}
}
{
- RawMachineAssemblerTester<uint32_t> m(kMachUint32, kMachUint32,
- kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Int32Sub(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
@@ -1592,7 +1801,7 @@ TEST(RunInt32SubInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -1610,7 +1819,7 @@ TEST(RunInt32SubInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -1627,8 +1836,8 @@ TEST(RunInt32SubInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -1644,8 +1853,8 @@ TEST(RunInt32SubInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -1665,9 +1874,9 @@ TEST(RunInt32SubInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
@@ -1731,7 +1940,7 @@ TEST(RunInt32SubInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -1742,7 +1951,7 @@ TEST(RunInt32SubInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -1757,8 +1966,8 @@ TEST(RunInt32SubInComparison) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Word32Equal(
m.Int32Sub(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1), m.Parameter(2))),
@@ -1833,7 +2042,7 @@ TEST(RunInt32MulHighP) {
TEST(RunInt32MulImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i * *j;
@@ -1843,7 +2052,7 @@ TEST(RunInt32MulImm) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j * *i;
@@ -1858,7 +2067,7 @@ TEST(RunInt32MulAndInt32AddP) {
{
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
int32_t p0 = *i;
int32_t p1 = *j;
m.Return(m.Int32Add(m.Int32Constant(p0),
@@ -1872,7 +2081,8 @@ TEST(RunInt32MulAndInt32AddP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
FOR_INT32_INPUTS(i) {
@@ -1888,7 +2098,8 @@ TEST(RunInt32MulAndInt32AddP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Add(m.Int32Mul(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -1924,7 +2135,8 @@ TEST(RunInt32MulAndInt32AddP) {
TEST(RunInt32MulAndInt32SubP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -2234,7 +2446,7 @@ TEST(RunWord32AndAndWord32SarP) {
TEST(RunWord32AndImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & *j;
@@ -2244,7 +2456,7 @@ TEST(RunWord32AndImm) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i & ~(*j);
@@ -2260,7 +2472,7 @@ TEST(RunWord32AndInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -2278,7 +2490,7 @@ TEST(RunWord32AndInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -2295,8 +2507,8 @@ TEST(RunWord32AndInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -2312,8 +2524,8 @@ TEST(RunWord32AndInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -2334,9 +2546,9 @@ TEST(RunWord32AndInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
@@ -2400,7 +2612,7 @@ TEST(RunWord32AndInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -2411,7 +2623,7 @@ TEST(RunWord32AndInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -2463,7 +2675,7 @@ TEST(RunWord32OrP) {
TEST(RunWord32OrImm) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | *j;
@@ -2473,7 +2685,7 @@ TEST(RunWord32OrImm) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i | ~(*j);
@@ -2489,7 +2701,7 @@ TEST(RunWord32OrInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -2507,7 +2719,7 @@ TEST(RunWord32OrInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -2524,8 +2736,8 @@ TEST(RunWord32OrInBranch) {
}
{
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -2541,8 +2753,8 @@ TEST(RunWord32OrInBranch) {
}
{
FOR_INT32_INPUTS(i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -2562,9 +2774,9 @@ TEST(RunWord32OrInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
@@ -2628,7 +2840,7 @@ TEST(RunWord32OrInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -2639,7 +2851,7 @@ TEST(RunWord32OrInComparison) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(*i)),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(j) {
@@ -2654,7 +2866,7 @@ TEST(RunWord32OrInComparison) {
TEST(RunWord32XorP) {
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ *j;
@@ -2697,7 +2909,7 @@ TEST(RunWord32XorP) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *i ^ ~(*j);
@@ -2713,7 +2925,7 @@ TEST(RunWord32XorInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32Equal(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -2731,7 +2943,7 @@ TEST(RunWord32XorInBranch) {
{
RawMachineAssemblerTester<int32_t> m;
Uint32BinopTester bt(&m);
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
&blocka, &blockb);
@@ -2748,8 +2960,8 @@ TEST(RunWord32XorInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -2765,8 +2977,8 @@ TEST(RunWord32XorInBranch) {
}
{
FOR_UINT32_INPUTS(i) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(
m.Word32NotEqual(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
m.Int32Constant(0)),
@@ -2787,9 +2999,9 @@ TEST(RunWord32XorInBranch) {
m.machine()->Word32Shl(),
m.machine()->Word32Shr()};
for (size_t n = 0; n < arraysize(shops); n++) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
- kMachUint32);
- MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Int32(), MachineType::Uint32());
+ RawMachineLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
@@ -2829,7 +3041,7 @@ TEST(RunWord32XorInBranch) {
TEST(RunWord32ShlP) {
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j << shift;
@@ -2878,7 +3090,7 @@ TEST(RunWord32ShlInComparison) {
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))));
@@ -2890,7 +3102,7 @@ TEST(RunWord32ShlInComparison) {
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
@@ -2906,7 +3118,7 @@ TEST(RunWord32ShlInComparison) {
TEST(RunWord32ShrP) {
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
uint32_t expected = *j >> shift;
@@ -2956,7 +3168,7 @@ TEST(RunWord32ShrInComparison) {
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
@@ -2968,7 +3180,7 @@ TEST(RunWord32ShrInComparison) {
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
@@ -2984,7 +3196,7 @@ TEST(RunWord32ShrInComparison) {
TEST(RunWord32SarP) {
{
FOR_INT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)));
FOR_INT32_INPUTS(j) {
int32_t expected = *j >> shift;
@@ -3034,7 +3246,7 @@ TEST(RunWord32SarInComparison) {
}
{
FOR_INT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Sar(m.Parameter(0), m.Int32Constant(shift))));
@@ -3046,7 +3258,7 @@ TEST(RunWord32SarInComparison) {
}
{
FOR_INT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(
m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
@@ -3062,7 +3274,7 @@ TEST(RunWord32SarInComparison) {
TEST(RunWord32RorP) {
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
m.Return(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)));
FOR_UINT32_INPUTS(j) {
int32_t expected = bits::RotateRight32(*j, shift);
@@ -3111,7 +3323,7 @@ TEST(RunWord32RorInComparison) {
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
m.Word32Equal(m.Int32Constant(0),
m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
@@ -3123,7 +3335,7 @@ TEST(RunWord32RorInComparison) {
}
{
FOR_UINT32_SHIFTS(shift) {
- RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+ RawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
m.Return(
m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(0)));
@@ -3137,7 +3349,7 @@ TEST(RunWord32RorInComparison) {
TEST(RunWord32NotP) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Word32Not(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
int expected = ~(*i);
@@ -3147,7 +3359,7 @@ TEST(RunWord32NotP) {
TEST(RunInt32NegP) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
m.Return(m.Int32Neg(m.Parameter(0)));
FOR_INT32_INPUTS(i) {
int expected = -*i;
@@ -3158,7 +3370,8 @@ TEST(RunInt32NegP) {
TEST(RunWord32EqualAndWord32SarP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Uint32());
m.Return(m.Word32Equal(m.Parameter(0),
m.Word32Sar(m.Parameter(1), m.Parameter(2))));
FOR_INT32_INPUTS(i) {
@@ -3171,7 +3384,8 @@ TEST(RunWord32EqualAndWord32SarP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_INT32_INPUTS(i) {
@@ -3188,7 +3402,8 @@ TEST(RunWord32EqualAndWord32SarP) {
TEST(RunWord32EqualAndWord32ShlP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Word32Equal(m.Parameter(0),
m.Word32Shl(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -3201,7 +3416,8 @@ TEST(RunWord32EqualAndWord32ShlP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
@@ -3218,7 +3434,8 @@ TEST(RunWord32EqualAndWord32ShlP) {
TEST(RunWord32EqualAndWord32ShrP) {
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Word32Equal(m.Parameter(0),
m.Word32Shr(m.Parameter(1), m.Parameter(2))));
FOR_UINT32_INPUTS(i) {
@@ -3231,7 +3448,8 @@ TEST(RunWord32EqualAndWord32ShrP) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+ RawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
m.Return(m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
FOR_UINT32_INPUTS(i) {
@@ -3248,7 +3466,8 @@ TEST(RunWord32EqualAndWord32ShrP) {
TEST(RunDeadNodes) {
for (int i = 0; true; i++) {
- RawMachineAssemblerTester<int32_t> m(i == 5 ? kMachInt32 : kMachNone);
+ RawMachineAssemblerTester<int32_t> m(i == 5 ? MachineType::Int32()
+ : MachineType::None());
int constant = 0x55 + i;
switch (i) {
case 0:
@@ -3264,7 +3483,7 @@ TEST(RunDeadNodes) {
m.PointerConstant(&constant);
break;
case 4:
- m.LoadFromPointer(&constant, kMachInt32);
+ m.LoadFromPointer(&constant, MachineType::Int32());
break;
case 5:
m.Parameter(0);
@@ -3299,7 +3518,8 @@ TEST(RunDeadInt32Binops) {
m.machine()->Uint32LessThanOrEqual()};
for (size_t i = 0; i < arraysize(kOps); ++i) {
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
int32_t constant = static_cast<int32_t>(0x55555 + i);
m.AddNode(kOps[i], m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(constant));
@@ -3337,16 +3557,16 @@ static void RunLoadImmIndex(MachineType rep) {
TEST(RunLoadImmIndex) {
- RunLoadImmIndex<int8_t>(kMachInt8);
- RunLoadImmIndex<uint8_t>(kMachUint8);
- RunLoadImmIndex<int16_t>(kMachInt16);
- RunLoadImmIndex<uint16_t>(kMachUint16);
- RunLoadImmIndex<int32_t>(kMachInt32);
- RunLoadImmIndex<uint32_t>(kMachUint32);
- RunLoadImmIndex<int32_t*>(kMachAnyTagged);
+ RunLoadImmIndex<int8_t>(MachineType::Int8());
+ RunLoadImmIndex<uint8_t>(MachineType::Uint8());
+ RunLoadImmIndex<int16_t>(MachineType::Int16());
+ RunLoadImmIndex<uint16_t>(MachineType::Uint16());
+ RunLoadImmIndex<int32_t>(MachineType::Int32());
+ RunLoadImmIndex<uint32_t>(MachineType::Uint32());
+ RunLoadImmIndex<int32_t*>(MachineType::AnyTagged());
// TODO(titzer): test kRepBit loads
- // TODO(titzer): test kMachFloat64 loads
+ // TODO(titzer): test MachineType::Float64() loads
// TODO(titzer): test various indexing modes.
}
@@ -3370,7 +3590,7 @@ static void RunLoadStore(MachineType rep) {
Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
Node* load = m.Load(rep, base, index0);
Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
- m.Store(rep, base, index1, load, kNoWriteBarrier);
+ m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
m.Return(m.Int32Constant(OK));
CHECK(buffer[x] != buffer[y]);
@@ -3381,20 +3601,21 @@ static void RunLoadStore(MachineType rep) {
TEST(RunLoadStore) {
- RunLoadStore<int8_t>(kMachInt8);
- RunLoadStore<uint8_t>(kMachUint8);
- RunLoadStore<int16_t>(kMachInt16);
- RunLoadStore<uint16_t>(kMachUint16);
- RunLoadStore<int32_t>(kMachInt32);
- RunLoadStore<uint32_t>(kMachUint32);
- RunLoadStore<void*>(kMachAnyTagged);
- RunLoadStore<float>(kMachFloat32);
- RunLoadStore<double>(kMachFloat64);
+ RunLoadStore<int8_t>(MachineType::Int8());
+ RunLoadStore<uint8_t>(MachineType::Uint8());
+ RunLoadStore<int16_t>(MachineType::Int16());
+ RunLoadStore<uint16_t>(MachineType::Uint16());
+ RunLoadStore<int32_t>(MachineType::Int32());
+ RunLoadStore<uint32_t>(MachineType::Uint32());
+ RunLoadStore<void*>(MachineType::AnyTagged());
+ RunLoadStore<float>(MachineType::Float32());
+ RunLoadStore<double>(MachineType::Float64());
}
TEST(RunFloat32Add) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
m.Return(m.Float32Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -3407,7 +3628,8 @@ TEST(RunFloat32Add) {
TEST(RunFloat32Sub) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
m.Return(m.Float32Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -3420,7 +3642,8 @@ TEST(RunFloat32Sub) {
TEST(RunFloat32Mul) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
m.Return(m.Float32Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -3433,7 +3656,8 @@ TEST(RunFloat32Mul) {
TEST(RunFloat32Div) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32, kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
m.Return(m.Float32Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -3446,27 +3670,36 @@ TEST(RunFloat32Div) {
TEST(RunFloat64Add) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
m.Return(m.Float64Add(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i + *j, m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) {
+ volatile double expected = *i + *j;
+ CheckDoubleEq(expected, m.Call(*i, *j));
+ }
}
}
TEST(RunFloat64Sub) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
m.Return(m.Float64Sub(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(*i - *j, m.Call(*i, *j)); }
+ FOR_FLOAT64_INPUTS(j) {
+ volatile double expected = *i - *j;
+ CheckDoubleEq(expected, m.Call(*i, *j));
+ }
}
}
TEST(RunFloat64Mul) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
m.Return(m.Float64Mul(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -3479,7 +3712,8 @@ TEST(RunFloat64Mul) {
TEST(RunFloat64Div) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
m.Return(m.Float64Div(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -3492,7 +3726,8 @@ TEST(RunFloat64Div) {
TEST(RunFloat64Mod) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
m.Return(m.Float64Mod(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -3646,7 +3881,7 @@ TEST(RunFloat32SubP) {
TEST(RunFloat32SubImm1) {
FOR_FLOAT32_INPUTS(i) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Sub(m.Float32Constant(*i), m.Parameter(0)));
FOR_FLOAT32_INPUTS(j) {
@@ -3659,7 +3894,7 @@ TEST(RunFloat32SubImm1) {
TEST(RunFloat32SubImm2) {
FOR_FLOAT32_INPUTS(i) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Sub(m.Parameter(0), m.Float32Constant(*i)));
FOR_FLOAT32_INPUTS(j) {
@@ -3672,7 +3907,7 @@ TEST(RunFloat32SubImm2) {
TEST(RunFloat64SubImm1) {
FOR_FLOAT64_INPUTS(i) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Sub(m.Float64Constant(*i), m.Parameter(0)));
FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i - *j, m.Call(*j)); }
@@ -3682,7 +3917,7 @@ TEST(RunFloat64SubImm1) {
TEST(RunFloat64SubImm2) {
FOR_FLOAT64_INPUTS(i) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Sub(m.Parameter(0), m.Float64Constant(*i)));
FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*j - *i, m.Call(*j)); }
@@ -3736,8 +3971,8 @@ TEST(RunFloat64MulP) {
TEST(RunFloat64MulAndFloat64Add1) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64,
- kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(
+ MachineType::Float64(), MachineType::Float64(), MachineType::Float64());
m.Return(m.Float64Add(m.Float64Mul(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
@@ -3752,8 +3987,8 @@ TEST(RunFloat64MulAndFloat64Add1) {
TEST(RunFloat64MulAndFloat64Add2) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64,
- kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(
+ MachineType::Float64(), MachineType::Float64(), MachineType::Float64());
m.Return(m.Float64Add(m.Parameter(0),
m.Float64Mul(m.Parameter(1), m.Parameter(2))));
@@ -3768,8 +4003,8 @@ TEST(RunFloat64MulAndFloat64Add2) {
TEST(RunFloat64MulAndFloat64Sub1) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64,
- kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(
+ MachineType::Float64(), MachineType::Float64(), MachineType::Float64());
m.Return(m.Float64Sub(m.Float64Mul(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
@@ -3784,8 +4019,8 @@ TEST(RunFloat64MulAndFloat64Sub1) {
TEST(RunFloat64MulAndFloat64Sub2) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachFloat64,
- kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(
+ MachineType::Float64(), MachineType::Float64(), MachineType::Float64());
m.Return(m.Float64Sub(m.Parameter(0),
m.Float64Mul(m.Parameter(1), m.Parameter(2))));
@@ -3801,7 +4036,7 @@ TEST(RunFloat64MulAndFloat64Sub2) {
TEST(RunFloat64MulImm1) {
FOR_FLOAT64_INPUTS(i) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Mul(m.Float64Constant(*i), m.Parameter(0)));
FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*i * *j, m.Call(*j)); }
@@ -3811,7 +4046,7 @@ TEST(RunFloat64MulImm1) {
TEST(RunFloat64MulImm2) {
FOR_FLOAT64_INPUTS(i) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Mul(m.Parameter(0), m.Float64Constant(*i)));
FOR_FLOAT64_INPUTS(j) { CheckFloatEq(*j * *i, m.Call(*j)); }
@@ -3874,7 +4109,7 @@ TEST(RunChangeInt32ToFloat64_A) {
TEST(RunChangeInt32ToFloat64_B) {
- BufferedRawMachineAssemblerTester<double> m(kMachInt32);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Int32());
m.Return(m.ChangeInt32ToFloat64(m.Parameter(0)));
FOR_INT32_INPUTS(i) { CheckDoubleEq(static_cast<double>(*i), m.Call(*i)); }
@@ -3882,7 +4117,7 @@ TEST(RunChangeInt32ToFloat64_B) {
TEST(RunChangeUint32ToFloat64) {
- BufferedRawMachineAssemblerTester<double> m(kMachUint32);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
FOR_UINT32_INPUTS(i) { CheckDoubleEq(static_cast<double>(*i), m.Call(*i)); }
@@ -3898,7 +4133,7 @@ TEST(RunChangeFloat64ToInt32_A) {
TEST(RunChangeFloat64ToInt32_B) {
- BufferedRawMachineAssemblerTester<int32_t> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Float64());
m.Return(m.ChangeFloat64ToInt32(m.Parameter(0)));
// Note we don't check fractional inputs, or inputs outside the range of
@@ -3916,7 +4151,7 @@ TEST(RunChangeFloat64ToInt32_B) {
TEST(RunChangeFloat64ToUint32) {
- BufferedRawMachineAssemblerTester<uint32_t> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.ChangeFloat64ToUint32(m.Parameter(0)));
{
@@ -3935,7 +4170,7 @@ TEST(RunChangeFloat64ToUint32) {
TEST(RunTruncateFloat64ToFloat32) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float64());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
@@ -3967,11 +4202,11 @@ TEST(RunLoopPhiInduction2) {
int false_val = 0x10777;
// x = false_val; while(false) { x++; } return x;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
Node* false_node = m.Int32Constant(false_val);
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachInt32, false_node, false_node);
+ Node* phi = m.Phi(MachineRepresentation::kWord32, false_node, false_node);
m.Branch(m.Int32Constant(0), &body, &end);
m.Bind(&body);
Node* add = m.Int32Add(phi, m.Int32Constant(1));
@@ -3991,7 +4226,7 @@ TEST(RunFloatDiamond) {
float buffer = 0.1f;
float constant = 99.99f;
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
Node* k1 = m.Float32Constant(constant);
Node* k2 = m.Float32Constant(0 - constant);
m.Branch(m.Int32Constant(0), &blocka, &blockb);
@@ -4000,9 +4235,9 @@ TEST(RunFloatDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(kMachFloat32, k2, k1);
- m.Store(kMachFloat32, m.PointerConstant(&buffer), m.IntPtrConstant(0), phi,
- kNoWriteBarrier);
+ Node* phi = m.Phi(MachineRepresentation::kFloat32, k2, k1);
+ m.Store(MachineRepresentation::kFloat32, m.PointerConstant(&buffer),
+ m.IntPtrConstant(0), phi, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4017,7 +4252,7 @@ TEST(RunDoubleDiamond) {
double buffer = 0.1;
double constant = 99.99;
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
Node* k1 = m.Float64Constant(constant);
Node* k2 = m.Float64Constant(0 - constant);
m.Branch(m.Int32Constant(0), &blocka, &blockb);
@@ -4026,9 +4261,9 @@ TEST(RunDoubleDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(kMachFloat64, k2, k1);
- m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi,
- kNoWriteBarrier);
+ Node* phi = m.Phi(MachineRepresentation::kFloat64, k2, k1);
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(&buffer),
+ m.Int32Constant(0), phi, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4044,7 +4279,7 @@ TEST(RunRefDiamond) {
CcTest::i_isolate()->factory()->InternalizeUtf8String("A");
String* buffer;
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
Node* k1 = m.StringConstant("A");
Node* k2 = m.StringConstant("B");
m.Branch(m.Int32Constant(0), &blocka, &blockb);
@@ -4053,9 +4288,9 @@ TEST(RunRefDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(kMachAnyTagged, k2, k1);
- m.Store(kMachAnyTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi,
- kNoWriteBarrier);
+ Node* phi = m.Phi(MachineRepresentation::kTagged, k2, k1);
+ m.Store(MachineRepresentation::kTagged, m.PointerConstant(&buffer),
+ m.Int32Constant(0), phi, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4073,7 +4308,7 @@ TEST(RunDoubleRefDiamond) {
CcTest::i_isolate()->factory()->InternalizeUtf8String("AX");
String* rbuffer;
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
Node* d1 = m.Float64Constant(dconstant);
Node* d2 = m.Float64Constant(0 - dconstant);
Node* r1 = m.StringConstant("AX");
@@ -4084,12 +4319,12 @@ TEST(RunDoubleRefDiamond) {
m.Bind(&blockb);
m.Goto(&end);
m.Bind(&end);
- Node* dphi = m.Phi(kMachFloat64, d2, d1);
- Node* rphi = m.Phi(kMachAnyTagged, r2, r1);
- m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi,
- kNoWriteBarrier);
- m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0), rphi,
- kNoWriteBarrier);
+ Node* dphi = m.Phi(MachineRepresentation::kFloat64, d2, d1);
+ Node* rphi = m.Phi(MachineRepresentation::kTagged, r2, r1);
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(&dbuffer),
+ m.Int32Constant(0), dphi, kNoWriteBarrier);
+ m.Store(MachineRepresentation::kTagged, m.PointerConstant(&rbuffer),
+ m.Int32Constant(0), rphi, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4108,7 +4343,7 @@ TEST(RunDoubleRefDoubleDiamond) {
CcTest::i_isolate()->factory()->InternalizeUtf8String("AD");
String* rbuffer;
- MLabel blocka, blockb, mid, blockd, blocke, end;
+ RawMachineLabel blocka, blockb, mid, blockd, blocke, end;
Node* d1 = m.Float64Constant(dconstant);
Node* d2 = m.Float64Constant(0 - dconstant);
Node* r1 = m.StringConstant("AD");
@@ -4119,8 +4354,8 @@ TEST(RunDoubleRefDoubleDiamond) {
m.Bind(&blockb);
m.Goto(&mid);
m.Bind(&mid);
- Node* dphi1 = m.Phi(kMachFloat64, d2, d1);
- Node* rphi1 = m.Phi(kMachAnyTagged, r2, r1);
+ Node* dphi1 = m.Phi(MachineRepresentation::kFloat64, d2, d1);
+ Node* rphi1 = m.Phi(MachineRepresentation::kTagged, r2, r1);
m.Branch(m.Int32Constant(0), &blockd, &blocke);
m.Bind(&blockd);
@@ -4128,13 +4363,13 @@ TEST(RunDoubleRefDoubleDiamond) {
m.Bind(&blocke);
m.Goto(&end);
m.Bind(&end);
- Node* dphi2 = m.Phi(kMachFloat64, d1, dphi1);
- Node* rphi2 = m.Phi(kMachAnyTagged, r1, rphi1);
+ Node* dphi2 = m.Phi(MachineRepresentation::kFloat64, d1, dphi1);
+ Node* rphi2 = m.Phi(MachineRepresentation::kTagged, r1, rphi1);
- m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi2,
- kNoWriteBarrier);
- m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
- rphi2, kNoWriteBarrier);
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(&dbuffer),
+ m.Int32Constant(0), dphi2, kNoWriteBarrier);
+ m.Store(MachineRepresentation::kTagged, m.PointerConstant(&rbuffer),
+ m.Int32Constant(0), rphi2, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4145,7 +4380,7 @@ TEST(RunDoubleRefDoubleDiamond) {
TEST(RunDoubleLoopPhi) {
RawMachineAssemblerTester<int32_t> m;
- MLabel header, body, end;
+ RawMachineLabel header, body, end;
int magic = 99773;
double buffer = 0.99;
@@ -4156,14 +4391,14 @@ TEST(RunDoubleLoopPhi) {
m.Goto(&header);
m.Bind(&header);
- Node* phi = m.Phi(kMachFloat64, dk, dk);
+ Node* phi = m.Phi(MachineRepresentation::kFloat64, dk, dk);
phi->ReplaceInput(1, phi);
m.Branch(zero, &body, &end);
m.Bind(&body);
m.Goto(&header);
m.Bind(&end);
- m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi,
- kNoWriteBarrier);
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(&buffer),
+ m.Int32Constant(0), phi, kNoWriteBarrier);
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -4177,13 +4412,13 @@ TEST(RunCountToTenAccRaw) {
Node* ten = m.Int32Constant(10);
Node* one = m.Int32Constant(1);
- MLabel header, body, body_cont, end;
+ RawMachineLabel header, body, body_cont, end;
m.Goto(&header);
m.Bind(&header);
- Node* i = m.Phi(kMachInt32, zero, zero);
- Node* j = m.Phi(kMachInt32, zero, zero);
+ Node* i = m.Phi(MachineRepresentation::kWord32, zero, zero);
+ Node* j = m.Phi(MachineRepresentation::kWord32, zero, zero);
m.Goto(&body);
m.Bind(&body);
@@ -4210,14 +4445,14 @@ TEST(RunCountToTenAccRaw2) {
Node* ten = m.Int32Constant(10);
Node* one = m.Int32Constant(1);
- MLabel header, body, body_cont, end;
+ RawMachineLabel header, body, body_cont, end;
m.Goto(&header);
m.Bind(&header);
- Node* i = m.Phi(kMachInt32, zero, zero);
- Node* j = m.Phi(kMachInt32, zero, zero);
- Node* k = m.Phi(kMachInt32, zero, zero);
+ Node* i = m.Phi(MachineRepresentation::kWord32, zero, zero);
+ Node* j = m.Phi(MachineRepresentation::kWord32, zero, zero);
+ Node* k = m.Phi(MachineRepresentation::kWord32, zero, zero);
m.Goto(&body);
m.Bind(&body);
@@ -4244,14 +4479,22 @@ TEST(RunAddTree) {
int32_t inputs[] = {11, 12, 13, 14, 15, 16, 17, 18};
Node* base = m.PointerConstant(inputs);
- Node* n0 = m.Load(kMachInt32, base, m.Int32Constant(0 * sizeof(int32_t)));
- Node* n1 = m.Load(kMachInt32, base, m.Int32Constant(1 * sizeof(int32_t)));
- Node* n2 = m.Load(kMachInt32, base, m.Int32Constant(2 * sizeof(int32_t)));
- Node* n3 = m.Load(kMachInt32, base, m.Int32Constant(3 * sizeof(int32_t)));
- Node* n4 = m.Load(kMachInt32, base, m.Int32Constant(4 * sizeof(int32_t)));
- Node* n5 = m.Load(kMachInt32, base, m.Int32Constant(5 * sizeof(int32_t)));
- Node* n6 = m.Load(kMachInt32, base, m.Int32Constant(6 * sizeof(int32_t)));
- Node* n7 = m.Load(kMachInt32, base, m.Int32Constant(7 * sizeof(int32_t)));
+ Node* n0 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(0 * sizeof(int32_t)));
+ Node* n1 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(1 * sizeof(int32_t)));
+ Node* n2 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(2 * sizeof(int32_t)));
+ Node* n3 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(3 * sizeof(int32_t)));
+ Node* n4 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(4 * sizeof(int32_t)));
+ Node* n5 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(5 * sizeof(int32_t)));
+ Node* n6 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(6 * sizeof(int32_t)));
+ Node* n7 =
+ m.Load(MachineType::Int32(), base, m.Int32Constant(7 * sizeof(int32_t)));
Node* i1 = m.Int32Add(n0, n1);
Node* i2 = m.Int32Add(n2, n3);
@@ -4283,10 +4526,12 @@ static int Float64CompareHelper(RawMachineAssemblerTester<int32_t>* m,
CHECK(x < y);
bool load_a = node_type / 2 == 1;
bool load_b = node_type % 2 == 1;
- Node* a = load_a ? m->Load(kMachFloat64, m->PointerConstant(&buffer[0]))
- : m->Float64Constant(x);
- Node* b = load_b ? m->Load(kMachFloat64, m->PointerConstant(&buffer[1]))
- : m->Float64Constant(y);
+ Node* a =
+ load_a ? m->Load(MachineType::Float64(), m->PointerConstant(&buffer[0]))
+ : m->Float64Constant(x);
+ Node* b =
+ load_b ? m->Load(MachineType::Float64(), m->PointerConstant(&buffer[1]))
+ : m->Float64Constant(y);
Node* cmp = NULL;
bool expected = false;
switch (test_case) {
@@ -4413,8 +4658,8 @@ TEST(RunFloat64Equal) {
double input_b = 0.0;
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+ Node* a = m.LoadFromPointer(&input_a, MachineType::Float64());
+ Node* b = m.LoadFromPointer(&input_b, MachineType::Float64());
m.Return(m.Float64Equal(a, b));
CompareWrapper cmp(IrOpcode::kFloat64Equal);
@@ -4434,8 +4679,8 @@ TEST(RunFloat64LessThan) {
double input_b = 0.0;
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
- Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+ Node* a = m.LoadFromPointer(&input_a, MachineType::Float64());
+ Node* b = m.LoadFromPointer(&input_b, MachineType::Float64());
m.Return(m.Float64LessThan(a, b));
CompareWrapper cmp(IrOpcode::kFloat64LessThan);
@@ -4450,14 +4695,14 @@ TEST(RunFloat64LessThan) {
}
-template <typename IntType, MachineType kRepresentation>
-static void LoadStoreTruncation() {
+template <typename IntType>
+static void LoadStoreTruncation(MachineType kRepresentation) {
IntType input;
RawMachineAssemblerTester<int32_t> m;
Node* a = m.LoadFromPointer(&input, kRepresentation);
Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
- m.StoreToPointer(&input, kRepresentation, ap1);
+ m.StoreToPointer(&input, kRepresentation.representation(), ap1);
m.Return(ap1);
const IntType max = std::numeric_limits<IntType>::max();
@@ -4484,14 +4729,15 @@ static void LoadStoreTruncation() {
TEST(RunLoadStoreTruncation) {
- LoadStoreTruncation<int8_t, kMachInt8>();
- LoadStoreTruncation<int16_t, kMachInt16>();
+ LoadStoreTruncation<int8_t>(MachineType::Int8());
+ LoadStoreTruncation<int16_t>(MachineType::Int16());
}
static void IntPtrCompare(intptr_t left, intptr_t right) {
for (int test = 0; test < 7; test++) {
- RawMachineAssemblerTester<bool> m(kMachPtr, kMachPtr);
+ RawMachineAssemblerTester<bool> m(MachineType::Pointer(),
+ MachineType::Pointer());
Node* p0 = m.Parameter(0);
Node* p1 = m.Parameter(1);
Node* res = NULL;
@@ -4560,7 +4806,8 @@ TEST(RunTestIntPtrArithmetic) {
Node* output = m.PointerConstant(&outputs[kInputSize - 1]);
Node* elem_size = m.IntPtrConstant(sizeof(inputs[0]));
for (int i = 0; i < kInputSize; i++) {
- m.Store(kMachInt32, output, m.Load(kMachInt32, input), kNoWriteBarrier);
+ m.Store(MachineRepresentation::kWord32, output,
+ m.Load(MachineType::Int32(), input), kNoWriteBarrier);
input = m.IntPtrAdd(input, elem_size);
output = m.IntPtrSub(output, elem_size);
}
@@ -4585,7 +4832,7 @@ TEST(RunSpillLotsOfThings) {
accs[i] = acc;
}
for (int i = 0; i < kInputSize; i++) {
- m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
+ m.StoreToPointer(&outputs[i], MachineRepresentation::kWord32, accs[i]);
}
m.Return(one);
m.Call();
@@ -4598,7 +4845,8 @@ TEST(RunSpillLotsOfThings) {
TEST(RunSpillConstantsAndParameters) {
static const int kInputSize = 1000;
static const int32_t kBase = 987;
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
int32_t outputs[kInputSize];
Node* csts[kInputSize];
Node* accs[kInputSize];
@@ -4611,7 +4859,7 @@ TEST(RunSpillConstantsAndParameters) {
accs[i] = acc;
}
for (int i = 0; i < kInputSize; i++) {
- m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
+ m.StoreToPointer(&outputs[i], MachineRepresentation::kWord32, accs[i]);
}
m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
FOR_INT32_INPUTS(i) {
@@ -4632,7 +4880,7 @@ TEST(RunSpillConstantsAndParameters) {
TEST(RunNewSpaceConstantsInPhi) {
- RawMachineAssemblerTester<Object*> m(kMachInt32);
+ RawMachineAssemblerTester<Object*> m(MachineType::Int32());
Isolate* isolate = CcTest::i_isolate();
Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2);
@@ -4640,7 +4888,7 @@ TEST(RunNewSpaceConstantsInPhi) {
Node* true_node = m.HeapConstant(true_val);
Node* false_node = m.HeapConstant(false_val);
- MLabel blocka, blockb, end;
+ RawMachineLabel blocka, blockb, end;
m.Branch(m.Parameter(0), &blocka, &blockb);
m.Bind(&blocka);
m.Goto(&end);
@@ -4648,7 +4896,7 @@ TEST(RunNewSpaceConstantsInPhi) {
m.Goto(&end);
m.Bind(&end);
- Node* phi = m.Phi(kMachAnyTagged, true_node, false_node);
+ Node* phi = m.Phi(MachineRepresentation::kTagged, true_node, false_node);
m.Return(phi);
CHECK_EQ(*false_val, m.Call(0));
@@ -4663,7 +4911,7 @@ TEST(RunInt32AddWithOverflowP) {
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -4680,11 +4928,11 @@ TEST(RunInt32AddWithOverflowImm) {
int32_t actual_val = -1, expected_val = 0;
FOR_INT32_INPUTS(i) {
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
@@ -4693,11 +4941,11 @@ TEST(RunInt32AddWithOverflowImm) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
@@ -4711,7 +4959,7 @@ TEST(RunInt32AddWithOverflowImm) {
m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
@@ -4723,7 +4971,7 @@ TEST(RunInt32AddWithOverflowImm) {
TEST(RunInt32AddWithOverflowInBranchP) {
int constant = 911777;
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
@@ -4751,7 +4999,7 @@ TEST(RunInt32SubWithOverflowP) {
Node* add = m.Int32SubWithOverflow(bt.param0, bt.param1);
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -4768,11 +5016,11 @@ TEST(RunInt32SubWithOverflowImm) {
int32_t actual_val = -1, expected_val = 0;
FOR_INT32_INPUTS(i) {
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
@@ -4781,11 +5029,11 @@ TEST(RunInt32SubWithOverflowImm) {
}
}
{
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
int expected_ovf = bits::SignedSubOverflow32(*j, *i, &expected_val);
@@ -4799,7 +5047,7 @@ TEST(RunInt32SubWithOverflowImm) {
m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
Node* val = m.Projection(0, add);
Node* ovf = m.Projection(1, add);
- m.StoreToPointer(&actual_val, kMachInt32, val);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
m.Return(ovf);
int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
CHECK_EQ(expected_ovf, m.Call());
@@ -4811,7 +5059,7 @@ TEST(RunInt32SubWithOverflowImm) {
TEST(RunInt32SubWithOverflowInBranchP) {
int constant = 911999;
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
Node* sub = m.Int32SubWithOverflow(bt.param0, bt.param1);
@@ -4834,10 +5082,10 @@ TEST(RunInt32SubWithOverflowInBranchP) {
TEST(RunWord64EqualInBranchP) {
int64_t input;
- MLabel blocka, blockb;
+ RawMachineLabel blocka, blockb;
RawMachineAssemblerTester<int64_t> m;
if (!m.machine()->Is64()) return;
- Node* value = m.LoadFromPointer(&input, kMachInt64);
+ Node* value = m.LoadFromPointer(&input, MachineType::Int64());
m.Branch(m.Word64Equal(value, m.Int64Constant(0)), &blocka, &blockb);
m.Bind(&blocka);
m.Return(m.Int32Constant(1));
@@ -4855,8 +5103,9 @@ TEST(RunWord64EqualInBranchP) {
TEST(RunChangeInt32ToInt64P) {
if (kPointerSize < 8) return;
int64_t actual = -1;
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
- m.StoreToPointer(&actual, kMachInt64, m.ChangeInt32ToInt64(m.Parameter(0)));
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ m.StoreToPointer(&actual, MachineRepresentation::kWord64,
+ m.ChangeInt32ToInt64(m.Parameter(0)));
m.Return(m.Int32Constant(0));
FOR_INT32_INPUTS(i) {
int64_t expected = *i;
@@ -4869,8 +5118,8 @@ TEST(RunChangeInt32ToInt64P) {
TEST(RunChangeUint32ToUint64P) {
if (kPointerSize < 8) return;
int64_t actual = -1;
- RawMachineAssemblerTester<int32_t> m(kMachUint32);
- m.StoreToPointer(&actual, kMachUint64,
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
+ m.StoreToPointer(&actual, MachineRepresentation::kWord64,
m.ChangeUint32ToUint64(m.Parameter(0)));
m.Return(m.Int32Constant(0));
FOR_UINT32_INPUTS(i) {
@@ -4885,7 +5134,8 @@ TEST(RunTruncateInt64ToInt32P) {
if (kPointerSize < 8) return;
int64_t expected = -1;
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.TruncateInt64ToInt32(m.LoadFromPointer(&expected, kMachInt64)));
+ m.Return(m.TruncateInt64ToInt32(
+ m.LoadFromPointer(&expected, MachineType::Int64())));
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
expected = (static_cast<uint64_t>(*j) << 32) | *i;
@@ -4956,8 +5206,9 @@ TEST(RunTruncateFloat64ToInt32P) {
{-1.7976931348623157e+308, 0}};
double input = -1.0;
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.TruncateFloat64ToInt32(TruncationMode::kJavaScript,
- m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript,
+ m.LoadFromPointer(&input, MachineType::Float64())));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i].from;
uint64_t expected = static_cast<int64_t>(kValues[i].raw);
@@ -4967,7 +5218,7 @@ TEST(RunTruncateFloat64ToInt32P) {
TEST(RunChangeFloat32ToFloat64) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat32);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float32());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
@@ -4985,7 +5236,7 @@ TEST(RunFloat32Constant) {
TEST(RunFloat64ExtractLowWord32) {
- BufferedRawMachineAssemblerTester<uint32_t> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.Float64ExtractLowWord32(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) {
uint32_t expected = static_cast<uint32_t>(bit_cast<uint64_t>(*i));
@@ -4995,7 +5246,7 @@ TEST(RunFloat64ExtractLowWord32) {
TEST(RunFloat64ExtractHighWord32) {
- BufferedRawMachineAssemblerTester<uint32_t> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.Float64ExtractHighWord32(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) {
uint32_t expected = static_cast<uint32_t>(bit_cast<uint64_t>(*i) >> 32);
@@ -5005,7 +5256,8 @@ TEST(RunFloat64ExtractHighWord32) {
TEST(RunFloat64InsertLowWord32) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachInt32);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Int32());
m.Return(m.Float64InsertLowWord32(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -5019,7 +5271,8 @@ TEST(RunFloat64InsertLowWord32) {
TEST(RunFloat64InsertHighWord32) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64, kMachUint32);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Uint32());
m.Return(m.Float64InsertHighWord32(m.Parameter(0), m.Parameter(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
@@ -5033,14 +5286,14 @@ TEST(RunFloat64InsertHighWord32) {
TEST(RunFloat32Abs) {
- BufferedRawMachineAssemblerTester<float> m(kMachFloat32);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
m.Return(m.Float32Abs(m.Parameter(0)));
FOR_FLOAT32_INPUTS(i) { CheckFloatEq(std::abs(*i), m.Call(*i)); }
}
TEST(RunFloat64Abs) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
m.Return(m.Float64Abs(m.Parameter(0)));
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(std::abs(*i), m.Call(*i)); }
}
@@ -5144,68 +5397,101 @@ static double kValues[] = {0.1,
-two_52 + 1 - 0.7};
+TEST(RunFloat32RoundDown) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ if (!m.machine()->Float32RoundDown().IsSupported()) return;
+
+ m.Return(m.Float32RoundDown(m.Parameter(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(floorf(*i), m.Call(*i)); }
+}
+
+
TEST(RunFloat64RoundDown1) {
- BufferedRawMachineAssemblerTester<double> m(kMachFloat64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
if (!m.machine()->Float64RoundDown().IsSupported()) return;
m.Return(m.Float64RoundDown(m.Parameter(0)));
- FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(std::floor(*i), m.Call(*i)); }
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(floor(*i), m.Call(*i)); }
}
TEST(RunFloat64RoundDown2) {
- double input = -1.0;
- double result = 0.0;
- RawMachineAssemblerTester<int32_t> m;
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
if (!m.machine()->Float64RoundDown().IsSupported()) return;
- m.StoreToPointer(&result, kMachFloat64,
- m.Float64Sub(m.Float64Constant(-0.0),
- m.Float64RoundDown(m.Float64Sub(
- m.Float64Constant(-0.0),
- m.LoadFromPointer(&input, kMachFloat64)))));
- m.Return(m.Int32Constant(0));
+ m.Return(m.Float64Sub(m.Float64Constant(-0.0),
+ m.Float64RoundDown(m.Float64Sub(m.Float64Constant(-0.0),
+ m.Parameter(0)))));
+
for (size_t i = 0; i < arraysize(kValues); ++i) {
- input = kValues[i];
- CHECK_EQ(0, m.Call());
- double expected = std::ceil(kValues[i]);
- CHECK_EQ(expected, result);
+ CHECK_EQ(ceil(kValues[i]), m.Call(kValues[i]));
}
}
+TEST(RunFloat32RoundUp) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ if (!m.machine()->Float32RoundUp().IsSupported()) return;
+ m.Return(m.Float32RoundUp(m.Parameter(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceilf(*i), m.Call(*i)); }
+}
+
+
+TEST(RunFloat64RoundUp) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ if (!m.machine()->Float64RoundUp().IsSupported()) return;
+ m.Return(m.Float64RoundUp(m.Parameter(0)));
+
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(ceil(*i), m.Call(*i)); }
+}
+
+
+TEST(RunFloat32RoundTiesEven) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ if (!m.machine()->Float32RoundTiesEven().IsSupported()) return;
+ m.Return(m.Float32RoundTiesEven(m.Parameter(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(nearbyint(*i), m.Call(*i)); }
+}
+
+
+TEST(RunFloat64RoundTiesEven) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ if (!m.machine()->Float64RoundTiesEven().IsSupported()) return;
+ m.Return(m.Float64RoundTiesEven(m.Parameter(0)));
+
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(nearbyint(*i), m.Call(*i)); }
+}
+
+
+TEST(RunFloat32RoundTruncate) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ if (!m.machine()->Float32RoundTruncate().IsSupported()) return;
+
+ m.Return(m.Float32RoundTruncate(m.Parameter(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(truncf(*i), m.Call(*i)); }
+}
+
+
TEST(RunFloat64RoundTruncate) {
- double input = -1.0;
- double result = 0.0;
- RawMachineAssemblerTester<int32_t> m;
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
if (!m.machine()->Float64RoundTruncate().IsSupported()) return;
- m.StoreToPointer(
- &result, kMachFloat64,
- m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
- m.Return(m.Int32Constant(0));
+ m.Return(m.Float64RoundTruncate(m.Parameter(0)));
for (size_t i = 0; i < arraysize(kValues); ++i) {
- input = kValues[i];
- CHECK_EQ(0, m.Call());
- double expected = trunc(kValues[i]);
- CHECK_EQ(expected, result);
+ CHECK_EQ(trunc(kValues[i]), m.Call(kValues[i]));
}
}
TEST(RunFloat64RoundTiesAway) {
- double input = -1.0;
- double result = 0.0;
- RawMachineAssemblerTester<int32_t> m;
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
if (!m.machine()->Float64RoundTiesAway().IsSupported()) return;
- m.StoreToPointer(
- &result, kMachFloat64,
- m.Float64RoundTiesAway(m.LoadFromPointer(&input, kMachFloat64)));
- m.Return(m.Int32Constant(0));
+ m.Return(m.Float64RoundTiesAway(m.Parameter(0)));
for (size_t i = 0; i < arraysize(kValues); ++i) {
- input = kValues[i];
- CHECK_EQ(0, m.Call());
- double expected = round(kValues[i]);
- CHECK_EQ(expected, result);
+ CHECK_EQ(round(kValues[i]), m.Call(kValues[i]));
}
}
@@ -5237,17 +5523,18 @@ int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
TEST(RunCallCFunction0) {
auto* foo0_ptr = &foo0;
RawMachineAssemblerTester<int32_t> m;
- Node* function = m.LoadFromPointer(&foo0_ptr, kMachPtr);
- m.Return(m.CallCFunction0(kMachInt32, function));
+ Node* function = m.LoadFromPointer(&foo0_ptr, MachineType::Pointer());
+ m.Return(m.CallCFunction0(MachineType::Int32(), function));
CHECK_EQ(kMagicFoo0, m.Call());
}
TEST(RunCallCFunction1) {
auto* foo1_ptr = &foo1;
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
- Node* function = m.LoadFromPointer(&foo1_ptr, kMachPtr);
- m.Return(m.CallCFunction1(kMachInt32, kMachInt32, function, m.Parameter(0)));
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* function = m.LoadFromPointer(&foo1_ptr, MachineType::Pointer());
+ m.Return(m.CallCFunction1(MachineType::Int32(), MachineType::Int32(),
+ function, m.Parameter(0)));
FOR_INT32_INPUTS(i) {
int32_t const expected = *i;
CHECK_EQ(expected, m.Call(expected));
@@ -5257,10 +5544,12 @@ TEST(RunCallCFunction1) {
TEST(RunCallCFunction2) {
auto* foo2_ptr = &foo2;
- RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
- Node* function = m.LoadFromPointer(&foo2_ptr, kMachPtr);
- m.Return(m.CallCFunction2(kMachInt32, kMachInt32, kMachInt32, function,
- m.Parameter(0), m.Parameter(1)));
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* function = m.LoadFromPointer(&foo2_ptr, MachineType::Pointer());
+ m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), function, m.Parameter(0),
+ m.Parameter(1)));
FOR_INT32_INPUTS(i) {
int32_t const x = *i;
FOR_INT32_INPUTS(j) {
@@ -5273,13 +5562,14 @@ TEST(RunCallCFunction2) {
TEST(RunCallCFunction8) {
auto* foo8_ptr = &foo8;
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
- Node* function = m.LoadFromPointer(&foo8_ptr, kMachPtr);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* function = m.LoadFromPointer(&foo8_ptr, MachineType::Pointer());
Node* param = m.Parameter(0);
- m.Return(m.CallCFunction8(kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, function, param, param, param, param,
- param, param, param, param));
+ m.Return(m.CallCFunction8(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ function, param, param, param, param, param, param, param, param));
FOR_INT32_INPUTS(i) {
int32_t const x = *i;
CHECK_EQ(x * 8, m.Call(x));
@@ -5291,12 +5581,12 @@ TEST(RunCallCFunction8) {
// TODO(titzer): run int64 tests on all platforms when supported.
TEST(RunCheckedLoadInt64) {
int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
- RawMachineAssemblerTester<int64_t> m(kMachInt32);
+ RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
Node* base = m.PointerConstant(buffer);
Node* index = m.Parameter(0);
Node* length = m.Int32Constant(16);
- Node* load =
- m.AddNode(m.machine()->CheckedLoad(kMachInt64), base, index, length);
+ Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
+ index, length);
m.Return(load);
CHECK_EQ(buffer[0], m.Call(0));
@@ -5309,13 +5599,14 @@ TEST(RunCheckedStoreInt64) {
const int64_t write = 0x5566778899aabbLL;
const int64_t before = 0x33bbccddeeff0011LL;
int64_t buffer[] = {before, before};
- RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
Node* base = m.PointerConstant(buffer);
Node* index = m.Parameter(0);
Node* length = m.Int32Constant(16);
Node* value = m.Int64Constant(write);
- Node* store = m.AddNode(m.machine()->CheckedStore(kMachInt64), base, index,
- length, value);
+ Node* store =
+ m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
+ index, length, value);
USE(store);
m.Return(m.Int32Constant(11));
@@ -5338,8 +5629,8 @@ TEST(RunBitcastInt64ToFloat64) {
double output = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
- &output, kMachFloat64,
- m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, kMachInt64)));
+ &output, MachineRepresentation::kFloat64,
+ m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, MachineType::Int64())));
m.Return(m.Int32Constant(11));
FOR_INT64_INPUTS(i) {
input = *i;
@@ -5351,43 +5642,357 @@ TEST(RunBitcastInt64ToFloat64) {
TEST(RunBitcastFloat64ToInt64) {
- double input = 0;
- int64_t output = 0;
- RawMachineAssemblerTester<int32_t> m;
- m.StoreToPointer(
- &output, kMachInt64,
- m.BitcastFloat64ToInt64(m.LoadFromPointer(&input, kMachFloat64)));
- m.Return(m.Int32Constant(11));
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
+
+ m.Return(m.BitcastFloat64ToInt64(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_EQ(bit_cast<int64_t>(*i), m.Call(*i)); }
+}
+
+
+TEST(RunTryTruncateFloat32ToInt64WithoutCheck) {
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float32());
+ m.Return(m.TryTruncateFloat32ToInt64(m.Parameter(0)));
+
+ FOR_INT64_INPUTS(i) {
+ float input = static_cast<float>(*i);
+ if (input < static_cast<float>(INT64_MAX) &&
+ input >= static_cast<float>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
+ }
+ }
+}
+
+
+TEST(RunTryTruncateFloat32ToInt64WithCheck) {
+ int64_t success = 0;
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float32());
+ Node* trunc = m.TryTruncateFloat32ToInt64(m.Parameter(0));
+ Node* val = m.Projection(0, trunc);
+ Node* check = m.Projection(1, trunc);
+ m.StoreToPointer(&success, MachineRepresentation::kWord64, check);
+ m.Return(val);
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(INT64_MAX) &&
+ *i >= static_cast<float>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(*i), m.Call(*i));
+ CHECK_NE(0, success);
+ } else {
+ m.Call(*i);
+ CHECK_EQ(0, success);
+ }
+ }
+}
+
+
+TEST(RunTryTruncateFloat64ToInt64WithoutCheck) {
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
+ m.Return(m.TryTruncateFloat64ToInt64(m.Parameter(0)));
+
+ FOR_INT64_INPUTS(i) {
+ double input = static_cast<double>(*i);
+ CHECK_EQ(static_cast<int64_t>(input), m.Call(input));
+ }
+}
+
+
+TEST(RunTryTruncateFloat64ToInt64WithCheck) {
+ int64_t success = 0;
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
+ Node* trunc = m.TryTruncateFloat64ToInt64(m.Parameter(0));
+ Node* val = m.Projection(0, trunc);
+ Node* check = m.Projection(1, trunc);
+ m.StoreToPointer(&success, MachineRepresentation::kWord64, check);
+ m.Return(val);
+
FOR_FLOAT64_INPUTS(i) {
- input = *i;
- CHECK_EQ(11, m.Call());
- double expected = bit_cast<int64_t>(input);
- CHECK_EQ(expected, output);
+ if (*i < static_cast<double>(INT64_MAX) &&
+ *i >= static_cast<double>(INT64_MIN)) {
+ // Conversions within this range should succeed.
+ CHECK_EQ(static_cast<int64_t>(*i), m.Call(*i));
+ CHECK_NE(0, success);
+ } else {
+ m.Call(*i);
+ CHECK_EQ(0, success);
+ }
+ }
+}
+
+
+TEST(RunTryTruncateFloat32ToUint64WithoutCheck) {
+ BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Float32());
+ m.Return(m.TryTruncateFloat32ToUint64(m.Parameter(0)));
+
+ FOR_UINT64_INPUTS(i) {
+ float input = static_cast<float>(*i);
+ // This condition on 'input' is required because
+ // static_cast<float>(UINT64_MAX) results in a value outside uint64 range.
+ if (input < static_cast<float>(UINT64_MAX)) {
+ CHECK_EQ(static_cast<uint64_t>(input), m.Call(input));
+ }
+ }
+}
+
+
+TEST(RunTryTruncateFloat32ToUint64WithCheck) {
+ int64_t success = 0;
+ BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Float32());
+ Node* trunc = m.TryTruncateFloat32ToUint64(m.Parameter(0));
+ Node* val = m.Projection(0, trunc);
+ Node* check = m.Projection(1, trunc);
+ m.StoreToPointer(&success, MachineRepresentation::kWord64, check);
+ m.Return(val);
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(UINT64_MAX) && *i > -1.0) {
+ // Conversions within this range should succeed.
+ CHECK_EQ(static_cast<uint64_t>(*i), m.Call(*i));
+ CHECK_NE(0, success);
+ } else {
+ m.Call(*i);
+ CHECK_EQ(0, success);
+ }
+ }
+}
+
+
+TEST(RunTryTruncateFloat64ToUint64WithoutCheck) {
+ BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Float64());
+ m.Return(m.TruncateFloat64ToUint64(m.Parameter(0)));
+
+ FOR_UINT64_INPUTS(j) {
+ double input = static_cast<double>(*j);
+
+ if (input < static_cast<float>(UINT64_MAX)) {
+ CHECK_EQ(static_cast<uint64_t>(input), m.Call(input));
+ }
+ }
+}
+
+
+TEST(RunTryTruncateFloat64ToUint64WithCheck) {
+ int64_t success = 0;
+ BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
+ Node* trunc = m.TryTruncateFloat64ToUint64(m.Parameter(0));
+ Node* val = m.Projection(0, trunc);
+ Node* check = m.Projection(1, trunc);
+ m.StoreToPointer(&success, MachineRepresentation::kWord64, check);
+ m.Return(val);
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < 18446744073709551616.0 && *i > -1) {
+ // Conversions within this range should succeed.
+ CHECK_EQ(static_cast<uint64_t>(*i), m.Call(*i));
+ CHECK_NE(0, success);
+ } else {
+ m.Call(*i);
+ CHECK_EQ(0, success);
+ }
}
}
TEST(RunRoundInt64ToFloat32) {
- BufferedRawMachineAssemblerTester<float> m(kMachInt64);
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Int64());
m.Return(m.RoundInt64ToFloat32(m.Parameter(0)));
FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<float>(*i), m.Call(*i)); }
}
TEST(RunRoundInt64ToFloat64) {
- BufferedRawMachineAssemblerTester<double> m(kMachInt64);
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Int64());
m.Return(m.RoundInt64ToFloat64(m.Parameter(0)));
FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<double>(*i), m.Call(*i)); }
}
+TEST(RunRoundUint64ToFloat64) {
+ struct {
+ uint64_t input;
+ uint64_t expected;
+ } values[] = {{0x0, 0x0},
+ {0x1, 0x3ff0000000000000},
+ {0xffffffff, 0x41efffffffe00000},
+ {0x1b09788b, 0x41bb09788b000000},
+ {0x4c5fce8, 0x419317f3a0000000},
+ {0xcc0de5bf, 0x41e981bcb7e00000},
+ {0x2, 0x4000000000000000},
+ {0x3, 0x4008000000000000},
+ {0x4, 0x4010000000000000},
+ {0x5, 0x4014000000000000},
+ {0x8, 0x4020000000000000},
+ {0x9, 0x4022000000000000},
+ {0xffffffffffffffff, 0x43f0000000000000},
+ {0xfffffffffffffffe, 0x43f0000000000000},
+ {0xfffffffffffffffd, 0x43f0000000000000},
+ {0x100000000, 0x41f0000000000000},
+ {0xffffffff00000000, 0x43efffffffe00000},
+ {0x1b09788b00000000, 0x43bb09788b000000},
+ {0x4c5fce800000000, 0x439317f3a0000000},
+ {0xcc0de5bf00000000, 0x43e981bcb7e00000},
+ {0x200000000, 0x4200000000000000},
+ {0x300000000, 0x4208000000000000},
+ {0x400000000, 0x4210000000000000},
+ {0x500000000, 0x4214000000000000},
+ {0x800000000, 0x4220000000000000},
+ {0x900000000, 0x4222000000000000},
+ {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
+ {0xece3af835495a16b, 0x43ed9c75f06a92b4},
+ {0xb668ecc11223344, 0x43a6cd1d98224467},
+ {0x9e, 0x4063c00000000000},
+ {0x43, 0x4050c00000000000},
+ {0xaf73, 0x40e5ee6000000000},
+ {0x116b, 0x40b16b0000000000},
+ {0x658ecc, 0x415963b300000000},
+ {0x2b3b4c, 0x41459da600000000},
+ {0x88776655, 0x41e10eeccaa00000},
+ {0x70000000, 0x41dc000000000000},
+ {0x7200000, 0x419c800000000000},
+ {0x7fffffff, 0x41dfffffffc00000},
+ {0x56123761, 0x41d5848dd8400000},
+ {0x7fffff00, 0x41dfffffc0000000},
+ {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
+ {0x80000000eeeeeeee, 0x43e00000001dddde},
+ {0x88888888dddddddd, 0x43e11111111bbbbc},
+ {0xa0000000dddddddd, 0x43e40000001bbbbc},
+ {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
+ {0xe0000000aaaaaaaa, 0x43ec000000155555},
+ {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
+ {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
+ {0xf0000000dddddddd, 0x43ee0000001bbbbc},
+ {0x7fffffdddddddd, 0x435ffffff7777777},
+ {0x3fffffaaaaaaaa, 0x434fffffd5555555},
+ {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
+ {0xfffff, 0x412ffffe00000000},
+ {0x7ffff, 0x411ffffc00000000},
+ {0x3ffff, 0x410ffff800000000},
+ {0x1ffff, 0x40fffff000000000},
+ {0xffff, 0x40efffe000000000},
+ {0x7fff, 0x40dfffc000000000},
+ {0x3fff, 0x40cfff8000000000},
+ {0x1fff, 0x40bfff0000000000},
+ {0xfff, 0x40affe0000000000},
+ {0x7ff, 0x409ffc0000000000},
+ {0x3ff, 0x408ff80000000000},
+ {0x1ff, 0x407ff00000000000},
+ {0x3fffffffffff, 0x42cfffffffffff80},
+ {0x1fffffffffff, 0x42bfffffffffff00},
+ {0xfffffffffff, 0x42affffffffffe00},
+ {0x7ffffffffff, 0x429ffffffffffc00},
+ {0x3ffffffffff, 0x428ffffffffff800},
+ {0x1ffffffffff, 0x427ffffffffff000},
+ {0x8000008000000000, 0x43e0000010000000},
+ {0x8000008000000001, 0x43e0000010000000},
+ {0x8000000000000400, 0x43e0000000000000},
+ {0x8000000000000401, 0x43e0000000000001}};
+
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Uint64());
+ m.Return(m.RoundUint64ToFloat64(m.Parameter(0)));
+
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(bit_cast<double>(values[i].expected), m.Call(values[i].input));
+ }
+}
+
+
+TEST(RunRoundUint64ToFloat32) {
+ struct {
+ uint64_t input;
+ uint32_t expected;
+ } values[] = {{0x0, 0x0},
+ {0x1, 0x3f800000},
+ {0xffffffff, 0x4f800000},
+ {0x1b09788b, 0x4dd84bc4},
+ {0x4c5fce8, 0x4c98bf9d},
+ {0xcc0de5bf, 0x4f4c0de6},
+ {0x2, 0x40000000},
+ {0x3, 0x40400000},
+ {0x4, 0x40800000},
+ {0x5, 0x40a00000},
+ {0x8, 0x41000000},
+ {0x9, 0x41100000},
+ {0xffffffffffffffff, 0x5f800000},
+ {0xfffffffffffffffe, 0x5f800000},
+ {0xfffffffffffffffd, 0x5f800000},
+ {0x0, 0x0},
+ {0x100000000, 0x4f800000},
+ {0xffffffff00000000, 0x5f800000},
+ {0x1b09788b00000000, 0x5dd84bc4},
+ {0x4c5fce800000000, 0x5c98bf9d},
+ {0xcc0de5bf00000000, 0x5f4c0de6},
+ {0x200000000, 0x50000000},
+ {0x300000000, 0x50400000},
+ {0x400000000, 0x50800000},
+ {0x500000000, 0x50a00000},
+ {0x800000000, 0x51000000},
+ {0x900000000, 0x51100000},
+ {0x273a798e187937a3, 0x5e1ce9e6},
+ {0xece3af835495a16b, 0x5f6ce3b0},
+ {0xb668ecc11223344, 0x5d3668ed},
+ {0x9e, 0x431e0000},
+ {0x43, 0x42860000},
+ {0xaf73, 0x472f7300},
+ {0x116b, 0x458b5800},
+ {0x658ecc, 0x4acb1d98},
+ {0x2b3b4c, 0x4a2ced30},
+ {0x88776655, 0x4f087766},
+ {0x70000000, 0x4ee00000},
+ {0x7200000, 0x4ce40000},
+ {0x7fffffff, 0x4f000000},
+ {0x56123761, 0x4eac246f},
+ {0x7fffff00, 0x4efffffe},
+ {0x761c4761eeeeeeee, 0x5eec388f},
+ {0x80000000eeeeeeee, 0x5f000000},
+ {0x88888888dddddddd, 0x5f088889},
+ {0xa0000000dddddddd, 0x5f200000},
+ {0xddddddddaaaaaaaa, 0x5f5dddde},
+ {0xe0000000aaaaaaaa, 0x5f600000},
+ {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
+ {0xfffffffdeeeeeeee, 0x5f800000},
+ {0xf0000000dddddddd, 0x5f700000},
+ {0x7fffffdddddddd, 0x5b000000},
+ {0x3fffffaaaaaaaa, 0x5a7fffff},
+ {0x1fffffaaaaaaaa, 0x59fffffd},
+ {0xfffff, 0x497ffff0},
+ {0x7ffff, 0x48ffffe0},
+ {0x3ffff, 0x487fffc0},
+ {0x1ffff, 0x47ffff80},
+ {0xffff, 0x477fff00},
+ {0x7fff, 0x46fffe00},
+ {0x3fff, 0x467ffc00},
+ {0x1fff, 0x45fff800},
+ {0xfff, 0x457ff000},
+ {0x7ff, 0x44ffe000},
+ {0x3ff, 0x447fc000},
+ {0x1ff, 0x43ff8000},
+ {0x3fffffffffff, 0x56800000},
+ {0x1fffffffffff, 0x56000000},
+ {0xfffffffffff, 0x55800000},
+ {0x7ffffffffff, 0x55000000},
+ {0x3ffffffffff, 0x54800000},
+ {0x1ffffffffff, 0x54000000},
+ {0x8000008000000000, 0x5f000000},
+ {0x8000008000000001, 0x5f000001},
+ {0x8000000000000400, 0x5f000000},
+ {0x8000000000000401, 0x5f000000}};
+
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Uint64());
+ m.Return(m.RoundUint64ToFloat32(m.Parameter(0)));
+
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(bit_cast<float>(values[i].expected), m.Call(values[i].input));
+ }
+}
+
+
#endif
TEST(RunBitcastFloat32ToInt32) {
float input = 32.25;
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.BitcastFloat32ToInt32(m.LoadFromPointer(&input, kMachFloat32)));
+ m.Return(m.BitcastFloat32ToInt32(
+ m.LoadFromPointer(&input, MachineType::Float32())));
FOR_FLOAT32_INPUTS(i) {
input = *i;
int32_t expected = bit_cast<int32_t>(input);
@@ -5401,8 +6006,8 @@ TEST(RunBitcastInt32ToFloat32) {
float output = 0.0;
RawMachineAssemblerTester<int32_t> m;
m.StoreToPointer(
- &output, kMachFloat32,
- m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, kMachInt32)));
+ &output, MachineRepresentation::kFloat32,
+ m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, MachineType::Int32())));
m.Return(m.Int32Constant(11));
FOR_INT32_INPUTS(i) {
input = *i;
@@ -5424,10 +6029,10 @@ TEST(RunComputedCodeObject) {
b.End();
Handle<Code> code_b = b.GetCode();
- RawMachineAssemblerTester<int32_t> r(kMachInt32);
- RawMachineAssembler::Label tlabel;
- RawMachineAssembler::Label flabel;
- RawMachineAssembler::Label merge;
+ RawMachineAssemblerTester<int32_t> r(MachineType::Int32());
+ RawMachineLabel tlabel;
+ RawMachineLabel flabel;
+ RawMachineLabel merge;
r.Branch(r.Parameter(0), &tlabel, &flabel);
r.Bind(&tlabel);
Node* fa = r.HeapConstant(code_a);
@@ -5436,7 +6041,7 @@ TEST(RunComputedCodeObject) {
Node* fb = r.HeapConstant(code_b);
r.Goto(&merge);
r.Bind(&merge);
- Node* phi = r.Phi(kMachInt32, fa, fb);
+ Node* phi = r.Phi(MachineRepresentation::kWord32, fa, fb);
// TODO(titzer): all this descriptor hackery is just to call the above
// functions as code objects instead of direct addresses.
@@ -5446,7 +6051,7 @@ TEST(RunComputedCodeObject) {
Signature<LinkageLocation> loc(1, 0, ret);
CallDescriptor* desc = new (r.zone()) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
- kMachAnyTagged, // target_type
+ MachineType::AnyTagged(), // target_type
c->GetInputLocation(0), // target_loc
&sig, // machine_sig
&loc, // location_sig
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 3c64bc1295..791b0d7ae5 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -2,14 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/assembler.h"
#include "src/codegen.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/raw-machine-assembler.h"
+#include "src/machine-type.h"
#include "src/register-configuration.h"
#include "test/cctest/cctest.h"
@@ -21,15 +18,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-typedef RawMachineAssembler::Label MLabel;
-
-#if V8_TARGET_ARCH_ARM64
-// TODO(titzer): fix native stack parameters on arm64
-#define DISABLE_NATIVE_STACK_PARAMS true
-#else
-#define DISABLE_NATIVE_STACK_PARAMS false
-#endif
-
namespace {
typedef float float32;
typedef double float64;
@@ -145,7 +133,7 @@ struct Allocator {
int stack_offset;
LinkageLocation Next(MachineType type) {
- if (IsFloatingPoint(type)) {
+ if (IsFloatingPoint(type.representation())) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
return LinkageLocation::ForRegister(fp_regs[fp_offset++]);
@@ -165,16 +153,11 @@ struct Allocator {
}
}
}
- bool IsFloatingPoint(MachineType type) {
- return RepresentationOf(type) == kRepFloat32 ||
- RepresentationOf(type) == kRepFloat64;
- }
int StackWords(MachineType type) {
// TODO(titzer): hack. float32 occupies 8 bytes on stack.
- int size = (RepresentationOf(type) == kRepFloat32 ||
- RepresentationOf(type) == kRepFloat64)
+ int size = IsFloatingPoint(type.representation())
? kDoubleSize
- : ElementSizeOf(type);
+ : (1 << ElementSizeLog2Of(type.representation()));
return size <= kPointerSize ? 1 : size / kPointerSize;
}
void Reset() {
@@ -210,7 +193,7 @@ class RegisterConfig {
const RegList kCalleeSaveRegisters = 0;
const RegList kCalleeSaveFPRegisters = 0;
- MachineType target_type = compiler::kMachAnyTagged;
+ MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
int stack_param_count = params.stack_offset;
return new (zone) CallDescriptor( // --
@@ -223,7 +206,7 @@ class RegisterConfig {
compiler::Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
- CallDescriptor::kNoFlags, // flags
+ CallDescriptor::kUseNativeStack, // flags
"c-call");
}
@@ -235,17 +218,28 @@ class RegisterConfig {
const int kMaxParamCount = 64;
MachineType kIntTypes[kMaxParamCount + 1] = {
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32,
- kMachInt32, kMachInt32, kMachInt32, kMachInt32, kMachInt32};
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32()};
// For making uniform int32 signatures shorter.
@@ -364,8 +358,8 @@ class ArgsBuffer {
Node* StoreOutput(RawMachineAssembler& raw, Node* value) {
Node* base = raw.PointerConstant(&output);
Node* offset = raw.Int32Constant(0);
- return raw.Store(MachineTypeForC<CType>(), base, offset, value,
- kNoWriteBarrier);
+ return raw.Store(MachineTypeForC<CType>().representation(), base, offset,
+ value, kNoWriteBarrier);
}
// Computes the next set of inputs by updating the {input} array.
@@ -561,8 +555,6 @@ static void TestInt32Sub(CallDescriptor* desc) {
static void CopyTwentyInt32(CallDescriptor* desc) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
-
const int kNumParams = 20;
int32_t input[kNumParams];
int32_t output[kNumParams];
@@ -577,7 +569,8 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Node* base = raw.PointerConstant(output);
for (int i = 0; i < kNumParams; i++) {
Node* offset = raw.Int32Constant(i * sizeof(int32_t));
- raw.Store(kMachInt32, base, offset, raw.Parameter(i), kNoWriteBarrier);
+ raw.Store(MachineRepresentation::kWord32, base, offset, raw.Parameter(i),
+ kNoWriteBarrier);
}
raw.Return(raw.Int32Constant(42));
inner = CompileGraph("CopyTwentyInt32", desc, &graph, raw.Export());
@@ -596,7 +589,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Node** args = zone.NewArray<Node*>(kNumParams);
for (int i = 0; i < kNumParams; i++) {
Node* offset = raw.Int32Constant(i * sizeof(int32_t));
- args[i] = raw.Load(kMachInt32, base, offset);
+ args[i] = raw.Load(MachineType::Int32(), base, offset);
}
Node* call = raw.CallN(desc, target, args);
@@ -674,7 +667,6 @@ TEST_INT32_SUB_WITH_RET(19)
TEST(Run_Int32Sub_all_allocatable_single) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
Int32Signature sig(2);
RegisterPairs pairs;
while (pairs.More()) {
@@ -692,7 +684,6 @@ TEST(Run_Int32Sub_all_allocatable_single) {
TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
Int32Signature sig(20);
RegisterPairs pairs;
while (pairs.More()) {
@@ -745,7 +736,6 @@ static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
static void Test_Int32_WeightedSum_of_size(int count) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
Int32Signature sig(count);
for (int p0 = 0; p0 < Register::kNumRegisters; p0++) {
if (Register::from_code(p0).IsAllocatable()) {
@@ -807,8 +797,6 @@ static void RunSelect(CallDescriptor* desc) {
template <int which>
void Test_Int32_Select() {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
-
int parray[] = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->GetAllocatableGeneralCode(0)};
@@ -932,7 +920,6 @@ TEST(Float64Select_registers) {
TEST(Float32Select_stack_params_return_reg) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
int rarray[] = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->GetAllocatableDoubleCode(0)};
@@ -955,7 +942,6 @@ TEST(Float32Select_stack_params_return_reg) {
TEST(Float64Select_stack_params_return_reg) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
int rarray[] = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->GetAllocatableDoubleCode(0)};
@@ -1010,8 +996,6 @@ static void Build_Select_With_Call(CallDescriptor* desc,
TEST(Float64StackParamsToStackParams) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
-
int rarray[] = {
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->GetAllocatableDoubleCode(0)};
@@ -1032,7 +1016,6 @@ TEST(Float64StackParamsToStackParams) {
void MixedParamTest(int start) {
- if (DISABLE_NATIVE_STACK_PARAMS) return;
if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
->num_double_registers() < 2)
return;
@@ -1040,16 +1023,21 @@ void MixedParamTest(int start) {
// TODO(titzer): mix in 64-bit types on all platforms when supported.
#if V8_TARGET_ARCH_32_BIT
static MachineType types[] = {
- kMachInt32, kMachFloat32, kMachFloat64, kMachInt32, kMachFloat64,
- kMachFloat32, kMachFloat32, kMachFloat64, kMachInt32, kMachFloat32,
- kMachInt32, kMachFloat64, kMachFloat64, kMachFloat32, kMachInt32,
- kMachFloat64, kMachInt32, kMachFloat32};
+ MachineType::Int32(), MachineType::Float32(), MachineType::Float64(),
+ MachineType::Int32(), MachineType::Float64(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float64(), MachineType::Int32(),
+ MachineType::Float32(), MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float32(), MachineType::Int32(),
+ MachineType::Float64(), MachineType::Int32(), MachineType::Float32()};
#else
static MachineType types[] = {
- kMachInt32, kMachInt64, kMachFloat32, kMachFloat64, kMachInt32,
- kMachFloat64, kMachFloat32, kMachInt64, kMachFloat64, kMachInt32,
- kMachFloat32, kMachInt32, kMachFloat64, kMachFloat64, kMachInt64,
- kMachInt32, kMachFloat64, kMachInt32, kMachFloat32};
+ MachineType::Int32(), MachineType::Int64(), MachineType::Float32(),
+ MachineType::Float64(), MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float32(), MachineType::Int64(), MachineType::Int64(),
+ MachineType::Float32(), MachineType::Float32(), MachineType::Int32(),
+ MachineType::Float64(), MachineType::Float64(), MachineType::Int64(),
+ MachineType::Int32(), MachineType::Float64(), MachineType::Int32(),
+ MachineType::Float32()};
#endif
Isolate* isolate = CcTest::InitIsolateOnce();
@@ -1118,22 +1106,22 @@ void MixedParamTest(int start) {
for (int i = 0; i < num_params; i++) {
MachineType param_type = sig->GetParam(i);
Node* konst = nullptr;
- if (param_type == kMachInt32) {
+ if (param_type == MachineType::Int32()) {
int32_t value[] = {static_cast<int32_t>(constant)};
konst = raw.Int32Constant(value[0]);
if (i == which) memcpy(bytes, value, expected_size = 4);
}
- if (param_type == kMachInt64) {
+ if (param_type == MachineType::Int64()) {
int64_t value[] = {static_cast<int64_t>(constant)};
konst = raw.Int64Constant(value[0]);
if (i == which) memcpy(bytes, value, expected_size = 8);
}
- if (param_type == kMachFloat32) {
+ if (param_type == MachineType::Float32()) {
float32 value[] = {static_cast<float32>(constant)};
konst = raw.Float32Constant(value[0]);
if (i == which) memcpy(bytes, value, expected_size = 4);
}
- if (param_type == kMachFloat64) {
+ if (param_type == MachineType::Float64()) {
float64 value[] = {static_cast<float64>(constant)};
konst = raw.Float64Constant(value[0]);
if (i == which) memcpy(bytes, value, expected_size = 8);
@@ -1145,7 +1133,8 @@ void MixedParamTest(int start) {
}
Node* call = raw.CallN(desc, target, args);
- Node* store = raw.StoreToPointer(output, sig->GetReturn(), call);
+ Node* store =
+ raw.StoreToPointer(output, sig->GetReturn().representation(), call);
USE(store);
expected_ret = static_cast<int32_t>(constant);
raw.Return(raw.Int32Constant(expected_ret));
diff --git a/deps/v8/test/cctest/compiler/test-run-properties.cc b/deps/v8/test/cctest/compiler/test-run-properties.cc
index 2a592300e7..3c42102529 100644
--- a/deps/v8/test/cctest/compiler/test-run-properties.cc
+++ b/deps/v8/test/cctest/compiler/test-run-properties.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-stackcheck.cc b/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
index bba411d283..52556ac87f 100644
--- a/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index c4751c0e28..7a2a09405c 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
@@ -14,7 +11,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/pipeline.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -22,55 +19,13 @@ namespace internal {
namespace compiler {
-TEST(RunOptimizedMathFloorStub) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
-
- // Create code and an accompanying descriptor.
- MathFloorStub stub(isolate, TurboFanIC::CALL_FROM_OPTIMIZED_CODE);
- Handle<Code> code = stub.GenerateCode();
- Zone* zone = scope.main_zone();
- CompilationInfo info(&stub, isolate, zone);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info);
- Handle<FixedArray> tv = isolate->factory()->NewFixedArray(10);
-
- // Create a function to call the code using the descriptor.
- Graph graph(zone);
- CommonOperatorBuilder common(zone);
- JSOperatorBuilder javascript(zone);
- MachineOperatorBuilder machine(zone);
- JSGraph js(isolate, &graph, &common, &javascript, nullptr, &machine);
-
- // FunctionTester (ab)uses a 2-argument function
- Node* start = graph.NewNode(common.Start(4));
- // Parameter 0 is the number to round
- Node* numberParam = graph.NewNode(common.Parameter(1), start);
- Node* theCode = graph.NewNode(common.HeapConstant(code));
- Node* vector = graph.NewNode(common.HeapConstant(tv));
- Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
- Node* call =
- graph.NewNode(common.Call(descriptor), theCode, js.UndefinedConstant(),
- js.OneConstant(), vector, js.UndefinedConstant(),
- numberParam, dummyContext, start, start);
- Node* ret = graph.NewNode(common.Return(), call, call, start);
- Node* end = graph.NewNode(common.End(1), ret);
- graph.SetStart(start);
- graph.SetEnd(end);
- FunctionTester ft(&graph);
-
- Handle<Object> value = ft.Val(1.5);
- Handle<Object> result = ft.Call(value, value).ToHandleChecked();
- CHECK_EQ(1, Smi::cast(*result)->value());
-}
-
-
-TEST(RunStringLengthTFStub) {
+TEST(RunStringLengthStub) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
Zone* zone = scope.main_zone();
// Create code and an accompanying descriptor.
- StringLengthTFStub stub(isolate);
+ StringLengthStub stub(isolate);
Handle<Code> code = stub.GenerateCode();
CompilationInfo info(&stub, isolate, zone);
CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info);
@@ -94,7 +49,7 @@ TEST(RunStringLengthTFStub) {
Node* end = graph.NewNode(common.End(1), ret);
graph.SetStart(start);
graph.SetEnd(end);
- FunctionTester ft(&graph);
+ FunctionTester ft(&graph, 4);
// Actuall call through to the stub, verifying its result.
const char* testString = "Und das Lamm schrie HURZ!";
@@ -109,42 +64,6 @@ TEST(RunStringLengthTFStub) {
}
-TEST(RunStringAddTFStub) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- Zone* zone = scope.main_zone();
-
- // Create code and an accompanying descriptor.
- StringAddTFStub stub(isolate, STRING_ADD_CHECK_BOTH, NOT_TENURED);
- Handle<Code> code = stub.GenerateCode();
- CompilationInfo info(&stub, isolate, zone);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info);
-
- // Create a function to call the code using the descriptor.
- Graph graph(zone);
- CommonOperatorBuilder common(zone);
- // FunctionTester (ab)uses a 2-argument function
- Node* start = graph.NewNode(common.Start(4));
- // Parameter 0 is the receiver
- Node* leftParam = graph.NewNode(common.Parameter(1), start);
- Node* rightParam = graph.NewNode(common.Parameter(2), start);
- Node* theCode = graph.NewNode(common.HeapConstant(code));
- Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
- Node* call = graph.NewNode(common.Call(descriptor), theCode, leftParam,
- rightParam, dummyContext, start, start);
- Node* ret = graph.NewNode(common.Return(), call, call, start);
- Node* end = graph.NewNode(common.End(1), ret);
- graph.SetStart(start);
- graph.SetEnd(end);
- FunctionTester ft(&graph);
-
- // Actuall call through to the stub, verifying its result.
- Handle<String> leftArg = ft.Val("links");
- Handle<String> rightArg = ft.Val("rechts");
- Handle<Object> result = ft.Call(leftArg, rightArg).ToHandleChecked();
- CHECK(String::Equals(ft.Val("linksrechts"), Handle<String>::cast(result)));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index 4f587ef085..f856368509 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -51,6 +48,7 @@ static const char* bind_tests[] = {
static void RunVariableTests(const char* source, const char* tests[]) {
+ i::FLAG_legacy_const = true;
EmbeddedVector<char, 512> buffer;
for (int i = 0; tests[i] != NULL; i += 3) {
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 20bfc0f731..1b752edd3c 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <limits>
+#include "src/ast/scopes.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/control-builders.h"
@@ -20,9 +18,8 @@
#include "src/compiler/typer.h"
#include "src/compiler/verifier.h"
#include "src/execution.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -36,8 +33,8 @@ namespace compiler {
template <typename ReturnType>
class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
public:
- SimplifiedLoweringTester(MachineType p0 = kMachNone,
- MachineType p1 = kMachNone)
+ SimplifiedLoweringTester(MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None())
: GraphBuilderTester<ReturnType>(p0, p1),
typer(this->isolate(), this->graph()),
javascript(this->zone()),
@@ -83,7 +80,7 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
T* CallWithPotentialGC() {
// TODO(titzer): we wrap the code in a JSFunction here to reuse the
// JSEntryStub; that could be done with a special prologue or other stub.
- Handle<JSFunction> fun = FunctionTester::ForMachineGraph(this->graph());
+ Handle<JSFunction> fun = FunctionTester::ForMachineGraph(this->graph(), 0);
Handle<Object>* args = NULL;
MaybeHandle<Object> result = Execution::Call(
this->isolate(), fun, factory()->undefined_value(), 0, args);
@@ -103,15 +100,15 @@ TEST(RunNumberToInt32_float64) {
int32_t result;
SimplifiedLoweringTester<Object*> t;
FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
- kMachFloat64};
+ MachineType::Float64()};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToInt32(loaded);
FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
- kMachInt32};
+ MachineType::Int32()};
t.StoreField(store, t.PointerConstant(&result), convert);
t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_FLOAT64_INPUTS(i) {
@@ -130,15 +127,15 @@ TEST(RunNumberToUint32_float64) {
uint32_t result;
SimplifiedLoweringTester<Object*> t;
FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
- kMachFloat64};
+ MachineType::Float64()};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToUint32(loaded);
FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
- kMachUint32};
+ MachineType::Uint32()};
t.StoreField(store, t.PointerConstant(&result), convert);
t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
FOR_FLOAT64_INPUTS(i) {
@@ -160,12 +157,12 @@ static Handle<JSObject> TestObject() {
TEST(RunLoadMap) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
FieldAccess access = AccessBuilder::ForMap();
Node* load = t.LoadField(access, t.Parameter(0));
t.Return(load);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Handle<JSObject> src = TestObject();
@@ -176,12 +173,13 @@ TEST(RunLoadMap) {
TEST(RunStoreMap) {
- SimplifiedLoweringTester<int32_t> t(kMachAnyTagged, kMachAnyTagged);
+ SimplifiedLoweringTester<int32_t> t(MachineType::AnyTagged(),
+ MachineType::AnyTagged());
FieldAccess access = AccessBuilder::ForMap();
t.StoreField(access, t.Parameter(1), t.Parameter(0));
t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Handle<JSObject> src = TestObject();
@@ -194,12 +192,12 @@ TEST(RunStoreMap) {
TEST(RunLoadProperties) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
FieldAccess access = AccessBuilder::ForJSObjectProperties();
Node* load = t.LoadField(access, t.Parameter(0));
t.Return(load);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Handle<JSObject> src = TestObject();
@@ -210,13 +208,14 @@ TEST(RunLoadProperties) {
TEST(RunLoadStoreMap) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged, kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged(),
+ MachineType::AnyTagged());
FieldAccess access = AccessBuilder::ForMap();
Node* load = t.LoadField(access, t.Parameter(0));
t.StoreField(access, t.Parameter(1), load);
t.Return(load);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Handle<JSObject> src = TestObject();
@@ -231,13 +230,13 @@ TEST(RunLoadStoreMap) {
TEST(RunLoadStoreFixedArrayIndex) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
ElementAccess access = AccessBuilder::ForFixedArrayElement();
Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
t.Return(load);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Handle<FixedArray> array = t.factory()->NewFixedArray(2);
@@ -253,7 +252,7 @@ TEST(RunLoadStoreFixedArrayIndex) {
TEST(RunLoadStoreArrayBuffer) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
const int index = 12;
const int array_length = 2 * index;
ElementAccess buffer_access =
@@ -266,7 +265,7 @@ TEST(RunLoadStoreArrayBuffer) {
load);
t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
@@ -293,12 +292,12 @@ TEST(RunLoadFieldFromUntaggedBase) {
for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), kMachAnyTagged};
+ Type::Integral32(), MachineType::AnyTagged()};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadField(access, t.PointerConstant(smis));
t.Return(load);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
for (int j = -5; j <= 5; j++) {
Smi* expected = Smi::FromInt(j);
@@ -315,13 +314,13 @@ TEST(RunStoreFieldToUntaggedBase) {
for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), kMachAnyTagged};
+ Type::Integral32(), MachineType::AnyTagged()};
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* p0 = t.Parameter(0);
t.StoreField(access, t.PointerConstant(smis), p0);
t.Return(p0);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
for (int j = -5; j <= 5; j++) {
Smi* expected = Smi::FromInt(j);
@@ -341,13 +340,13 @@ TEST(RunLoadElementFromUntaggedBase) {
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadElement(access, t.PointerConstant(smis),
t.Int32Constant(static_cast<int>(j)));
t.Return(load);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
for (int k = -5; k <= 5; k++) {
Smi* expected = Smi::FromInt(k);
@@ -367,14 +366,14 @@ TEST(RunStoreElementFromUntaggedBase) {
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* p0 = t.Parameter(0);
t.StoreElement(access, t.PointerConstant(smis),
t.Int32Constant(static_cast<int>(j)), p0);
t.Return(p0);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
for (int k = -5; k <= 5; k++) {
Smi* expected = Smi::FromInt(k);
@@ -439,7 +438,7 @@ class AccessTester : public HandleAndZoneScope {
Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Object* result = t.Call();
@@ -459,7 +458,7 @@ class AccessTester : public HandleAndZoneScope {
Node* load = t.LoadField(from_access, ptr);
t.StoreField(to_access, ptr, load);
t.Return(t.jsgraph.TrueConstant());
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
Object* result = t.Call();
@@ -595,19 +594,19 @@ static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
TEST(RunAccessTests_uint8) {
uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
- RunAccessTest<uint8_t>(kMachInt8, data, arraysize(data));
+ RunAccessTest<uint8_t>(MachineType::Int8(), data, arraysize(data));
}
TEST(RunAccessTests_uint16) {
uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
- RunAccessTest<uint16_t>(kMachInt16, data, arraysize(data));
+ RunAccessTest<uint16_t>(MachineType::Int16(), data, arraysize(data));
}
TEST(RunAccessTests_int32) {
int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
- RunAccessTest<int32_t>(kMachInt32, data, arraysize(data));
+ RunAccessTest<int32_t>(MachineType::Int32(), data, arraysize(data));
}
@@ -621,13 +620,13 @@ TEST(RunAccessTests_int64) {
V8_2PART_INT64(0x30313233, 34353637),
V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
- RunAccessTest<int64_t>(kMachInt64, data, arraysize(data));
+ RunAccessTest<int64_t>(MachineType::Int64(), data, arraysize(data));
}
TEST(RunAccessTests_float64) {
double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
- RunAccessTest<double>(kMachFloat64, data, arraysize(data));
+ RunAccessTest<double>(MachineType::Float64(), data, arraysize(data));
}
@@ -635,7 +634,7 @@ TEST(RunAccessTests_Smi) {
Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
Smi::FromInt(0), Smi::FromInt(666),
Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
- RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
+ RunAccessTest<Smi*>(MachineType::AnyTagged(), data, arraysize(data));
}
@@ -651,7 +650,7 @@ TEST(RunAllocate) {
t.StoreField(access, alloc, map);
t.Return(alloc);
- t.LowerAllNodes();
+ t.LowerAllNodesAndLowerChanges();
t.GenerateCode();
HeapObject* result = t.CallWithPotentialGC<HeapObject>();
@@ -682,7 +681,7 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
javascript(main_zone()),
jsgraph(main_isolate(), graph(), common(), &javascript, simplified(),
machine()) {
- start = graph()->NewNode(common()->Start(2));
+ start = graph()->NewNode(common()->Start(4));
graph()->SetStart(start);
ret =
graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
@@ -723,6 +722,17 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
}
+ void LowerAllNodesAndLowerChanges() {
+ SourcePositionTable table(jsgraph.graph());
+ SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
+
+ ChangeLowering lowering(&jsgraph);
+ GraphReducer reducer(this->zone(), this->graph());
+ reducer.AddReducer(&lowering);
+ reducer.ReduceGraph();
+ Verifier::Run(this->graph());
+ }
+
// Inserts the node as the return value of the graph.
Node* Return(Node* node) {
ret->ReplaceInput(0, node);
@@ -733,44 +743,43 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
void Effect(Node* node) { ret->ReplaceInput(1, node); }
Node* ExampleWithOutput(MachineType type) {
- // TODO(titzer): use parameters with guaranteed representations.
- if (type & kTypeInt32) {
+ if (type.semantic() == MachineSemantic::kInt32) {
return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
jsgraph.Int32Constant(1));
- } else if (type & kTypeUint32) {
+ } else if (type.semantic() == MachineSemantic::kUint32) {
return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
jsgraph.Int32Constant(1));
- } else if (type & kRepFloat64) {
+ } else if (type.representation() == MachineRepresentation::kFloat64) {
return graph()->NewNode(machine()->Float64Add(),
jsgraph.Float64Constant(1),
jsgraph.Float64Constant(1));
- } else if (type & kRepBit) {
+ } else if (type.representation() == MachineRepresentation::kBit) {
return graph()->NewNode(machine()->Word32Equal(),
jsgraph.Int32Constant(1),
jsgraph.Int32Constant(1));
- } else if (type & kRepWord64) {
+ } else if (type.representation() == MachineRepresentation::kWord64) {
return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
Int64Constant(1));
} else {
- CHECK(type & kRepTagged);
+ CHECK(type.representation() == MachineRepresentation::kTagged);
return p0;
}
}
Node* Use(Node* node, MachineType type) {
- if (type & kTypeInt32) {
+ if (type.semantic() == MachineSemantic::kInt32) {
return graph()->NewNode(machine()->Int32LessThan(), node,
jsgraph.Int32Constant(1));
- } else if (type & kTypeUint32) {
+ } else if (type.semantic() == MachineSemantic::kUint32) {
return graph()->NewNode(machine()->Uint32LessThan(), node,
jsgraph.Int32Constant(1));
- } else if (type & kRepFloat64) {
+ } else if (type.representation() == MachineRepresentation::kFloat64) {
return graph()->NewNode(machine()->Float64Add(), node,
jsgraph.Float64Constant(1));
- } else if (type & kRepWord64) {
+ } else if (type.representation() == MachineRepresentation::kWord64) {
return graph()->NewNode(machine()->Int64LessThan(), node,
Int64Constant(1));
- } else if (type & kRepWord32) {
+ } else if (type.representation() == MachineRepresentation::kWord32) {
return graph()->NewNode(machine()->Word32Equal(), node,
jsgraph.Int32Constant(1));
} else {
@@ -802,7 +811,7 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
TEST(LowerBooleanNot_bit_bit) {
// BooleanNot(x: kRepBit) used as kRepBit
TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(kRepBit);
+ Node* b = t.ExampleWithOutput(MachineType::Bool());
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
Node* use = t.Branch(inv);
t.Lower();
@@ -817,9 +826,9 @@ TEST(LowerBooleanNot_bit_bit) {
TEST(LowerBooleanNot_bit_tagged) {
// BooleanNot(x: kRepBit) used as kRepTagged
TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(kRepBit);
+ Node* b = t.ExampleWithOutput(MachineType::Bool());
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Use(inv, kRepTagged);
+ Node* use = t.Use(inv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
@@ -851,7 +860,7 @@ TEST(LowerBooleanNot_tagged_tagged) {
TestingGraph t(Type::Boolean());
Node* b = t.p0;
Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
- Node* use = t.Use(inv, kRepTagged);
+ Node* use = t.Use(inv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
@@ -864,11 +873,11 @@ TEST(LowerBooleanNot_tagged_tagged) {
TEST(LowerBooleanToNumber_bit_int32) {
- // BooleanToNumber(x: kRepBit) used as kMachInt32
+ // BooleanToNumber(x: kRepBit) used as MachineType::Int32()
TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(kRepBit);
+ Node* b = t.ExampleWithOutput(MachineType::Bool());
Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, kMachInt32);
+ Node* use = t.Use(cnv, MachineType::Int32());
t.Return(use);
t.Lower();
CHECK_EQ(b, use->InputAt(0));
@@ -876,11 +885,11 @@ TEST(LowerBooleanToNumber_bit_int32) {
TEST(LowerBooleanToNumber_tagged_int32) {
- // BooleanToNumber(x: kRepTagged) used as kMachInt32
+ // BooleanToNumber(x: kRepTagged) used as MachineType::Int32()
TestingGraph t(Type::Boolean());
Node* b = t.p0;
Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, kMachInt32);
+ Node* use = t.Use(cnv, MachineType::Int32());
t.Return(use);
t.Lower();
CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
@@ -891,28 +900,28 @@ TEST(LowerBooleanToNumber_tagged_int32) {
TEST(LowerBooleanToNumber_bit_tagged) {
- // BooleanToNumber(x: kRepBit) used as kMachAnyTagged
+ // BooleanToNumber(x: kRepBit) used as MachineType::AnyTagged()
TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(kRepBit);
+ Node* b = t.ExampleWithOutput(MachineType::Bool());
Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, kMachAnyTagged);
+ Node* use = t.Use(cnv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
CHECK_EQ(b, use->InputAt(0)->InputAt(0));
- CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeUint32ToTagged, use->InputAt(0)->opcode());
}
TEST(LowerBooleanToNumber_tagged_tagged) {
- // BooleanToNumber(x: kRepTagged) used as kMachAnyTagged
+ // BooleanToNumber(x: kRepTagged) used as MachineType::AnyTagged()
TestingGraph t(Type::Boolean());
Node* b = t.p0;
Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, kMachAnyTagged);
+ Node* use = t.Use(cnv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
- CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeUint32ToTagged, use->InputAt(0)->opcode());
CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
Node* c = t.jsgraph.TrueConstant();
@@ -1034,33 +1043,11 @@ static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
}
-TEST(LowerNumberToInt32_to_nop) {
- // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepTagged
- TestingGraph t(Type::Signed32());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, kRepTagged);
- t.Return(use);
- t.Lower();
- CHECK_EQ(t.p0, use->InputAt(0));
-}
-
-
-TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
- // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepFloat64
- TestingGraph t(Type::Signed32());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, kRepFloat64);
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
-}
-
-
TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
// NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
TestingGraph t(Type::Signed32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, kTypeInt32);
+ Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
@@ -1068,11 +1055,11 @@ TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
- // NumberToInt32(x: kRepFloat64) used as kMachInt32
+ // NumberToInt32(x: kRepFloat64) used as MachineType::Int32()
TestingGraph t(Type::Number());
- Node* p0 = t.ExampleWithOutput(kMachFloat64);
+ Node* p0 = t.ExampleWithOutput(MachineType::Float64());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
- Node* use = t.Use(trunc, kMachInt32);
+ Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
@@ -1080,10 +1067,10 @@ TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
- // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachInt32
+ // NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Int32()
TestingGraph t(Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
- Node* use = t.Use(trunc, kMachInt32);
+ Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
Node* node = use->InputAt(0);
@@ -1094,33 +1081,11 @@ TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
}
-TEST(LowerNumberToUint32_to_nop) {
- // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
- TestingGraph t(Type::Unsigned32());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, kRepTagged);
- t.Return(use);
- t.Lower();
- CHECK_EQ(t.p0, use->InputAt(0));
-}
-
-
-TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
- // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
- TestingGraph t(Type::Unsigned32());
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, kRepFloat64);
- t.Return(use);
- t.Lower();
- CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
-}
-
-
TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
// NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
TestingGraph t(Type::Unsigned32());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, kTypeUint32);
+ Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
@@ -1128,13 +1093,13 @@ TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
- // NumberToUint32(x: kRepFloat64) used as kMachUint32
+ // NumberToUint32(x: kRepFloat64) used as MachineType::Uint32()
TestingGraph t(Type::Number());
- Node* p0 = t.ExampleWithOutput(kMachFloat64);
+ Node* p0 = t.ExampleWithOutput(MachineType::Float64());
// TODO(titzer): run the typer here, or attach machine type to param.
NodeProperties::SetType(p0, Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
- Node* use = t.Use(trunc, kMachUint32);
+ Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
@@ -1142,10 +1107,10 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
- // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachUint32
+ // NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Uint32()
TestingGraph t(Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
- Node* use = t.Use(trunc, kMachUint32);
+ Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
Node* node = use->InputAt(0);
@@ -1159,9 +1124,9 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
// NumberToUint32(x: kRepFloat64) used as kRepWord32
TestingGraph t(Type::Unsigned32());
- Node* input = t.ExampleWithOutput(kMachFloat64);
+ Node* input = t.ExampleWithOutput(MachineType::Float64());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
- Node* use = t.Use(trunc, kRepWord32);
+ Node* use = t.Use(trunc, MachineType::RepWord32());
t.Return(use);
t.Lower();
CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, input, use->InputAt(0));
@@ -1192,10 +1157,11 @@ TEST(LowerStringOps_to_call_and_compare) {
}
-void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
- MachineType to) {
+ void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
+ MachineType to, Type* type = Type::Any()) {
TestingGraph t(Type::Any());
Node* in = t.ExampleWithOutput(from);
+ NodeProperties::SetType(in, type);
Node* use = t.Use(in, to);
t.Return(use);
t.Lower();
@@ -1205,27 +1171,33 @@ void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
TEST(InsertBasicChanges) {
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, kRepFloat64,
- kTypeInt32);
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, kRepFloat64,
- kTypeUint32);
- CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, kRepTagged, kTypeInt32);
- CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, kRepTagged,
- kTypeUint32);
-
- CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, kRepFloat64,
- kRepTagged);
- CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, kRepTagged,
- kRepFloat64);
-
- CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, kTypeInt32,
- kRepFloat64);
- CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, kTypeInt32, kRepTagged);
-
- CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, kTypeUint32,
- kRepFloat64);
- CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, kTypeUint32,
- kRepTagged);
+ CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, MachineType::Float64(),
+ MachineType::Int32(), Type::Signed32());
+ CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, MachineType::Float64(),
+ MachineType::Uint32(), Type::Unsigned32());
+ CheckChangeInsertion(IrOpcode::kTruncateFloat64ToInt32,
+ MachineType::Float64(), MachineType::Uint32(),
+ Type::Integral32());
+ CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, MachineType::AnyTagged(),
+ MachineType::Int32(), Type::Signed32());
+ CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32,
+ MachineType::AnyTagged(), MachineType::Uint32(),
+ Type::Unsigned32());
+
+ CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, MachineType::Float64(),
+ MachineType::AnyTagged());
+ CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64,
+ MachineType::AnyTagged(), MachineType::Float64());
+
+ CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, MachineType::Int32(),
+ MachineType::Float64());
+ CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, MachineType::Int32(),
+ MachineType::AnyTagged());
+
+ CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, MachineType::Uint32(),
+ MachineType::Float64());
+ CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, MachineType::Uint32(),
+ MachineType::AnyTagged());
}
@@ -1338,7 +1310,8 @@ Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
CHECK(mindex.right().Is(access.header_size - access.tag()));
- const int element_size_shift = ElementSizeLog2Of(access.machine_type);
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
Int32BinopMatcher shl(mindex.left().node());
CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
@@ -1350,9 +1323,10 @@ Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
}
-const MachineType kMachineReps[] = {kMachInt8, kMachInt16, kMachInt32,
- kMachUint32, kMachInt64, kMachFloat64,
- kMachAnyTagged};
+const MachineType kMachineReps[] = {
+ MachineType::Int8(), MachineType::Int16(), MachineType::Int32(),
+ MachineType::Uint32(), MachineType::Int64(), MachineType::Float64(),
+ MachineType::AnyTagged()};
} // namespace
@@ -1368,12 +1342,12 @@ TEST(LowerLoadField_to_load) {
t.start, t.start);
Node* use = t.Use(load, kMachineReps[i]);
t.Return(use);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
CHECK_EQ(t.p0, load->InputAt(0));
CheckFieldAccessArithmetic(access, load);
- MachineType rep = OpParameter<MachineType>(load);
+ MachineType rep = LoadRepresentationOf(load->op());
CHECK_EQ(kMachineReps[i], rep);
}
}
@@ -1392,16 +1366,16 @@ TEST(LowerStoreField_to_store) {
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
val, t.start, t.start);
t.Effect(store);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(val, store->InputAt(2));
CheckFieldAccessArithmetic(access, store);
- StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
- if (kMachineReps[i] & kRepTagged) {
+ StoreRepresentation rep = StoreRepresentationOf(store->op());
+ if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
- CHECK_EQ(kMachineReps[i], rep.machine_type());
+ CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
}
{
@@ -1410,14 +1384,15 @@ TEST(LowerStoreField_to_store) {
TestingGraph t(Type::Any(), Type::Intersect(Type::SignedSmall(),
Type::TaggedSigned(), z));
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachAnyTagged};
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
t.Effect(store);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
- StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
+ StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
@@ -1434,12 +1409,12 @@ TEST(LowerLoadElement_to_load) {
t.p1, t.start, t.start);
Node* use = t.Use(load, kMachineReps[i]);
t.Return(use);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
CHECK_EQ(IrOpcode::kLoad, load->opcode());
CHECK_EQ(t.p0, load->InputAt(0));
CheckElementAccessArithmetic(access, load);
- MachineType rep = OpParameter<MachineType>(load);
+ MachineType rep = LoadRepresentationOf(load->op());
CHECK_EQ(kMachineReps[i], rep);
}
}
@@ -1457,16 +1432,16 @@ TEST(LowerStoreElement_to_store) {
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
t.p0, t.p1, val, t.start, t.start);
t.Effect(store);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(val, store->InputAt(2));
CheckElementAccessArithmetic(access, store);
- StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
- if (kMachineReps[i] & kRepTagged) {
+ StoreRepresentation rep = StoreRepresentationOf(store->op());
+ if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
- CHECK_EQ(kMachineReps[i], rep.machine_type());
+ CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
}
{
@@ -1476,14 +1451,14 @@ TEST(LowerStoreElement_to_store) {
Type::Any(), Type::Signed32(),
Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), z));
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachAnyTagged};
+ Type::Any(), MachineType::AnyTagged()};
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.p1, t.p2, t.start, t.start);
t.Effect(store);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
- StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
+ StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
@@ -1494,17 +1469,15 @@ TEST(InsertChangeForLoadElementIndex) {
// Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
t.Return(load);
t.Lower();
- CHECK_EQ(IrOpcode::kLoad, load->opcode());
+ CHECK_EQ(IrOpcode::kLoadElement, load->opcode());
CHECK_EQ(t.p0, load->InputAt(0));
-
- Node* index = CheckElementAccessArithmetic(access, load);
- CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
+ CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, load->InputAt(1));
}
@@ -1513,18 +1486,16 @@ TEST(InsertChangeForStoreElementIndex) {
// Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachAnyTagged};
+ MachineType::AnyTagged()};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
t.jsgraph.TrueConstant(), t.start, t.start);
t.Effect(store);
t.Lower();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
+ CHECK_EQ(IrOpcode::kStoreElement, store->opcode());
CHECK_EQ(t.p0, store->InputAt(0));
-
- Node* index = CheckElementAccessArithmetic(access, store);
- CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
+ CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, store->InputAt(1));
}
@@ -1532,13 +1503,13 @@ TEST(InsertChangeForLoadElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachFloat64};
+ MachineType::Float64()};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
t.Return(load);
t.Lower();
- CHECK_EQ(IrOpcode::kLoad, load->opcode());
+ CHECK_EQ(IrOpcode::kLoadElement, load->opcode());
CHECK_EQ(t.p0, load->InputAt(0));
CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
}
@@ -1548,13 +1519,14 @@ TEST(InsertChangeForLoadField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachFloat64};
+ Handle<Name>::null(), Type::Any(),
+ MachineType::Float64()};
Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);
t.Return(load);
t.Lower();
- CHECK_EQ(IrOpcode::kLoad, load->opcode());
+ CHECK_EQ(IrOpcode::kLoadField, load->opcode());
CHECK_EQ(t.p0, load->InputAt(0));
CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
}
@@ -1564,7 +1536,7 @@ TEST(InsertChangeForStoreElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- kMachFloat64};
+ MachineType::Float64()};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
@@ -1572,7 +1544,7 @@ TEST(InsertChangeForStoreElement) {
t.Effect(store);
t.Lower();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
+ CHECK_EQ(IrOpcode::kStoreElement, store->opcode());
CHECK_EQ(t.p0, store->InputAt(0));
CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
}
@@ -1582,23 +1554,24 @@ TEST(InsertChangeForStoreField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachFloat64};
+ Handle<Name>::null(), Type::Any(),
+ MachineType::Float64()};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
t.Effect(store);
t.Lower();
- CHECK_EQ(IrOpcode::kStore, store->opcode());
+ CHECK_EQ(IrOpcode::kStoreField, store->opcode());
CHECK_EQ(t.p0, store->InputAt(0));
- CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
+ CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(1));
}
TEST(UpdatePhi) {
TestingGraph t(Type::Any(), Type::Signed32());
- static const MachineType kMachineTypes[] = {kMachInt32, kMachUint32,
- kMachFloat64};
+ static const MachineType kMachineTypes[] = {
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Float64()};
Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
@@ -1609,47 +1582,31 @@ TEST(UpdatePhi) {
t.start, t.start);
Node* load1 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p1,
t.start, t.start);
- Node* phi = t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), load0,
- load1, t.start);
+ Node* phi =
+ t.graph()->NewNode(t.common()->Phi(MachineRepresentation::kTagged, 2),
+ load0, load1, t.start);
t.Return(t.Use(phi, kMachineTypes[i]));
t.Lower();
CHECK_EQ(IrOpcode::kPhi, phi->opcode());
- CHECK_EQ(RepresentationOf(kMachineTypes[i]),
- RepresentationOf(OpParameter<MachineType>(phi)));
+ CHECK_EQ(kMachineTypes[i].representation(), PhiRepresentationOf(phi->op()));
}
}
TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToInt32(t.Parameter(0));
Node* div = t.NumberDivide(num, t.jsgraph.Constant(-1));
Node* trunc = t.NumberToInt32(div);
t.Return(trunc);
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_INT32_INPUTS(i) {
- int32_t x = 0 - *i;
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
-}
-
-
-TEST(NumberMultiply_TruncatingToInt32) {
- int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- TestingGraph t(Type::Signed32());
- Node* k = t.jsgraph.Constant(constants[i]);
- Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
- Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
- t.Return(trunc);
- t.Lower();
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
- CHECK_EQ(IrOpcode::kInt32Mul, mul->opcode());
+ FOR_INT32_INPUTS(i) {
+ int32_t x = 0 - *i;
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
}
}
@@ -1659,7 +1616,7 @@ TEST(RunNumberMultiply_TruncatingToInt32) {
for (size_t i = 0; i < arraysize(constants); i++) {
double k = static_cast<double>(constants[i]);
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToInt32(t.Parameter(0));
Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
Node* trunc = t.NumberToInt32(mul);
@@ -1681,7 +1638,7 @@ TEST(RunNumberMultiply_TruncatingToUint32) {
for (size_t i = 0; i < arraysize(constants); i++) {
double k = static_cast<double>(constants[i]);
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToUint32(t.Parameter(0));
Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
Node* trunc = t.NumberToUint32(mul);
@@ -1699,7 +1656,7 @@ TEST(RunNumberMultiply_TruncatingToUint32) {
TEST(RunNumberDivide_2_TruncatingToUint32) {
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToUint32(t.Parameter(0));
Node* div = t.NumberDivide(num, t.jsgraph.Constant(2));
Node* trunc = t.NumberToUint32(div);
@@ -1745,7 +1702,7 @@ TEST(NumberDivide_TruncatingToInt32) {
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(constants[i]);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, kMachInt32);
+ Node* use = t.Use(div, MachineType::Int32());
t.Return(use);
t.Lower();
@@ -1759,7 +1716,7 @@ TEST(RunNumberDivide_TruncatingToInt32) {
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToInt32(t.Parameter(0));
Node* div = t.NumberDivide(num, t.jsgraph.Constant(k));
Node* trunc = t.NumberToInt32(div);
@@ -1784,7 +1741,7 @@ TEST(NumberDivide_TruncatingToUint32) {
TestingGraph t(Type::Unsigned32());
Node* k = t.jsgraph.Constant(constants[i]);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, kMachUint32);
+ Node* use = t.Use(div, MachineType::Uint32());
t.Return(use);
t.Lower();
@@ -1798,7 +1755,7 @@ TEST(RunNumberDivide_TruncatingToUint32) {
for (size_t i = 0; i < arraysize(constants); i++) {
uint32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToUint32(t.Parameter(0));
Node* div = t.NumberDivide(num, t.jsgraph.Constant(static_cast<double>(k)));
Node* trunc = t.NumberToUint32(div);
@@ -1820,7 +1777,7 @@ TEST(NumberDivide_BadConstants) {
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(-1);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, kMachInt32);
+ Node* use = t.Use(div, MachineType::Int32());
t.Return(use);
t.Lower();
@@ -1831,7 +1788,7 @@ TEST(NumberDivide_BadConstants) {
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(0);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, kMachInt32);
+ Node* use = t.Use(div, MachineType::Int32());
t.Return(use);
t.Lower();
@@ -1843,7 +1800,7 @@ TEST(NumberDivide_BadConstants) {
TestingGraph t(Type::Unsigned32());
Node* k = t.jsgraph.Constant(0);
Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
- Node* use = t.Use(div, kMachUint32);
+ Node* use = t.Use(div, MachineType::Uint32());
t.Return(use);
t.Lower();
@@ -1860,7 +1817,7 @@ TEST(NumberModulus_TruncatingToInt32) {
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(constants[i]);
Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
- Node* use = t.Use(mod, kMachInt32);
+ Node* use = t.Use(mod, MachineType::Int32());
t.Return(use);
t.Lower();
@@ -1874,7 +1831,7 @@ TEST(RunNumberModulus_TruncatingToInt32) {
for (size_t i = 0; i < arraysize(constants); i++) {
int32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToInt32(t.Parameter(0));
Node* mod = t.NumberModulus(num, t.jsgraph.Constant(k));
Node* trunc = t.NumberToInt32(mod);
@@ -1913,7 +1870,7 @@ TEST(RunNumberModulus_TruncatingToUint32) {
for (size_t i = 0; i < arraysize(constants); i++) {
uint32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* num = t.NumberToUint32(t.Parameter(0));
Node* mod =
t.NumberModulus(num, t.jsgraph.Constant(static_cast<double>(k)));
@@ -1948,7 +1905,7 @@ TEST(NumberModulus_Int32) {
TEST(NumberModulus_Uint32) {
const double kConstants[] = {2, 100, 1000, 1024, 2048};
- const MachineType kTypes[] = {kMachInt32, kMachUint32};
+ const MachineType kTypes[] = {MachineType::Int32(), MachineType::Uint32()};
for (auto const type : kTypes) {
for (auto const c : kConstants) {
@@ -1973,18 +1930,20 @@ TEST(PhiRepresentation) {
Type* arg1;
Type* arg2;
MachineType use;
- MachineTypeUnion expected;
+ MachineRepresentation expected;
};
TestData test_data[] = {
- {Type::Signed32(), Type::Unsigned32(), kMachInt32,
- kRepWord32 | kTypeNumber},
- {Type::Signed32(), Type::Unsigned32(), kMachUint32,
- kRepWord32 | kTypeNumber},
- {Type::Signed32(), Type::Signed32(), kMachInt32, kMachInt32},
- {Type::Unsigned32(), Type::Unsigned32(), kMachInt32, kMachUint32},
- {Type::Number(), Type::Signed32(), kMachInt32, kMachFloat64},
- {Type::Signed32(), Type::String(), kMachInt32, kMachAnyTagged}};
+ {Type::Signed32(), Type::Unsigned32(), MachineType::Int32(),
+ MachineRepresentation::kWord32},
+ {Type::Signed32(), Type::Unsigned32(), MachineType::Uint32(),
+ MachineRepresentation::kWord32},
+ {Type::Signed32(), Type::Signed32(), MachineType::Int32(),
+ MachineRepresentation::kWord32},
+ {Type::Unsigned32(), Type::Unsigned32(), MachineType::Int32(),
+ MachineRepresentation::kWord32},
+ {Type::Number(), Type::Signed32(), MachineType::Int32(),
+ MachineRepresentation::kWord32}};
for (auto const d : test_data) {
TestingGraph t(d.arg1, d.arg2, Type::Boolean());
@@ -1994,8 +1953,8 @@ TEST(PhiRepresentation) {
Node* fb = t.graph()->NewNode(t.common()->IfFalse(), br);
Node* m = t.graph()->NewNode(t.common()->Merge(2), tb, fb);
- Node* phi =
- t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), t.p0, t.p1, m);
+ Node* phi = t.graph()->NewNode(
+ t.common()->Phi(MachineRepresentation::kTagged, 2), t.p0, t.p1, m);
Type* phi_type = Type::Union(d.arg1, d.arg2, z);
NodeProperties::SetType(phi, phi_type);
@@ -2004,7 +1963,7 @@ TEST(PhiRepresentation) {
t.Return(use);
t.Lower();
- CHECK_EQ(d.expected, OpParameter<MachineType>(phi));
+ CHECK_EQ(d.expected, PhiRepresentationOf(phi->op()));
}
}
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 20be67a975..cbde9a7417 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -5,6 +5,8 @@
#ifndef V8_CCTEST_COMPILER_VALUE_HELPER_H_
#define V8_CCTEST_COMPILER_VALUE_HELPER_H_
+#include <stdint.h>
+
#include "src/compiler/common-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
@@ -59,53 +61,164 @@ class ValueHelper {
}
static std::vector<float> float32_vector() {
+ static const float nan = std::numeric_limits<float>::quiet_NaN();
static const float kValues[] = {
- -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
- -1.22813e+35f, -1.20555e+35f, -1.34584e+34f,
- -1.0079e+32f, -6.49364e+26f, -3.06077e+25f,
- -1.46821e+25f, -1.17658e+23f, -1.9617e+22f,
- -2.7357e+20f, -1.48708e+13f, -1.89633e+12f,
- -4.66622e+11f, -2.22581e+11f, -1.45381e+10f,
- -1.3956e+09f, -1.32951e+09f, -1.30721e+09f,
- -1.19756e+09f, -9.26822e+08f, -6.35647e+08f,
- -4.00037e+08f, -1.81227e+08f, -5.09256e+07f,
- -964300.0f, -192446.0f, -28455.0f,
- -27194.0f, -26401.0f, -20575.0f,
- -17069.0f, -9167.0f, -960.178f,
- -113.0f, -62.0f, -15.0f,
- -7.0f, -0.0256635f, -4.60374e-07f,
- -3.63759e-10f, -4.30175e-14f, -5.27385e-15f,
- -1.48084e-15f, -1.05755e-19f, -3.2995e-21f,
- -1.67354e-23f, -1.11885e-23f, -1.78506e-30f,
- -5.07594e-31f, -3.65799e-31f, -1.43718e-34f,
- -1.27126e-38f, -0.0f, 0.0f,
- 1.17549e-38f, 1.56657e-37f, 4.08512e-29f,
- 3.31357e-28f, 6.25073e-22f, 4.1723e-13f,
- 1.44343e-09f, 5.27004e-08f, 9.48298e-08f,
- 5.57888e-07f, 4.89988e-05f, 0.244326f,
- 12.4895f, 19.0f, 47.0f,
- 106.0f, 538.324f, 564.536f,
- 819.124f, 7048.0f, 12611.0f,
- 19878.0f, 20309.0f, 797056.0f,
- 1.77219e+09f, 1.51116e+11f, 4.18193e+13f,
- 3.59167e+16f, 3.38211e+19f, 2.67488e+20f,
- 1.78831e+21f, 9.20914e+21f, 8.35654e+23f,
- 1.4495e+24f, 5.94015e+25f, 4.43608e+30f,
- 2.44502e+33f, 2.61152e+33f, 1.38178e+37f,
- 1.71306e+37f, 3.31899e+38f, 3.40282e+38f,
- std::numeric_limits<float>::infinity()};
+ -std::numeric_limits<float>::infinity(),
+ -2.70497e+38f,
+ -1.4698e+37f,
+ -1.22813e+35f,
+ -1.20555e+35f,
+ -1.34584e+34f,
+ -1.0079e+32f,
+ -6.49364e+26f,
+ -3.06077e+25f,
+ -1.46821e+25f,
+ -1.17658e+23f,
+ -1.9617e+22f,
+ -2.7357e+20f,
+ -9223372036854775808.0f, // INT64_MIN
+ -1.48708e+13f,
+ -1.89633e+12f,
+ -4.66622e+11f,
+ -2.22581e+11f,
+ -1.45381e+10f,
+ -1.3956e+09f,
+ -1.32951e+09f,
+ -1.30721e+09f,
+ -1.19756e+09f,
+ -9.26822e+08f,
+ -6.35647e+08f,
+ -4.00037e+08f,
+ -1.81227e+08f,
+ -5.09256e+07f,
+ -964300.0f,
+ -192446.0f,
+ -28455.0f,
+ -27194.0f,
+ -26401.0f,
+ -20575.0f,
+ -17069.0f,
+ -9167.0f,
+ -960.178f,
+ -113.0f,
+ -62.0f,
+ -15.0f,
+ -7.0f,
+ -1.0f,
+ -0.0256635f,
+ -4.60374e-07f,
+ -3.63759e-10f,
+ -4.30175e-14f,
+ -5.27385e-15f,
+ -1.48084e-15f,
+ -1.05755e-19f,
+ -3.2995e-21f,
+ -1.67354e-23f,
+ -1.11885e-23f,
+ -1.78506e-30f,
+ -5.07594e-31f,
+ -3.65799e-31f,
+ -1.43718e-34f,
+ -1.27126e-38f,
+ -0.0f,
+ 0.0f,
+ 1.17549e-38f,
+ 1.56657e-37f,
+ 4.08512e-29f,
+ 3.31357e-28f,
+ 6.25073e-22f,
+ 4.1723e-13f,
+ 1.44343e-09f,
+ 5.27004e-08f,
+ 9.48298e-08f,
+ 5.57888e-07f,
+ 4.89988e-05f,
+ 0.244326f,
+ 1.0f,
+ 12.4895f,
+ 19.0f,
+ 47.0f,
+ 106.0f,
+ 538.324f,
+ 564.536f,
+ 819.124f,
+ 7048.0f,
+ 12611.0f,
+ 19878.0f,
+ 20309.0f,
+ 797056.0f,
+ 1.77219e+09f,
+ 1.51116e+11f,
+ 4.18193e+13f,
+ 3.59167e+16f,
+ 9223372036854775807.0f, // INT64_MAX
+ 18446744073709551615.0f, // UINT64_MAX
+ 3.38211e+19f,
+ 2.67488e+20f,
+ 1.78831e+21f,
+ 9.20914e+21f,
+ 8.35654e+23f,
+ 1.4495e+24f,
+ 5.94015e+25f,
+ 4.43608e+30f,
+ 2.44502e+33f,
+ 2.61152e+33f,
+ 1.38178e+37f,
+ 1.71306e+37f,
+ 3.31899e+38f,
+ 3.40282e+38f,
+ std::numeric_limits<float>::infinity(),
+ nan,
+ -nan,
+ };
return std::vector<float>(&kValues[0], &kValues[arraysize(kValues)]);
}
static std::vector<double> float64_vector() {
static const double nan = std::numeric_limits<double>::quiet_NaN();
- static const double values[] = {
- 0.125, 0.25, 0.375, 0.5, 1.25, -1.75, 2, 5.125, 6.25, 0.0, -0.0,
- 982983.25, 888, 2147483647.0, -999.75, 3.1e7, -2e66, 3e-88,
- -2147483648.0, V8_INFINITY, -V8_INFINITY, -nan, nan, 2147483647.375,
- 2147483647.75, 2147483648.0, 2147483648.25, 2147483649.25,
- -2147483647.0, -2147483647.125, -2147483647.875, -2147483648.25,
- -2147483649.5};
+ static const double values[] = {-2e66,
+ -9223373136366403584.0,
+ -9223372036854775808.0, // INT64_MIN
+ -2147483649.5,
+ -2147483648.25,
+ -2147483648.0,
+ -2147483647.875,
+ -2147483647.125,
+ -2147483647.0,
+ -999.75,
+ -2e66,
+ -1.75,
+ -1.0,
+ -0.5,
+ -0.0,
+ 0.0,
+ 3e-88,
+ 0.125,
+ 0.25,
+ 0.375,
+ 0.5,
+ 1.0,
+ 1.25,
+ 2,
+ 3.1e7,
+ 5.125,
+ 6.25,
+ 888,
+ 982983.25,
+ 2147483647.0,
+ 2147483647.375,
+ 2147483647.75,
+ 2147483648.0,
+ 2147483648.25,
+ 2147483649.25,
+ 9223372036854775807.0, // INT64_MAX
+ 9223373136366403584.0,
+ 18446744073709551615.0, // UINT64_MAX
+ 2e66,
+ V8_INFINITY,
+ -V8_INFINITY,
+ -nan,
+ nan};
return std::vector<double>(&values[0], &values[arraysize(values)]);
}
@@ -159,7 +272,9 @@ class ValueHelper {
0x00003fff, 0x00001fff, 0x00000fff,
0x000007ff, 0x000003ff, 0x000001ff,
0x00003fffffffffff, 0x00001fffffffffff, 0x00000fffffffffff,
- 0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff};
+ 0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff,
+ 0x8000008000000000, 0x8000008000000001, 0x8000000000000400,
+ 0x8000000000000401};
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
diff --git a/deps/v8/test/cctest/expression-type-collector-macros.h b/deps/v8/test/cctest/expression-type-collector-macros.h
index 0b739f40b7..68d69481d9 100644
--- a/deps/v8/test/cctest/expression-type-collector-macros.h
+++ b/deps/v8/test/cctest/expression-type-collector-macros.h
@@ -14,11 +14,30 @@
CHECK_EQ(index, types.size()); \
}
+#ifdef DEBUG
+#define CHECK_TYPE(type) \
+ if (!types[index].bounds.Narrows(type)) { \
+ fprintf(stderr, "Expected:\n"); \
+ fprintf(stderr, " lower: "); \
+ type.lower->Print(); \
+ fprintf(stderr, " upper: "); \
+ type.upper->Print(); \
+ fprintf(stderr, "Actual:\n"); \
+ fprintf(stderr, " lower: "); \
+ types[index].bounds.lower->Print(); \
+ fprintf(stderr, " upper: "); \
+ types[index].bounds.upper->Print(); \
+ } \
+ CHECK(types[index].bounds.Narrows(type));
+#else
+#define CHECK_TYPE(type) CHECK(types[index].bounds.Narrows(type));
+#endif
+
#define CHECK_EXPR(ekind, type) \
CHECK_LT(index, types.size()); \
CHECK(strcmp(#ekind, types[index].kind) == 0); \
CHECK_EQ(depth, types[index].depth); \
- CHECK(types[index].bounds.Narrows(type)); \
+ CHECK_TYPE(type); \
for (int j = (++depth, ++index, 0); j < 1 ? 1 : (--depth, 0); ++j)
#define CHECK_VAR(vname, type) \
diff --git a/deps/v8/test/cctest/expression-type-collector.cc b/deps/v8/test/cctest/expression-type-collector.cc
index bf2d10f306..c5218b3ec4 100644
--- a/deps/v8/test/cctest/expression-type-collector.cc
+++ b/deps/v8/test/cctest/expression-type-collector.cc
@@ -6,9 +6,9 @@
#include "test/cctest/expression-type-collector.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/scopes.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/expression-type-collector.h b/deps/v8/test/cctest/expression-type-collector.h
index dab1f010cd..37bb9a3c02 100644
--- a/deps/v8/test/cctest/expression-type-collector.h
+++ b/deps/v8/test/cctest/expression-type-collector.h
@@ -5,7 +5,7 @@
#ifndef V8_EXPRESSION_TYPE_COLLECTOR_H_
#define V8_EXPRESSION_TYPE_COLLECTOR_H_
-#include "src/ast-expression-visitor.h"
+#include "src/ast/ast-expression-visitor.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index bb26b5a775..0a0860bcc4 100644
--- a/deps/v8/test/cctest/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -2,28 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef HEAP_TESTER_H_
-#define HEAP_TESTER_H_
+#ifndef HEAP_HEAP_TESTER_H_
+#define HEAP_HEAP_TESTER_H_
#include "src/handles.h"
#include "src/heap/spaces.h"
// Tests that should have access to private methods of {v8::internal::Heap}.
// Those tests need to be defined using HEAP_TEST(Name) { ... }.
-#define HEAP_TEST_METHODS(V) \
- V(CompactionSpaceDivideMultiplePages) \
- V(CompactionSpaceDivideSinglePage) \
- V(GCFlags) \
- V(MarkCompactCollector) \
- V(NoPromotion) \
- V(NumberStringCacheSize) \
- V(ObjectGroups) \
- V(Promotion) \
- V(Regression39128) \
- V(ResetWeakHandle) \
- V(StressHandles) \
- V(TestMemoryReducerSampleJsCalls) \
- V(TestSizeOfObjects) \
+#define HEAP_TEST_METHODS(V) \
+ V(CompactionFullAbortedPage) \
+ V(CompactionPartiallyAbortedPage) \
+ V(CompactionPartiallyAbortedPageIntraAbortedPointers) \
+ V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
+ V(CompactionSpaceDivideMultiplePages) \
+ V(CompactionSpaceDivideSinglePage) \
+ V(GCFlags) \
+ V(MarkCompactCollector) \
+ V(NoPromotion) \
+ V(NumberStringCacheSize) \
+ V(ObjectGroups) \
+ V(Promotion) \
+ V(Regression39128) \
+ V(ResetWeakHandle) \
+ V(StressHandles) \
+ V(TestMemoryReducerSampleJsCalls) \
+ V(TestSizeOfObjects) \
V(WriteBarriersInCopyJSObject)
@@ -79,4 +83,4 @@ class HeapTester {
} // namespace internal
} // namespace v8
-#endif // HEAP_TESTER_H_
+#endif // HEAP_HEAP_TESTER_H_
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 864eb12b6a..1b969b21ff 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -25,16 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "src/accessors.h"
#include "src/api.h"
-#include "test/cctest/heap-tester.h"
-
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
using namespace v8::internal;
@@ -66,7 +63,7 @@ AllocationResult v8::internal::HeapTester::AllocateAfterFailures() {
static const int kLargeObjectSpaceFillerLength = 3 * (Page::kPageSize / 10);
static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
kLargeObjectSpaceFillerLength);
- DCHECK(kLargeObjectSpaceFillerSize > heap->old_space()->AreaSize());
+ CHECK(kLargeObjectSpaceFillerSize > heap->old_space()->AreaSize());
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
heap->AllocateFixedArray(
kLargeObjectSpaceFillerLength, TENURED).ToObjectChecked();
@@ -152,9 +149,9 @@ TEST(StressJS) {
// Patch the map to have an accessor for "get".
Handle<Map> map(function->initial_map());
Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
- DCHECK(instance_descriptors->IsEmpty());
+ CHECK(instance_descriptors->IsEmpty());
- PropertyAttributes attrs = static_cast<PropertyAttributes>(0);
+ PropertyAttributes attrs = NONE;
Handle<AccessorInfo> foreign = TestAccessorInfo(isolate, attrs);
Map::EnsureDescriptorSlack(map, 1);
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
new file mode 100644
index 0000000000..064e5a82c0
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -0,0 +1,340 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
+
+namespace v8 {
+namespace internal {
+
+static void CheckInvariantsOfAbortedPage(Page* page) {
+ // Check invariants:
+ // 1) Markbits are cleared
+ // 2) The page is not marked as evacuation candidate anymore
+ // 3) The page is not marked as aborted compaction anymore.
+ CHECK(page->markbits()->IsClean());
+ CHECK(!page->IsEvacuationCandidate());
+ CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+}
+
+
+HEAP_TEST(CompactionFullAbortedPage) {
+ // Test the scenario where we reach OOM during compaction and the whole page
+ // is aborted.
+
+ // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+ // we can reach the state of a half aborted page.
+ FLAG_concurrent_sweeping = false;
+ FLAG_manual_evacuation_candidates_selection = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ {
+ HandleScope scope1(isolate);
+ PageIterator it(heap->old_space());
+ while (it.has_next()) {
+ it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ {
+ HandleScope scope2(isolate);
+ CHECK(heap->old_space()->Expand());
+ auto compaction_page_handles =
+ CreatePadding(heap, Page::kAllocatableMemory, TENURED);
+ Page* to_be_aborted_page =
+ Page::FromAddress(compaction_page_handles.front()->address());
+ to_be_aborted_page->SetFlag(
+ MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+
+ heap->set_force_oom(true);
+ heap->CollectAllGarbage();
+
+ // Check that all handles still point to the same page, i.e., compaction
+ // has been aborted on the page.
+ for (Handle<FixedArray> object : compaction_page_handles) {
+ CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
+ }
+ CheckInvariantsOfAbortedPage(to_be_aborted_page);
+ }
+ }
+}
+
+
+HEAP_TEST(CompactionPartiallyAbortedPage) {
+ // Test the scenario where we reach OOM during compaction and parts of the
+ // page have already been migrated to a new one.
+
+ // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+ // we can reach the state of a half aborted page.
+ FLAG_concurrent_sweeping = false;
+ FLAG_manual_evacuation_candidates_selection = true;
+
+ const int object_size = 128 * KB;
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ {
+ HandleScope scope1(isolate);
+ PageIterator it(heap->old_space());
+ while (it.has_next()) {
+ it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ {
+ HandleScope scope2(isolate);
+ // Fill another page with objects of size {object_size} (last one is
+ // properly adjusted).
+ CHECK(heap->old_space()->Expand());
+ auto compaction_page_handles =
+ CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+ Page* to_be_aborted_page =
+ Page::FromAddress(compaction_page_handles.front()->address());
+ to_be_aborted_page->SetFlag(
+ MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+
+ {
+ // Add another page that is filled with {num_objects} objects of size
+ // {object_size}.
+ HandleScope scope3(isolate);
+ CHECK(heap->old_space()->Expand());
+ const int num_objects = 3;
+ std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding(
+ heap, object_size * num_objects, TENURED, object_size);
+ Page* page_to_fill =
+ Page::FromAddress(page_to_fill_handles.front()->address());
+
+ heap->set_force_oom(true);
+ heap->CollectAllGarbage();
+
+ bool migration_aborted = false;
+ for (Handle<FixedArray> object : compaction_page_handles) {
+ // Once compaction has been aborted, all following objects still have
+ // to be on the initial page.
+ CHECK(!migration_aborted ||
+ (Page::FromAddress(object->address()) == to_be_aborted_page));
+ if (Page::FromAddress(object->address()) == to_be_aborted_page) {
+ // This object has not been migrated.
+ migration_aborted = true;
+ } else {
+ CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
+ }
+ }
+ // Check that we actually created a scenario with a partially aborted
+ // page.
+ CHECK(migration_aborted);
+ CheckInvariantsOfAbortedPage(to_be_aborted_page);
+ }
+ }
+ }
+}
+
+
+HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
+ // Test the scenario where we reach OOM during compaction and parts of the
+ // page have already been migrated to a new one. Objects on the aborted page
+ // are linked together. This test makes sure that intra-aborted page pointers
+ // get properly updated.
+
+ // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+ // we can reach the state of a half aborted page.
+ FLAG_concurrent_sweeping = false;
+ FLAG_manual_evacuation_candidates_selection = true;
+
+ const int object_size = 128 * KB;
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ {
+ HandleScope scope1(isolate);
+ Handle<FixedArray> root_array =
+ isolate->factory()->NewFixedArray(10, TENURED);
+
+ PageIterator it(heap->old_space());
+ while (it.has_next()) {
+ it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ Page* to_be_aborted_page = nullptr;
+ {
+ HandleScope temporary_scope(isolate);
+ // Fill a fresh page with objects of size {object_size} (last one is
+ // properly adjusted).
+ CHECK(heap->old_space()->Expand());
+ std::vector<Handle<FixedArray>> compaction_page_handles =
+ CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+ to_be_aborted_page =
+ Page::FromAddress(compaction_page_handles.front()->address());
+ to_be_aborted_page->SetFlag(
+ MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
+ compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
+ }
+ root_array->set(0, *compaction_page_handles.back());
+ }
+
+ {
+ // Add another page that is filled with {num_objects} objects of size
+ // {object_size}.
+ HandleScope scope3(isolate);
+ CHECK(heap->old_space()->Expand());
+ const int num_objects = 2;
+ int used_memory = object_size * num_objects;
+ std::vector<Handle<FixedArray>> page_to_fill_handles =
+ CreatePadding(heap, used_memory, TENURED, object_size);
+ Page* page_to_fill =
+ Page::FromAddress(page_to_fill_handles.front()->address());
+
+ heap->set_force_oom(true);
+ heap->CollectAllGarbage();
+
+ // The following check makes sure that we compacted "some" objects, while
+ // leaving others in place.
+ bool in_place = true;
+ Handle<FixedArray> current = root_array;
+ while (current->get(0) != heap->undefined_value()) {
+ current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
+ CHECK(current->IsFixedArray());
+ if (Page::FromAddress(current->address()) != to_be_aborted_page) {
+ in_place = false;
+ }
+ bool on_aborted_page =
+ Page::FromAddress(current->address()) == to_be_aborted_page;
+ bool on_fill_page =
+ Page::FromAddress(current->address()) == page_to_fill;
+ CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
+ }
+ // Check that we at least migrated one object, as otherwise the test would
+ // not trigger.
+ CHECK(!in_place);
+ CheckInvariantsOfAbortedPage(to_be_aborted_page);
+ }
+ }
+}
+
+
+HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
+ // Test the scenario where we reach OOM during compaction and parts of the
+ // page have already been migrated to a new one. Objects on the aborted page
+ // are linked together and the very first object on the aborted page points
+ // into new space. The test verifies that the store buffer entries are
+ // properly cleared and rebuilt after aborting a page. Failing to do so can
+ // result in other objects being allocated in the free space where their
+ // payload looks like a valid new space pointer.
+
+ // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
+ // we can reach the state of a half aborted page.
+ FLAG_concurrent_sweeping = false;
+ FLAG_manual_evacuation_candidates_selection = true;
+
+ const int object_size = 128 * KB;
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ {
+ HandleScope scope1(isolate);
+ Handle<FixedArray> root_array =
+ isolate->factory()->NewFixedArray(10, TENURED);
+ PageIterator it(heap->old_space());
+ while (it.has_next()) {
+ it.next()->SetFlag(Page::NEVER_ALLOCATE_ON_PAGE);
+ }
+
+ Page* to_be_aborted_page = nullptr;
+ {
+ HandleScope temporary_scope(isolate);
+ // Fill another page with objects of size {object_size} (last one is
+ // properly adjusted).
+ CHECK(heap->old_space()->Expand());
+ auto compaction_page_handles =
+ CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+ // Sanity check that we have enough space for linking up arrays.
+ CHECK_GE(compaction_page_handles.front()->length(), 2);
+ to_be_aborted_page =
+ Page::FromAddress(compaction_page_handles.front()->address());
+ to_be_aborted_page->SetFlag(
+ MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+
+ for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
+ compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
+ }
+ root_array->set(0, *compaction_page_handles.back());
+ Handle<FixedArray> new_space_array =
+ isolate->factory()->NewFixedArray(1, NOT_TENURED);
+ CHECK(heap->InNewSpace(*new_space_array));
+ compaction_page_handles.front()->set(1, *new_space_array);
+ }
+
+ {
+ // Add another page that is filled with {num_objects} objects of size
+ // {object_size}.
+ HandleScope scope3(isolate);
+ CHECK(heap->old_space()->Expand());
+ const int num_objects = 2;
+ int used_memory = object_size * num_objects;
+ std::vector<Handle<FixedArray>> page_to_fill_handles =
+ CreatePadding(heap, used_memory, TENURED, object_size);
+ Page* page_to_fill =
+ Page::FromAddress(page_to_fill_handles.front()->address());
+
+ heap->set_force_oom(true);
+ heap->CollectAllGarbage();
+
+ // The following check makes sure that we compacted "some" objects, while
+ // leaving others in place.
+ bool in_place = true;
+ Handle<FixedArray> current = root_array;
+ while (current->get(0) != heap->undefined_value()) {
+ current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
+ CHECK(!heap->InNewSpace(*current));
+ CHECK(current->IsFixedArray());
+ if (Page::FromAddress(current->address()) != to_be_aborted_page) {
+ in_place = false;
+ }
+ bool on_aborted_page =
+ Page::FromAddress(current->address()) == to_be_aborted_page;
+ bool on_fill_page =
+ Page::FromAddress(current->address()) == page_to_fill;
+ CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
+ }
+ // Check that we at least migrated one object, as otherwise the test would
+ // not trigger.
+ CHECK(!in_place);
+ CheckInvariantsOfAbortedPage(to_be_aborted_page);
+
+ // Allocate a new object in new space.
+ Handle<FixedArray> holder =
+ isolate->factory()->NewFixedArray(10, NOT_TENURED);
+ // Create a broken address that looks like a tagged pointer to a new space
+ // object.
+ Address broken_address = holder->address() + 2 * kPointerSize + 1;
+ // Convert it to a vector to create a string from it.
+ Vector<const uint8_t> string_to_broken_addresss(
+ reinterpret_cast<const uint8_t*>(&broken_address), 8);
+
+ Handle<String> string;
+ do {
+ // We know that the interesting slot will be on the aborted page and
+ // hence we allocate until we get our string on the aborted page.
+ // We used slot 1 in the fixed size array which corresponds to the
+ // the first word in the string. Since the first object definitely
+ // migrated we can just allocate until we hit the aborted page.
+ string = isolate->factory()
+ ->NewStringFromOneByte(string_to_broken_addresss, TENURED)
+ .ToHandleChecked();
+ } while (Page::FromAddress(string->address()) != to_be_aborted_page);
+
+ // If store buffer entries are not properly filtered/reset for aborted
+ // pages we have now a broken address at an object slot in old space and
+ // the following scavenge will crash.
+ heap->CollectGarbage(NEW_SPACE);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 41e6c24b9f..726887a23a 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include <utility>
@@ -41,9 +38,11 @@
#include "src/heap/memory-reducer.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/regexp/jsregexp.h"
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap-tester.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
#include "test/cctest/test-feedback-vector.h"
@@ -911,7 +910,7 @@ TEST(ObjectProperties) {
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
// delete first
- JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
+ CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first and then second
@@ -921,9 +920,9 @@ TEST(ObjectProperties) {
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
// delete first and then second
- JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
+ CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
- JSReceiver::DeleteProperty(obj, second, SLOPPY).Check();
+ CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
@@ -934,9 +933,9 @@ TEST(ObjectProperties) {
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
// delete second and then first
- JSReceiver::DeleteProperty(obj, second, SLOPPY).Check();
+ CHECK(Just(true) == JSReceiver::DeleteProperty(obj, second, SLOPPY));
CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
- JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
+ CHECK(Just(true) == JSReceiver::DeleteProperty(obj, first, SLOPPY));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
@@ -1500,10 +1499,10 @@ TEST(TestCodeFlushingIncrementalAbort) {
// disabled.
int position = 0;
Handle<Object> breakpoint_object(Smi::FromInt(0), isolate);
- EnableDebugger();
+ EnableDebugger(CcTest::isolate());
isolate->debug()->SetBreakPoint(function, breakpoint_object, &position);
isolate->debug()->ClearAllBreakPoints();
- DisableDebugger();
+ DisableDebugger(CcTest::isolate());
// Force optimization now that code flushing is disabled.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1568,18 +1567,26 @@ TEST(CompilationCacheCachingBehavior) {
language_mode);
CHECK(!info.is_null());
- heap->CollectAllGarbage();
-
- // On second compilation, the hash is replaced by a real cache entry mapping
- // the source to the shared function info containing the code.
- info = compilation_cache->LookupScript(
- source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(false, true, false), native_context,
- language_mode);
- CHECK(!info.is_null());
+ // Check that the code cache entry survives at least on GC.
+ // (Unless --optimize-for-size, in which case it might get collected
+ // immediately.)
+ if (!FLAG_optimize_for_size) {
+ heap->CollectAllGarbage();
+ info = compilation_cache->LookupScript(
+ source, Handle<Object>(), 0, 0,
+ v8::ScriptOriginOptions(false, true, false), native_context,
+ language_mode);
+ CHECK(!info.is_null());
+ }
+ // Progress code age until it's old and ready for GC.
while (!info.ToHandleChecked()->code()->IsOld()) {
- info.ToHandleChecked()->code()->MakeOlder(NO_MARKING_PARITY);
+ // To guarantee progress, we have to MakeOlder with different parities.
+ // We can't just use NO_MARKING_PARITY, since e.g. kExecutedOnceCodeAge is
+ // always NO_MARKING_PARITY and the code age only progresses if the parity
+ // is different.
+ info.ToHandleChecked()->code()->MakeOlder(ODD_MARKING_PARITY);
+ info.ToHandleChecked()->code()->MakeOlder(EVEN_MARKING_PARITY);
}
heap->CollectAllGarbage();
@@ -1643,8 +1650,7 @@ int CountNativeContexts() {
count++;
object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
}
- // Subtract one to compensate for the code stub context that is always present
- return count - 1;
+ return count;
}
@@ -1783,8 +1789,7 @@ static int CountNativeContextsWithGC(Isolate* isolate, int n) {
Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK),
isolate);
}
- // Subtract one to compensate for the code stub context that is always present
- return count - 1;
+ return count;
}
@@ -1869,7 +1874,7 @@ TEST(TestSizeOfRegExpCode) {
// Adjust source below and this check to match
// RegExpImple::kRegExpTooLargeToOptimize.
- DCHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB);
+ CHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 20 * KB);
// Compile a regexp that is much larger if we are using regexp optimizations.
CompileRun(
@@ -2361,10 +2366,7 @@ static int NumberOfGlobalObjects() {
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsJSGlobalObject()) count++;
}
- // Subtract two to compensate for the two global objects (not global
- // JSObjects, of which there would only be one) that are part of the code stub
- // context, which is always present.
- return count - 2;
+ return count;
}
@@ -2384,7 +2386,7 @@ TEST(LeakNativeContextViaMap) {
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(4, NumberOfGlobalObjects());
+ CHECK_EQ(2, NumberOfGlobalObjects());
{
v8::HandleScope inner_scope(isolate);
@@ -2410,7 +2412,7 @@ TEST(LeakNativeContextViaMap) {
isolate->ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(2, NumberOfGlobalObjects());
+ CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
@@ -2433,7 +2435,7 @@ TEST(LeakNativeContextViaFunction) {
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(4, NumberOfGlobalObjects());
+ CHECK_EQ(2, NumberOfGlobalObjects());
{
v8::HandleScope inner_scope(isolate);
@@ -2459,7 +2461,7 @@ TEST(LeakNativeContextViaFunction) {
isolate->ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(2, NumberOfGlobalObjects());
+ CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
@@ -2480,7 +2482,7 @@ TEST(LeakNativeContextViaMapKeyed) {
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(4, NumberOfGlobalObjects());
+ CHECK_EQ(2, NumberOfGlobalObjects());
{
v8::HandleScope inner_scope(isolate);
@@ -2506,7 +2508,7 @@ TEST(LeakNativeContextViaMapKeyed) {
isolate->ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(2, NumberOfGlobalObjects());
+ CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
@@ -2527,7 +2529,7 @@ TEST(LeakNativeContextViaMapProto) {
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(4, NumberOfGlobalObjects());
+ CHECK_EQ(2, NumberOfGlobalObjects());
{
v8::HandleScope inner_scope(isolate);
@@ -2557,7 +2559,7 @@ TEST(LeakNativeContextViaMapProto) {
isolate->ContextDisposedNotification();
}
CcTest::heap()->CollectAllAvailableGarbage();
- CHECK_EQ(2, NumberOfGlobalObjects());
+ CHECK_EQ(1, NumberOfGlobalObjects());
ctx2p.Reset();
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
@@ -2620,73 +2622,6 @@ TEST(InstanceOfStubWriteBarrier) {
}
-static int NumberOfProtoTransitions(Map* map) {
- return TransitionArray::NumberOfPrototypeTransitions(
- TransitionArray::GetPrototypeTransitions(map));
-}
-
-
-TEST(PrototypeTransitionClearing) {
- if (FLAG_never_compact) return;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
-
- CompileRun("var base = {};");
- i::Handle<JSObject> baseObject =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
- CcTest::global()->Get(ctx, v8_str("base")).ToLocalChecked()));
-
- int initialTransitions = NumberOfProtoTransitions(baseObject->map());
-
- CompileRun(
- "var live = [];"
- "for (var i = 0; i < 10; i++) {"
- " var object = {};"
- " var prototype = {};"
- " object.__proto__ = prototype;"
- " if (i >= 3) live.push(object, prototype);"
- "}");
-
- // Verify that only dead prototype transitions are cleared.
- CHECK_EQ(initialTransitions + 10,
- NumberOfProtoTransitions(baseObject->map()));
- CcTest::heap()->CollectAllGarbage();
- const int transitions = 10 - 3;
- CHECK_EQ(initialTransitions + transitions,
- NumberOfProtoTransitions(baseObject->map()));
-
- // Verify that prototype transitions array was compacted.
- FixedArray* trans =
- TransitionArray::GetPrototypeTransitions(baseObject->map());
- for (int i = initialTransitions; i < initialTransitions + transitions; i++) {
- int j = TransitionArray::kProtoTransitionHeaderSize + i;
- CHECK(trans->get(j)->IsWeakCell());
- CHECK(WeakCell::cast(trans->get(j))->value()->IsMap());
- }
-
- // Make sure next prototype is placed on an old-space evacuation candidate.
- Handle<JSObject> prototype;
- PagedSpace* space = CcTest::heap()->old_space();
- {
- AlwaysAllocateScope always_allocate(isolate);
- SimulateFullSpace(space);
- prototype = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS,
- Strength::WEAK, TENURED);
- }
-
- // Add a prototype on an evacuation candidate and verify that transition
- // clearing correctly records slots in prototype transition array.
- i::FLAG_always_compact = true;
- Handle<Map> map(baseObject->map());
- CHECK(!space->LastPage()->Contains(
- TransitionArray::GetPrototypeTransitions(*map)->address()));
- CHECK(space->LastPage()->Contains(prototype->address()));
-}
-
-
TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
@@ -2877,7 +2812,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
->Int32Value(ctx)
.FromJust());
- i::Handle<JSObject> o =
+ i::Handle<JSReceiver> o =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
CHECK(CcTest::heap()->InNewSpace(*o));
@@ -2918,14 +2853,14 @@ TEST(OptimizedPretenuringAllocationFolding) {
v8::Local<v8::Value> int_array =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
- i::Handle<JSObject> int_array_handle =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array));
+ i::Handle<JSObject> int_array_handle = i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array)));
v8::Local<v8::Value> double_array =
v8::Object::Cast(*res)->Get(ctx, v8_str("1")).ToLocalChecked();
- i::Handle<JSObject> double_array_handle =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array));
+ i::Handle<JSObject> double_array_handle = i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array)));
- i::Handle<JSObject> o =
+ i::Handle<JSReceiver> o =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
CHECK(CcTest::heap()->InOldSpace(*o));
CHECK(CcTest::heap()->InOldSpace(*int_array_handle));
@@ -2967,8 +2902,8 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
v8::Local<v8::Value> res = CompileRun(source.start());
- i::Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ i::Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(o->elements()));
CHECK(CcTest::heap()->InOldSpace(*o));
@@ -3008,8 +2943,8 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
v8::Local<v8::Value> res = CompileRun(source.start());
- i::Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ i::Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(*o));
FieldIndex idx1 = FieldIndex::ForPropertyIndex(o->map(), 0);
@@ -3065,8 +3000,8 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
v8::Local<v8::Value> res = CompileRun(source.start());
- i::Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ i::Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(*o));
CHECK(CcTest::heap()->InOldSpace(o->properties()));
@@ -3105,8 +3040,8 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
v8::Local<v8::Value> res = CompileRun(source.start());
- i::Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ i::Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(o->elements()));
CHECK(CcTest::heap()->InOldSpace(*o));
@@ -3146,14 +3081,15 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
v8::Local<v8::Value> int_array =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
- i::Handle<JSObject> int_array_handle =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array));
+ i::Handle<JSObject> int_array_handle = i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array)));
v8::Local<v8::Value> double_array =
v8::Object::Cast(*res)->Get(ctx, v8_str("1")).ToLocalChecked();
- i::Handle<JSObject> double_array_handle =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array));
+ i::Handle<JSObject> double_array_handle = i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array)));
- Handle<JSObject> o = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(*o));
CHECK(CcTest::heap()->InOldSpace(*int_array_handle));
CHECK(CcTest::heap()->InOldSpace(int_array_handle->elements()));
@@ -3196,14 +3132,15 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
v8::Local<v8::Value> int_array_1 =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
- Handle<JSObject> int_array_handle_1 =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array_1));
+ Handle<JSObject> int_array_handle_1 = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array_1)));
v8::Local<v8::Value> int_array_2 =
v8::Object::Cast(*res)->Get(ctx, v8_str("1")).ToLocalChecked();
- Handle<JSObject> int_array_handle_2 =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array_2));
+ Handle<JSObject> int_array_handle_2 = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(int_array_2)));
- Handle<JSObject> o = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(*o));
CHECK(CcTest::heap()->InOldSpace(*int_array_handle_1));
CHECK(CcTest::heap()->InOldSpace(int_array_handle_1->elements()));
@@ -3246,15 +3183,15 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
v8::Local<v8::Value> double_array_1 =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
- i::Handle<JSObject> double_array_handle_1 =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array_1));
+ i::Handle<JSObject> double_array_handle_1 = i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array_1)));
v8::Local<v8::Value> double_array_2 =
v8::Object::Cast(*res)->Get(ctx, v8_str("1")).ToLocalChecked();
- i::Handle<JSObject> double_array_handle_2 =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array_2));
+ i::Handle<JSObject> double_array_handle_2 = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(double_array_2)));
- i::Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ i::Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InOldSpace(*o));
CHECK(CcTest::heap()->InOldSpace(*double_array_handle_1));
CHECK(CcTest::heap()->InOldSpace(double_array_handle_1->elements()));
@@ -3286,8 +3223,8 @@ TEST(OptimizedAllocationArrayLiterals) {
->Int32Value(ctx)
.FromJust());
- i::Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
+ i::Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
CHECK(CcTest::heap()->InNewSpace(o->elements()));
}
@@ -3321,8 +3258,9 @@ TEST(Regress1465) {
CompileRun("var root = new F;");
}
- i::Handle<JSObject> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
- CcTest::global()->Get(ctx, v8_str("root")).ToLocalChecked()));
+ i::Handle<JSReceiver> root =
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ CcTest::global()->Get(ctx, v8_str("root")).ToLocalChecked()));
// Count number of live transitions before marking.
int transitions_before = CountMapTransitions(root->map());
@@ -3352,10 +3290,11 @@ static void AddTransitions(int transitions_count) {
static i::Handle<JSObject> GetByName(const char* name) {
- return v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
- CcTest::global()
- ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(name))
- .ToLocalChecked()));
+ return i::Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ CcTest::global()
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(name))
+ .ToLocalChecked())));
}
@@ -3475,7 +3414,7 @@ TEST(TransitionArraySimpleToFull) {
CompileRun("o = new F;"
"root = new F");
root = GetByName("root");
- DCHECK(TransitionArray::IsSimpleTransition(root->map()->raw_transitions()));
+ CHECK(TransitionArray::IsSimpleTransition(root->map()->raw_transitions()));
AddPropertyTo(2, root, "happy");
// Count number of live transitions after marking. Note that one transition
@@ -3515,7 +3454,7 @@ TEST(Regress2143a) {
// Explicitly request GC to perform final marking step and sweeping.
CcTest::heap()->CollectAllGarbage();
- Handle<JSObject> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
->Get(CcTest::isolate()->GetCurrentContext(), v8_str("root"))
.ToLocalChecked()));
@@ -3558,7 +3497,7 @@ TEST(Regress2143b) {
// Explicitly request GC to perform final marking step and sweeping.
CcTest::heap()->CollectAllGarbage();
- Handle<JSObject> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
->Get(CcTest::isolate()->GetCurrentContext(), v8_str("root"))
.ToLocalChecked()));
@@ -3670,19 +3609,16 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
v8::Local<v8::Value> fun1, fun2;
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
{
- LocalContext env;
CompileRun("function fun() {};");
- fun1 = env->Global()->Get(env.local(), v8_str("fun")).ToLocalChecked();
+ fun1 = CcTest::global()->Get(ctx, v8_str("fun")).ToLocalChecked();
}
{
- LocalContext env;
CompileRun("function fun() {};");
- fun2 = env->Global()->Get(env.local(), v8_str("fun")).ToLocalChecked();
+ fun2 = CcTest::global()->Get(ctx, v8_str("fun")).ToLocalChecked();
}
- // Prepare function f that contains type feedback for closures
- // originating from two different native contexts.
+ // Prepare function f that contains type feedback for the two closures.
CHECK(CcTest::global()->Set(ctx, v8_str("fun1"), fun1).FromJust());
CHECK(CcTest::global()->Set(ctx, v8_str("fun2"), fun2).FromJust());
CompileRun("function f(a, b) { a(); b(); } f(fun1, fun2);");
@@ -3713,7 +3649,6 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
static Code* FindFirstIC(Code* code, Code::Kind kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
@@ -3752,39 +3687,6 @@ static void CheckVectorICCleared(Handle<JSFunction> f, int slot_index) {
}
-TEST(ICInBuiltInIsClearedAppropriately) {
- if (i::FLAG_always_opt) return;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<JSFunction> apply;
- {
- LocalContext env;
- v8::Local<v8::Value> res = CompileRun("Function.apply");
- i::Handle<JSObject> maybe_apply =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res));
- apply = i::Handle<JSFunction>::cast(maybe_apply);
- i::Handle<TypeFeedbackVector> vector(apply->shared()->feedback_vector());
- FeedbackVectorHelper feedback_helper(vector);
- CHECK_EQ(1, feedback_helper.slot_count());
- CheckVectorIC(apply, 0, UNINITIALIZED);
- CompileRun(
- "function b(a1, a2, a3) { return a1 + a2 + a3; }"
- "function fun(bar) { bar.apply({}, [1, 2, 3]); };"
- "fun(b); fun(b)");
- CheckVectorIC(apply, 0, MONOMORPHIC);
- }
-
- // Fire context dispose notification.
- CcTest::isolate()->ContextDisposedNotification();
- SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
-
- // The IC in apply has been cleared, ready to learn again.
- CheckVectorIC(apply, 0, PREMONOMORPHIC);
-}
-
-
TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -4500,6 +4402,115 @@ TEST(Regress514122) {
}
+TEST(OptimizedCodeMapReuseEntries) {
+ i::FLAG_flush_optimized_code_cache = false;
+ i::FLAG_allow_natives_syntax = true;
+ // BUG(v8:4598): Since TurboFan doesn't treat maps in code weakly, we can't
+ // run this test.
+ if (i::FLAG_turbo) return;
+ CcTest::InitializeVM();
+ v8::Isolate* v8_isolate = CcTest::isolate();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+
+ // Create 3 contexts, allow the 2nd one to be disposed, and verify that
+ // a 4th context will re-use the weak slots in the optimized code map
+ // to hold data, rather than expanding the map.
+ v8::Local<v8::Context> c1 = v8::Context::New(v8_isolate);
+ const char* source = "function foo(x) { var l = [1]; return x+l[0]; }";
+ v8::ScriptCompiler::Source script_source(
+ v8::String::NewFromUtf8(v8_isolate, source, v8::NewStringType::kNormal)
+ .ToLocalChecked());
+ v8::Local<v8::UnboundScript> indep =
+ v8::ScriptCompiler::CompileUnboundScript(v8_isolate, &script_source)
+ .ToLocalChecked();
+ const char* toplevel = "foo(3); %OptimizeFunctionOnNextCall(foo); foo(3);";
+ // Perfrom one initial GC to enable code flushing.
+ heap->CollectAllGarbage();
+
+ c1->Enter();
+ indep->BindToCurrentContext()->Run(c1).ToLocalChecked();
+ CompileRun(toplevel);
+
+ Handle<SharedFunctionInfo> shared;
+ Handle<JSFunction> foo = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CcTest::global()->Get(c1, v8_str("foo")).ToLocalChecked())));
+ CHECK(foo->shared()->is_compiled());
+ shared = handle(foo->shared());
+ c1->Exit();
+
+ {
+ HandleScope scope(isolate);
+ v8::Local<v8::Context> c2 = v8::Context::New(v8_isolate);
+ c2->Enter();
+ indep->BindToCurrentContext()->Run(c2).ToLocalChecked();
+ CompileRun(toplevel);
+ c2->Exit();
+ }
+
+ {
+ HandleScope scope(isolate);
+ v8::Local<v8::Context> c3 = v8::Context::New(v8_isolate);
+ c3->Enter();
+ indep->BindToCurrentContext()->Run(c3).ToLocalChecked();
+ CompileRun(toplevel);
+ c3->Exit();
+
+ // Now, collect garbage. Context c2 should have no roots to it, and it's
+ // entry in the optimized code map should be free for a new context.
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage();
+ }
+
+ Handle<FixedArray> optimized_code_map =
+ handle(shared->optimized_code_map());
+ // There should be 3 entries in the map.
+ CHECK_EQ(
+ 3, ((optimized_code_map->length() - SharedFunctionInfo::kEntriesStart) /
+ SharedFunctionInfo::kEntryLength));
+ // But one of them (formerly for c2) should be cleared.
+ int cleared_count = 0;
+ for (int i = SharedFunctionInfo::kEntriesStart;
+ i < optimized_code_map->length();
+ i += SharedFunctionInfo::kEntryLength) {
+ cleared_count +=
+ WeakCell::cast(
+ optimized_code_map->get(i + SharedFunctionInfo::kContextOffset))
+ ->cleared()
+ ? 1
+ : 0;
+ }
+ CHECK_EQ(1, cleared_count);
+
+ // Verify that a new context uses the cleared entry rather than creating a
+ // new
+ // optimized code map array.
+ v8::Local<v8::Context> c4 = v8::Context::New(v8_isolate);
+ c4->Enter();
+ indep->BindToCurrentContext()->Run(c4).ToLocalChecked();
+ CompileRun(toplevel);
+ c4->Exit();
+ CHECK_EQ(*optimized_code_map, shared->optimized_code_map());
+
+ // Now each entry is in use.
+ cleared_count = 0;
+ for (int i = SharedFunctionInfo::kEntriesStart;
+ i < optimized_code_map->length();
+ i += SharedFunctionInfo::kEntryLength) {
+ cleared_count +=
+ WeakCell::cast(
+ optimized_code_map->get(i + SharedFunctionInfo::kContextOffset))
+ ->cleared()
+ ? 1
+ : 0;
+ }
+ CHECK_EQ(0, cleared_count);
+ }
+}
+
+
TEST(Regress513496) {
i::FLAG_flush_optimized_code_cache = false;
i::FLAG_allow_natives_syntax = true;
@@ -4623,7 +4634,7 @@ TEST(DeferredHandles) {
}
// An entire block of handles has been filled.
// Next handle would require a new block.
- DCHECK(data->next == data->limit);
+ CHECK(data->next == data->limit);
DeferredHandleScope deferred(isolate);
DummyVisitor visitor;
@@ -4646,8 +4657,8 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
}
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- DCHECK(marking->IsComplete() ||
- marking->IsReadyToOverApproximateWeakClosure());
+ CHECK(marking->IsComplete() ||
+ marking->IsReadyToOverApproximateWeakClosure());
}
@@ -4722,12 +4733,12 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
CompileRun("%OptimizeFunctionOnNextCall(bar); bar();");
- DependentCode::GroupStartIndexes starts(site->dependent_code());
- CHECK_GE(starts.number_of_entries(), 1);
- int index = starts.at(DependentCode::kAllocationSiteTransitionChangedGroup);
- CHECK(site->dependent_code()->object_at(index)->IsWeakCell());
+ CHECK_EQ(DependentCode::kAllocationSiteTransitionChangedGroup,
+ site->dependent_code()->group());
+ CHECK_EQ(1, site->dependent_code()->count());
+ CHECK(site->dependent_code()->object_at(0)->IsWeakCell());
Code* function_bar = Code::cast(
- WeakCell::cast(site->dependent_code()->object_at(index))->value());
+ WeakCell::cast(site->dependent_code()->object_at(0))->value());
Handle<JSFunction> bar_handle = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()
@@ -4744,10 +4755,8 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// The site still exists because of our global handle, but the code is no
// longer referred to by dependent_code().
- DependentCode::GroupStartIndexes starts(site->dependent_code());
- int index = starts.at(DependentCode::kAllocationSiteTransitionChangedGroup);
- CHECK(site->dependent_code()->object_at(index)->IsWeakCell() &&
- WeakCell::cast(site->dependent_code()->object_at(index))->cleared());
+ CHECK(site->dependent_code()->object_at(0)->IsWeakCell() &&
+ WeakCell::cast(site->dependent_code()->object_at(0))->cleared());
}
@@ -4766,17 +4775,19 @@ TEST(CellsInOptimizedCodeAreWeak) {
LocalContext context;
HandleScope scope(heap->isolate());
- CompileRun("bar = (function() {"
- " function bar() {"
- " return foo(1);"
- " };"
- " var foo = function(x) { with (x) { return 1 + x; } };"
- " bar(foo);"
- " bar(foo);"
- " bar(foo);"
- " %OptimizeFunctionOnNextCall(bar);"
- " bar(foo);"
- " return bar;})();");
+ CompileRun(
+ "bar = (function() {"
+ " function bar() {"
+ " return foo(1);"
+ " };"
+ " var foo = function(x) { with (x) { return 1 + x; } };"
+ " %NeverOptimizeFunction(foo);"
+ " bar(foo);"
+ " bar(foo);"
+ " bar(foo);"
+ " %OptimizeFunctionOnNextCall(bar);"
+ " bar(foo);"
+ " return bar;})();");
Handle<JSFunction> bar = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CcTest::global()
@@ -4790,7 +4801,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
heap->CollectAllGarbage();
}
- DCHECK(code->marked_for_deoptimization());
+ CHECK(code->marked_for_deoptimization());
}
@@ -4809,15 +4820,17 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
LocalContext context;
HandleScope scope(heap->isolate());
- CompileRun("function bar() {"
- " return foo(1);"
- "};"
- "function foo(x) { with (x) { return 1 + x; } };"
- "bar();"
- "bar();"
- "bar();"
- "%OptimizeFunctionOnNextCall(bar);"
- "bar();");
+ CompileRun(
+ "function bar() {"
+ " return foo(1);"
+ "};"
+ "function foo(x) { with (x) { return 1 + x; } };"
+ "%NeverOptimizeFunction(foo);"
+ "bar();"
+ "bar();"
+ "bar();"
+ "%OptimizeFunctionOnNextCall(bar);"
+ "bar();");
Handle<JSFunction> bar = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CcTest::global()
@@ -4831,7 +4844,7 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
heap->CollectAllGarbage();
}
- DCHECK(code->marked_for_deoptimization());
+ CHECK(code->marked_for_deoptimization());
}
@@ -4947,7 +4960,8 @@ TEST(NextCodeLinkIsWeak) {
static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
i::byte buffer[i::Assembler::kMinimalBufferSize];
- MacroAssembler masm(isolate, buffer, sizeof(buffer));
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
CodeDesc desc;
masm.Push(isolate->factory()->undefined_value());
masm.Drop(1);
@@ -5519,7 +5533,7 @@ TEST(Regress357137) {
.ToLocalChecked(),
v8::FunctionTemplate::New(isolate, RequestInterrupt));
v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
- DCHECK(!context.IsEmpty());
+ CHECK(!context.IsEmpty());
v8::Context::Scope cscope(context);
v8::Local<v8::Value> result = CompileRun(
@@ -5573,8 +5587,8 @@ TEST(ArrayShiftSweeping) {
"array.shift();"
"array;");
- Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
+ Handle<JSObject> o = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result)));
CHECK(heap->InOldSpace(o->elements()));
CHECK(heap->InOldSpace(*o));
Page* page = Page::FromAddress(o->elements()->address());
@@ -5722,7 +5736,7 @@ TEST(Regress3631) {
CcTest::heap()->StartIncrementalMarking();
}
// Incrementally mark the backing store.
- Handle<JSObject> obj =
+ Handle<JSReceiver> obj =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Handle<JSWeakCollection> weak_map(reinterpret_cast<JSWeakCollection*>(*obj));
while (!Marking::IsBlack(
@@ -5782,7 +5796,7 @@ TEST(Regress3877) {
{
HandleScope inner_scope(isolate);
v8::Local<v8::Value> result = CompileRun("cls.prototype");
- Handle<JSObject> proto =
+ Handle<JSReceiver> proto =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
weak_prototype = inner_scope.CloseAndEscape(factory->NewWeakCell(proto));
}
@@ -5810,7 +5824,7 @@ Handle<WeakCell> AddRetainedMap(Isolate* isolate, Heap* heap) {
Handle<Map> map = Map::Create(isolate, 1);
v8::Local<v8::Value> result =
CompileRun("(function () { return {x : 10}; })();");
- Handle<JSObject> proto =
+ Handle<JSReceiver> proto =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Map::SetPrototype(map, proto);
heap->AddRetainedMap(map);
@@ -5825,9 +5839,11 @@ void CheckMapRetainingFor(int n) {
Handle<WeakCell> weak_cell = AddRetainedMap(isolate, heap);
CHECK(!weak_cell->cleared());
for (int i = 0; i < n; i++) {
+ SimulateIncrementalMarking(heap);
heap->CollectGarbage(OLD_SPACE);
}
CHECK(!weak_cell->cleared());
+ SimulateIncrementalMarking(heap);
heap->CollectGarbage(OLD_SPACE);
CHECK(weak_cell->cleared());
}
diff --git a/deps/v8/test/cctest/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index ef3bad68ae..5822d4a979 100644
--- a/deps/v8/test/cctest/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#ifdef __linux__
@@ -22,6 +19,8 @@
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/utils-inl.h"
+
using v8::IdleTask;
using v8::Task;
@@ -86,6 +85,28 @@ class MockPlatform : public v8::Platform {
delete task;
}
+ uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
+ const char* name, uint64_t id, uint64_t bind_id,
+ int numArgs, const char** argNames,
+ const uint8_t* argTypes, const uint64_t* argValues,
+ unsigned int flags) override {
+ return 0;
+ }
+
+ void UpdateTraceEventDuration(const uint8_t* categoryEnabledFlag,
+ const char* name, uint64_t handle) override {}
+
+ const uint8_t* GetCategoryGroupEnabled(const char* name) override {
+ static uint8_t no = 0;
+ return &no;
+ }
+
+ const char* GetCategoryGroupName(
+ const uint8_t* categoryEnabledFlag) override {
+ static const char* dummy = "dummy";
+ return dummy;
+ }
+
private:
v8::Platform* platform_;
IdleTask* idle_task_;
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
new file mode 100644
index 0000000000..770804f162
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -0,0 +1,284 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/globals.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+#include "src/heap/spaces-inl.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
+ AllocationResult result = heap->old_space()->AllocateRaw(
+ static_cast<int>(size_in_bytes), kDoubleAligned);
+ Object* obj = result.ToObjectChecked();
+ Address adr = HeapObject::cast(obj)->address();
+ return adr;
+}
+
+
+static void VerifyIterable(v8::internal::Address base,
+ v8::internal::Address limit,
+ std::vector<intptr_t> expected_size) {
+ CHECK_LE(reinterpret_cast<intptr_t>(base), reinterpret_cast<intptr_t>(limit));
+ HeapObject* object = nullptr;
+ size_t counter = 0;
+ while (base < limit) {
+ object = HeapObject::FromAddress(base);
+ CHECK(object->IsFiller());
+ CHECK_LT(counter, expected_size.size());
+ CHECK_EQ(expected_size[counter], object->Size());
+ base += object->Size();
+ counter++;
+ }
+}
+
+
+static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
+ intptr_t size_in_bytes,
+ AllocationAlignment alignment = kWordAligned) {
+ HeapObject* obj;
+ AllocationResult result =
+ lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
+ if (result.To(&obj)) {
+ heap->CreateFillerObjectAt(obj->address(), static_cast<int>(size_in_bytes));
+ return true;
+ }
+ return false;
+}
+
+
+TEST(InvalidLab) {
+ LocalAllocationBuffer lab = LocalAllocationBuffer::InvalidBuffer();
+ CHECK(!lab.IsValid());
+}
+
+
+TEST(UnusedLabImplicitClose) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ heap->root(Heap::kOnePointerFillerMapRootIndex);
+ const int kLabSize = 4 * KB;
+ Address base = AllocateLabBackingStore(heap, kLabSize);
+ Address limit = base + kLabSize;
+ intptr_t expected_sizes_raw[1] = {kLabSize};
+ std::vector<intptr_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 1);
+ {
+ AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ LocalAllocationBuffer lab =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
+ CHECK(lab.IsValid());
+ }
+ VerifyIterable(base, limit, expected_sizes);
+}
+
+
+TEST(SimpleAllocate) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ const int kLabSize = 4 * KB;
+ Address base = AllocateLabBackingStore(heap, kLabSize);
+ Address limit = base + kLabSize;
+ intptr_t sizes_raw[1] = {128};
+ intptr_t expected_sizes_raw[2] = {128, kLabSize - 128};
+ std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 1);
+ std::vector<intptr_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 2);
+ {
+ AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ LocalAllocationBuffer lab =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
+ CHECK(lab.IsValid());
+ intptr_t sum = 0;
+ for (auto size : sizes) {
+ if (AllocateFromLab(heap, &lab, size)) {
+ sum += size;
+ }
+ }
+ }
+ VerifyIterable(base, limit, expected_sizes);
+}
+
+
+TEST(AllocateUntilLabOOM) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ const int kLabSize = 2 * KB;
+ Address base = AllocateLabBackingStore(heap, kLabSize);
+ Address limit = base + kLabSize;
+ // The following objects won't fit in {kLabSize}.
+ intptr_t sizes_raw[5] = {512, 512, 128, 512, 512};
+ intptr_t expected_sizes_raw[5] = {512, 512, 128, 512, 384 /* left over */};
+ std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 5);
+ std::vector<intptr_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 5);
+ intptr_t sum = 0;
+ {
+ AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ LocalAllocationBuffer lab =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
+ CHECK(lab.IsValid());
+ for (auto size : sizes) {
+ if (AllocateFromLab(heap, &lab, size)) {
+ sum += size;
+ }
+ }
+ CHECK_EQ(kLabSize - sum, 384);
+ }
+ VerifyIterable(base, limit, expected_sizes);
+}
+
+
+TEST(AllocateExactlyUntilLimit) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ const int kLabSize = 2 * KB;
+ Address base = AllocateLabBackingStore(heap, kLabSize);
+ Address limit = base + kLabSize;
+ intptr_t sizes_raw[4] = {512, 512, 512, 512};
+ intptr_t expected_sizes_raw[5] = {512, 512, 512, 512, 0};
+ std::vector<intptr_t> sizes(sizes_raw, sizes_raw + 4);
+ std::vector<intptr_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 5);
+ {
+ AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ LocalAllocationBuffer lab =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
+ CHECK(lab.IsValid());
+ intptr_t sum = 0;
+ for (auto size : sizes) {
+ if (AllocateFromLab(heap, &lab, size)) {
+ sum += size;
+ } else {
+ break;
+ }
+ }
+ CHECK_EQ(kLabSize - sum, 0);
+ }
+ VerifyIterable(base, limit, expected_sizes);
+}
+
+
+TEST(MergeSuccessful) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ const int kLabSize = 2 * KB;
+ Address base1 = AllocateLabBackingStore(heap, kLabSize);
+ Address limit1 = base1 + kLabSize;
+ Address base2 = limit1;
+ Address limit2 = base2 + kLabSize;
+
+ intptr_t sizes1_raw[4] = {512, 512, 512, 256};
+ intptr_t expected_sizes1_raw[5] = {512, 512, 512, 256, 256};
+ std::vector<intptr_t> sizes1(sizes1_raw, sizes1_raw + 4);
+ std::vector<intptr_t> expected_sizes1(expected_sizes1_raw,
+ expected_sizes1_raw + 5);
+
+ intptr_t sizes2_raw[5] = {256, 512, 512, 512, 512};
+ intptr_t expected_sizes2_raw[10] = {512, 512, 512, 256, 256,
+ 512, 512, 512, 512, 0};
+ std::vector<intptr_t> sizes2(sizes2_raw, sizes2_raw + 5);
+ std::vector<intptr_t> expected_sizes2(expected_sizes2_raw,
+ expected_sizes2_raw + 10);
+
+ {
+ AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
+ LocalAllocationBuffer lab1 =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
+ CHECK(lab1.IsValid());
+ intptr_t sum = 0;
+ for (auto size : sizes1) {
+ if (AllocateFromLab(heap, &lab1, size)) {
+ sum += size;
+ } else {
+ break;
+ }
+ }
+
+ AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
+ LocalAllocationBuffer lab2 =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
+ CHECK(lab2.IsValid());
+ CHECK(lab2.TryMerge(&lab1));
+ CHECK(!lab1.IsValid());
+ for (auto size : sizes2) {
+ if (AllocateFromLab(heap, &lab2, size)) {
+ sum += size;
+ } else {
+ break;
+ }
+ }
+ CHECK_EQ(2 * kLabSize - sum, 0);
+ }
+ VerifyIterable(base1, limit1, expected_sizes1);
+ VerifyIterable(base1, limit2, expected_sizes2);
+}
+
+
+TEST(MergeFailed) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ const int kLabSize = 2 * KB;
+ Address base1 = AllocateLabBackingStore(heap, kLabSize);
+ Address base2 = base1 + kLabSize;
+ Address base3 = base2 + kLabSize;
+
+ {
+ AllocationResult lab_backing_store1(HeapObject::FromAddress(base1));
+ LocalAllocationBuffer lab1 =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store1, kLabSize);
+ CHECK(lab1.IsValid());
+
+ AllocationResult lab_backing_store2(HeapObject::FromAddress(base2));
+ LocalAllocationBuffer lab2 =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store2, kLabSize);
+ CHECK(lab2.IsValid());
+
+ AllocationResult lab_backing_store3(HeapObject::FromAddress(base3));
+ LocalAllocationBuffer lab3 =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store3, kLabSize);
+ CHECK(lab3.IsValid());
+
+ CHECK(!lab3.TryMerge(&lab1));
+ }
+}
+
+
+#ifdef V8_HOST_ARCH_32_BIT
+TEST(AllocateAligned) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ const int kLabSize = 2 * KB;
+ Address base = AllocateLabBackingStore(heap, kLabSize);
+ Address limit = base + kLabSize;
+ std::pair<intptr_t, AllocationAlignment> sizes_raw[2] = {
+ std::make_pair(116, kWordAligned), std::make_pair(64, kDoubleAligned)};
+ std::vector<std::pair<intptr_t, AllocationAlignment>> sizes(sizes_raw,
+ sizes_raw + 2);
+ intptr_t expected_sizes_raw[4] = {116, 4, 64, 1864};
+ std::vector<intptr_t> expected_sizes(expected_sizes_raw,
+ expected_sizes_raw + 4);
+
+ {
+ AllocationResult lab_backing_store(HeapObject::FromAddress(base));
+ LocalAllocationBuffer lab =
+ LocalAllocationBuffer::FromResult(heap, lab_backing_store, kLabSize);
+ CHECK(lab.IsValid());
+ for (auto pair : sizes) {
+ if (!AllocateFromLab(heap, &lab, pair.first, pair.second)) {
+ break;
+ }
+ }
+ }
+ VerifyIterable(base, limit, expected_sizes);
+}
+#endif // V8_HOST_ARCH_32_BIT
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index decf646b05..cfcf149c61 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -42,7 +42,9 @@
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap-tester.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
+
using namespace v8::internal;
using v8::Just;
@@ -239,7 +241,7 @@ static void WeakPointerCallback(
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
- DCHECK_EQ(1234, p->second);
+ CHECK_EQ(1234, p->second);
NumberOfWeakCalls++;
p->first->Reset();
}
@@ -360,7 +362,7 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
bool has_been_disposed() { return has_been_disposed_; }
virtual void Dispose() {
- DCHECK(!has_been_disposed_);
+ CHECK(!has_been_disposed_);
has_been_disposed_ = true;
}
@@ -385,7 +387,7 @@ TEST(EmptyObjectGroups) {
TestRetainedObjectInfo info;
global_handles->AddObjectGroup(NULL, 0, &info);
- DCHECK(info.has_been_disposed());
+ CHECK(info.has_been_disposed());
}
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index cf053d0a9c..2fe099d2e3 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -25,16 +25,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap-tester.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
namespace v8 {
namespace internal {
@@ -804,7 +802,7 @@ class Observer : public InlineAllocationObserver {
explicit Observer(intptr_t step_size)
: InlineAllocationObserver(step_size), count_(0) {}
- virtual void Step(int bytes_allocated) { count_++; }
+ void Step(int bytes_allocated, Address, size_t) override { count_++; }
int count() const { return count_; }
@@ -834,13 +832,8 @@ UNINITIALIZED_TEST(InlineAllocationObserver) {
AllocateUnaligned(new_space, 64);
CHECK_EQ(observer1.count(), 0);
- // The observer should not get called even when we have allocated exactly
- // 128 bytes.
+ // The observer should get called when we have allocated exactly 128 bytes.
AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 0);
-
- // The next allocation gets the notification.
- AllocateUnaligned(new_space, 8);
CHECK_EQ(observer1.count(), 1);
// Another >128 bytes should get another notification.
@@ -851,36 +844,83 @@ UNINITIALIZED_TEST(InlineAllocationObserver) {
AllocateUnaligned(new_space, 1024);
CHECK_EQ(observer1.count(), 3);
- // Allocating another 2048 bytes in small objects should get 12
+ // Allocating another 2048 bytes in small objects should get 16
// notifications.
for (int i = 0; i < 64; ++i) {
AllocateUnaligned(new_space, 32);
}
- CHECK_EQ(observer1.count(), 15);
+ CHECK_EQ(observer1.count(), 19);
// Multiple observers should work.
Observer observer2(96);
new_space->AddInlineAllocationObserver(&observer2);
AllocateUnaligned(new_space, 2048);
- CHECK_EQ(observer1.count(), 16);
+ CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 1);
AllocateUnaligned(new_space, 104);
- CHECK_EQ(observer1.count(), 16);
+ CHECK_EQ(observer1.count(), 20);
CHECK_EQ(observer2.count(), 2);
// Callback should stop getting called after an observer is removed.
new_space->RemoveInlineAllocationObserver(&observer1);
AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 16); // no more notifications.
+ CHECK_EQ(observer1.count(), 20); // no more notifications.
CHECK_EQ(observer2.count(), 3); // this one is still active.
+ // Ensure that PauseInlineAllocationObserversScope work correctly.
+ AllocateUnaligned(new_space, 48);
+ CHECK_EQ(observer2.count(), 3);
+ {
+ PauseInlineAllocationObserversScope pause_observers(new_space);
+ CHECK_EQ(observer2.count(), 3);
+ AllocateUnaligned(new_space, 384);
+ CHECK_EQ(observer2.count(), 3);
+ }
+ CHECK_EQ(observer2.count(), 3);
+ // Coupled with the 48 bytes allocated before the pause, another 48 bytes
+ // allocated here should trigger a notification.
+ AllocateUnaligned(new_space, 48);
+ CHECK_EQ(observer2.count(), 4);
+
new_space->RemoveInlineAllocationObserver(&observer2);
AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 16);
- CHECK_EQ(observer2.count(), 3);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 4);
+ }
+ isolate->Dispose();
+}
+
+
+UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ NewSpace* new_space = i_isolate->heap()->new_space();
+
+ Observer observer1(512);
+ new_space->AddInlineAllocationObserver(&observer1);
+ Observer observer2(576);
+ new_space->AddInlineAllocationObserver(&observer2);
+
+ for (int i = 0; i < 512; ++i) {
+ AllocateUnaligned(new_space, 32);
+ }
+
+ new_space->RemoveInlineAllocationObserver(&observer1);
+ new_space->RemoveInlineAllocationObserver(&observer2);
+
+ CHECK_EQ(observer1.count(), 32);
+ CHECK_EQ(observer2.count(), 28);
}
isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/heap/utils-inl.h b/deps/v8/test/cctest/heap/utils-inl.h
new file mode 100644
index 0000000000..f255bb6c03
--- /dev/null
+++ b/deps/v8/test/cctest/heap/utils-inl.h
@@ -0,0 +1,137 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef HEAP_UTILS_H_
+#define HEAP_UTILS_H_
+
+#include "src/factory.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/isolate.h"
+
+
+namespace v8 {
+namespace internal {
+
+static int LenFromSize(int size) {
+ return (size - FixedArray::kHeaderSize) / kPointerSize;
+}
+
+
+static inline std::vector<Handle<FixedArray>> CreatePadding(
+ Heap* heap, int padding_size, PretenureFlag tenure,
+ int object_size = Page::kMaxRegularHeapObjectSize) {
+ std::vector<Handle<FixedArray>> handles;
+ Isolate* isolate = heap->isolate();
+ int allocate_memory;
+ int length;
+ int free_memory = padding_size;
+ if (tenure == i::TENURED) {
+ heap->old_space()->EmptyAllocationInfo();
+ int overall_free_memory = static_cast<int>(heap->old_space()->Available());
+ CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
+ } else {
+ heap->new_space()->DisableInlineAllocationSteps();
+ int overall_free_memory =
+ static_cast<int>(*heap->new_space()->allocation_limit_address() -
+ *heap->new_space()->allocation_top_address());
+ CHECK(padding_size <= overall_free_memory || overall_free_memory == 0);
+ }
+ while (free_memory > 0) {
+ if (free_memory > object_size) {
+ allocate_memory = object_size;
+ length = LenFromSize(allocate_memory);
+ } else {
+ allocate_memory = free_memory;
+ length = LenFromSize(allocate_memory);
+ if (length <= 0) {
+ // Not enough room to create another fixed array. Let's create a filler.
+ heap->CreateFillerObjectAt(*heap->old_space()->allocation_top_address(),
+ free_memory);
+ break;
+ }
+ }
+ handles.push_back(isolate->factory()->NewFixedArray(length, tenure));
+ CHECK((tenure == NOT_TENURED && heap->InNewSpace(*handles.back())) ||
+ (tenure == TENURED && heap->InOldSpace(*handles.back())));
+ free_memory -= allocate_memory;
+ }
+ return handles;
+}
+
+
+// Helper function that simulates a full new-space in the heap.
+static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
+ space->DisableInlineAllocationSteps();
+ int space_remaining = static_cast<int>(*space->allocation_limit_address() -
+ *space->allocation_top_address());
+ if (space_remaining == 0) return false;
+ CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
+ return true;
+}
+
+
+// Helper function that simulates a fill new-space in the heap.
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+ int extra_bytes) {
+ space->DisableInlineAllocationSteps();
+ int space_remaining = static_cast<int>(*space->allocation_limit_address() -
+ *space->allocation_top_address());
+ CHECK(space_remaining >= extra_bytes);
+ int new_linear_size = space_remaining - extra_bytes;
+ if (new_linear_size == 0) return;
+ CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
+}
+
+
+static inline void FillCurrentPage(v8::internal::NewSpace* space) {
+ AllocateAllButNBytes(space, 0);
+}
+
+
+static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
+ FillCurrentPage(space);
+ while (FillUpOnePage(space)) {
+ }
+}
+
+
+// Helper function that simulates a full old-space in the heap.
+static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
+ space->EmptyAllocationInfo();
+ space->ResetFreeList();
+ space->ClearStats();
+}
+
+
+// Helper function that simulates many incremental marking steps until
+// marking is completed.
+static inline void SimulateIncrementalMarking(i::Heap* heap,
+ bool force_completion = true) {
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+ if (!force_completion) return;
+
+ while (!marking->IsComplete()) {
+ marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (marking->IsReadyToOverApproximateWeakClosure()) {
+ marking->FinalizeIncrementally();
+ }
+ }
+ CHECK(marking->IsComplete());
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // HEAP_UTILS_H_
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index c29eb9659d..2c06da26a1 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(rmcilroy): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/compiler.h"
@@ -26,12 +23,13 @@ class BytecodeGeneratorHelper {
-InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
BytecodeGeneratorHelper() {
- i::FLAG_vector_stores = true;
i::FLAG_ignition = true;
i::FLAG_ignition_fake_try_catch = true;
+ i::FLAG_ignition_fallback_on_eval_and_catch = false;
i::FLAG_ignition_filter = StrDup(kFunctionName);
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_legacy_const = true;
CcTest::i_isolate()->interpreter()->Initialize();
}
@@ -59,10 +57,22 @@ class BytecodeGeneratorHelper {
return handle(js_function->shared()->bytecode_array(), CcTest::i_isolate());
}
+ Handle<BytecodeArray> MakeBytecode(const char* script, const char* filter,
+ const char* function_name) {
+ const char* old_ignition_filter = i::FLAG_ignition_filter;
+ i::FLAG_ignition_filter = filter;
+ Handle<BytecodeArray> return_val = MakeBytecode(script, function_name);
+ i::FLAG_ignition_filter = old_ignition_filter;
+ return return_val;
+ }
+
Handle<BytecodeArray> MakeBytecodeForFunctionBody(const char* body) {
- ScopedVector<char> program(3072);
- SNPrintF(program, "function %s() { %s }\n%s();", kFunctionName, body,
- kFunctionName);
+ static const char kFormat[] = "function %s() { %s }\n%s();";
+ static const int kFormatLength = arraysize(kFormat);
+ int length = kFormatLength + 2 * StrLength(kFunctionName) + StrLength(body);
+ ScopedVector<char> program(length);
+ length = SNPrintF(program, kFormat, kFunctionName, body, kFunctionName);
+ CHECK_GT(length, 0);
return MakeBytecode(program.start(), kFunctionName);
}
@@ -73,14 +83,9 @@ class BytecodeGeneratorHelper {
}
Handle<BytecodeArray> MakeBytecodeForFunctionNoFilter(const char* function) {
- const char* old_ignition_filter = i::FLAG_ignition_filter;
- i::FLAG_ignition_filter = "*";
ScopedVector<char> program(3072);
SNPrintF(program, "%s\n%s();", function, kFunctionName);
- Handle<BytecodeArray> return_val =
- MakeBytecode(program.start(), kFunctionName);
- i::FLAG_ignition_filter = old_ignition_filter;
- return return_val;
+ return MakeBytecode(program.start(), "*", kFunctionName);
}
};
@@ -94,17 +99,25 @@ class BytecodeGeneratorHelper {
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define U16(x) static_cast<uint8_t>((x) & 0xff), \
static_cast<uint8_t>(((x) >> kBitsPerByte) & 0xff)
+#define U16I(x) static_cast<uint8_t>((x) & 0xff), \
+ static_cast<uint8_t>(((x++) >> kBitsPerByte) & 0xff)
#elif defined(V8_TARGET_BIG_ENDIAN)
#define U16(x) static_cast<uint8_t>(((x) >> kBitsPerByte) & 0xff), \
static_cast<uint8_t>((x) & 0xff)
+#define U16I(x) static_cast<uint8_t>(((x) >> kBitsPerByte) & 0xff), \
+ static_cast<uint8_t>((x++) & 0xff)
#else
#error Unknown byte ordering
#endif
+#define XSTR(A) #A
+#define STR(A) XSTR(A)
+
#define COMMA() ,
#define SPACE()
+#define UNIQUE_VAR() "var a" STR(__COUNTER__) " = 0;\n"
-#define REPEAT_2(SEP, ...) \
+#define REPEAT_2(SEP, ...) \
__VA_ARGS__ SEP() __VA_ARGS__
#define REPEAT_4(SEP, ...) \
REPEAT_2(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__)
@@ -127,6 +140,16 @@ class BytecodeGeneratorHelper {
REPEAT_4(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__) SEP() \
__VA_ARGS__
+#define REPEAT_249(SEP, ...) \
+ REPEAT_127(SEP, __VA_ARGS__) SEP() REPEAT_64(SEP, __VA_ARGS__) SEP() \
+ REPEAT_32(SEP, __VA_ARGS__) SEP() REPEAT_16(SEP, __VA_ARGS__) SEP() \
+ REPEAT_8(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__)
+
+#define REPEAT_249_UNIQUE_VARS() \
+UNIQUE_VAR() REPEAT_127(UNIQUE_VAR) UNIQUE_VAR() REPEAT_64(UNIQUE_VAR) \
+UNIQUE_VAR() REPEAT_32(UNIQUE_VAR) UNIQUE_VAR() REPEAT_16(UNIQUE_VAR) \
+UNIQUE_VAR() REPEAT_8(UNIQUE_VAR) UNIQUE_VAR() REPEAT_2(UNIQUE_VAR)
+
// Structure for containing expected bytecode snippets.
template<typename T, int C = 6>
struct ExpectedSnippet {
@@ -264,124 +287,134 @@ TEST(PrimitiveExpressions) {
{"var x = 0; return x;",
kPointerSize,
1,
- 6,
+ 4,
{B(LdaZero), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return)},
0},
{"var x = 0; return x + 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 8,
+ 10,
{B(LdaZero), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(Add), R(0), //
+ B(Add), R(1), //
B(Return)},
0},
{"var x = 0; return x - 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 8,
+ 10,
{B(LdaZero), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(Sub), R(0), //
+ B(Sub), R(1), //
B(Return)},
0},
{"var x = 4; return x * 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(4), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(Mul), R(0), //
+ B(Mul), R(1), //
B(Return)},
0},
{"var x = 4; return x / 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(4), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(Div), R(0), //
+ B(Div), R(1), //
B(Return)},
0},
{"var x = 4; return x % 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(4), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(Mod), R(0), //
+ B(Mod), R(1), //
B(Return)},
0},
{"var x = 1; return x | 2;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(2), //
- B(BitwiseOr), R(0), //
+ B(BitwiseOr), R(1), //
B(Return)},
0},
{"var x = 1; return x ^ 2;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(2), //
- B(BitwiseXor), R(0), //
+ B(BitwiseXor), R(1), //
B(Return)},
0},
{"var x = 1; return x & 2;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(2), //
- B(BitwiseAnd), R(0), //
+ B(BitwiseAnd), R(1), //
B(Return)},
0},
{"var x = 10; return x << 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(10), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(ShiftLeft), R(0), //
+ B(ShiftLeft), R(1), //
B(Return)},
0},
{"var x = 10; return x >> 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(10), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(ShiftRight), R(0), //
+ B(ShiftRight), R(1), //
B(Return)},
0},
{"var x = 10; return x >>> 3;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{B(LdaSmi8), U8(10), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(ShiftRightLogical), R(0), //
+ B(ShiftRightLogical), R(1), //
B(Return)},
0},
{"var x = 0; return (x, 3);",
- kPointerSize,
+ 1 * kPointerSize,
1,
6,
{B(LdaZero), //
@@ -402,27 +435,26 @@ TEST(LogicalExpressions) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
-
ExpectedSnippet<int> snippets[] = {
{"var x = 0; return x || 3;",
1 * kPointerSize,
1,
- 10,
+ 8,
{B(LdaZero), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanTrue), U8(4), //
B(LdaSmi8), U8(3), //
B(Return)},
0},
{"var x = 0; return (x == 1) || 3;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 12,
+ 14,
{B(LdaZero), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(1), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(JumpIfTrue), U8(4), //
B(LdaSmi8), U8(3), //
B(Return)},
@@ -430,22 +462,22 @@ TEST(LogicalExpressions) {
{"var x = 0; return x && 3;",
1 * kPointerSize,
1,
- 10,
+ 8,
{B(LdaZero), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanFalse), U8(4), //
B(LdaSmi8), U8(3), //
B(Return)},
0},
{"var x = 0; return (x == 0) && 3;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 11,
+ 13,
{B(LdaZero), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaZero), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
B(LdaSmi8), U8(3), //
B(Return)},
@@ -453,10 +485,9 @@ TEST(LogicalExpressions) {
{"var x = 0; return x || (1, 2, 3);",
1 * kPointerSize,
1,
- 10,
+ 8,
{B(LdaZero), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanTrue), U8(4), //
B(LdaSmi8), U8(3), //
B(Return)},
@@ -464,18 +495,22 @@ TEST(LogicalExpressions) {
{"var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);",
3 * kPointerSize,
1,
- 23,
- {B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(1), //
- B(LdaSmi8), U8(4), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(8), //
- B(LdaSmi8), U8(5), //
- B(Star), R(2), //
- B(LdaSmi8), U8(3), //
+ 31,
+ {B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(4), //
+ B(Star), R(2), //
+ B(Ldar), R(0), //
+ B(JumpIfToBooleanTrue), U8(16), //
+ B(Ldar), R(0), //
+ B(Ldar), R(1), //
+ B(Ldar), R(0), //
+ B(Ldar), R(1), //
+ B(LdaSmi8), U8(5), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(3), //
B(Return)},
0},
{"var x = 1; var a = 2, b = 3; return x || ("
@@ -493,10 +528,10 @@ TEST(LogicalExpressions) {
B(Ldar), R(0), //
B(JumpIfToBooleanTrueConstant), U8(0), //
REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(2)), //
B(LdaSmi8), U8(3), //
B(Return)},
1,
@@ -516,10 +551,10 @@ TEST(LogicalExpressions) {
B(Ldar), R(0), //
B(JumpIfToBooleanFalseConstant), U8(0), //
REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(2)), //
B(LdaSmi8), U8(3), //
B(Return)}, //
1,
@@ -527,23 +562,25 @@ TEST(LogicalExpressions) {
{"var x = 1; var a = 2, b = 3; return (x > 3) || ("
REPEAT_32(SPACE, "a = 1, b = 2, ")
"3);",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
- 277,
+ 281,
{B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
B(Star), R(2), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
B(LdaSmi8), U8(3), //
- B(TestGreaterThan), R(0), //
+ B(TestGreaterThan), R(3), //
B(JumpIfTrueConstant), U8(0), //
REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(2)), //
B(LdaSmi8), U8(3), //
B(Return)},
1,
@@ -551,23 +588,25 @@ TEST(LogicalExpressions) {
{"var x = 0; var a = 2, b = 3; return (x < 5) && ("
REPEAT_32(SPACE, "a = 1, b = 2, ")
"3);",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
- 276,
+ 280,
{B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
B(Star), R(2), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
B(LdaSmi8), U8(5), //
- B(TestLessThan), R(0), //
+ B(TestLessThan), R(3), //
B(JumpIfFalseConstant), U8(0), //
REPEAT_32(COMMA, //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Star), R(2)), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(2)), //
B(LdaSmi8), U8(3), //
B(Return)},
1,
@@ -589,10 +628,9 @@ TEST(LogicalExpressions) {
{"var x = 1; return x && 3 || 0, 1;",
1 * kPointerSize,
1,
- 16,
+ 14,
{B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanFalse), U8(4), //
B(LdaSmi8), U8(3), //
B(JumpIfToBooleanTrue), U8(3), //
@@ -856,154 +894,205 @@ TEST(PropertyLoads) {
ExpectedSnippet<const char*> snippets[] = {
{"function f(a) { return a.name; }\nf({name : \"test\"})",
- 0,
+ 1 * kPointerSize,
2,
- 5,
+ 9,
{
- B(LoadICSloppy), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Return), //
},
1,
{"name"}},
{"function f(a) { return a[\"key\"]; }\nf({key : \"test\"})",
- 0,
+ 1 * kPointerSize,
2,
- 5,
+ 9,
{
- B(LoadICSloppy), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(Return) //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Return) //
},
1,
{"key"}},
{"function f(a) { return a[100]; }\nf({100 : \"test\"})",
- 0,
+ 1 * kPointerSize,
2,
- 6,
+ 10,
{
- B(LdaSmi8), U8(100), //
- B(KeyedLoadICSloppy), A(1, 2), U8(vector->GetIndex(slot1)), //
- B(Return) //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(100), //
+ B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot1)), //
+ B(Return) //
},
0},
{"function f(a, b) { return a[b]; }\nf({arg : \"test\"}, \"arg\")",
- 0,
+ 1 * kPointerSize,
3,
- 6,
+ 10,
{
- B(Ldar), A(1, 2), //
- B(KeyedLoadICSloppy), A(1, 3), U8(vector->GetIndex(slot1)), //
- B(Return) //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(1, 2), //
+ B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot1)), //
+ B(Return) //
},
0},
{"function f(a) { var b = a.name; return a[-124]; }\n"
"f({\"-124\" : \"test\", name : 123 })",
- kPointerSize,
+ 2 * kPointerSize,
2,
- 12,
+ 20,
{
- B(LoadICSloppy), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(-124), //
- B(KeyedLoadICSloppy), A(1, 2), U8(vector->GetIndex(slot2)), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Star), R(0), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(-124), //
+ B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot2)), //
+ B(Return), //
},
1,
{"name"}},
{"function f(a) { \"use strict\"; return a.name; }\nf({name : \"test\"})",
- 0,
+ 1 * kPointerSize,
2,
- 5,
+ 9,
{
- B(LoadICStrict), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICStrict), R(0), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Return), //
},
1,
{"name"}},
- {
- "function f(a, b) { \"use strict\"; return a[b]; }\n"
- "f({arg : \"test\"}, \"arg\")",
- 0,
- 3,
- 6,
- {
- B(Ldar), A(2, 3), //
- B(KeyedLoadICStrict), A(1, 3), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 0},
- {
- "function f(a) {\n"
- " var b;\n"
- REPEAT_127(SPACE, " b = a.name; ")
- " return a.name; }\n"
- "f({name : \"test\"})\n",
- 1 * kPointerSize,
- 2,
- 769,
- {
- REPEAT_127(COMMA, //
- B(LoadICSloppy), A(1, 2), U8(0), U8((wide_idx_1 += 2)), //
- B(Star), R(0)), //
- B(LoadICSloppyWide), A(1, 2), U16(0), U16(wide_idx_1 + 2), //
- B(Return), //
- },
- 1,
- {"name"}},
- {
- "function f(a) {\n"
- " 'use strict'; var b;\n"
- REPEAT_127(SPACE, " b = a.name; ")
- " return a.name; }\n"
- "f({name : \"test\"})\n",
- 1 * kPointerSize,
- 2,
- 769,
- {
- REPEAT_127(COMMA, //
- B(LoadICStrict), A(1, 2), U8(0), U8((wide_idx_2 += 2)), //
- B(Star), R(0)), //
- B(LoadICStrictWide), A(1, 2), U16(0), U16(wide_idx_2 + 2), //
- B(Return), //
- },
- 1,
- {"name"}},
- {
- "function f(a, b) {\n"
- " var c;\n"
- REPEAT_127(SPACE, " c = a[b]; ")
- " return a[b]; }\n"
- "f({name : \"test\"}, \"name\")\n",
- 1 * kPointerSize,
- 3,
- 896,
- {
- REPEAT_127(COMMA, //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICSloppy), A(1, 3), U8((wide_idx_3 += 2)), //
- B(Star), R(0)), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICSloppyWide), A(1, 3), U16(wide_idx_3 + 2), //
- B(Return), //
- }},
- {
- "function f(a, b) {\n"
- " 'use strict'; var c;\n"
- REPEAT_127(SPACE, " c = a[b]; ")
- " return a[b]; }\n"
- "f({name : \"test\"}, \"name\")\n",
- 1 * kPointerSize,
- 3,
- 896,
- {
- REPEAT_127(COMMA, //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICStrict), A(1, 3), U8((wide_idx_4 += 2)), //
- B(Star), R(0)), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICStrictWide), A(1, 3), U16(wide_idx_4 + 2), //
- B(Return), //
- }},
- };
+ {"function f(a, b) { \"use strict\"; return a[b]; }\n"
+ "f({arg : \"test\"}, \"arg\")",
+ 1 * kPointerSize,
+ 3,
+ 10,
+ {
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICStrict), R(0), U8(vector->GetIndex(slot1)), //
+ B(Return), //
+ },
+ 0},
+ {"function f(a) {\n"
+ " var b;\n"
+ "b = a.name;"
+ REPEAT_127(SPACE, " b = a.name; ")
+ " return a.name; }\n"
+ "f({name : \"test\"})\n",
+ 2 * kPointerSize,
+ 2,
+ 1291,
+ {
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICSloppy), R(1), U8(0), U8(wide_idx_1 += 2), //
+ B(Star), R(0), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICSloppy), R(1), U8(0), //
+ U8((wide_idx_1 += 2)), //
+ B(Star), R(0)), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICSloppyWide), R(1), U16(0), U16(wide_idx_1 + 2), //
+ B(Return), //
+ },
+ 1,
+ {"name"}},
+ {"function f(a) {\n"
+ " 'use strict'; var b;\n"
+ " b = a.name;\n"
+ REPEAT_127(SPACE, " b = a.name; ")
+ " return a.name; }\n"
+ "f({name : \"test\"})\n",
+ 2 * kPointerSize,
+ 2,
+ 1291,
+ {
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICStrict), R(1), U8(0), U8((wide_idx_2 += 2)), //
+ B(Star), R(0), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICStrict), R(1), U8(0), //
+ U8((wide_idx_2 += 2)), //
+ B(Star), R(0)), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICStrictWide), R(1), U16(0), U16(wide_idx_2 + 2), //
+ B(Return), //
+ },
+ 1,
+ {"name"}},
+ {"function f(a, b) {\n"
+ " var c;\n"
+ " c = a[b];"
+ REPEAT_127(SPACE, " c = a[b]; ")
+ " return a[b]; }\n"
+ "f({name : \"test\"}, \"name\")\n",
+ 2 * kPointerSize,
+ 3,
+ 1419,
+ {
+ B(Ldar), A(1, 3), //
+ B(Star), R(1), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICSloppy), R(1), U8((wide_idx_3 += 2)), //
+ B(Star), R(0), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 3), //
+ B(Star), R(1), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICSloppy), R(1), U8((wide_idx_3 += 2)), //
+ B(Star), R(0)), //
+ B(Ldar), A(1, 3), //
+ B(Star), R(1), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICSloppyWide), R(1), U16(wide_idx_3 + 2), //
+ B(Return), //
+ }},
+ {"function f(a, b) {\n"
+ " 'use strict'; var c;\n"
+ " c = a[b];"
+ REPEAT_127(SPACE, " c = a[b]; ")
+ " return a[b]; }\n"
+ "f({name : \"test\"}, \"name\")\n",
+ 2 * kPointerSize,
+ 3,
+ 1419,
+ {
+ B(Ldar), A(1, 3), //
+ B(Star), R(1), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICStrict), R(1), U8((wide_idx_4 += 2)), //
+ B(Star), R(0), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 3), //
+ B(Star), R(1), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICStrict), R(1), U8((wide_idx_4 += 2)), //
+ B(Star), R(0)), //
+ B(Ldar), A(1, 3), //
+ B(Star), R(1), //
+ B(Ldar), A(2, 3), //
+ B(KeyedLoadICStrictWide), R(1), U16(wide_idx_4 + 2), //
+ B(Return), //
+ }},
+ };
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
@@ -1032,171 +1121,238 @@ TEST(PropertyStores) {
ExpectedSnippet<const char*> snippets[] = {
{"function f(a) { a.name = \"val\"; }\nf({name : \"test\"})",
- 0,
+ kPointerSize,
2,
- 8,
+ 12,
{
- B(LdaConstant), U8(1), //
- B(StoreICSloppy), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
- {"name", "val"}},
+ {"val", "name"}},
{"function f(a) { a[\"key\"] = \"val\"; }\nf({key : \"test\"})",
- 0,
+ kPointerSize,
2,
- 8,
+ 12,
{
- B(LdaConstant), U8(1), //
- B(StoreICSloppy), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
- {"key", "val"}},
+ {"val", "key"}},
{"function f(a) { a[100] = \"val\"; }\nf({100 : \"test\"})",
- kPointerSize,
+ 2 * kPointerSize,
2,
- 12,
+ 16,
{
- B(LdaSmi8), U8(100), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(KeyedStoreICSloppy), A(1, 2), R(0), //
- U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(100), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(KeyedStoreICSloppy), R(0), R(1), //
+ U8(vector->GetIndex(slot1)), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{"val"}},
{"function f(a, b) { a[b] = \"val\"; }\nf({arg : \"test\"}, \"arg\")",
- 0,
+ 2 * kPointerSize,
3,
- 8,
+ 16,
{
- B(LdaConstant), U8(0), //
- B(KeyedStoreICSloppy), A(1, 3), A(2, 3), //
- U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(KeyedStoreICSloppy), R(0), R(1), //
+ U8(vector->GetIndex(slot1)), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{"val"}},
{"function f(a) { a.name = a[-124]; }\n"
"f({\"-124\" : \"test\", name : 123 })",
- 0,
+ 2 * kPointerSize,
2,
- 11,
+ 19,
{
- B(LdaSmi8), U8(-124), //
- B(KeyedLoadICSloppy), A(1, 2), U8(vector->GetIndex(slot1)), //
- B(StoreICSloppy), A(1, 2), U8(0), U8(vector->GetIndex(slot2)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(-124), //
+ B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot1)), //
+ B(StoreICSloppy), R(0), U8(0), U8(vector->GetIndex(slot2)), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{"name"}},
{"function f(a) { \"use strict\"; a.name = \"val\"; }\n"
"f({name : \"test\"})",
- 0,
+ kPointerSize,
2,
- 8,
+ 12,
{
- B(LdaConstant), U8(1), //
- B(StoreICStrict), A(1, 2), U8(0), U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(StoreICStrict), R(0), U8(1), U8(vector->GetIndex(slot1)), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
- {"name", "val"}},
+ {"val", "name"}},
{"function f(a, b) { \"use strict\"; a[b] = \"val\"; }\n"
"f({arg : \"test\"}, \"arg\")",
- 0,
+ 2 * kPointerSize,
3,
- 8,
+ 16,
{
- B(LdaConstant), U8(0), //
- B(KeyedStoreICStrict), A(1, 3), A(2, 3), //
- U8(vector->GetIndex(slot1)), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(KeyedStoreICStrict), R(0), R(1), U8(vector->GetIndex(slot1)), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{"val"}},
{"function f(a) {\n"
+ "a.name = 1;"
REPEAT_127(SPACE, " a.name = 1; ")
" a.name = 2; }\n"
"f({name : \"test\"})\n",
- 0,
+ kPointerSize,
2,
- 772,
+ 1294,
{
- REPEAT_127(COMMA, //
- B(LdaSmi8), U8(1), //
- B(StoreICSloppy), A(1, 2), U8(0), U8((wide_idx_1 += 2))), //
- B(LdaSmi8), U8(2), //
- B(StoreICSloppyWide), A(1, 2), U16(0), U16(wide_idx_1 + 2), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(StoreICSloppy), R(0), U8(0), U8((wide_idx_1 += 2)), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(StoreICSloppy), R(0), U8(0), //
+ U8((wide_idx_1 += 2))), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(2), //
+ B(StoreICSloppyWide), R(0), U16(0), U16(wide_idx_1 + 2), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{"name"}},
{"function f(a) {\n"
- "'use strict';\n"
+ " 'use strict';\n"
+ " a.name = 1;"
REPEAT_127(SPACE, " a.name = 1; ")
" a.name = 2; }\n"
"f({name : \"test\"})\n",
- 0,
+ kPointerSize,
2,
- 772,
+ 1294,
{
- REPEAT_127(COMMA, //
- B(LdaSmi8), U8(1), //
- B(StoreICStrict), A(1, 2), U8(0), U8((wide_idx_2 += 2))), //
- B(LdaSmi8), U8(2), //
- B(StoreICStrictWide), A(1, 2), U16(0), U16(wide_idx_2 + 2), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(StoreICStrict), R(0), U8(0), U8(wide_idx_2 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(StoreICStrict), R(0), U8(0), //
+ U8((wide_idx_2 += 2))), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(2), //
+ B(StoreICStrictWide), R(0), U16(0), U16(wide_idx_2 + 2), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{"name"}},
{"function f(a, b) {\n"
- REPEAT_127(SPACE, " a[b] = 1; ")
+ " a[b] = 1;"
+ REPEAT_127(SPACE, " a[b] = 1; ")
" a[b] = 2; }\n"
"f({name : \"test\"})\n",
- 0,
+ 2 * kPointerSize,
3,
- 771,
- {
- REPEAT_127(COMMA, //
- B(LdaSmi8), U8(1), //
- B(KeyedStoreICSloppy), A(1, 3), A(2, 3), //
- U8((wide_idx_3 += 2))), //
- B(LdaSmi8), U8(2), //
- B(KeyedStoreICSloppyWide), A(1, 3), A(2, 3), //
- U16(wide_idx_3 + 2), //
- B(LdaUndefined), //
- B(Return), //
+ 1809,
+ {
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(KeyedStoreICSloppy), R(0), R(1), U8(wide_idx_3 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(KeyedStoreICSloppy), R(0), R(1), //
+ U8((wide_idx_3 += 2))), //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(KeyedStoreICSloppyWide), R(0), R(1), U16(wide_idx_3 + 2), //
+ B(LdaUndefined), //
+ B(Return), //
}},
{"function f(a, b) {\n"
- "'use strict';\n"
- REPEAT_127(SPACE, " a[b] = 1; ")
+ " 'use strict';\n"
+ " a[b] = 1;"
+ REPEAT_127(SPACE, " a[b] = 1; ")
" a[b] = 2; }\n"
"f({name : \"test\"})\n",
- 0,
+ 2 * kPointerSize,
3,
- 771,
- {
- REPEAT_127(COMMA, //
- B(LdaSmi8), U8(1), //
- B(KeyedStoreICStrict), A(1, 3), A(2, 3), //
- U8((wide_idx_4 += 2))), //
- B(LdaSmi8), U8(2), //
- B(KeyedStoreICStrictWide), A(1, 3), A(2, 3), //
- U16(wide_idx_4 + 2), //
- B(LdaUndefined), //
- B(Return), //
+ 1809,
+ {
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(KeyedStoreICStrict), R(0), R(1), U8(wide_idx_4 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(KeyedStoreICStrict), R(0), R(1), //
+ U8((wide_idx_4 += 2))), //
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
+ B(Ldar), A(2, 3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(KeyedStoreICStrictWide), R(0), R(1), U16(wide_idx_4 + 2), //
+ B(LdaUndefined), //
+ B(Return), //
}}};
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -1215,24 +1371,26 @@ TEST(PropertyCall) {
Zone zone;
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
- USE(slot1);
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // These are a hack used by the CallWide test below.
+ int wide_idx = vector->GetIndex(slot1) - 2;
+
ExpectedSnippet<const char*> snippets[] = {
{"function f(a) { return a.func(); }\nf(" FUNC_ARG ")",
2 * kPointerSize,
2,
- 15,
+ 16,
{
B(Ldar), A(1, 2), //
B(Star), R(1), //
B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
- B(Call), R(0), R(1), U8(0), //
+ B(Call), R(0), R(1), U8(0), U8(vector->GetIndex(slot1)), //
B(Return), //
},
1,
@@ -1240,7 +1398,7 @@ TEST(PropertyCall) {
{"function f(a, b, c) { return a.func(b, c); }\nf(" FUNC_ARG ", 1, 2)",
4 * kPointerSize,
4,
- 23,
+ 24,
{
B(Ldar), A(1, 4), //
B(Star), R(1), //
@@ -1250,7 +1408,7 @@ TEST(PropertyCall) {
B(Star), R(2), //
B(Ldar), A(3, 4), //
B(Star), R(3), //
- B(Call), R(0), R(1), U8(2), //
+ B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot1)), //
B(Return) //
},
1,
@@ -1258,22 +1416,49 @@ TEST(PropertyCall) {
{"function f(a, b) { return a.func(b + b, b); }\nf(" FUNC_ARG ", 1)",
4 * kPointerSize,
3,
- 25,
+ 30,
{
B(Ldar), A(1, 3), //
B(Star), R(1), //
B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
- B(Add), A(2, 3), //
+ B(Star), R(3), //
+ B(Ldar), A(2, 3), //
+ B(Add), R(3), //
B(Star), R(2), //
B(Ldar), A(2, 3), //
B(Star), R(3), //
- B(Call), R(0), R(1), U8(2), //
+ B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot1)), //
B(Return), //
},
1,
- {"func"}}};
+ {"func"}},
+ {"function f(a) {\n"
+ " a.func;\n"
+ REPEAT_127(SPACE, " a.func;\n")
+ " return a.func(); }\nf(" FUNC_ARG ")",
+ 2 * kPointerSize,
+ 2,
+ 1044,
+ {
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(wide_idx += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8((wide_idx += 2))), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(LoadICSloppyWide), R(1), U16(0), U16(wide_idx + 4), //
+ B(Star), R(0), //
+ B(CallWide), R(0), R(1), U16(0), U16(wide_idx + 2), //
+ B(Return), //
+ },
+ 1,
+ {"func"}},
+ };
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
@@ -1338,32 +1523,48 @@ TEST(LoadGlobal) {
},
1,
{"a"}},
- {"a = 1; function f(b) {\n"
- REPEAT_127(SPACE, "b.name; ")
- " return a; }\nf({name: 1});",
- 0,
+ {"a = 1;"
+ "function f(b) {\n"
+ " b.name;\n"
+ REPEAT_127(SPACE, "b.name; ")
+ " return a;"
+ "}\nf({name: 1});",
+ kPointerSize,
2,
- 514,
+ 1030,
{
- REPEAT_127(COMMA, //
- B(LoadICSloppy), A(1, 2), U8(0), U8(wide_idx_1 += 2)), //
- B(LdaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2)), //
+ B(LdaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
+ B(Return), //
},
2,
{"name", "a"}},
- {"a = 1; function f(b) {\n"
+ {"a = 1;"
+ "function f(b) {\n"
" 'use strict';\n"
- REPEAT_127(SPACE, "b.name; ")
- " return a; }\nf({name: 1});",
- 0,
+ " b.name\n"
+ REPEAT_127(SPACE, "b.name; ")
+ " return a;"
+ "}\nf({name: 1});",
+ kPointerSize,
2,
- 514,
+ 1030,
{
- REPEAT_127(COMMA, //
- B(LoadICStrict), A(1, 2), U8(0), U8(wide_idx_2 += 2)), //
- B(LdaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2)), //
+ B(LdaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
+ B(Return), //
},
2,
{"name", "a"}},
@@ -1441,36 +1642,52 @@ TEST(StoreGlobal) {
},
1,
{"a"}},
- {"a = 1; function f(b) {\n"
+ {"a = 1;"
+ "function f(b) {"
+ " b.name;\n"
REPEAT_127(SPACE, "b.name; ")
- " a = 2; }\nf({name: 1});",
- 0,
+ " a = 2; }\n"
+ "f({name: 1});",
+ kPointerSize,
2,
- 517,
+ 1033,
{
- REPEAT_127(COMMA, //
- B(LoadICSloppy), A(1, 2), U8(0), U8(wide_idx_1 += 2)), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2)), //
+ B(LdaSmi8), U8(2), //
+ B(StaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
{"name", "a"}},
- {"a = 1; function f(b) {\n"
+ {"a = 1;"
+ "function f(b) {\n"
" 'use strict';\n"
+ " b.name;\n"
REPEAT_127(SPACE, "b.name; ")
- " a = 2; }\nf({name: 1});",
- 0,
+ " a = 2; }\n"
+ "f({name: 1});",
+ kPointerSize,
2,
- 517,
+ 1033,
{
- REPEAT_127(COMMA, //
- B(LoadICStrict), A(1, 2), U8(0), U8(wide_idx_2 += 2)), //
- B(LdaSmi8), U8(2), //
- B(StaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2), //
+ REPEAT_127(COMMA, //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2)), //
+ B(LdaSmi8), U8(2), //
+ B(StaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
{"name", "a"}},
@@ -1490,9 +1707,8 @@ TEST(CallGlobal) {
Zone zone;
FeedbackVectorSpec feedback_spec(&zone);
- FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
- USE(slot1);
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
@@ -1501,34 +1717,34 @@ TEST(CallGlobal) {
{"function t() { }\nfunction f() { return t(); }\nf()",
2 * kPointerSize,
1,
- 13,
+ 14,
{
- B(LdaUndefined), //
- B(Star), R(1), //
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(Call), R(0), R(1), U8(0), //
- B(Return) //
+ B(LdaUndefined), //
+ B(Star), R(1), //
+ B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(Star), R(0), //
+ B(Call), R(0), R(1), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Return) //
},
1,
{"t"}},
{"function t(a, b, c) { }\nfunction f() { return t(1, 2, 3); }\nf()",
5 * kPointerSize,
1,
- 25,
+ 26,
{
- B(LdaUndefined), //
- B(Star), R(1), //
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(LdaSmi8), U8(2), //
- B(Star), R(3), //
- B(LdaSmi8), U8(3), //
- B(Star), R(4), //
- B(Call), R(0), R(1), U8(3), //
- B(Return) //
+ B(LdaUndefined), //
+ B(Star), R(1), //
+ B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(4), //
+ B(Call), R(0), R(1), U8(3), U8(vector->GetIndex(slot1)), //
+ B(Return) //
},
1,
{"t"}},
@@ -1589,12 +1805,11 @@ TEST(CallRuntime) {
"function f() { return %spread_iterable([1]) }\nf()",
2 * kPointerSize,
1,
- 16,
+ 15,
{
B(LdaUndefined), //
B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(0), U8(3), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
B(Star), R(1), //
B(CallJSRuntime), U16(Context::SPREAD_ITERABLE_INDEX), R(0), //
U8(1), //
@@ -1661,16 +1876,17 @@ TEST(IfConditions) {
0,
{unused, unused, unused, unused, unused, unused}},
{"function f() { var a = 1; if (a) { a += 1; } else { return 2; } } f();",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 21,
+ 23,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(JumpIfToBooleanFalse), U8(14), //
B(Ldar), R(0), //
- B(JumpIfToBooleanFalse), U8(10), //
+ B(Star), R(1), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(1), //
B(Star), R(0), //
B(Jump), U8(5), //
B(LdaSmi8), U8(2), //
@@ -1682,19 +1898,21 @@ TEST(IfConditions) {
{unused, unused, unused, unused, unused, unused}},
{"function f(a) { if (a <= 0) { return 200; } else { return -200; } }"
"f(99);",
- 0,
+ kPointerSize,
2,
- 13,
+ 17,
{
- B(LdaZero), //
- B(TestLessThanOrEqual), A(1, 2), //
- B(JumpIfFalse), U8(5), //
- B(LdaConstant), U8(0), //
- B(Return), //
- B(LdaConstant), U8(1), //
- B(Return), //
- B(LdaUndefined), //
- B(Return), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(TestLessThanOrEqual), R(0), //
+ B(JumpIfFalse), U8(5), //
+ B(LdaConstant), U8(0), //
+ B(Return), //
+ B(LdaConstant), U8(1), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
{helper.factory()->NewNumberFromInt(200),
@@ -1702,12 +1920,14 @@ TEST(IfConditions) {
unused}},
{"function f(a, b) { if (a in b) { return 200; } }"
"f('prop', { prop: 'yes'});",
- 0,
+ kPointerSize,
3,
- 11,
+ 15,
{
+ B(Ldar), A(1, 3), //
+ B(Star), R(0), //
B(Ldar), A(2, 3), //
- B(TestIn), A(1, 3), //
+ B(TestIn), R(0), //
B(JumpIfFalse), U8(5), //
B(LdaConstant), U8(0), //
B(Return), //
@@ -1718,23 +1938,24 @@ TEST(IfConditions) {
{helper.factory()->NewNumberFromInt(200), unused, unused, unused, unused,
unused}},
{"function f(z) { var a = 0; var b = 0; if (a === 0.01) { "
- REPEAT_32(SPACE, "b = a; a = b; ")
+ REPEAT_64(SPACE, "b = a; a = b; ")
" return 200; } else { return -200; } } f(0.001)",
- 2 * kPointerSize,
+ 3 * kPointerSize,
2,
- 276,
+ 282,
{
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaConstant), U8(0), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfFalseConstant), U8(2), //
- REPEAT_32(COMMA, //
- B(Ldar), R(0), //
+ B(Ldar), R(0), //
+ REPEAT_64(COMMA, //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0)), //
B(LdaConstant), U8(1), //
B(Return), //
@@ -1745,14 +1966,14 @@ TEST(IfConditions) {
4,
{helper.factory()->NewHeapNumber(0.01),
helper.factory()->NewNumberFromInt(200),
- helper.factory()->NewNumberFromInt(261),
+ helper.factory()->NewNumberFromInt(263),
helper.factory()->NewNumberFromInt(-200), unused, unused}},
{"function f() { var a = 0; var b = 0; if (a) { "
- REPEAT_32(SPACE, "b = a; a = b; ")
+ REPEAT_64(SPACE, "b = a; a = b; ")
" return 200; } else { return -200; } } f()",
2 * kPointerSize,
1,
- 274,
+ 276,
{
B(LdaZero), //
B(Star), R(0), //
@@ -1760,10 +1981,9 @@ TEST(IfConditions) {
B(Star), R(1), //
B(Ldar), R(0), //
B(JumpIfToBooleanFalseConstant), U8(1), //
- REPEAT_32(COMMA, //
- B(Ldar), R(0), //
+ B(Ldar), R(0), //
+ REPEAT_64(COMMA, //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0)), //
B(LdaConstant), U8(0), //
B(Return), //
@@ -1773,7 +1993,7 @@ TEST(IfConditions) {
B(Return)}, //
3,
{helper.factory()->NewNumberFromInt(200),
- helper.factory()->NewNumberFromInt(261),
+ helper.factory()->NewNumberFromInt(263),
helper.factory()->NewNumberFromInt(-200), unused, unused, unused}},
{"function f(a, b) {\n"
@@ -1787,13 +2007,15 @@ TEST(IfConditions) {
" if (a instanceof b) { return 1; }\n"
" return 0;\n"
"} f(1, 1);",
- 0,
+ kPointerSize,
3,
- 74,
+ 106,
{
#define IF_CONDITION_RETURN(condition) \
+ B(Ldar), A(1, 3), \
+ B(Star), R(0), \
B(Ldar), A(2, 3), \
- B(condition), A(1, 3), \
+ B(condition), R(0), \
B(JumpIfFalse), U8(5), \
B(LdaSmi8), U8(1), \
B(Return),
@@ -1820,11 +2042,10 @@ TEST(IfConditions) {
"f();",
1 * kPointerSize,
1,
- 15,
+ 13,
{
B(LdaZero), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanFalse), U8(5), //
B(LdaSmi8), U8(20), //
B(Return), //
@@ -1834,8 +2055,7 @@ TEST(IfConditions) {
B(Return)
},
0,
- {unused, unused, unused, unused, unused, unused}}
- };
+ {unused, unused, unused, unused, unused, unused}}};
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -1861,6 +2081,7 @@ TEST(DeclareGlobals) {
FeedbackVectorSpec feedback_spec_loads(&zone);
FeedbackVectorSlot load_slot_1 = feedback_spec_loads.AddLoadICSlot();
+ FeedbackVectorSlot call_slot_1 = feedback_spec_loads.AddCallICSlot();
Handle<i::TypeFeedbackVector> load_vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec_loads);
@@ -1907,7 +2128,7 @@ TEST(DeclareGlobals) {
{"var a = 1;\na=2;",
4 * kPointerSize,
1,
- 38,
+ 36,
{
B(LdaConstant), U8(0), //
B(Star), R(1), //
@@ -1925,7 +2146,6 @@ TEST(DeclareGlobals) {
B(StaGlobalSloppy), U8(1), //
U8(store_vector->GetIndex(store_slot_2)), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return) //
},
2,
@@ -1934,22 +2154,22 @@ TEST(DeclareGlobals) {
{"function f() {}\nf();",
3 * kPointerSize,
1,
- 29,
+ 28,
{
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(LdaZero), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
- B(LdaUndefined), //
- B(Star), R(2), //
- B(LdaGlobalSloppy), U8(1), //
- U8(load_vector->GetIndex(load_slot_1)), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(0), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Return) //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(LdaZero), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
+ B(LdaUndefined), //
+ B(Star), R(2), //
+ B(LdaGlobalSloppy), U8(1), //
+ U8(load_vector->GetIndex(load_slot_1)), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(0), //
+ U8(load_vector->GetIndex(call_slot_1)), //
+ B(Star), R(0), //
+ B(Return) //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
@@ -1964,42 +2184,178 @@ TEST(DeclareGlobals) {
}
+TEST(BreakableBlocks) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<int> snippets[] = {
+ {"var x = 0;\n"
+ "label: {\n"
+ " x = x + 1;\n"
+ " break label;\n"
+ " x = x + 1;\n"
+ "}\n"
+ "return x;",
+ 2 * kPointerSize,
+ 1,
+ 16,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(2), //
+ B(Ldar), R(0), //
+ B(Return) //
+ }},
+ {"var sum = 0;\n"
+ "outer: {\n"
+ " for (var x = 0; x < 10; ++x) {\n"
+ " for (var y = 0; y < 3; ++y) {\n"
+ " ++sum;\n"
+ " if (x + y == 12) { break outer; }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ 5 * kPointerSize,
+ 1,
+ 72,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(10), //
+ B(TestLessThan), R(3), //
+ B(JumpIfFalse), U8(55), //
+ B(LdaZero), //
+ B(Star), R(2), //
+ B(Ldar), R(2), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(3), //
+ B(TestLessThan), R(3), //
+ B(JumpIfFalse), U8(34), //
+ B(Ldar), R(0), //
+ B(ToNumber), //
+ B(Inc), //
+ B(Star), R(0), //
+ B(Ldar), R(1), //
+ B(Star), R(3), //
+ B(Ldar), R(2), //
+ B(Add), R(3), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(12), //
+ B(TestEqual), R(4), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(18), //
+ B(Ldar), R(2), //
+ B(ToNumber), //
+ B(Inc), //
+ B(Star), R(2), //
+ B(Jump), U8(-40), //
+ B(Ldar), R(1), //
+ B(ToNumber), //
+ B(Inc), //
+ B(Star), R(1), //
+ B(Jump), U8(-61), //
+ B(Ldar), R(0), //
+ B(Return), //
+ }},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
TEST(BasicLoops) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
ExpectedSnippet<int> snippets[] = {
+ {"var x = 0;\n"
+ "while (false) { x = 99; break; continue; }\n"
+ "return x;",
+ 1 * kPointerSize,
+ 1,
+ 4,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Return) //
+ }},
+ {"var x = 0;"
+ "while (false) {"
+ " x = x + 1;"
+ "};"
+ "return x;",
+ 1 * kPointerSize,
+ 1,
+ 4,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Return), //
+ },
+ 0},
{"var x = 0;"
"var y = 1;"
"while (x < 10) {"
" y = y * 12;"
" x = x + 1;"
+ " if (x == 3) continue;"
+ " if (x == 4) break;"
"}"
"return y;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 30,
+ 64,
{
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Jump), U8(14), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(10), //
+ B(TestLessThan), R(2), //
+ B(JumpIfFalse), U8(46), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
B(LdaSmi8), U8(12), //
- B(Mul), R(1), //
+ B(Mul), R(2), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(2), //
B(Star), R(0), //
- B(LdaSmi8), U8(10), //
- B(TestLessThan), R(0), //
- B(JumpIfTrue), U8(-16), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(3), //
+ B(TestEqual), R(2), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(-38), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(4), //
+ B(TestEqual), R(2), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(4), //
+ B(Jump), U8(-52), //
B(Ldar), R(1), //
B(Return), //
},
0},
{"var i = 0;"
- "while(true) {"
+ "while (true) {"
" if (i < 0) continue;"
" if (i == 3) break;"
" if (i == 4) break;"
@@ -2008,130 +2364,54 @@ TEST(BasicLoops) {
" i = i + 1;"
"}"
"return i;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 53,
+ 77,
{
B(LdaZero), //
B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaZero), //
- B(TestLessThan), R(0), //
+ B(TestLessThan), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(40), //
+ B(Jump), U8(-9), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(34), //
+ B(Jump), U8(50), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(4), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(26), //
+ B(Jump), U8(38), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(10), //
- B(TestEqual), R(0), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(16), //
- B(LdaSmi8), U8(5), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(10), //
- B(LdaSmi8), U8(1), //
- B(Add), R(0), //
- B(Star), R(0), //
B(Jump), U8(-45), //
B(Ldar), R(0), //
- B(Return), //
- },
- 0},
- {"var x = 0; var y = 1;"
- "do {"
- " y = y * 10;"
- " if (x == 5) break;"
- " if (x == 6) continue;"
- " x = x + 1;"
- "} while (x < 10);"
- "return y;",
- 2 * kPointerSize,
- 1,
- 44,
- {
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(10), //
- B(Mul), R(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(5), //
- B(TestEqual), R(0), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(22), //
- B(LdaSmi8), U8(6), //
- B(TestEqual), R(0), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(1), //
- B(Add), R(0), //
- B(Star), R(0), //
- B(LdaSmi8), U8(10), //
- B(TestLessThan), R(0), //
- B(JumpIfTrue), U8(-32), //
- B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 0; "
- "for(;;) {"
- " if (x == 1) break;"
- " x = x + 1;"
- "}",
- 1 * kPointerSize,
- 1,
- 21,
- {
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(TestEqual), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(5), //
+ B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(10), //
+ B(Jump), U8(14), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-14), //
- B(LdaUndefined), //
+ B(Jump), U8(-69), //
+ B(Ldar), R(0), //
B(Return), //
},
0},
- {"var u = 0;"
- "for(var i = 0; i < 100; i = i + 1) {"
- " u = u + 1;"
- " continue;"
- "}",
- 2 * kPointerSize,
- 1,
- 30,
- {
- B(LdaZero), //
- B(Star), R(0), //
- B(LdaZero), //
- B(Star), R(1), //
- B(Jump), U8(16), //
- B(LdaSmi8), U8(1), //
- B(Add), R(0), //
- B(Star), R(0), //
- B(Jump), U8(2), //
- B(LdaSmi8), U8(1), //
- B(Add), R(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(100), //
- B(TestLessThan), R(1), //
- B(JumpIfTrue), U8(-18), //
- B(LdaUndefined), //
- B(Return), //
- },
- 0},
{"var i = 0;"
- "while(true) {"
+ "while (true) {"
" while (i < 3) {"
" if (i == 2) break;"
" i = i + 1;"
@@ -2140,28 +2420,36 @@ TEST(BasicLoops) {
" break;"
"}"
"return i;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 38,
+ 54,
{
B(LdaZero), //
B(Star), R(0), //
- B(Jump), U8(16), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(3), //
+ B(TestLessThan), R(1), //
+ B(JumpIfFalse), U8(26), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(2), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
B(Jump), U8(14), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(1), //
B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(TestLessThan), R(0), //
- B(JumpIfTrue), U8(-18), //
+ B(Jump), U8(-32), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(1), //
B(Star), R(0), //
B(Jump), U8(4), //
- B(Jump), U8(-30), //
+ B(Jump), U8(-46), //
B(Ldar), R(0), //
B(Return), //
},
@@ -2173,78 +2461,106 @@ TEST(BasicLoops) {
" x = x - 1;"
"}"
"return y;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 29,
+ 37,
{
B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Jump), U8(14), //
- B(LdaSmi8), U8(12), //
- B(Mul), R(1), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Sub), R(0), //
- B(Star), R(0), //
B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(-14), //
+ B(JumpIfToBooleanFalse), U8(24), //
B(Ldar), R(1), //
- B(Return), //
- },
- 0},
- {"var x = 10;"
- "var y = 1;"
- "do {"
- " y = y * 12;"
- " x = x - 1;"
- "} while(x);"
- "return y;",
- 2 * kPointerSize,
- 1,
- 27,
- {
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(1), //
+ B(Star), R(2), //
B(LdaSmi8), U8(12), //
- B(Mul), R(1), //
+ B(Mul), R(2), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(Sub), R(0), //
+ B(Sub), R(2), //
B(Star), R(0), //
- B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(-14), //
+ B(Jump), U8(-24), //
B(Ldar), R(1), //
B(Return), //
- },
+ },
0},
- {"var y = 1;"
- "for (var x = 10; x; --x) {"
+ {"var x = 0; var y = 1;"
+ "do {"
+ " y = y * 10;"
+ " if (x == 5) break;"
+ " if (x == 6) continue;"
+ " x = x + 1;"
+ "} while (x < 10);"
+ "return y;",
+ 3 * kPointerSize,
+ 1,
+ 64,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(10), //
+ B(Mul), R(2), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(5), //
+ B(TestEqual), R(2), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(34), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(6), //
+ B(TestEqual), R(2), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(12), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(2), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(10), //
+ B(TestLessThan), R(2), //
+ B(JumpIfTrue), U8(-52), //
+ B(Ldar), R(1), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 10;"
+ "var y = 1;"
+ "do {"
" y = y * 12;"
- "}"
+ " x = x - 1;"
+ "} while (x);"
"return y;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 29,
+ 35,
{
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
B(LdaSmi8), U8(10), //
- B(Star), R(1), //
- B(Jump), U8(14), //
- B(LdaSmi8), U8(12), //
- B(Mul), R(0), //
B(Star), R(0), //
- B(Ldar), R(1), //
- B(ToNumber), //
- B(Dec), //
+ B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Ldar), R(1), //
- B(JumpIfToBooleanTrue), U8(-14), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(12), //
+ B(Mul), R(2), //
+ B(Star), R(1), //
B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Sub), R(2), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(JumpIfToBooleanTrue), U8(-22), //
+ B(Ldar), R(1), //
B(Return), //
},
0},
@@ -2256,26 +2572,33 @@ TEST(BasicLoops) {
" if (x == 6) continue;"
"} while (false);"
"return y;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 38,
+ 52,
{
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
B(LdaSmi8), U8(10), //
- B(Mul), R(1), //
+ B(Mul), R(2), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(5), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(16), //
+ B(Jump), U8(22), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(2), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(6), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
B(Jump), U8(2), //
B(Ldar), R(1), //
@@ -2290,50 +2613,237 @@ TEST(BasicLoops) {
" if (x == 6) continue;"
"} while (true);"
"return y;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 40,
+ 54,
{
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
B(LdaSmi8), U8(10), //
- B(Mul), R(1), //
+ B(Mul), R(2), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(5), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(18), //
+ B(Jump), U8(24), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(2), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(6), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(2), //
- B(Jump), U8(-28), //
+ B(Jump), U8(-40), //
+ B(Jump), U8(-42), //
B(Ldar), R(1), //
B(Return), //
},
0},
- {"var x = 0;"
- "while(false) {"
+ {"var x = 0; "
+ "for (;;) {"
+ " if (x == 1) break;"
+ " if (x == 2) continue;"
" x = x + 1;"
- "};"
- "return x;",
- 1 * kPointerSize,
+ "}",
+ 2 * kPointerSize,
1,
- 6,
+ 41,
{
- B(LdaZero), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Return), //
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(26), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(-22), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(-34), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 0},
+ {"for (var x = 0;;) {"
+ " if (x == 1) break;"
+ " if (x == 2) continue;"
+ " x = x + 1;"
+ "}",
+ 2 * kPointerSize,
+ 1,
+ 41,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(26), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(-22), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(-34), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 0; "
+ "for (;; x = x + 1) {"
+ " if (x == 1) break;"
+ " if (x == 2) continue;"
+ "}",
+ 2 * kPointerSize,
+ 1,
+ 41,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(26), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(2), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(-34), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 0},
+ {"for (var x = 0;; x = x + 1) {"
+ " if (x == 1) break;"
+ " if (x == 2) continue;"
+ "}",
+ 2 * kPointerSize,
+ 1,
+ 41,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(26), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(TestEqual), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(2), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(-34), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 0},
+ {"var u = 0;"
+ "for (var i = 0; i < 100; i = i + 1) {"
+ " u = u + 1;"
+ " continue;"
+ "}",
+ 3 * kPointerSize,
+ 1,
+ 42,
+ {
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(100), //
+ B(TestLessThan), R(2), //
+ B(JumpIfFalse), U8(26), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(2), //
+ B(Star), R(0), //
+ B(Jump), U8(2), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(2), //
+ B(Star), R(1), //
+ B(Jump), U8(-32), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 0},
+ {"var y = 1;"
+ "for (var x = 10; x; --x) {"
+ " y = y * 12;"
+ "}"
+ "return y;",
+ 3 * kPointerSize,
+ 1,
+ 33,
+ {
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(JumpIfToBooleanFalse), U8(20), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(12), //
+ B(Mul), R(2), //
+ B(Star), R(0), //
+ B(Ldar), R(1), //
+ B(ToNumber), //
+ B(Dec), //
+ B(Star), R(1), //
+ B(Jump), U8(-20), //
+ B(Ldar), R(0), //
+ B(Return), //
},
0},
{"var x = 0;"
- "for( var i = 0; false; i++) {"
+ "for (var i = 0; false; i++) {"
" x = x + 1;"
"};"
"return x;",
@@ -2350,31 +2860,34 @@ TEST(BasicLoops) {
},
0},
{"var x = 0;"
- "for( var i = 0; true; ++i) {"
+ "for (var i = 0; true; ++i) {"
" x = x + 1;"
" if (x == 20) break;"
"};"
"return x;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 31,
+ 37,
{
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(2), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(20), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
B(Jump), U8(10), //
B(Ldar), R(1), //
B(ToNumber), //
B(Inc), //
B(Star), R(1), //
- B(Jump), U8(-20), //
+ B(Jump), U8(-26), //
B(Ldar), R(0), //
B(Return), //
},
@@ -2389,6 +2902,83 @@ TEST(BasicLoops) {
}
+TEST(JumpsRequiringConstantWideOperands) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ int constant_count = 0;
+ ExpectedSnippet<Handle<Object>, 316> snippets[] = {
+ {
+ REPEAT_256(SPACE, "var x = 0.1;")
+ REPEAT_32(SPACE, "var x = 0.2;")
+ REPEAT_16(SPACE, "var x = 0.3;")
+ REPEAT_8(SPACE, "var x = 0.4;")
+ "for (var i = 0; i < 3; i++) {\n"
+ " if (i == 1) continue;\n"
+ " if (i == 2) break;\n"
+ "}\n"
+ "return 3;",
+ kPointerSize * 3,
+ 1,
+ 1359,
+ {
+#define L(c) B(LdaConstant), U8(c), B(Star), R(0)
+ REPEAT_256(COMMA, L(constant_count++)),
+#undef L
+#define LW(c) B(LdaConstantWide), U16I(c), B(Star), R(0)
+ REPEAT_32(COMMA, LW(constant_count)),
+ REPEAT_16(COMMA, LW(constant_count)),
+ REPEAT_8(COMMA, LW(constant_count)),
+#undef LW
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(3), //
+ B(TestLessThan), R(2), //
+ B(JumpIfFalseConstantWide), U16(313), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(TestEqual), R(2), //
+ B(JumpIfFalseConstantWide), U16(312), //
+ B(JumpConstantWide), U16(315), //
+ B(Ldar), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(2), //
+ B(TestEqual), R(2), //
+ B(JumpIfFalseConstantWide), U16(312), //
+ B(JumpConstantWide), U16(314), //
+ B(Ldar), R(1), //
+ B(ToNumber), //
+ B(Star), R(2), //
+ B(Inc), //
+ B(Star), R(1), //
+ B(Jump), U8(-47), //
+ B(LdaSmi8), U8(3), //
+ B(Return) //
+ },
+ 316,
+ {
+#define S(x) CcTest::i_isolate()->factory()->NewNumber(x)
+ REPEAT_256(COMMA, S(0.1)),
+ REPEAT_32(COMMA, S(0.2)),
+ REPEAT_16(COMMA, S(0.3)),
+ REPEAT_8(COMMA, S(0.4)),
+#undef S
+#define N(x) CcTest::i_isolate()->factory()->NewNumberFromInt(x)
+ N(6), N(41), N(13), N(17)
+#undef N
+ }}};
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
TEST(UnaryOperators) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
@@ -2399,20 +2989,24 @@ TEST(UnaryOperators) {
" x = x + 10;"
"}"
"return x;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 21,
+ 29,
{
B(LdaZero), //
B(Star), R(0), //
- B(Jump), U8(8), //
- B(LdaSmi8), U8(10), //
- B(Add), R(0), //
- B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(10), //
- B(TestEqual), R(0), //
+ B(TestEqual), R(1), //
B(LogicalNot), //
- B(JumpIfTrue), U8(-11), //
+ B(JumpIfFalse), U8(14), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(10), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(-21), //
B(Ldar), R(0), //
B(Return), //
},
@@ -2422,32 +3016,35 @@ TEST(UnaryOperators) {
" x = !x;"
"} while(x == false);"
"return x;",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 16,
+ 20,
{
B(LdaFalse), //
B(Star), R(0), //
B(Ldar), R(0), //
B(LogicalNot), //
B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
B(LdaFalse), //
- B(TestEqual), R(0), //
- B(JumpIfTrue), U8(-8), //
+ B(TestEqual), R(1), //
+ B(JumpIfTrue), U8(-12), //
B(Ldar), R(0), //
B(Return), //
},
0},
{"var x = 101;"
"return void(x * 3);",
- kPointerSize,
+ 2 * kPointerSize,
1,
- 10,
+ 12,
{
B(LdaSmi8), U8(101), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(3), //
- B(Mul), R(0), //
+ B(Mul), R(1), //
B(LdaUndefined), //
B(Return), //
},
@@ -2455,60 +3052,63 @@ TEST(UnaryOperators) {
{"var x = 1234;"
"var y = void (x * x - 1);"
"return y;",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
20,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(Ldar), R(0), //
- B(Mul), R(0), //
B(Star), R(2), //
+ B(Ldar), R(0), //
+ B(Mul), R(2), //
+ B(Star), R(3), //
B(LdaSmi8), U8(1), //
- B(Sub), R(2), //
+ B(Sub), R(3), //
B(LdaUndefined), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Return), //
},
1,
{1234}},
{"var x = 13;"
"return ~x;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{
B(LdaSmi8), U8(13), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(-1), //
- B(BitwiseXor), R(0), //
+ B(BitwiseXor), R(1), //
B(Return), //
},
0},
{"var x = 13;"
"return +x;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{
B(LdaSmi8), U8(13), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(1), //
- B(Mul), R(0), //
+ B(Mul), R(1), //
B(Return), //
},
0},
{"var x = 13;"
"return -x;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 11,
{
B(LdaSmi8), U8(13), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(-1), //
- B(Mul), R(0), //
+ B(Mul), R(1), //
B(Return), //
},
0}};
@@ -2539,11 +3139,10 @@ TEST(Typeof) {
"}; f();",
kPointerSize,
1,
- 8,
+ 6,
{
B(LdaSmi8), U8(13), //
- B(Star), R(0), // TODO(oth): Ldar R(X) following Star R(X)
- B(Ldar), R(0), // could be culled in bytecode array builder.
+ B(Star), R(0), //
B(TypeOf), //
B(Return), //
}},
@@ -2599,45 +3198,45 @@ TEST(Delete) {
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = {x:13, y:14}; return delete a.x;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 12,
+ 13,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(DeletePropertySloppy), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
+ B(DeletePropertySloppy), R(1), //
B(Return)
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"'use strict'; var a = {x:13, y:14}; return delete a.x;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 12,
+ 13,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(DeletePropertyStrict), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
+ B(DeletePropertyStrict), R(1), //
B(Return)
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = {1:13, 2:14}; return delete a[2];",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 12,
+ 13,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(DeletePropertySloppy), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(DeletePropertySloppy), R(1), //
B(Return)
},
1,
@@ -2659,20 +3258,18 @@ TEST(Delete) {
"return delete a[1];",
2 * kPointerSize,
1,
- 29,
+ 27,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaConstant), U8(1), //
- B(CreateClosure), U8(0), //
- B(LdaContextSlot), R(0), U8(first_context_slot), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(DeletePropertyStrict), R(1), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(LdaContextSlot), R(0), U8(first_context_slot), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(DeletePropertyStrict), R(1), //
B(Return)
},
2,
@@ -2703,7 +3300,8 @@ TEST(GlobalDelete) {
Zone zone;
int context = Register::function_context().index();
- int global_object_index = Context::GLOBAL_OBJECT_INDEX;
+ int native_context_index = Context::NATIVE_CONTEXT_INDEX;
+ int global_context_index = Context::EXTENSION_INDEX;
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
@@ -2715,13 +3313,11 @@ TEST(GlobalDelete) {
1 * kPointerSize,
1,
10,
- {
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(DeletePropertySloppy), R(0), //
- B(Return)
- },
+ {B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(1), //
+ B(DeletePropertySloppy), R(0), //
+ B(Return)},
2,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
@@ -2730,39 +3326,37 @@ TEST(GlobalDelete) {
1 * kPointerSize,
1,
10,
- {
- B(LdaGlobalStrict), U8(0), U8(vector->GetIndex(slot)), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(DeletePropertyStrict), R(0), //
- B(Return)
- },
+ {B(LdaGlobalStrict), U8(0), U8(vector->GetIndex(slot)), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(DeletePropertyStrict), R(0), //
+ B(Return)},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = {x:13, y:14};\n function f() { return delete a; };\n f();",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 10,
- {
- B(LdaContextSlot), R(context), U8(global_object_index), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(DeletePropertySloppy), R(0), //
- B(Return)
- },
+ 15,
+ {B(LdaContextSlot), R(context), U8(native_context_index), //
+ B(Star), R(0), //
+ B(LdaContextSlot), R(0), U8(global_context_index), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(DeletePropertySloppy), R(1), //
+ B(Return)},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"b = 30;\n function f() { return delete b; };\n f();",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 10,
- {
- B(LdaContextSlot), R(context), U8(global_object_index), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(DeletePropertySloppy), R(0), //
- B(Return)
- },
+ 15,
+ {B(LdaContextSlot), R(context), U8(native_context_index), //
+ B(Star), R(0), //
+ B(LdaContextSlot), R(0), U8(global_context_index), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(DeletePropertySloppy), R(1), //
+ B(Return)},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}}};
@@ -2777,16 +3371,22 @@ TEST(GlobalDelete) {
TEST(FunctionLiterals) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddCallICSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
ExpectedSnippet<InstanceType> snippets[] = {
{"return function(){ }",
0,
1,
- 5,
+ 4,
{
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
- B(Return) //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Return) //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
@@ -2795,13 +3395,12 @@ TEST(FunctionLiterals) {
1,
14,
{
- B(LdaUndefined), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
- B(Star), R(0), //
- B(Call), R(0), R(1), U8(0), //
- B(Return) //
+ B(LdaUndefined), //
+ B(Star), R(1), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(Call), R(0), R(1), U8(0), U8(vector->GetIndex(slot)), //
+ B(Return) //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
@@ -2810,15 +3409,14 @@ TEST(FunctionLiterals) {
1,
18,
{
- B(LdaUndefined), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
- B(Star), R(0), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(Call), R(0), R(1), U8(1), //
- B(Return) //
+ B(LdaUndefined), //
+ B(Star), R(1), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(2), //
+ B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot)), //
+ B(Return) //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
@@ -2838,58 +3436,82 @@ TEST(RegExpLiterals) {
Zone zone;
FeedbackVectorSpec feedback_spec(&zone);
- feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
FeedbackVectorSlot slot2 = feedback_spec.AddLoadICSlot();
+ uint8_t i_flags = JSRegExp::kIgnoreCase;
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
ExpectedSnippet<const char*> snippets[] = {
{"return /ab+d/;",
- 1 * kPointerSize,
+ 0 * kPointerSize,
1,
- 10,
+ 5,
{
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateRegExpLiteral), U8(0), R(0), //
- B(Return), //
+ B(CreateRegExpLiteral), U8(0), U8(0), U8(0), //
+ B(Return), //
},
- 2,
- {"", "ab+d"}},
+ 1,
+ {"ab+d"}},
{"return /(\\w+)\\s(\\w+)/i;",
- 1 * kPointerSize,
+ 0 * kPointerSize,
1,
- 10,
+ 5,
{
- B(LdaConstant), U8(0), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateRegExpLiteral), U8(0), R(0), //
- B(Return), //
+ B(CreateRegExpLiteral), U8(0), U8(0), U8(i_flags), //
+ B(Return), //
},
- 2,
- {"i", "(\\w+)\\s(\\w+)"}},
+ 1,
+ {"(\\w+)\\s(\\w+)"}},
{"return /ab+d/.exec('abdd');",
3 * kPointerSize,
1,
- 26,
+ 22,
{
- B(LdaConstant), U8(0), //
- B(Star), R(2), //
- B(LdaConstant), U8(1), //
- B(CreateRegExpLiteral), U8(0), R(2), //
+ B(CreateRegExpLiteral), U8(0), U8(0), U8(0), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(2), U8(vector->GetIndex(slot2)), //
+ B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
- B(LdaConstant), U8(3), //
+ B(LdaConstant), U8(2), //
B(Star), R(2), //
- B(Call), R(0), R(1), U8(1), //
+ B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot1)), //
B(Return), //
},
- 4,
- {"", "ab+d", "exec", "abdd"}},
+ 3,
+ {"ab+d", "exec", "abdd"}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(RegExpLiteralsWide) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ int wide_idx = 0;
+
+ ExpectedSnippet<InstanceType, 257> snippets[] = {
+ {"var a;" REPEAT_256(SPACE, "a = 1.23;") "return /ab+d/;",
+ 1 * kPointerSize,
+ 1,
+ 1031,
+ {
+ REPEAT_256(COMMA, //
+ B(LdaConstant), U8(wide_idx++), //
+ B(Star), R(0)), //
+ B(CreateRegExpLiteralWide), U16(256), U16(0), U8(0), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
for (size_t i = 0; i < arraysize(snippets); i++) {
@@ -2920,23 +3542,21 @@ TEST(ArrayLiterals) {
{"return [ 1, 2 ];",
0,
1,
- 6,
+ 5,
{
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(0), U8(simple_flags), //
- B(Return) //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(simple_flags), //
+ B(Return) //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"var a = 1; return [ a, a + 1 ];",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
- 35,
+ 38,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(0), U8(3), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
B(Star), R(2), //
B(LdaZero), //
B(Star), R(1), //
@@ -2944,8 +3564,10 @@ TEST(ArrayLiterals) {
B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot1)), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
B(LdaSmi8), U8(1), //
- B(Add), R(0), //
+ B(Add), R(3), //
B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot1)), //
B(Ldar), R(2), //
B(Return), //
@@ -2955,28 +3577,25 @@ TEST(ArrayLiterals) {
{"return [ [ 1, 2 ], [ 3 ] ];",
0,
1,
- 6,
+ 5,
{
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(2), U8(deep_elements_flags), //
- B(Return) //
+ B(CreateArrayLiteral), U8(0), U8(2), U8(deep_elements_flags), //
+ B(Return) //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"var a = 1; return [ [ a, 2 ], [ a + 2 ] ];",
- 5 * kPointerSize,
+ 6 * kPointerSize,
1,
- 67,
+ 68,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(2), U8(deep_elements_flags), //
+ B(CreateArrayLiteral), U8(0), U8(2), U8(deep_elements_flags), //
B(Star), R(2), //
B(LdaZero), //
B(Star), R(1), //
- B(LdaConstant), U8(1), //
- B(CreateArrayLiteral), U8(0), U8(simple_flags), //
+ B(CreateArrayLiteral), U8(1), U8(0), U8(simple_flags), //
B(Star), R(4), //
B(LdaZero), //
B(Star), R(3), //
@@ -2986,13 +3605,14 @@ TEST(ArrayLiterals) {
B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot3)), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(LdaConstant), U8(2), //
- B(CreateArrayLiteral), U8(1), U8(simple_flags), //
+ B(CreateArrayLiteral), U8(2), U8(1), U8(simple_flags), //
B(Star), R(4), //
B(LdaZero), //
B(Star), R(3), //
+ B(Ldar), R(0), //
+ B(Star), R(5), //
B(LdaSmi8), U8(2), //
- B(Add), R(0), //
+ B(Add), R(5), //
B(KeyedStoreICSloppy), R(4), R(3), U8(vector->GetIndex(slot2)), //
B(Ldar), R(4), //
B(KeyedStoreICSloppy), R(2), R(1), U8(vector->GetIndex(slot3)), //
@@ -3012,6 +3632,40 @@ TEST(ArrayLiterals) {
}
+TEST(ArrayLiteralsWide) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ int wide_idx = 0;
+ int simple_flags =
+ ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
+
+ ExpectedSnippet<InstanceType, 257> snippets[] = {
+ {"var a;" REPEAT_256(SPACE, "a = 1.23;") "return [ 1 , 2 ];",
+ 1 * kPointerSize,
+ 1,
+ 1031,
+ {
+ REPEAT_256(COMMA, //
+ B(LdaConstant), U8(wide_idx++), //
+ B(Star), R(0)), //
+ B(CreateArrayLiteralWide), U16(256), U16(0), U8(simple_flags), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::FIXED_ARRAY_TYPE}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
TEST(ObjectLiterals) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
@@ -3032,58 +3686,56 @@ TEST(ObjectLiterals) {
{"return { };",
0,
1,
- 6,
+ 5,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(simple_flags), //
- B(Return) //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(simple_flags), //
+ B(Return) //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"return { name: 'string', val: 9.2 };",
0,
1,
- 6,
+ 5,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Return) //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Return) //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"var a = 1; return { name: 'string', val: a };",
2 * kPointerSize,
1,
- 20,
+ 19,
{
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(Ldar), R(0), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(1), //
- B(Return), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Ldar), R(1), //
+ B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = 1; return { val: a, val: a + 1 };",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 22,
+ 25,
{
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Add), R(0), //
- B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(1), //
- B(Return), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(2), //
+ B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Ldar), R(1), //
+ B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
@@ -3091,51 +3743,45 @@ TEST(ObjectLiterals) {
{"return { func: function() { } };",
1 * kPointerSize,
1,
- 18,
+ 16,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(LdaConstant), U8(2), //
- B(CreateClosure), U8(0), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(0), //
- B(Return), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(StoreICSloppy), R(0), U8(2), U8(vector->GetIndex(slot1)), //
+ B(Ldar), R(0), //
+ B(Return), //
},
3,
{InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"return { func(a) { return a; } };",
1 * kPointerSize,
1,
- 18,
+ 16,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(LdaConstant), U8(2), //
- B(CreateClosure), U8(0), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(Ldar), R(0), //
- B(Return), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(StoreICSloppy), R(0), U8(2), U8(vector->GetIndex(slot1)), //
+ B(Ldar), R(0), //
+ B(Return), //
},
3,
{InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"return { get a() { return 2; } };",
5 * kPointerSize,
1,
- 31,
+ 29,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
B(Star), R(1), //
- B(LdaConstant), U8(2), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(2), U8(0), //
B(Star), R(2), //
B(LdaNull), //
B(Star), R(3), //
@@ -3153,18 +3799,15 @@ TEST(ObjectLiterals) {
{"return { get a() { return this.x; }, set a(val) { this.x = val } };",
5 * kPointerSize,
1,
- 34,
+ 31,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
B(Star), R(1), //
- B(LdaConstant), U8(2), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(2), U8(0), //
B(Star), R(2), //
- B(LdaConstant), U8(3), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(3), U8(0), //
B(Star), R(3), //
B(LdaZero), //
B(Star), R(4), //
@@ -3181,17 +3824,15 @@ TEST(ObjectLiterals) {
{"return { set b(val) { this.y = val } };",
5 * kPointerSize,
1,
- 31,
+ 29,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
B(Star), R(1), //
B(LdaNull), //
B(Star), R(2), //
- B(LdaConstant), U8(2), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(2), U8(0), //
B(Star), R(3), //
B(LdaZero), //
B(Star), R(4), //
@@ -3207,32 +3848,30 @@ TEST(ObjectLiterals) {
{"var a = 1; return { 1: a };",
5 * kPointerSize,
1,
- 30,
+ 29,
{
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(Ldar), R(0), //
- B(Star), R(3), //
- B(LdaZero), //
- B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kSetProperty), R(1), U8(4), //
- B(Ldar), R(1), //
- B(Return), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(2), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
+ B(LdaZero), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kSetProperty), R(1), U8(4), //
+ B(Ldar), R(1), //
+ B(Return), //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"return { __proto__: null }",
2 * kPointerSize,
1,
- 18,
+ 17,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(simple_flags), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(simple_flags), //
B(Star), R(0), //
B(LdaNull), B(Star), R(1), //
B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(0), U8(2), //
@@ -3244,12 +3883,11 @@ TEST(ObjectLiterals) {
{"var a = 'test'; return { [a]: 1 }",
5 * kPointerSize,
1,
- 31,
+ 30,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(0), U8(simple_flags), //
+ B(CreateObjectLiteral), U8(1), U8(0), U8(simple_flags), //
B(Star), R(1), //
B(Ldar), R(0), //
B(ToName), //
@@ -3269,12 +3907,11 @@ TEST(ObjectLiterals) {
{"var a = 'test'; return { val: a, [a]: 1 }",
5 * kPointerSize,
1,
- 37,
+ 36,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
+ B(CreateObjectLiteral), U8(1), U8(0), U8(deep_elements_flags), //
B(Star), R(1), //
B(Ldar), R(0), //
B(StoreICSloppy), R(1), U8(2), U8(vector->GetIndex(slot1)), //
@@ -3297,12 +3934,11 @@ TEST(ObjectLiterals) {
{"var a = 'test'; return { [a]: 1, __proto__: {} }",
5 * kPointerSize,
1,
- 43,
+ 41,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(1), U8(simple_flags), //
+ B(CreateObjectLiteral), U8(1), U8(1), U8(simple_flags), //
B(Star), R(1), //
B(Ldar), R(0), //
B(ToName), //
@@ -3313,8 +3949,7 @@ TEST(ObjectLiterals) {
B(Star), R(4), //
B(CallRuntime), U16(Runtime::kDefineDataPropertyUnchecked), R(1), //
U8(4), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(0), U8(13), //
+ B(CreateObjectLiteral), U8(1), U8(0), U8(13), //
B(Star), R(2), //
B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2), //
B(Ldar), R(1), //
@@ -3326,12 +3961,11 @@ TEST(ObjectLiterals) {
{"var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };",
5 * kPointerSize,
1,
- 69,
+ 64,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(0), U8(simple_flags), //
+ B(CreateObjectLiteral), U8(1), U8(0), U8(simple_flags), //
B(Star), R(1), //
B(Ldar), R(0), //
B(ToName), //
@@ -3343,20 +3977,16 @@ TEST(ObjectLiterals) {
B(CallRuntime), U16(Runtime::kDefineDataPropertyUnchecked), R(1), //
U8(4), //
B(LdaConstant), U8(3), //
- B(ToName), //
B(Star), R(2), //
- B(LdaConstant), U8(4), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(4), U8(0), //
B(Star), R(3), //
B(LdaZero), //
B(Star), R(4), //
B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), //
R(1), U8(4), //
B(LdaConstant), U8(3), //
- B(ToName), //
B(Star), R(2), //
- B(LdaConstant), U8(5), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(5), U8(0), //
B(Star), R(3), //
B(LdaZero), //
B(Star), R(4), //
@@ -3382,6 +4012,42 @@ TEST(ObjectLiterals) {
}
+TEST(ObjectLiteralsWide) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ int deep_elements_flags =
+ ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
+ int wide_idx = 0;
+
+ ExpectedSnippet<InstanceType, 257> snippets[] = {
+ {"var a;" REPEAT_256(SPACE,
+ "a = 1.23;") "return { name: 'string', val: 9.2 };",
+ 1 * kPointerSize,
+ 1,
+ 1031,
+ {
+ REPEAT_256(COMMA, //
+ B(LdaConstant), U8(wide_idx++), //
+ B(Star), R(0)), //
+ B(CreateObjectLiteralWide), U16(256), U16(0), //
+ U8(deep_elements_flags), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::FIXED_ARRAY_TYPE}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
TEST(TopLevelObjectLiterals) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
@@ -3393,7 +4059,7 @@ TEST(TopLevelObjectLiterals) {
{"var a = { func: function() { } };",
5 * kPointerSize,
1,
- 50,
+ 48,
{
B(LdaConstant), U8(0), //
B(Star), R(1), //
@@ -3404,12 +4070,10 @@ TEST(TopLevelObjectLiterals) {
B(Star), R(1), //
B(LdaZero), //
B(Star), R(2), //
- B(LdaConstant), U8(2), //
- B(CreateObjectLiteral), U8(0), U8(has_function_flags), //
+ B(CreateObjectLiteral), U8(2), U8(0), U8(has_function_flags), //
B(Star), R(4), //
- B(LdaConstant), U8(4), //
- B(CreateClosure), U8(1), //
- B(StoreICSloppy), R(4), U8(3), U8(5), //
+ B(CreateClosure), U8(3), U8(1), //
+ B(StoreICSloppy), R(4), U8(4), U8(3), //
B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1), //
B(Ldar), R(4), //
B(Star), R(3), //
@@ -3421,8 +4085,8 @@ TEST(TopLevelObjectLiterals) {
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
for (size_t i = 0; i < arraysize(snippets); i++) {
@@ -3532,11 +4196,10 @@ TEST(Throw) {
{"var a = 1; if (a) { throw 'Error'; };",
1 * kPointerSize,
1,
- 13,
+ 11,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanFalse), U8(5), //
B(LdaConstant), U8(0), //
B(Throw), //
@@ -3637,20 +4300,34 @@ TEST(CallNew) {
TEST(ContextVariables) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddCallICSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
int closure = Register::function_closure().index();
+ int new_target = Register::new_target().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+
+ // The wide check below relies on MIN_CONTEXT_SLOTS + 3 + 249 == 256, if this
+ // ever changes, the REPEAT_XXX should be changed to output the correct number
+ // of unique variables to trigger the wide slot load / store.
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS + 3 + 249 == 256);
+ int wide_slot = first_context_slot + 3;
+
ExpectedSnippet<InstanceType> snippets[] = {
{"var a; return function() { a = 1; };",
1 * kPointerSize,
1,
- 12,
+ 11,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
B(PushContext), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
1,
@@ -3658,15 +4335,14 @@ TEST(ContextVariables) {
{"var a = 1; return function() { a = 2; };",
1 * kPointerSize,
1,
- 17,
+ 16,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
B(PushContext), R(0), //
B(LdaSmi8), U8(1), //
B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
1,
@@ -3674,7 +4350,7 @@ TEST(ContextVariables) {
{"var a = 1; var b = 2; return function() { a = 2; b = 3 };",
1 * kPointerSize,
1,
- 22,
+ 21,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
@@ -3683,8 +4359,7 @@ TEST(ContextVariables) {
B(StaContextSlot), R(0), U8(first_context_slot), //
B(LdaSmi8), U8(2), //
B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
1,
@@ -3694,24 +4369,23 @@ TEST(ContextVariables) {
1,
24,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(LdaUndefined), //
- B(Star), R(2), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(0), //
- B(LdaContextSlot), R(0), U8(first_context_slot), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(LdaUndefined), //
+ B(Star), R(2), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(0), U8(vector->GetIndex(slot)), //
+ B(LdaContextSlot), R(0), U8(first_context_slot), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"'use strict'; let a = 1; { let b = 2; return function() { a + b; }; }",
4 * kPointerSize,
1,
- 45,
+ 44,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
@@ -3730,13 +4404,45 @@ TEST(ContextVariables) {
B(StaContextSlot), R(1), U8(first_context_slot), //
B(LdaSmi8), U8(2), //
B(StaContextSlot), R(1), U8(first_context_slot), //
- B(LdaConstant), U8(1), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(1), U8(0), //
B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ {"'use strict';\n"
+ REPEAT_249_UNIQUE_VARS()
+ "eval();"
+ "var b = 100;"
+ "return b",
+ 3 * kPointerSize,
+ 1,
+ 1041,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(CreateUnmappedArguments), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 2), //
+ REPEAT_249(COMMA, //
+ B(LdaZero), //
+ B(StaContextSlot), R(0), U8(wide_slot++)), //
+ B(LdaUndefined), //
+ B(Star), R(2), //
+ B(LdaGlobalStrict), U8(0), U8(1), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(0), U8(0), //
+ B(LdaSmi8), U8(100), //
+ B(StaContextSlotWide), R(0), U16(256), //
+ B(LdaContextSlotWide), R(0), U16(256), //
+ B(Return), //
+ },
+ 1,
+ {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
for (size_t i = 0; i < arraysize(snippets); i++) {
@@ -3758,15 +4464,14 @@ TEST(ContextParameters) {
{"function f(arg1) { return function() { arg1 = 2; }; }",
1 * kPointerSize,
2,
- 17,
+ 16,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
B(PushContext), R(0), //
B(Ldar), R(helper.kLastParamIndex), //
B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
1,
@@ -3774,15 +4479,14 @@ TEST(ContextParameters) {
{"function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }",
2 * kPointerSize,
2,
- 22,
+ 21,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
B(PushContext), R(1), //
B(Ldar), R(helper.kLastParamIndex), //
B(StaContextSlot), R(1), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
B(LdaContextSlot), R(1), U8(first_context_slot), //
B(Return), //
@@ -3792,7 +4496,7 @@ TEST(ContextParameters) {
{"function f(a1, a2, a3, a4) { return function() { a1 = a3; }; }",
1 * kPointerSize,
5,
- 22,
+ 21,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
@@ -3801,8 +4505,7 @@ TEST(ContextParameters) {
B(StaContextSlot), R(0), U8(first_context_slot + 1), //
B(Ldar), R(helper.kLastParamIndex -1), //
B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
1,
@@ -3810,15 +4513,14 @@ TEST(ContextParameters) {
{"function f() { var self = this; return function() { self = 2; }; }",
1 * kPointerSize,
1,
- 17,
+ 16,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
R(closure), U8(1), //
B(PushContext), R(0), //
B(Ldar), R(helper.kLastParamIndex), //
B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
1,
@@ -3924,11 +4626,10 @@ TEST(CountOperators) {
{"var a = 1; return ++a;",
1 * kPointerSize,
1,
- 11,
+ 9,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(ToNumber), //
B(Inc), //
B(Star), R(0), //
@@ -3937,11 +4638,10 @@ TEST(CountOperators) {
{"var a = 1; return a++;",
2 * kPointerSize,
1,
- 15,
+ 13,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(ToNumber), //
B(Star), R(1), //
B(Inc), //
@@ -3952,11 +4652,10 @@ TEST(CountOperators) {
{"var a = 1; return --a;",
1 * kPointerSize,
1,
- 11,
+ 9,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(ToNumber), //
B(Dec), //
B(Star), R(0), //
@@ -3965,11 +4664,10 @@ TEST(CountOperators) {
{"var a = 1; return a--;",
2 * kPointerSize,
1,
- 15,
+ 13,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(ToNumber), //
B(Star), R(1), //
B(Dec), //
@@ -3978,80 +4676,80 @@ TEST(CountOperators) {
B(Return), //
}},
{"var a = { val: 1 }; return a.val++;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 22,
+ 23,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(object_literal_flags), //
- B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Star), R(1), //
- B(Inc), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(1), //
- B(Return), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(ToNumber), //
+ B(Star), R(2), //
+ B(Inc), //
+ B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
+ B(Ldar), R(2), //
+ B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = { val: 1 }; return --a.val;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 18,
+ 19,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(object_literal_flags), //
- B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
- B(Dec), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot2)), //
- B(Return), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(ToNumber), //
+ B(Dec), //
+ B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
+ B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var name = 'var'; var a = { val: 1 }; return a[name]--;",
- 4 * kPointerSize,
+ 5 * kPointerSize,
1,
- 29,
+ 30,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(0), U8(object_literal_flags), //
+ B(CreateObjectLiteral), U8(1), U8(0), U8(object_literal_flags), //
B(Star), R(1), //
- B(Ldar), R(0), //
B(Star), R(2), //
- B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot1)), //
- B(ToNumber), //
+ B(Ldar), R(0), //
B(Star), R(3), //
+ B(KeyedLoadICSloppy), R(2), U8(vector->GetIndex(slot1)), //
+ B(ToNumber), //
+ B(Star), R(4), //
B(Dec), //
- B(KeyedStoreICSloppy), R(1), R(2), U8(vector->GetIndex(slot2)), //
- B(Ldar), R(3), //
+ B(KeyedStoreICSloppy), R(2), R(3), U8(vector->GetIndex(slot2)), //
+ B(Ldar), R(4), //
B(Return), //
},
2,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::FIXED_ARRAY_TYPE}},
{"var name = 'var'; var a = { val: 1 }; return ++a[name];",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
- 25,
+ 26,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateObjectLiteral), U8(0), U8(object_literal_flags), //
+ B(CreateObjectLiteral), U8(1), U8(0), U8(object_literal_flags), //
B(Star), R(1), //
- B(Ldar), R(0), //
B(Star), R(2), //
- B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot1)), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
+ B(KeyedLoadICSloppy), R(2), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Inc), //
- B(KeyedStoreICSloppy), R(1), R(2), U8(vector->GetIndex(slot2)), //
+ B(KeyedStoreICSloppy), R(2), R(3), U8(vector->GetIndex(slot2)), //
B(Return), //
},
2,
@@ -4060,15 +4758,14 @@ TEST(CountOperators) {
{"var a = 1; var b = function() { return a }; return ++a;",
2 * kPointerSize,
1,
- 27,
+ 26,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
U8(1), //
B(PushContext), R(1), //
B(LdaSmi8), U8(1), //
B(StaContextSlot), R(1), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
B(LdaContextSlot), R(1), U8(first_context_slot), //
B(ToNumber), //
@@ -4081,15 +4778,14 @@ TEST(CountOperators) {
{"var a = 1; var b = function() { return a }; return a--;",
3 * kPointerSize,
1,
- 31,
+ 30,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
U8(1), //
B(PushContext), R(1), //
B(LdaSmi8), U8(1), //
B(StaContextSlot), R(1), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
B(LdaContextSlot), R(1), U8(first_context_slot), //
B(ToNumber), //
@@ -4102,22 +4798,22 @@ TEST(CountOperators) {
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"var idx = 1; var a = [1, 2]; return a[idx++] = 2;",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
- 26,
+ 27,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(0), U8(array_literal_flags), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
B(Star), R(1), //
+ B(Star), R(2), //
B(Ldar), R(0), //
B(ToNumber), //
- B(Star), R(2), //
+ B(Star), R(3), //
B(Inc), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
- B(KeyedStoreICSloppy), R(1), R(2), //
+ B(KeyedStoreICSloppy), R(2), R(3), //
U8(store_vector->GetIndex(store_slot)), //
B(Return), //
},
@@ -4232,65 +4928,67 @@ TEST(CompoundExpressions) {
ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = 1; a += 2;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 12,
+ 14,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(2), //
- B(Add), R(0), //
+ B(Add), R(1), //
B(Star), R(0), //
B(LdaUndefined), //
B(Return), //
}},
{"var a = 1; a /= 2;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 12,
+ 14,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaSmi8), U8(2), //
- B(Div), R(0), //
+ B(Div), R(1), //
B(Star), R(0), //
B(LdaUndefined), //
B(Return), //
}},
{"var a = { val: 2 }; a.name *= 2;",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 23,
+ 24,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(object_literal_flags), //
- B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(1), U8(vector->GetIndex(slot1)), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(Mul), R(1), //
- B(StoreICSloppy), R(0), U8(1), U8(vector->GetIndex(slot2)), //
- B(LdaUndefined), //
- B(Return), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(2), //
+ B(Mul), R(2), //
+ B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = { 1: 2 }; a[1] ^= 2;",
- 3 * kPointerSize,
+ 4 * kPointerSize,
1,
- 26,
+ 27,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(object_literal_flags), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
B(Star), R(0), //
- B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot1)), //
+ B(LdaSmi8), U8(1), //
B(Star), R(2), //
+ B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot1)), //
+ B(Star), R(3), //
B(LdaSmi8), U8(2), //
- B(BitwiseXor), R(2), //
- B(KeyedStoreICSloppy), R(0), R(1), U8(vector->GetIndex(slot2)), //
+ B(BitwiseXor), R(3), //
+ B(KeyedStoreICSloppy), R(1), R(2), U8(vector->GetIndex(slot2)), //
B(LdaUndefined), //
B(Return), //
},
@@ -4299,15 +4997,14 @@ TEST(CompoundExpressions) {
{"var a = 1; (function f() { return a; }); a |= 24;",
2 * kPointerSize,
1,
- 30,
+ 29,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
U8(1), //
B(PushContext), R(0), //
B(LdaSmi8), U8(1), //
B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaConstant), U8(0), //
- B(CreateClosure), U8(0), //
+ B(CreateClosure), U8(0), U8(0), //
B(LdaContextSlot), R(0), U8(first_context_slot), //
B(Star), R(1), //
B(LdaSmi8), U8(24), //
@@ -4397,38 +5094,37 @@ TEST(CreateArguments) {
{"function f() { return arguments; }",
1 * kPointerSize,
1,
- 6,
+ 4,
{
B(CreateMappedArguments), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return), //
}},
{"function f() { return arguments[0]; }",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 8,
+ 10,
{
B(CreateMappedArguments), //
B(Star), R(0), //
+ B(Star), R(1), //
B(LdaZero), //
- B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot)), //
+ B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot)), //
B(Return), //
}},
{"function f() { 'use strict'; return arguments; }",
1 * kPointerSize,
1,
- 6,
+ 4,
{
B(CreateUnmappedArguments), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return), //
}},
{"function f(a) { return arguments[0]; }",
- 2 * kPointerSize,
+ 3 * kPointerSize,
2,
- 20,
+ 22,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
U8(1), //
@@ -4437,14 +5133,15 @@ TEST(CreateArguments) {
B(StaContextSlot), R(1), U8(first_context_slot), //
B(CreateMappedArguments), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaZero), //
- B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot)), //
+ B(KeyedLoadICSloppy), R(2), U8(vector->GetIndex(slot)), //
B(Return), //
}},
{"function f(a, b, c) { return arguments; }",
2 * kPointerSize,
4,
- 28,
+ 26,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
U8(1), //
@@ -4457,17 +5154,15 @@ TEST(CreateArguments) {
B(StaContextSlot), R(1), U8(first_context_slot), //
B(CreateMappedArguments), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return), //
}},
{"function f(a, b, c) { 'use strict'; return arguments; }",
1 * kPointerSize,
4,
- 6,
+ 4,
{
B(CreateUnmappedArguments), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return), //
}},
};
@@ -4484,21 +5179,25 @@ TEST(IllegalRedeclaration) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- ExpectedSnippet<const char*> snippets[] = {
+ CHECK_GE(MessageTemplate::kVarRedeclaration, 128);
+ // Must adapt bytecode if this changes.
+
+ ExpectedSnippet<Handle<Object>, 2> snippets[] = {
{"const a = 1; { var a = 2; }",
3 * kPointerSize,
1,
14,
{
- B(LdaSmi8), U8(MessageTemplate::kVarRedeclaration), //
- B(Star), R(1), //
B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
B(Star), R(2), //
B(CallRuntime), U16(Runtime::kNewSyntaxError), R(1), U8(2), //
B(Throw), //
},
- 1,
- {"a"}},
+ 2,
+ {helper.factory()->NewNumberFromInt(MessageTemplate::kVarRedeclaration),
+ helper.factory()->NewStringFromAsciiChecked("a")}},
};
for (size_t i = 0; i < arraysize(snippets); i++) {
@@ -4540,77 +5239,76 @@ TEST(ForIn) {
2,
{B(LdaUndefined), B(Return)},
0},
+ {"for (var p in undefined) {}",
+ 2 * kPointerSize,
+ 1,
+ 2,
+ {B(LdaUndefined), B(Return)},
+ 0},
{"var x = 'potatoes';\n"
"for (var p in x) { return p; }",
- 5 * kPointerSize,
+ 8 * kPointerSize,
1,
- 52,
+ 45,
{
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(Ldar), R(1), //
- B(JumpIfUndefined), U8(44), //
- B(JumpIfNull), U8(42), //
- B(ToObject), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kGetPropertyNamesFast), R(3), U8(1), //
- B(ForInPrepare), R(3), //
- B(JumpIfUndefined), U8(30), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(3), //
- B(ForInDone), R(4), //
- B(JumpIfTrue), U8(21), //
- B(ForInNext), R(4), R(3), //
- B(JumpIfUndefined), U8(11), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(Ldar), R(2), //
- B(Return), //
- B(Ldar), R(3), //
- B(Inc), //
- B(Jump), U8(-23), //
- B(LdaUndefined), //
- B(Return), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(JumpIfUndefined), U8(39), //
+ B(JumpIfNull), U8(37), //
+ B(ToObject), //
+ B(JumpIfNull), U8(34), //
+ B(Star), R(3), //
+ B(ForInPrepare), R(4), R(5), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(ForInDone), R(7), R(6), //
+ B(JumpIfTrue), U8(20), //
+ B(ForInNext), R(3), R(4), R(5), R(7), //
+ B(JumpIfUndefined), U8(7), //
+ B(Star), R(0), //
+ B(Star), R(2), //
+ B(Return), //
+ B(ForInStep), R(7), //
+ B(Star), R(7), //
+ B(Jump), U8(-21), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var x = 0;\n"
"for (var p in [1,2,3]) { x += p; }",
- 5 * kPointerSize,
+ 9 * kPointerSize,
1,
57,
{
- B(LdaZero), //
- B(Star), R(1), //
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(0), U8(simple_flags), //
- B(JumpIfUndefined), U8(47), //
- B(JumpIfNull), U8(45), //
- B(ToObject), //
- B(Star), R(3), //
- B(CallRuntime), U16(Runtime::kGetPropertyNamesFast), R(3), U8(1), //
- B(ForInPrepare), R(3), //
- B(JumpIfUndefined), U8(33), //
- B(Star), R(4), //
- B(LdaZero), //
- B(Star), R(3), //
- B(ForInDone), R(4), //
- B(JumpIfTrue), U8(24), //
- B(ForInNext), R(4), R(3), //
- B(JumpIfUndefined), U8(14), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(2), //
- B(Ldar), R(2), //
- B(Add), R(1), //
- B(Star), R(1), //
- B(Ldar), R(3), //
- B(Inc), //
- B(Jump), U8(-26), //
- B(LdaUndefined), //
- B(Return), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
+ B(JumpIfUndefined), U8(48), //
+ B(JumpIfNull), U8(46), //
+ B(ToObject), //
+ B(JumpIfNull), U8(43), //
+ B(Star), R(3), //
+ B(ForInPrepare), R(4), R(5), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(ForInDone), R(7), R(6), //
+ B(JumpIfTrue), U8(29), //
+ B(ForInNext), R(3), R(4), R(5), R(7), //
+ B(JumpIfUndefined), U8(16), //
+ B(Star), R(0), //
+ B(Star), R(2), //
+ B(Ldar), R(1), //
+ B(Star), R(8), //
+ B(Ldar), R(2), //
+ B(Add), R(8), //
+ B(Star), R(1), //
+ B(ForInStep), R(7), //
+ B(Star), R(7), //
+ B(Jump), U8(-30), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
@@ -4619,90 +5317,93 @@ TEST(ForIn) {
" if (x['a'] == 10) continue;\n"
" if (x['a'] == 20) break;\n"
"}",
- 4 * kPointerSize,
+ 8 * kPointerSize,
1,
- 83,
+ 94,
{
- B(LdaConstant), U8(0), //
- B(CreateObjectLiteral), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateArrayLiteral), U8(1), U8(simple_flags), //
- B(JumpIfUndefined), U8(69), //
- B(JumpIfNull), U8(67), //
- B(ToObject), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kGetPropertyNamesFast), R(1), U8(1), //
- B(ForInPrepare), R(1), //
- B(JumpIfUndefined), U8(55), //
- B(Star), R(2), //
- B(LdaZero), //
- B(Star), R(1), //
- B(ForInDone), R(2), //
- B(JumpIfTrue), U8(46), //
- B(ForInNext), R(2), R(1), //
- B(JumpIfUndefined), U8(36), //
- B(Star), R(3), //
- B(StoreICSloppy), R(0), U8(2), U8(vector->GetIndex(slot4)), //
- B(LoadICSloppy), R(0), U8(2), U8(vector->GetIndex(slot2)), //
- B(Star), R(3), //
- B(LdaSmi8), U8(10), //
- B(TestEqual), R(3), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(16), //
- B(LoadICSloppy), R(0), U8(2), U8(vector->GetIndex(slot3)), //
- B(Star), R(3), //
- B(LdaSmi8), U8(20), //
- B(TestEqual), R(3), //
- B(JumpIfFalse), U8(4), //
- B(Jump), U8(7), //
- B(Ldar), R(1), //
- B(Inc), //
- B(Jump), U8(-48), //
- B(LdaUndefined), //
- B(Return), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
+ B(CreateArrayLiteral), U8(1), U8(1), U8(simple_flags), //
+ B(JumpIfUndefined), U8(82), //
+ B(JumpIfNull), U8(80), //
+ B(ToObject), //
+ B(JumpIfNull), U8(77), //
+ B(Star), R(1), //
+ B(ForInPrepare), R(2), R(3), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(ForInDone), R(5), R(4), //
+ B(JumpIfTrue), U8(63), //
+ B(ForInNext), R(1), R(2), R(3), R(5), //
+ B(JumpIfUndefined), U8(50), //
+ B(Star), R(6), //
+ B(Ldar), R(0), //
+ B(Star), R(7), //
+ B(Ldar), R(6), //
+ B(StoreICSloppy), R(7), U8(2), U8(vector->GetIndex(slot4)), //
+ B(Ldar), R(0), //
+ B(Star), R(6), //
+ B(LoadICSloppy), R(6), U8(2), U8(vector->GetIndex(slot2)), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(TestEqual), R(7), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(20), //
+ B(Ldar), R(0), //
+ B(Star), R(6), //
+ B(LoadICSloppy), R(6), U8(2), U8(vector->GetIndex(slot3)), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(20), //
+ B(TestEqual), R(7), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(8), //
+ B(ForInStep), R(5), //
+ B(Star), R(5), //
+ B(Jump), U8(-64), //
+ B(LdaUndefined), //
+ B(Return), //
},
3,
{InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var x = [ 10, 11, 12 ] ;\n"
"for (x[0] in [1,2,3]) { return x[3]; }",
- 5 * kPointerSize,
+ 9 * kPointerSize,
1,
- 66,
+ 71,
{
- B(LdaConstant), U8(0), //
- B(CreateArrayLiteral), U8(0), U8(simple_flags), //
- B(Star), R(0), //
- B(LdaConstant), U8(1), //
- B(CreateArrayLiteral), U8(1), U8(simple_flags), //
- B(JumpIfUndefined), U8(52), //
- B(JumpIfNull), U8(50), //
- B(ToObject), //
- B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kGetPropertyNamesFast), R(1), U8(1), //
- B(ForInPrepare), R(1), //
- B(JumpIfUndefined), U8(38), //
- B(Star), R(2), //
- B(LdaZero), //
- B(Star), R(1), //
- B(ForInDone), R(2), //
- B(JumpIfTrue), U8(29), //
- B(ForInNext), R(2), R(1), //
- B(JumpIfUndefined), U8(19), //
- B(Star), R(3), //
- B(LdaZero), //
- B(Star), R(4), //
- B(Ldar), R(3), //
- B(KeyedStoreICSloppy), R(0), R(4), U8(vector->GetIndex(slot3)), //
- B(LdaSmi8), U8(3), //
- B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot2)), //
- B(Return), //
- B(Ldar), R(1), //
- B(Inc), //
- B(Jump), U8(-31), //
- B(LdaUndefined), //
- B(Return), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(simple_flags), //
+ B(Star), R(0), //
+ B(CreateArrayLiteral), U8(1), U8(1), U8(simple_flags), //
+ B(JumpIfUndefined), U8(59), //
+ B(JumpIfNull), U8(57), //
+ B(ToObject), //
+ B(JumpIfNull), U8(54), //
+ B(Star), R(1), //
+ B(ForInPrepare), R(2), R(3), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(ForInDone), R(5), R(4), //
+ B(JumpIfTrue), U8(40), //
+ B(ForInNext), R(1), R(2), R(3), R(5), //
+ B(JumpIfUndefined), U8(27), //
+ B(Star), R(6), //
+ B(Ldar), R(0), //
+ B(Star), R(7), //
+ B(LdaZero), //
+ B(Star), R(8), //
+ B(Ldar), R(6), //
+ B(KeyedStoreICSloppy), R(7), R(8), U8(vector->GetIndex(slot3)), //
+ B(Ldar), R(0), //
+ B(Star), R(6), //
+ B(LdaSmi8), U8(3), //
+ B(KeyedLoadICSloppy), R(6), U8(vector->GetIndex(slot2)), //
+ B(Return), //
+ B(ForInStep), R(5), //
+ B(Star), R(5), //
+ B(Jump), U8(-41), //
+ B(LdaUndefined), //
+ B(Return), //
},
2,
{InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE}},
@@ -4769,19 +5470,19 @@ TEST(Switch) {
" case 1: return 2;\n"
" case 2: return 3;\n"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
30,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), // The tag variable is allocated as a
- B(Ldar), R(1), // local by the parser, hence this
- B(Star), R(0), // strange shuffling.
+ B(Star), R(0), // local by the parser, hence the store
+ B(Star), R(2), // to another local register.
B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(7), //
B(Jump), U8(8), //
B(LdaSmi8), U8(2), //
@@ -4796,19 +5497,19 @@ TEST(Switch) {
" case 1: a = 2; break;\n"
" case 2: a = 3; break;\n"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
36,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(Jump), U8(14), //
B(LdaSmi8), U8(2), //
@@ -4825,19 +5526,19 @@ TEST(Switch) {
" case 1: a = 2; // fall-through\n"
" case 2: a = 3; break;\n"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
34,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(8), //
B(Jump), U8(12), //
B(LdaSmi8), U8(2), //
@@ -4854,19 +5555,19 @@ TEST(Switch) {
" case 3: break;\n"
" default: a = 1; break;\n"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
34,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(3), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(6), //
B(Jump), U8(6), //
B(Jump), U8(10), //
@@ -4883,20 +5584,20 @@ TEST(Switch) {
" case 3: a = 2; break;\n"
" default: a = 3; break;\n"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
43,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(TypeOf), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(3), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(Jump), U8(14), //
B(LdaSmi8), U8(1), //
@@ -4916,17 +5617,17 @@ TEST(Switch) {
" case typeof(a): a = 1; break;\n"
" default: a = 2; break;\n"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
31,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0), //
+ B(Star), R(2), //
B(Ldar), R(1), //
B(TypeOf), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(4), //
B(Jump), U8(8), //
B(LdaSmi8), U8(1), //
@@ -4944,24 +5645,24 @@ TEST(Switch) {
"break;\n"
" case 2: a = 3; break;"
"}\n",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
288,
{
B(LdaSmi8), U8(1), //
B(Star), R(1), //
- B(Ldar), R(1), //
B(Star), R(0), //
+ B(Star), R(2), //
B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(2), //
B(JumpIfTrueConstant), U8(0), //
B(JumpConstant), U8(1), //
REPEAT_64(COMMA, //
- B(LdaSmi8), U8(2), //
- B(Star), R(1)), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(1)), //
B(Jump), U8(8), //
B(LdaSmi8), U8(3), //
B(Star), R(1), //
@@ -4980,26 +5681,29 @@ TEST(Switch) {
" } // fall-through\n"
" case 2: a = 3;\n"
"}\n",
- 3 * kPointerSize,
+ 5 * kPointerSize,
1,
- 54,
+ 60,
{
B(LdaSmi8), U8(1), //
B(Star), R(2), //
- B(Ldar), R(2), //
B(Star), R(0), //
+ B(Star), R(3), //
B(LdaSmi8), U8(1), //
- B(TestEqualStrict), R(0), //
+ B(TestEqualStrict), R(3), //
B(JumpIfTrue), U8(10), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(0), //
- B(JumpIfTrue), U8(30), //
- B(Jump), U8(32), //
+ B(TestEqualStrict), R(3), //
+ B(JumpIfTrue), U8(36), //
+ B(Jump), U8(38), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
B(LdaSmi8), U8(1), //
- B(Add), R(2), //
+ B(Add), R(4), //
B(Star), R(1), //
+ B(Star), R(4), //
B(LdaSmi8), U8(2), //
- B(TestEqualStrict), R(1), //
+ B(TestEqualStrict), R(4), //
B(JumpIfTrue), U8(4), //
B(Jump), U8(8), //
B(LdaSmi8), U8(1), //
@@ -5027,20 +5731,21 @@ TEST(BasicBlockToBoolean) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- // Check that we don't omit ToBoolean calls if they are at the start of basic
+ // Check that we generate JumpIfToBoolean if they are at the start of basic
// blocks.
ExpectedSnippet<int> snippets[] = {
{"var a = 1; if (a || a < 0) { return 1; }",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 18,
+ 20,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(JumpIfToBooleanTrue), U8(9), //
B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(5), //
+ B(Star), R(1), //
B(LdaZero), //
- B(TestLessThan), R(0), //
+ B(TestLessThan), R(1), //
B(JumpIfToBooleanFalse), U8(5), //
B(LdaSmi8), U8(1), //
B(Return), //
@@ -5048,16 +5753,17 @@ TEST(BasicBlockToBoolean) {
B(Return), //
}},
{"var a = 1; if (a && a < 0) { return 1; }",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 18,
+ 20,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(JumpIfToBooleanFalse), U8(9), //
B(Ldar), R(0), //
- B(JumpIfToBooleanFalse), U8(5), //
+ B(Star), R(1), //
B(LdaZero), //
- B(TestLessThan), R(0), //
+ B(TestLessThan), R(1), //
B(JumpIfToBooleanFalse), U8(5), //
B(LdaSmi8), U8(1), //
B(Return), //
@@ -5065,16 +5771,17 @@ TEST(BasicBlockToBoolean) {
B(Return), //
}},
{"var a = 1; a = (a || a < 0) ? 2 : 3;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 23,
+ 25,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(JumpIfToBooleanTrue), U8(9), //
B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(5), //
+ B(Star), R(1), //
B(LdaZero), //
- B(TestLessThan), R(0), //
+ B(TestLessThan), R(1), //
B(JumpIfToBooleanFalse), U8(6), //
B(LdaSmi8), U8(2), //
B(Jump), U8(4), //
@@ -5127,11 +5834,10 @@ TEST(DeadCodeRemoval) {
{"var a = 1; if (a) { return 1; }; return 2;",
1 * kPointerSize,
1,
- 14,
+ 12,
{
B(LdaSmi8), U8(1), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(JumpIfToBooleanFalse), U8(5), //
B(LdaSmi8), U8(1), //
B(Return), //
@@ -5170,13 +5876,12 @@ TEST(ThisFunction) {
{"var f;\n f = function f() { return f; }",
1 * kPointerSize,
1,
- 10,
+ 8,
{
B(LdaTheHole), //
B(Star), R(0), //
B(Ldar), R(closure), //
B(Star), R(0), //
- B(Ldar), R(0), //
B(Return), //
}},
};
@@ -5193,17 +5898,27 @@ TEST(NewTarget) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ int new_target = Register::new_target().index();
+
ExpectedSnippet<int> snippets[] = {
{"return new.target;",
1 * kPointerSize,
1,
- 10,
+ 5,
{
- B(CallRuntime), U16(Runtime::kGetOriginalConstructor), R(0), //
- U8(0), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Return), //
+ B(Ldar), R(new_target), //
+ B(Star), R(0), //
+ B(Return), //
+ }},
+ {"new.target;",
+ 1 * kPointerSize,
+ 1,
+ 6,
+ {
+ B(Ldar), R(new_target), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return), //
}},
};
@@ -5214,6 +5929,799 @@ TEST(NewTarget) {
}
}
+
+TEST(RemoveRedundantLdar) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<int> snippets[] = {
+ {"var ld_a = 1;\n" // This test is to check Ldar does not
+ "while(true) {\n" // get removed if the preceding Star is
+ " ld_a = ld_a + ld_a;\n" // in a different basicblock.
+ " if (ld_a > 10) break;\n"
+ "}\n"
+ "return ld_a;",
+ 2 * kPointerSize,
+ 1,
+ 29,
+ {B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Ldar), R(0), // This load should not be removed as it
+ B(Star), R(1), // is the target of the branch.
+ B(Ldar), R(0), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(10), //
+ B(TestGreaterThan), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(4), //
+ B(Jump), U8(-20), //
+ B(Ldar), R(0), //
+ B(Return)}},
+ {"var ld_a = 1;\n"
+ "do {\n"
+ " ld_a = ld_a + ld_a;\n"
+ " if (ld_a > 10) continue;\n"
+ "} while(false);\n"
+ "return ld_a;",
+ 2 * kPointerSize,
+ 1,
+ 27,
+ {B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(10), //
+ B(TestGreaterThan), R(1), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(2), //
+ B(Ldar), R(0), //
+ B(Return)}},
+ {"var ld_a = 1;\n"
+ " ld_a = ld_a + ld_a;\n"
+ " return ld_a;",
+ 2 * kPointerSize,
+ 1,
+ 13,
+ {
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Return) //
+ }},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(AssignmentsInBinaryExpression) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"var x = 0, y = 1;\n"
+ "return (x = 2, y = 3, x = 4, y = 5)",
+ 2 * kPointerSize,
+ 1,
+ 24,
+ {
+ B(LdaZero), B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(4), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(5), //
+ B(Star), R(1), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 55;\n"
+ "var y = (x = 100);\n"
+ "return y",
+ 2 * kPointerSize,
+ 1,
+ 11,
+ {
+ B(LdaSmi8), U8(55), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(100), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 55;\n"
+ "x = x + (x = 100) + (x = 101);\n"
+ "return x;",
+ 3 * kPointerSize,
+ 1,
+ 23,
+ {
+ B(LdaSmi8), U8(55), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(100), //
+ B(Star), R(0), //
+ B(Add), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(101), //
+ B(Star), R(0), //
+ B(Add), R(2), //
+ B(Star), R(0), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 55;\n"
+ "x = (x = 56) - x + (x = 57);\n"
+ "x++;\n"
+ "return x;",
+ 3 * kPointerSize,
+ 1,
+ 31,
+ {
+ B(LdaSmi8), U8(55), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(56), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Sub), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(57), //
+ B(Star), R(0), //
+ B(Add), R(2), //
+ B(Star), R(0), //
+ B(ToNumber), //
+ B(Star), R(1), //
+ B(Inc), //
+ B(Star), R(0), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 55;\n"
+ "var y = x + (x = 1) + (x = 2) + (x = 3);\n"
+ "return y;",
+ 4 * kPointerSize,
+ 1,
+ 31,
+ {
+ B(LdaSmi8), U8(55), //
+ B(Star), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Add), R(2), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(Add), R(3), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(0), //
+ B(Add), R(2), //
+ B(Star), R(1), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 55;\n"
+ "var x = x + (x = 1) + (x = 2) + (x = 3);\n"
+ "return x;",
+ 3 * kPointerSize,
+ 1,
+ 31,
+ {
+ B(LdaSmi8), U8(55), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Add), R(1), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(Add), R(2), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(0), //
+ B(Add), R(1), //
+ B(Star), R(0), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 10, y = 20;\n"
+ "return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + "
+ "y;\n",
+ 5 * kPointerSize,
+ 1,
+ 69,
+ {
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Add), R(2), //
+ B(Star), R(3), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Add), R(2), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(1), //
+ B(Mul), R(4), //
+ B(Add), R(3), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(1), //
+ B(Add), R(2), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(4), //
+ B(Star), R(0), //
+ B(Add), R(3), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(5), //
+ B(Star), R(1), //
+ B(Add), R(2), //
+ B(Star), R(3), //
+ B(Ldar), R(1), //
+ B(Add), R(3), //
+ B(Return), //
+ },
+ 0},
+ {"var x = 17;\n"
+ "return 1 + x + (x++) + (++x);\n",
+ 4 * kPointerSize,
+ 1,
+ 37,
+ {
+ B(LdaSmi8), U8(17), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Add), R(1), //
+ B(Star), R(2), //
+ B(Ldar), R(0), //
+ B(ToNumber), //
+ B(Star), R(1), //
+ B(Inc), //
+ B(Star), R(0), //
+ B(Ldar), R(1), //
+ B(Add), R(2), //
+ B(Star), R(3), //
+ B(Ldar), R(0), //
+ B(ToNumber), //
+ B(Inc), //
+ B(Star), R(0), //
+ B(Add), R(3), //
+ B(Return), //
+ },
+ 0}};
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(Eval) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ int closure = Register::function_closure().index();
+ int context = Register::function_context().index();
+ int new_target = Register::new_target().index();
+
+ int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"return eval('1;');",
+ 9 * kPointerSize,
+ 1,
+ 67,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 2), //
+ B(Mov), R(context), R(3), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(4), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
+ R(3), U8(2), R(1), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(1), U8(0), //
+ B(Return), //
+ },
+ 2,
+ {"eval", "1;"}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(LookupSlot) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ int closure = Register::function_closure().index();
+ int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+ int context = Register::function_context().index();
+ int new_target = Register::new_target().index();
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"eval('var x = 10;'); return x;",
+ 9 * kPointerSize,
+ 1,
+ 69,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 2), //
+ B(Mov), R(context), R(3), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(4), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
+ R(3), U8(2), R(1), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(1), U8(0), //
+ B(LdaLookupSlot), U8(2), //
+ B(Return), //
+ },
+ 3,
+ {"eval", "var x = 10;", "x"}},
+ {"eval('var x = 10;'); return typeof x;",
+ 9 * kPointerSize,
+ 1,
+ 70,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 2), //
+ B(Mov), R(context), R(3), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(4), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
+ R(3), U8(2), R(1), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(1), U8(0), //
+ B(LdaLookupSlotInsideTypeof), U8(2), //
+ B(TypeOf), //
+ B(Return), //
+ },
+ 3,
+ {"eval", "var x = 10;", "x"}},
+ {"x = 20; return eval('');",
+ 9 * kPointerSize,
+ 1,
+ 71,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(0), U8(first_context_slot + 2), //
+ B(LdaSmi8), U8(20), //
+ B(StaLookupSlotSloppy), U8(0), //
+ B(Mov), R(context), R(3), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(4), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
+ R(3), U8(2), R(1), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(1), U8(0), //
+ B(Return), //
+ },
+ 3,
+ {"x", "eval", ""}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(CallLookupSlot) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot1 = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddCallICSlot();
+ USE(slot1);
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+
+ int closure = Register::function_closure().index();
+ int context = Register::function_context().index();
+ int new_target = Register::new_target().index();
+
+ ExpectedSnippet<InstanceType> snippets[] = {
+ {"g = function(){}; eval(''); return g();",
+ 9 * kPointerSize,
+ 1,
+ 90,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(0), U8(4), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(0), U8(5), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(0), U8(6), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(StaLookupSlotSloppy), U8(1), //
+ B(Mov), R(context), R(3), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(4), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
+ R(3), U8(2), R(1), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(1), U8(0), //
+ B(Mov), R(context), R(3), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(4), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
+ R(3), U8(2), R(1), //
+ B(Call), R(1), R(2), U8(0), U8(vector->GetIndex(slot2)), //
+ B(Return), //
+ },
+ 4,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(LookupSlotInEval) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ const char* function_prologue = "var f;"
+ "var x = 1;"
+ "function f1() {"
+ " eval(\"function t() {";
+ const char* function_epilogue = " }; f = t; f();\");"
+ "}"
+ "f1();";
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"return x;",
+ 0 * kPointerSize,
+ 1,
+ 3,
+ {
+ B(LdaLookupSlot), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ {"x = 10;",
+ 0 * kPointerSize,
+ 1,
+ 6,
+ {
+ B(LdaSmi8), U8(10), //
+ B(StaLookupSlotSloppy), U8(0), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 1,
+ {"x"}},
+ {"'use strict'; x = 10;",
+ 0 * kPointerSize,
+ 1,
+ 6,
+ {
+ B(LdaSmi8), U8(10), //
+ B(StaLookupSlotStrict), U8(0), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 1,
+ {"x"}},
+ {"return typeof x;",
+ 0 * kPointerSize,
+ 1,
+ 4,
+ {
+ B(LdaLookupSlotInsideTypeof), U8(0), //
+ B(TypeOf), //
+ B(Return), //
+ },
+ 1,
+ {"x"}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ std::string script = std::string(function_prologue) +
+ std::string(snippets[i].code_snippet) +
+ std::string(function_epilogue);
+ // TODO(mythria): use * as filter when function declarations are supported
+ // inside eval.
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(script.c_str(), "t", "f");
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(LookupSlotWideInEval) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ const char* function_prologue =
+ "var f;"
+ "var x = 1;"
+ "function f1() {"
+ " eval(\"function t() {";
+ const char* function_epilogue =
+ " }; f = t; f();\");"
+ "}"
+ "f1();";
+
+ int const_count[] = {0, 0, 0, 0};
+ ExpectedSnippet<InstanceType, 257> snippets[] = {
+ {REPEAT_256(SPACE, "var y = 2.3;")
+ "return x;",
+ 1 * kPointerSize,
+ 1,
+ 1028,
+ {
+ REPEAT_256(SPACE, //
+ B(LdaConstant), U8(const_count[0]++), //
+ B(Star), R(0), ) //
+ B(LdaLookupSlotWide), U16(256), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {REPEAT_256(SPACE, "var y = 2.3;")
+ "return typeof x;",
+ 1 * kPointerSize,
+ 1,
+ 1029,
+ {
+ REPEAT_256(SPACE, //
+ B(LdaConstant), U8(const_count[1]++), //
+ B(Star), R(0), ) //
+ B(LdaLookupSlotInsideTypeofWide), U16(256), //
+ B(TypeOf), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {REPEAT_256(SPACE, "var y = 2.3;")
+ "x = 10;",
+ 1 * kPointerSize,
+ 1,
+ 1031,
+ {
+ REPEAT_256(SPACE, //
+ B(LdaConstant), U8(const_count[2]++), //
+ B(Star), R(0), ) //
+ B(LdaSmi8), U8(10), //
+ B(StaLookupSlotSloppyWide), U16(256), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"'use strict';"
+ REPEAT_256(SPACE, "var y = 2.3;")
+ "x = 10;",
+ 1 * kPointerSize,
+ 1,
+ 1031,
+ {
+ REPEAT_256(SPACE,
+ B(LdaConstant), U8(const_count[3]++), //
+ B(Star), R(0), ) //
+ B(LdaSmi8), U8(10), //
+ B(StaLookupSlotStrictWide), U16(256), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 257,
+ {REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ std::string script = std::string(function_prologue) +
+ std::string(snippets[i].code_snippet) +
+ std::string(function_epilogue);
+ // TODO(mythria): use * as filter when function declarations are supported
+ // inside eval.
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(script.c_str(), "t", "f");
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(DeleteLookupSlotInEval) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ const char* function_prologue = "var f;"
+ "var x = 1;"
+ "z = 10;"
+ "function f1() {"
+ " var y;"
+ " eval(\"function t() {";
+ const char* function_epilogue = " }; f = t; f();\");"
+ "}"
+ "f1();";
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"delete x;",
+ 0 * kPointerSize,
+ 1,
+ 5,
+ {
+ B(LdaConstant), U8(0), //
+ B(DeleteLookupSlot), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ {"return delete y;",
+ 0 * kPointerSize,
+ 1,
+ 2,
+ {
+ B(LdaFalse), //
+ B(Return) //
+ },
+ 0},
+ {"return delete z;",
+ 0 * kPointerSize,
+ 1,
+ 4,
+ {
+ B(LdaConstant), U8(0), //
+ B(DeleteLookupSlot), //
+ B(Return) //
+ },
+ 1,
+ {"z"}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ std::string script = std::string(function_prologue) +
+ std::string(snippets[i].code_snippet) +
+ std::string(function_epilogue);
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(script.c_str(), "t", "f");
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index d274fa73cb..506cf00cd0 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(rmcilroy): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/execution.h"
@@ -67,9 +64,9 @@ class InterpreterTester {
source_(source),
bytecode_(bytecode),
feedback_vector_(feedback_vector) {
- i::FLAG_vector_stores = true;
i::FLAG_ignition = true;
i::FLAG_ignition_fake_try_catch = true;
+ i::FLAG_ignition_fallback_on_eval_and_catch = false;
i::FLAG_always_opt = false;
// Set ignition filter flag via SetFlagsFromString to avoid double-free
// (or potential leak with StrDup() based on ownership confusion).
@@ -344,7 +341,7 @@ TEST(InterpreterLoadLiteral) {
TEST(InterpreterLoadStoreRegisters) {
HandleAndZoneScope handles;
Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
- for (int i = 0; i <= Register::kMaxRegisterIndex; i++) {
+ for (int i = 0; i <= kMaxInt8; i++) {
BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(i + 1);
builder.set_context_count(0);
@@ -365,6 +362,117 @@ TEST(InterpreterLoadStoreRegisters) {
}
+TEST(InterpreterExchangeRegisters) {
+ for (int locals_count = 2; locals_count < 300; locals_count += 126) {
+ HandleAndZoneScope handles;
+ for (int exchanges = 1; exchanges < 4; exchanges++) {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(locals_count);
+ builder.set_context_count(0);
+ builder.set_parameter_count(0);
+
+ Register r0(0);
+ Register r1(locals_count - 1);
+ builder.LoadTrue();
+ builder.StoreAccumulatorInRegister(r0);
+ builder.ExchangeRegisters(r0, r1);
+ builder.LoadFalse();
+ builder.StoreAccumulatorInRegister(r0);
+
+ bool expected = false;
+ for (int i = 0; i < exchanges; i++) {
+ builder.ExchangeRegisters(r0, r1);
+ expected = !expected;
+ }
+ builder.LoadAccumulatorWithRegister(r0);
+ builder.Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ Handle<Object> expected_val =
+ handles.main_isolate()->factory()->ToBoolean(expected);
+ CHECK(return_val.is_identical_to(expected_val));
+ }
+ }
+}
+
+
+TEST(InterpreterExchangeRegistersWithParameter) {
+ for (int locals_count = 2; locals_count < 300; locals_count += 126) {
+ HandleAndZoneScope handles;
+ for (int exchanges = 1; exchanges < 4; exchanges++) {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(locals_count);
+ builder.set_context_count(0);
+ builder.set_parameter_count(3);
+
+ Register r0 = Register::FromParameterIndex(2, 3);
+ Register r1(locals_count - 1);
+ builder.LoadTrue();
+ builder.StoreAccumulatorInRegister(r0);
+ builder.ExchangeRegisters(r0, r1);
+ builder.LoadFalse();
+ builder.StoreAccumulatorInRegister(r0);
+
+ bool expected = false;
+ for (int i = 0; i < exchanges; i++) {
+ builder.ExchangeRegisters(r0, r1);
+ expected = !expected;
+ }
+ builder.LoadAccumulatorWithRegister(r0);
+ builder.Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ Handle<Object> expected_val =
+ handles.main_isolate()->factory()->ToBoolean(expected);
+ CHECK(return_val.is_identical_to(expected_val));
+ }
+ }
+}
+
+
+TEST(InterpreterExchangeWideRegisters) {
+ for (int locals_count = 3; locals_count < 300; locals_count += 126) {
+ HandleAndZoneScope handles;
+ for (int exchanges = 0; exchanges < 7; exchanges++) {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(locals_count);
+ builder.set_context_count(0);
+ builder.set_parameter_count(0);
+
+ Register r0(0);
+ Register r1(locals_count - 1);
+ Register r2(locals_count - 2);
+ builder.LoadLiteral(Smi::FromInt(200));
+ builder.StoreAccumulatorInRegister(r0);
+ builder.ExchangeRegisters(r0, r1);
+ builder.LoadLiteral(Smi::FromInt(100));
+ builder.StoreAccumulatorInRegister(r0);
+ builder.ExchangeRegisters(r0, r2);
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.StoreAccumulatorInRegister(r0);
+ for (int i = 0; i < exchanges; i++) {
+ builder.ExchangeRegisters(r1, r2);
+ builder.ExchangeRegisters(r0, r1);
+ }
+ builder.LoadAccumulatorWithRegister(r0);
+ builder.Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ Handle<Object> expected_val =
+ handles.main_isolate()->factory()->NewNumberFromInt(100 *
+ (exchanges % 3));
+ CHECK(return_val.is_identical_to(expected_val));
+ }
+ }
+}
+
+
static const Token::Value kShiftOperators[] = {
Token::Value::SHL, Token::Value::SAR, Token::Value::SHR};
@@ -778,9 +886,8 @@ TEST(InterpreterLoadNamedProperty) {
builder.set_locals_count(0);
builder.set_context_count(0);
builder.set_parameter_count(1);
- size_t name_index = builder.GetConstantPoolEntry(name);
- builder.LoadNamedProperty(builder.Parameter(0), name_index,
- vector->GetIndex(slot), i::SLOPPY)
+ builder.LoadNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot),
+ i::SLOPPY)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -880,10 +987,9 @@ TEST(InterpreterStoreNamedProperty) {
builder.set_locals_count(0);
builder.set_context_count(0);
builder.set_parameter_count(1);
- size_t name_index = builder.GetConstantPoolEntry(name);
builder.LoadLiteral(Smi::FromInt(999))
- .StoreNamedProperty(builder.Parameter(0), name_index,
- vector->GetIndex(slot), i::STRICT)
+ .StoreNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot),
+ i::STRICT)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -995,11 +1101,9 @@ TEST(InterpreterCall) {
builder.set_locals_count(1);
builder.set_context_count(0);
builder.set_parameter_count(1);
- size_t name_index = builder.GetConstantPoolEntry(name);
- builder.LoadNamedProperty(builder.Parameter(0), name_index, slot_index,
- i::SLOPPY)
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
.StoreAccumulatorInRegister(Register(0))
- .Call(Register(0), builder.Parameter(0), 0)
+ .Call(Register(0), builder.Parameter(0), 0, 0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1018,11 +1122,9 @@ TEST(InterpreterCall) {
builder.set_locals_count(1);
builder.set_context_count(0);
builder.set_parameter_count(1);
- size_t name_index = builder.GetConstantPoolEntry(name);
- builder.LoadNamedProperty(builder.Parameter(0), name_index, slot_index,
- i::SLOPPY)
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
.StoreAccumulatorInRegister(Register(0))
- .Call(Register(0), builder.Parameter(0), 0)
+ .Call(Register(0), builder.Parameter(0), 0, 0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1044,9 +1146,7 @@ TEST(InterpreterCall) {
builder.set_locals_count(4);
builder.set_context_count(0);
builder.set_parameter_count(1);
- size_t name_index = builder.GetConstantPoolEntry(name);
- builder.LoadNamedProperty(builder.Parameter(0), name_index, slot_index,
- i::SLOPPY)
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
.StoreAccumulatorInRegister(Register(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
.StoreAccumulatorInRegister(Register(1))
@@ -1054,7 +1154,7 @@ TEST(InterpreterCall) {
.StoreAccumulatorInRegister(Register(2))
.LoadLiteral(Smi::FromInt(11))
.StoreAccumulatorInRegister(Register(3))
- .Call(Register(0), Register(1), 2)
+ .Call(Register(0), Register(1), 2, 0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1075,9 +1175,7 @@ TEST(InterpreterCall) {
builder.set_locals_count(12);
builder.set_context_count(0);
builder.set_parameter_count(1);
- size_t name_index = builder.GetConstantPoolEntry(name);
- builder.LoadNamedProperty(builder.Parameter(0), name_index, slot_index,
- i::SLOPPY)
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
.StoreAccumulatorInRegister(Register(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
.StoreAccumulatorInRegister(Register(1))
@@ -1101,7 +1199,7 @@ TEST(InterpreterCall) {
.StoreAccumulatorInRegister(Register(10))
.LoadLiteral(factory->NewStringFromAsciiChecked("j"))
.StoreAccumulatorInRegister(Register(11))
- .Call(Register(0), Register(1), 10)
+ .Call(Register(0), Register(1), 10, 0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1245,8 +1343,8 @@ TEST(InterpreterConditionalJumps2) {
static const Token::Value kComparisonTypes[] = {
- Token::Value::EQ, Token::Value::NE, Token::Value::EQ_STRICT,
- Token::Value::NE_STRICT, Token::Value::LTE, Token::Value::LTE,
+ Token::Value::EQ, Token::Value::NE, Token::Value::EQ_STRICT,
+ Token::Value::NE_STRICT, Token::Value::LT, Token::Value::LTE,
Token::Value::GT, Token::Value::GTE};
@@ -1566,43 +1664,6 @@ static void LoadAny(BytecodeArrayBuilder* builder,
}
-TEST(InterpreterToBoolean) {
- HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
-
- std::pair<Handle<Object>, bool> object_type_tuples[] = {
- std::make_pair(factory->undefined_value(), false),
- std::make_pair(factory->null_value(), false),
- std::make_pair(factory->false_value(), false),
- std::make_pair(factory->true_value(), true),
- std::make_pair(factory->NewNumber(9.1), true),
- std::make_pair(factory->NewNumberFromInt(0), false),
- std::make_pair(
- Handle<Object>::cast(factory->NewStringFromStaticChars("hello")),
- true),
- std::make_pair(
- Handle<Object>::cast(factory->NewStringFromStaticChars("")), false),
- };
-
- for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- Register r0(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
- LoadAny(&builder, factory, object_type_tuples[i].first);
- builder.CastAccumulatorToBoolean();
- builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
- auto callable = tester.GetCallable<>();
- Handle<Object> return_value = callable().ToHandleChecked();
- CHECK(return_value->IsBoolean());
- CHECK_EQ(return_value->BooleanValue(), object_type_tuples[i].second);
- }
-}
-
-
TEST(InterpreterUnaryNotNonBoolean) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
@@ -1883,7 +1944,11 @@ TEST(InterpreterContextVariables) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
- std::pair<const char*, Handle<Object>> context_vars[] = {
+ std::ostringstream unique_vars;
+ for (int i = 0; i < 250; i++) {
+ unique_vars << "var a" << i << " = 0;";
+ }
+ std::pair<std::string, Handle<Object>> context_vars[] = {
std::make_pair("var a; (function() { a = 1; })(); return a;",
handle(Smi::FromInt(1), isolate)),
std::make_pair("var a = 10; (function() { a; })(); return a;",
@@ -1898,10 +1963,14 @@ TEST(InterpreterContextVariables) {
"{ let b = 20; var c = function() { [a, b] };\n"
" return a + b; }",
handle(Smi::FromInt(30), isolate)),
+ std::make_pair("'use strict';" + unique_vars.str() +
+ "eval(); var b = 100; return b;",
+ handle(Smi::FromInt(100), isolate)),
};
for (size_t i = 0; i < arraysize(context_vars); i++) {
- std::string source(InterpreterTester::SourceForBody(context_vars[i].first));
+ std::string source(
+ InterpreterTester::SourceForBody(context_vars[i].first.c_str()));
InterpreterTester tester(handles.main_isolate(), source.c_str());
auto callable = tester.GetCallable<>();
@@ -2162,6 +2231,19 @@ TEST(InterpreterCountOperators) {
handle(Smi::FromInt(3), isolate)),
std::make_pair("var a = 1; (function() { a = 2 })(); return a--;",
handle(Smi::FromInt(2), isolate)),
+ std::make_pair("var i = 5; while(i--) {}; return i;",
+ handle(Smi::FromInt(-1), isolate)),
+ std::make_pair("var i = 1; if(i--) { return 1; } else { return 2; };",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_pair("var i = -2; do {} while(i++) {}; return i;",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_pair("var i = -1; for(; i++; ) {}; return i",
+ handle(Smi::FromInt(1), isolate)),
+ std::make_pair("var i = 20; switch(i++) {\n"
+ " case 20: return 1;\n"
+ " default: return 2;\n"
+ "}",
+ handle(Smi::FromInt(1), isolate)),
};
for (size_t i = 0; i < arraysize(count_ops); i++) {
@@ -2618,7 +2700,6 @@ TEST(InterpreterBasicLoops) {
TEST(InterpreterForIn) {
HandleAndZoneScope handles;
- // TODO(oth): Add a test here for delete mid-loop when delete is ready.
std::pair<const char*, int> for_in_samples[] = {
{"function f() {\n"
" var r = -1;\n"
@@ -2795,7 +2876,27 @@ TEST(InterpreterForIn) {
" }\n"
" return flags;\n"
" }",
- 0}};
+ 0},
+ {"function f() {\n"
+ " var data = {x:23, y:34};\n"
+ " var result = 0;\n"
+ " var o = {};\n"
+ " var arr = [o];\n"
+ " for (arr[0].p in data)\n" // This is to test if value is loaded
+ " result += data[arr[0].p];\n" // back from accumulator before storing
+ " return result;\n" // named properties.
+ "}",
+ 57},
+ {"function f() {\n"
+ " var data = {x:23, y:34};\n"
+ " var result = 0;\n"
+ " var o = {};\n"
+ " var i = 0;\n"
+ " for (o[i++] in data)\n" // This is to test if value is loaded
+ " result += data[o[i-1]];\n" // back from accumulator before
+ " return result;\n" // storing keyed properties.
+ "}",
+ 57}};
for (size_t i = 0; i < arraysize(for_in_samples); i++) {
InterpreterTester tester(handles.main_isolate(), for_in_samples[i].first);
@@ -2951,6 +3052,516 @@ TEST(InterpreterNewTarget) {
CHECK(new_target_name->SameValue(*factory->NewStringFromStaticChars("f")));
}
+
+TEST(InterpreterAssignmentInExpressions) {
+ HandleAndZoneScope handles;
+
+ std::pair<const char*, int> samples[] = {
+ {"function f() {\n"
+ " var x = 7;\n"
+ " var y = x + (x = 1) + (x = 2);\n"
+ " return y;\n"
+ "}",
+ 10},
+ {"function f() {\n"
+ " var x = 7;\n"
+ " var y = x + (x = 1) + (x = 2);\n"
+ " return x;\n"
+ "}",
+ 2},
+ {"function f() {\n"
+ " var x = 55;\n"
+ " x = x + (x = 100) + (x = 101);\n"
+ " return x;\n"
+ "}",
+ 256},
+ {"function f() {\n"
+ " var x = 7;\n"
+ " return ++x + x + x++;\n"
+ "}",
+ 24},
+ {"function f() {\n"
+ " var x = 7;\n"
+ " var y = 1 + ++x + x + x++;\n"
+ " return x;\n"
+ "}",
+ 9},
+ {"function f() {\n"
+ " var x = 7;\n"
+ " var y = ++x + x + x++;\n"
+ " return x;\n"
+ "}",
+ 9},
+ {"function f() {\n"
+ " var x = 7, y = 100, z = 1000;\n"
+ " return x + (x += 3) + y + (y *= 10) + (z *= 7) + z;\n"
+ "}",
+ 15117},
+ {"function f() {\n"
+ " var inner = function (x) { return x + (x = 2) + (x = 4) + x; };\n"
+ " return inner(1);\n"
+ "}",
+ 11},
+ {"function f() {\n"
+ " var x = 1, y = 2;\n"
+ " x = x + (x = 3) + y + (y = 4), y = y + (y = 5) + y + x;\n"
+ " return x + y;\n"
+ "}",
+ 10 + 24},
+ {"function f() {\n"
+ " var x = 0;\n"
+ " var y = x | (x = 1) | (x = 2);\n"
+ " return x;\n"
+ "}",
+ 2},
+ {"function f() {\n"
+ " var x = 0;\n"
+ " var y = x || (x = 1);\n"
+ " return x;\n"
+ "}",
+ 1},
+ {"function f() {\n"
+ " var x = 1;\n"
+ " var y = x && (x = 2) && (x = 3);\n"
+ " return x;\n"
+ "}",
+ 3},
+ {"function f() {\n"
+ " var x = 1;\n"
+ " var y = x || (x = 2);\n"
+ " return x;\n"
+ "}",
+ 1},
+ {"function f() {\n"
+ " var x = 1;\n"
+ " x = (x << (x = 3)) | (x = 16);\n"
+ " return x;\n"
+ "}",
+ 24},
+ {"function f() {\n"
+ " var r = 7;\n"
+ " var s = 11;\n"
+ " var t = 13;\n"
+ " var u = r + s + t + (r = 10) + (s = 20) +"
+ " (t = (r + s)) + r + s + t;\n"
+ " return r + s + t + u;\n"
+ "}",
+ 211},
+ {"function f() {\n"
+ " var r = 7;\n"
+ " var s = 11;\n"
+ " var t = 13;\n"
+ " return r > (3 * s * (s = 1)) ? (t + (t += 1)) : (r + (r = 4));\n"
+ "}",
+ 11},
+ {"function f() {\n"
+ " var r = 7;\n"
+ " var s = 11;\n"
+ " var t = 13;\n"
+ " return r > (3 * s * (s = 0)) ? (t + (t += 1)) : (r + (r = 4));\n"
+ "}",
+ 27},
+ {"function f() {\n"
+ " var r = 7;\n"
+ " var s = 11;\n"
+ " var t = 13;\n"
+ " return (r + (r = 5)) > s ? r : t;\n"
+ "}",
+ 5},
+ {"function f(a) {\n"
+ " return a + (arguments[0] = 10);\n"
+ "}",
+ 50},
+ {"function f(a) {\n"
+ " return a + (arguments[0] = 10) + a;\n"
+ "}",
+ 60},
+ {"function f(a) {\n"
+ " return a + (arguments[0] = 10) + arguments[0];\n"
+ "}",
+ 60},
+ };
+
+ const int arg_value = 40;
+ for (size_t i = 0; i < arraysize(samples); i++) {
+ InterpreterTester tester(handles.main_isolate(), samples[i].first);
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_val =
+ callable(handle(Smi::FromInt(arg_value), handles.main_isolate()))
+ .ToHandleChecked();
+ CHECK_EQ(Handle<Smi>::cast(return_val)->value(), samples[i].second);
+ }
+}
+
+
+TEST(InterpreterToName) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> to_name_tests[] = {
+ {"var a = 'val'; var obj = {[a] : 10}; return obj.val;",
+ factory->NewNumberFromInt(10)},
+ {"var a = 20; var obj = {[a] : 10}; return obj['20'];",
+ factory->NewNumberFromInt(10)},
+ {"var a = 20; var obj = {[a] : 10}; return obj[20];",
+ factory->NewNumberFromInt(10)},
+ {"var a = {val:23}; var obj = {[a] : 10}; return obj[a];",
+ factory->NewNumberFromInt(10)},
+ {"var a = {val:23}; var obj = {[a] : 10};\n"
+ "return obj['[object Object]'];",
+ factory->NewNumberFromInt(10)},
+ {"var a = {toString : function() { return 'x'}};\n"
+ "var obj = {[a] : 10};\n"
+ "return obj.x;",
+ factory->NewNumberFromInt(10)},
+ {"var a = {valueOf : function() { return 'x'}};\n"
+ "var obj = {[a] : 10};\n"
+ "return obj.x;",
+ factory->undefined_value()},
+ {"var a = {[Symbol.toPrimitive] : function() { return 'x'}};\n"
+ "var obj = {[a] : 10};\n"
+ "return obj.x;",
+ factory->NewNumberFromInt(10)},
+ };
+
+ for (size_t i = 0; i < arraysize(to_name_tests); i++) {
+ std::string source(
+ InterpreterTester::SourceForBody(to_name_tests[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*to_name_tests[i].second));
+ }
+}
+
+
+TEST(TemporaryRegisterAllocation) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> reg_tests[] = {
+ {"function add(a, b, c) {"
+ " return a + b + c;"
+ "}"
+ "function f() {"
+ " var a = 10, b = 10;"
+ " return add(a, b++, b);"
+ "}",
+ factory->NewNumberFromInt(31)},
+ {"function add(a, b, c, d) {"
+ " return a + b + c + d;"
+ "}"
+ "function f() {"
+ " var x = 10, y = 20, z = 30;"
+ " return x + add(x, (y= x++), x, z);"
+ "}",
+ factory->NewNumberFromInt(71)},
+ };
+
+ for (size_t i = 0; i < arraysize(reg_tests); i++) {
+ InterpreterTester tester(handles.main_isolate(), reg_tests[i].first);
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*reg_tests[i].second));
+ }
+}
+
+
+TEST(InterpreterLookupSlot) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ // TODO(mythria): Add more tests when we have support for eval/with.
+ const char* function_prologue = "var f;"
+ "var x = 1;"
+ "function f1() {"
+ " eval(\"function t() {";
+ const char* function_epilogue = " }; f = t;\");"
+ "}"
+ "f1();";
+
+
+ std::pair<const char*, Handle<Object>> lookup_slot[] = {
+ {"return x;", handle(Smi::FromInt(1), isolate)},
+ {"return typeof x;", factory->NewStringFromStaticChars("number")},
+ {"return typeof dummy;", factory->NewStringFromStaticChars("undefined")},
+ {"x = 10; return x;", handle(Smi::FromInt(10), isolate)},
+ {"'use strict'; x = 20; return x;", handle(Smi::FromInt(20), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(lookup_slot); i++) {
+ std::string script = std::string(function_prologue) +
+ std::string(lookup_slot[i].first) +
+ std::string(function_epilogue);
+
+ InterpreterTester tester(handles.main_isolate(), script.c_str(), "t");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*lookup_slot[i].second));
+ }
+}
+
+
+TEST(InterpreterCallLookupSlot) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ std::pair<const char*, Handle<Object>> call_lookup[] = {
+ {"g = function(){ return 2 }; eval(''); return g();",
+ handle(Smi::FromInt(2), isolate)},
+ {"g = function(){ return 2 }; eval('g = function() {return 3}');\n"
+ "return g();",
+ handle(Smi::FromInt(3), isolate)},
+ {"g = { x: function(){ return this.y }, y: 20 };\n"
+ "eval('g = { x: g.x, y: 30 }');\n"
+ "return g.x();",
+ handle(Smi::FromInt(30), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(call_lookup); i++) {
+ std::string source(InterpreterTester::SourceForBody(call_lookup[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*call_lookup[i].second));
+ }
+}
+
+
+TEST(InterpreterLookupSlotWide) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ const char* function_prologue =
+ "var f;"
+ "var x = 1;"
+ "function f1() {"
+ " eval(\"function t() {";
+ const char* function_epilogue =
+ " }; f = t;\");"
+ "}"
+ "f1();";
+ std::ostringstream str;
+ str << "var y = 2.3;";
+ for (int i = 1; i < 256; i++) {
+ str << "y = " << 2.3 + i << ";";
+ }
+ std::string init_function_body = str.str();
+
+ std::pair<std::string, Handle<Object>> lookup_slot[] = {
+ {init_function_body + "return x;", handle(Smi::FromInt(1), isolate)},
+ {init_function_body + "return typeof x;",
+ factory->NewStringFromStaticChars("number")},
+ {init_function_body + "return x = 10;",
+ handle(Smi::FromInt(10), isolate)},
+ {"'use strict';" + init_function_body + "x = 20; return x;",
+ handle(Smi::FromInt(20), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(lookup_slot); i++) {
+ std::string script = std::string(function_prologue) + lookup_slot[i].first +
+ std::string(function_epilogue);
+
+ InterpreterTester tester(handles.main_isolate(), script.c_str(), "t");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*lookup_slot[i].second));
+ }
+}
+
+
+TEST(InterpreterDeleteLookupSlot) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ // TODO(mythria): Add more tests when we have support for eval/with.
+ const char* function_prologue = "var f;"
+ "var x = 1;"
+ "y = 10;"
+ "var obj = {val:10};"
+ "var z = 30;"
+ "function f1() {"
+ " var z = 20;"
+ " eval(\"function t() {";
+ const char* function_epilogue = " }; f = t;\");"
+ "}"
+ "f1();";
+
+
+ std::pair<const char*, Handle<Object>> delete_lookup_slot[] = {
+ {"return delete x;", factory->false_value()},
+ {"return delete y;", factory->true_value()},
+ {"return delete z;", factory->false_value()},
+ {"return delete obj.val;", factory->true_value()},
+ {"'use strict'; return delete obj.val;", factory->true_value()},
+ };
+
+ for (size_t i = 0; i < arraysize(delete_lookup_slot); i++) {
+ std::string script = std::string(function_prologue) +
+ std::string(delete_lookup_slot[i].first) +
+ std::string(function_epilogue);
+
+ InterpreterTester tester(handles.main_isolate(), script.c_str(), "t");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*delete_lookup_slot[i].second));
+ }
+}
+
+
+TEST(JumpWithConstantsAndWideConstants) {
+ HandleAndZoneScope handles;
+ auto isolate = handles.main_isolate();
+ auto factory = isolate->factory();
+ const int kStep = 13;
+ for (int constants = 3; constants < 256 + 3 * kStep; constants += kStep) {
+ std::ostringstream filler_os;
+ // Generate a string that consumes constant pool entries and
+ // spread out branch distances in script below.
+ for (int i = 0; i < constants; i++) {
+ filler_os << "var x_ = 'x_" << i << "';\n";
+ }
+ std::string filler(filler_os.str());
+ std::ostringstream script_os;
+ script_os << "function " << InterpreterTester::function_name() << "(a) {\n";
+ script_os << " " << filler;
+ script_os << " for (var i = a; i < 2; i++) {\n";
+ script_os << " " << filler;
+ script_os << " if (i == 0) { " << filler << "i = 10; continue; }\n";
+ script_os << " else if (i == a) { " << filler << "i = 12; break; }\n";
+ script_os << " else { " << filler << " }\n";
+ script_os << " }\n";
+ script_os << " return i;\n";
+ script_os << "}\n";
+ std::string script(script_os.str());
+ for (int a = 0; a < 3; a++) {
+ InterpreterTester tester(handles.main_isolate(), script.c_str());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_val =
+ callable(factory->NewNumberFromInt(a)).ToHandleChecked();
+ static const int results[] = {11, 12, 2};
+ CHECK_EQ(Handle<Smi>::cast(return_val)->value(), results[a]);
+ }
+ }
+}
+
+
+TEST(InterpreterEval) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> eval[] = {
+ {"return eval('1;');", handle(Smi::FromInt(1), isolate)},
+ {"return eval('100 * 20;');", handle(Smi::FromInt(2000), isolate)},
+ {"var x = 10; return eval('x + 20;');",
+ handle(Smi::FromInt(30), isolate)},
+ {"var x = 10; eval('x = 33;'); return x;",
+ handle(Smi::FromInt(33), isolate)},
+ {"'use strict'; var x = 20; var z = 0;\n"
+ "eval('var x = 33; z = x;'); return x + z;",
+ handle(Smi::FromInt(53), isolate)},
+ {"eval('var x = 33;'); eval('var y = x + 20'); return x + y;",
+ handle(Smi::FromInt(86), isolate)},
+ {"var x = 1; eval('for(i = 0; i < 10; i++) x = x + 1;'); return x",
+ handle(Smi::FromInt(11), isolate)},
+ {"var x = 10; eval('var x = 20;'); return x;",
+ handle(Smi::FromInt(20), isolate)},
+ {"var x = 1; eval('\"use strict\"; var x = 2;'); return x;",
+ handle(Smi::FromInt(1), isolate)},
+ {"'use strict'; var x = 1; eval('var x = 2;'); return x;",
+ handle(Smi::FromInt(1), isolate)},
+ {"var x = 10; eval('x + 20;'); return typeof x;",
+ factory->NewStringFromStaticChars("number")},
+ {"eval('var y = 10;'); return typeof unallocated;",
+ factory->NewStringFromStaticChars("undefined")},
+ {"'use strict'; eval('var y = 10;'); return typeof unallocated;",
+ factory->NewStringFromStaticChars("undefined")},
+ {"eval('var x = 10;'); return typeof x;",
+ factory->NewStringFromStaticChars("number")},
+ {"var x = {}; eval('var x = 10;'); return typeof x;",
+ factory->NewStringFromStaticChars("number")},
+ {"'use strict'; var x = {}; eval('var x = 10;'); return typeof x;",
+ factory->NewStringFromStaticChars("object")},
+ };
+
+ for (size_t i = 0; i < arraysize(eval); i++) {
+ std::string source(InterpreterTester::SourceForBody(eval[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*eval[i].second));
+ }
+}
+
+
+TEST(InterpreterEvalParams) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ std::pair<const char*, Handle<Object>> eval_params[] = {
+ {"var x = 10; return eval('x + p1;');",
+ handle(Smi::FromInt(30), isolate)},
+ {"var x = 10; eval('p1 = x;'); return p1;",
+ handle(Smi::FromInt(10), isolate)},
+ {"var a = 10;"
+ "function inner() { return eval('a + p1;');}"
+ "return inner();",
+ handle(Smi::FromInt(30), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(eval_params); i++) {
+ std::string source = "function " + InterpreterTester::function_name() +
+ "(p1) {" + eval_params[i].first + "}";
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ Handle<i::Object> return_value =
+ callable(handle(Smi::FromInt(20), isolate)).ToHandleChecked();
+ CHECK(return_value->SameValue(*eval_params[i].second));
+ }
+}
+
+
+TEST(InterpreterEvalGlobal) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> eval_global[] = {
+ {"function add_global() { eval('function test() { z = 33; }; test()'); };"
+ "function f() { add_global(); return z; }; f();",
+ handle(Smi::FromInt(33), isolate)},
+ {"function add_global() {\n"
+ " eval('\"use strict\"; function test() { y = 33; };"
+ " try { test() } catch(e) {}');\n"
+ "}\n"
+ "function f() { add_global(); return typeof y; } f();",
+ factory->NewStringFromStaticChars("undefined")},
+ };
+
+ for (size_t i = 0; i < arraysize(eval_global); i++) {
+ InterpreterTester tester(handles.main_isolate(), eval_global[i].first,
+ "test");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*eval_global[i].second));
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/print-extension.cc b/deps/v8/test/cctest/print-extension.cc
index 33f33cafc2..b5f6fb549e 100644
--- a/deps/v8/test/cctest/print-extension.cc
+++ b/deps/v8/test/cctest/print-extension.cc
@@ -30,9 +30,8 @@
namespace v8 {
namespace internal {
-v8::Handle<v8::FunctionTemplate> PrintExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> str) {
+v8::Local<v8::FunctionTemplate> PrintExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> str) {
return v8::FunctionTemplate::New(isolate, PrintExtension::Print);
}
diff --git a/deps/v8/test/cctest/print-extension.h b/deps/v8/test/cctest/print-extension.h
index c2961d0dd1..74d74ef81b 100644
--- a/deps/v8/test/cctest/print-extension.h
+++ b/deps/v8/test/cctest/print-extension.h
@@ -36,9 +36,8 @@ namespace internal {
class PrintExtension : public v8::Extension {
public:
PrintExtension() : v8::Extension("v8/print", "native function print();") { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
};
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index c8cb0fb7ca..a917932978 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -39,17 +39,25 @@ const char* ProfilerExtension::kSource =
"native function startProfiling();"
"native function stopProfiling();";
-v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Handle<v8::String> name) {
- if (name->Equals(v8::String::NewFromUtf8(isolate, "startProfiling"))) {
+v8::Local<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (name->Equals(context, v8::String::NewFromUtf8(isolate, "startProfiling",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate,
ProfilerExtension::StartProfiling);
- } else if (name->Equals(v8::String::NewFromUtf8(isolate, "stopProfiling"))) {
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "stopProfiling",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate,
ProfilerExtension::StopProfiling);
} else {
CHECK(false);
- return v8::Handle<v8::FunctionTemplate>();
+ return v8::Local<v8::FunctionTemplate>();
}
}
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 9b68820bd8..0687c33500 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index f6d1ef0718..cda16cdbcb 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -2,23 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/cctest.h"
#include "include/v8.h"
+#include "include/v8-experimental.h"
+
+namespace i = v8::internal;
-#ifdef V8_JS_ACCESSORS
-static void CppAccessor(const v8::FunctionCallbackInfo<v8::Value>& info) {
+static void CppAccessor42(const v8::FunctionCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(42);
}
-static const char* JsAccessor =
- "function firstChildJS(value) { return 41; }; firstChildJS";
-TEST(JavascriptAccessors) {
+static void CppAccessor41(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(41);
+}
+
+
+v8::experimental::FastAccessorBuilder* FastAccessor(v8::Isolate* isolate) {
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ builder->ReturnValue(builder->IntegerConstant(41));
+ return builder;
+}
+
+
+TEST(FastAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
@@ -38,13 +47,15 @@ TEST(JavascriptAccessors) {
// cpp accessor as "firstChild":
parent->PrototypeTemplate()->SetAccessorProperty(
v8_str("firstChild"),
- v8::FunctionTemplate::New(isolate, CppAccessor, v8::Local<v8::Value>(),
- signature));
+ v8::FunctionTemplate::New(isolate, CppAccessor42,
+ v8::Local<v8::Value>(), signature));
- // JS accessor as "firstChildJS":
- auto js_accessor = v8::Local<v8::Function>::Cast(CompileRun(JsAccessor));
- parent->PrototypeTemplate()->SetAccessorProperty(v8_str("firstChildJS"),
- js_accessor);
+ // JS accessor as "firstChildRaw":
+ parent->PrototypeTemplate()->SetAccessorProperty(
+ v8_str("firstChildRaw"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, CppAccessor41, FastAccessor(isolate),
+ v8::Local<v8::Value>(), signature));
}
// Setup child object ( =~ a specific DOM Node, e.g. a <div> ).
@@ -61,18 +72,18 @@ TEST(JavascriptAccessors) {
// The simple case: Run it once.
ExpectInt32("var n = new Node(); n.firstChild", 42);
- ExpectInt32("var n = new Node(); n.firstChildJS", 41);
+ ExpectInt32("var n = new Node(); n.firstChildRaw", 41);
// Run them in a loop. This will likely trigger the optimizing compiler:
ExpectInt32(
"var m = new Node(); "
"var sum = 0; "
- "for (var i = 0; i < 3; ++i) { "
+ "for (var i = 0; i < 10; ++i) { "
" sum += m.firstChild; "
- " sum += m.firstChildJS; "
+ " sum += m.firstChildRaw; "
"}; "
"sum;",
- 3 * (42 + 41));
+ 10 * (42 + 41));
// Obtain the accessor and call it via apply on the Node:
ExpectInt32(
@@ -84,10 +95,19 @@ TEST(JavascriptAccessors) {
ExpectInt32(
"var n = new Node(); "
"var g = Object.getOwnPropertyDescriptor("
- " n.__proto__.__proto__, 'firstChildJS')['get']; "
+ " n.__proto__.__proto__, 'firstChildRaw')['get']; "
"g.apply(n);",
41);
- // TODO(vogelheim): Verify compatible receiver check works.
+ ExpectInt32(
+ "var n = new Node();"
+ "var g = Object.getOwnPropertyDescriptor("
+ " n.__proto__.__proto__, 'firstChildRaw')['get'];"
+ "try {"
+ " var f = { firstChildRaw: '51' };"
+ " g.apply(f);"
+ "} catch(e) {"
+ " 31415;"
+ "}",
+ 31415);
}
-#endif // V8_JS_ACCESSORS
diff --git a/deps/v8/test/cctest/test-api-fast-accessor-builder.cc b/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
new file mode 100644
index 0000000000..1e1c972694
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
@@ -0,0 +1,288 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "include/v8.h"
+#include "include/v8-experimental.h"
+
+#include "src/api.h"
+#include "test/cctest/cctest.h"
+
+namespace {
+
+// These tests mean to exercise v8::FastAccessorBuilder. Since initially the
+// "native" accessor will get called, we need to "warmup" any accessor first,
+// to make sure we're actually testing the v8::FastAccessorBuilder result.
+// To accomplish this, we will
+// - call each accesssor N times before the actual test.
+// - wrap that call in a function, so that all such calls will go
+// through a single call site.
+// - bloat that function with a very long comment to prevent its inlining.
+// - register a native accessor which is different from the build one
+// (so that our tests will always fail if we don't end up in the 'fast'
+// accessor).
+//
+// FN_WARMUP(name, src) define a JS function "name" with body "src".
+// It adds the INLINE_SPOILER to prevent inlining and will call name()
+// repeatedly to guarantee it's "warm".
+//
+// Use:
+// CompileRun(FN_WARMUP("fn", "return something();"));
+// ExpectXXX("fn(1234)", 5678);
+
+#define INLINE_SPOILER \
+ " /* " \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
+ "*/ " // 16 lines * 64 'X' =~ 1024 character comment.
+#define FN_WARMUP(name, src) \
+ "function " name "() { " src INLINE_SPOILER \
+ " }; for(i = 0; i < 2; i++) { " name "() } "
+
+static void NativePropertyAccessor(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_num(123));
+}
+
+} // anonymous namespace
+
+
+// Build a simple "fast accessor" and verify that it is being called.
+TEST(FastAccessor) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::FunctionTemplate> foo = v8::FunctionTemplate::New(isolate);
+
+ // Native accessor, bar, returns 123.
+ foo->PrototypeTemplate()->SetAccessorProperty(
+ v8_str("bar"),
+ v8::FunctionTemplate::New(isolate, NativePropertyAccessor));
+
+ // Fast accessor, barf, returns 124.
+ auto fab = v8::experimental::FastAccessorBuilder::New(isolate);
+ fab->ReturnValue(fab->IntegerConstant(124));
+ foo->PrototypeTemplate()->SetAccessorProperty(
+ v8_str("barf"), v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, fab));
+
+ // Install foo on the global object.
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("foo"),
+ foo->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
+ // Wrap f.barf + IC warmup.
+ CompileRun(FN_WARMUP("barf", "f = new foo(); return f.barf"));
+
+ ExpectInt32("f = new foo(); f.bar", 123);
+ ExpectInt32("f = new foo(); f.barf", 123); // First call in this call site.
+ ExpectInt32("barf()", 124); // Call via warmed-up callsite.
+}
+
+
+void AddInternalFieldAccessor(v8::Isolate* isolate,
+ v8::Local<v8::Template> templ, const char* name,
+ int field_no) {
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ builder->ReturnValue(
+ builder->LoadInternalField(builder->GetReceiver(), field_no));
+ templ->SetAccessorProperty(v8_str(name),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+}
+
+
+// "Fast" accessor that accesses an internal field.
+TEST(FastAccessorWithInternalField) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ foo->SetInternalFieldCount(3);
+ AddInternalFieldAccessor(isolate, foo, "field0", 0);
+ AddInternalFieldAccessor(isolate, foo, "field1", 1);
+ AddInternalFieldAccessor(isolate, foo, "field2", 2);
+
+ // Create an instance w/ 3 internal fields, put in a string, a Smi, nothing.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+ obj->SetInternalField(0, v8_str("Hi there!"));
+ obj->SetInternalField(1, v8::Integer::New(isolate, 4321));
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Warmup.
+ CompileRun(FN_WARMUP("field0", "return obj.field0"));
+ CompileRun(FN_WARMUP("field1", "return obj.field1"));
+ CompileRun(FN_WARMUP("field2", "return obj.field2"));
+
+ // Access fields.
+ ExpectString("field0()", "Hi there!");
+ ExpectInt32("field1()", 4321);
+ ExpectUndefined("field2()");
+}
+
+
+// "Fast" accessor with control flow via ...OrReturnNull methods.
+TEST(FastAccessorOrReturnNull) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ foo->SetInternalFieldCount(2);
+ {
+ // accessor "nullcheck": Return null if field 0 is non-null object; else 5.
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ auto val = builder->LoadInternalField(builder->GetReceiver(), 0);
+ builder->CheckNotZeroOrReturnNull(val);
+ builder->ReturnValue(builder->IntegerConstant(5));
+ foo->SetAccessorProperty(v8_str("nullcheck"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+ {
+ // accessor "maskcheck": Return null if field 1 has 3rd bit set.
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ auto val = builder->LoadInternalField(builder->GetReceiver(), 1);
+ builder->CheckFlagSetOrReturnNull(val, 0x4);
+ builder->ReturnValue(builder->IntegerConstant(42));
+ foo->SetAccessorProperty(v8_str("maskcheck"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+
+ // Create an instance.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // CheckNotZeroOrReturnNull:
+ CompileRun(FN_WARMUP("nullcheck", "return obj.nullcheck"));
+ obj->SetAlignedPointerInInternalField(0, /* anything != nullptr */ isolate);
+ ExpectInt32("nullcheck()", 5);
+ obj->SetAlignedPointerInInternalField(0, nullptr);
+ ExpectNull("nullcheck()");
+
+ // CheckFlagSetOrReturnNull:
+ CompileRun(FN_WARMUP("maskcheck", "return obj.maskcheck"));
+ obj->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0xf0));
+ ExpectInt32("maskcheck()", 42);
+ obj->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0xfe));
+ ExpectNull("maskcheck()");
+}
+
+
+// "Fast" accessor with simple control flow via explicit labels.
+TEST(FastAccessorControlFlowWithLabels) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ foo->SetInternalFieldCount(1);
+ {
+ // accessor isnull: 0 for nullptr, else 1.
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ auto label = builder->MakeLabel();
+ auto val = builder->LoadInternalField(builder->GetReceiver(), 0);
+ builder->CheckNotZeroOrJump(val, label);
+ builder->ReturnValue(builder->IntegerConstant(0));
+ builder->SetLabel(label);
+ builder->ReturnValue(builder->IntegerConstant(1));
+ foo->SetAccessorProperty(v8_str("isnull"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+
+ // Create an instance.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // CheckNotZeroOrReturnNull:
+ CompileRun(FN_WARMUP("isnull", "return obj.isnull"));
+ obj->SetAlignedPointerInInternalField(0, /* anything != nullptr */ isolate);
+ ExpectInt32("isnull()", 1);
+ obj->SetAlignedPointerInInternalField(0, nullptr);
+ ExpectInt32("isnull()", 0);
+}
+
+
+// "Fast" accessor, loading things.
+TEST(FastAccessorLoad) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ foo->SetInternalFieldCount(1);
+
+ // Internal field 0 is a pointer to a C++ data structure that we wish to load
+ // field values from.
+ struct {
+ size_t intval;
+ v8::Local<v8::String> v8val;
+ } val = {54321, v8_str("Hello")};
+
+ {
+ // accessor intisnonzero
+ int intval_offset =
+ static_cast<int>(reinterpret_cast<intptr_t>(&val.intval) -
+ reinterpret_cast<intptr_t>(&val));
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ auto label = builder->MakeLabel();
+ auto val = builder->LoadValue(
+ builder->LoadInternalField(builder->GetReceiver(), 0), intval_offset);
+ builder->CheckNotZeroOrJump(val, label);
+ builder->ReturnValue(builder->IntegerConstant(0));
+ builder->SetLabel(label);
+ builder->ReturnValue(builder->IntegerConstant(1));
+ foo->SetAccessorProperty(v8_str("nonzero"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+ {
+ // accessor loadval
+ int v8val_offset = static_cast<int>(reinterpret_cast<intptr_t>(&val.v8val) -
+ reinterpret_cast<intptr_t>(&val));
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ builder->ReturnValue(builder->LoadObject(
+ builder->LoadInternalField(builder->GetReceiver(), 0), v8val_offset));
+ foo->SetAccessorProperty(v8_str("loadval"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+
+ // Create an instance.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+ obj->SetAlignedPointerInInternalField(0, &val);
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Access val.intval:
+ CompileRun(FN_WARMUP("nonzero", "return obj.nonzero"));
+ ExpectInt32("nonzero()", 1);
+ val.intval = 0;
+ ExpectInt32("nonzero()", 0);
+ val.intval = 27;
+ ExpectInt32("nonzero()", 1);
+
+ // Access val.v8val:
+ CompileRun(FN_WARMUP("loadval", "return obj.loadval"));
+ ExpectString("loadval()", "Hello");
+}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 3327d782f7..9f5eb21954 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -13,7 +13,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/objects.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
@@ -24,7 +24,6 @@ using ::v8::Context;
using ::v8::Extension;
using ::v8::Function;
using ::v8::FunctionTemplate;
-using ::v8::Handle;
using ::v8::HandleScope;
using ::v8::Local;
using ::v8::Name;
@@ -69,15 +68,18 @@ void EmptyInterceptorSetter(Local<Name> name, Local<Value> value,
void SimpleAccessorGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- Handle<Object> self = Handle<Object>::Cast(info.This());
- info.GetReturnValue().Set(
- self->Get(String::Concat(v8_str("accessor_"), name)));
+ Local<Object> self = Local<Object>::Cast(info.This());
+ info.GetReturnValue().Set(self->Get(info.GetIsolate()->GetCurrentContext(),
+ String::Concat(v8_str("accessor_"), name))
+ .ToLocalChecked());
}
void SimpleAccessorSetter(Local<String> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- Handle<Object> self = Handle<Object>::Cast(info.This());
- self->Set(String::Concat(v8_str("accessor_"), name), value);
+ Local<Object> self = Local<Object>::Cast(info.This());
+ self->Set(info.GetIsolate()->GetCurrentContext(),
+ String::Concat(v8_str("accessor_"), name), value)
+ .FromJust();
}
@@ -108,7 +110,7 @@ void StringInterceptorGetter(
for (i = 0; name_str[i] && prefix[i]; ++i) {
if (name_str[i] != prefix[i]) return;
}
- Handle<Object> self = Handle<Object>::Cast(info.This());
+ Local<Object> self = Local<Object>::Cast(info.This());
info.GetReturnValue().Set(
self->GetPrivate(
info.GetIsolate()->GetCurrentContext(),
@@ -130,10 +132,10 @@ void StringInterceptorSetter(Local<String> name, Local<Value> value,
}
if (!prefix[i]) return;
- if (value->IsInt32() && value->Int32Value() < 10000) {
- Handle<Object> self = Handle<Object>::Cast(info.This());
- Handle<Context> context = info.GetIsolate()->GetCurrentContext();
- Handle<v8::Private> symbol = v8::Private::ForApi(info.GetIsolate(), name);
+ Local<Context> context = info.GetIsolate()->GetCurrentContext();
+ if (value->IsInt32() && value->Int32Value(context).FromJust() < 10000) {
+ Local<Object> self = Local<Object>::Cast(info.This());
+ Local<v8::Private> symbol = v8::Private::ForApi(info.GetIsolate(), name);
self->SetPrivate(context, symbol, value).FromJust();
info.GetReturnValue().Set(value);
}
@@ -166,8 +168,9 @@ void GenericInterceptorGetter(Local<Name> generic_name,
str = String::Concat(v8_str("_str_"), name);
}
- Handle<Object> self = Handle<Object>::Cast(info.This());
- info.GetReturnValue().Set(self->Get(str));
+ Local<Object> self = Local<Object>::Cast(info.This());
+ info.GetReturnValue().Set(
+ self->Get(info.GetIsolate()->GetCurrentContext(), str).ToLocalChecked());
}
void GenericInterceptorSetter(Local<Name> generic_name, Local<Value> value,
@@ -185,31 +188,31 @@ void GenericInterceptorSetter(Local<Name> generic_name, Local<Value> value,
str = String::Concat(v8_str("_str_"), name);
}
- Handle<Object> self = Handle<Object>::Cast(info.This());
- self->Set(str, value);
+ Local<Object> self = Local<Object>::Cast(info.This());
+ self->Set(info.GetIsolate()->GetCurrentContext(), str, value).FromJust();
info.GetReturnValue().Set(value);
}
-void AddAccessor(Handle<FunctionTemplate> templ, Handle<String> name,
+void AddAccessor(Local<FunctionTemplate> templ, Local<String> name,
v8::AccessorGetterCallback getter,
v8::AccessorSetterCallback setter) {
templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
}
-void AddInterceptor(Handle<FunctionTemplate> templ,
+void AddInterceptor(Local<FunctionTemplate> templ,
v8::NamedPropertyGetterCallback getter,
v8::NamedPropertySetterCallback setter) {
templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
}
-void AddAccessor(Handle<FunctionTemplate> templ, Handle<Name> name,
+void AddAccessor(Local<FunctionTemplate> templ, Local<Name> name,
v8::AccessorNameGetterCallback getter,
v8::AccessorNameSetterCallback setter) {
templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
}
-void AddInterceptor(Handle<FunctionTemplate> templ,
+void AddInterceptor(Local<FunctionTemplate> templ,
v8::GenericNamedPropertyGetterCallback getter,
v8::GenericNamedPropertySetterCallback setter) {
templ->InstanceTemplate()->SetHandler(
@@ -217,20 +220,24 @@ void AddInterceptor(Handle<FunctionTemplate> templ,
}
-v8::Handle<v8::Object> bottom;
+v8::Local<v8::Object> bottom;
void CheckThisIndexedPropertyHandler(
uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyHandler));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
void CheckThisNamedPropertyHandler(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyHandler));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
void CheckThisIndexedPropertySetter(
@@ -238,7 +245,9 @@ void CheckThisIndexedPropertySetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertySetter));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -247,14 +256,18 @@ void CheckThisNamedPropertySetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertySetter));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
void CheckThisIndexedPropertyQuery(
uint32_t index, const v8::PropertyCallbackInfo<v8::Integer>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyQuery));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -262,7 +275,9 @@ void CheckThisNamedPropertyQuery(
Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyQuery));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -270,7 +285,9 @@ void CheckThisIndexedPropertyDeleter(
uint32_t index, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyDeleter));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -278,7 +295,9 @@ void CheckThisNamedPropertyDeleter(
Local<Name> property, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyDeleter));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -286,7 +305,9 @@ void CheckThisIndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisIndexedPropertyEnumerator));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -294,7 +315,9 @@ void CheckThisNamedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyEnumerator));
ApiTestFuzzer::Fuzz();
- CHECK(info.This()->Equals(bottom));
+ CHECK(info.This()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), bottom)
+ .FromJust());
}
@@ -304,7 +327,9 @@ int echo_named_call_count;
void EchoNamedProperty(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(v8_str("data")->Equals(info.Data()));
+ CHECK(v8_str("data")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), info.Data())
+ .FromJust());
echo_named_call_count++;
info.GetReturnValue().Set(name);
}
@@ -331,20 +356,23 @@ THREADED_TEST(InterceptorHasOwnProperty) {
Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
instance_templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetter));
- Local<Function> function = fun_templ->GetFunction();
- context->Global()->Set(v8_str("constructor"), function);
- v8::Handle<Value> value = CompileRun(
+ Local<Function> function =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
+ context->Global()
+ ->Set(context.local(), v8_str("constructor"), function)
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"var o = new constructor();"
"o.hasOwnProperty('ostehaps');");
- CHECK_EQ(false, value->BooleanValue());
+ CHECK_EQ(false, value->BooleanValue(context.local()).FromJust());
value = CompileRun(
"o.ostehaps = 42;"
"o.hasOwnProperty('ostehaps');");
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
value = CompileRun(
"var p = new constructor();"
"p.hasOwnProperty('ostehaps');");
- CHECK_EQ(false, value->BooleanValue());
+ CHECK_EQ(false, value->BooleanValue(context.local()).FromJust());
}
@@ -356,8 +384,11 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
instance_templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetterGC));
- Local<Function> function = fun_templ->GetFunction();
- context->Global()->Set(v8_str("constructor"), function);
+ Local<Function> function =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
+ context->Global()
+ ->Set(context.local(), v8_str("constructor"), function)
+ .FromJust();
// Let's first make some stuff so we can be sure to get a good GC.
CompileRun(
"function makestr(size) {"
@@ -371,11 +402,11 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
"var x = makestr(12345);"
"x = makestr(31415);"
"x = makestr(23456);");
- v8::Handle<Value> value = CompileRun(
+ v8::Local<Value> value = CompileRun(
"var o = new constructor();"
"o.__proto__ = new String(x);"
"o.hasOwnProperty('ostehaps');");
- CHECK_EQ(false, value->BooleanValue());
+ CHECK_EQ(false, value->BooleanValue(context.local()).FromJust());
}
@@ -384,13 +415,16 @@ static void CheckInterceptorLoadIC(
int expected) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(getter, 0, 0, 0, 0,
v8_str("data")));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(source);
- CHECK_EQ(expected, value->Int32Value());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(source);
+ CHECK_EQ(expected, value->Int32Value(context.local()).FromJust());
}
@@ -399,8 +433,9 @@ static void InterceptorLoadICGetter(
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, info.GetIsolate());
- CHECK(v8_str("data")->Equals(info.Data()));
- CHECK(v8_str("x")->Equals(name));
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ CHECK(v8_str("data")->Equals(context, info.Data()).FromJust());
+ CHECK(v8_str("x")->Equals(context, name).FromJust());
info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
}
@@ -424,9 +459,11 @@ static void InterceptorLoadXICGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
info.GetReturnValue().Set(
- v8_str("x")->Equals(name)
- ? v8::Handle<v8::Value>(v8::Integer::New(info.GetIsolate(), 42))
- : v8::Handle<v8::Value>());
+ v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust()
+ ? v8::Local<v8::Value>(v8::Integer::New(info.GetIsolate(), 42))
+ : v8::Local<v8::Value>());
}
@@ -592,28 +629,33 @@ THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) {
static void SetOnThis(Local<String> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- Local<Object>::Cast(info.This())->ForceSet(name, value);
+ Local<Object>::Cast(info.This())
+ ->CreateDataProperty(info.GetIsolate()->GetCurrentContext(), name, value)
+ .FromJust();
}
THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
templ->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
// Check the case when receiver and interceptor's holder
// are the same objects.
- v8::Handle<Value> value = CompileRun(
+ v8::Local<Value> value = CompileRun(
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = o.y;"
"}");
- CHECK_EQ(239, value->Int32Value());
+ CHECK_EQ(239, value->Int32Value(context.local()).FromJust());
// Check the case when interceptor's holder is in proto chain
// of receiver.
@@ -623,32 +665,38 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
"for (var i = 0; i < 7; i++) {"
" result = r.y;"
"}");
- CHECK_EQ(239, value->Int32Value());
+ CHECK_EQ(239, value->Int32Value(context.local()).FromJust());
}
THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("p"),
+ templ_p->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
// Check the case when receiver and interceptor's holder
// are the same objects.
- v8::Handle<Value> value = CompileRun(
+ v8::Local<Value> value = CompileRun(
"o.__proto__ = p;"
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = o.x + o.y;"
"}");
- CHECK_EQ(239 + 42, value->Int32Value());
+ CHECK_EQ(239 + 42, value->Int32Value(context.local()).FromJust());
// Check the case when interceptor's holder is in proto chain
// of receiver.
@@ -658,22 +706,25 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
"for (var i = 0; i < 7; i++) {"
" result = r.x + r.y;"
"}");
- CHECK_EQ(239 + 42, value->Int32Value());
+ CHECK_EQ(239 + 42, value->Int32Value(context.local()).FromJust());
}
THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
templ->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
- v8::Handle<Value> value = CompileRun(
+ v8::Local<Value> value = CompileRun(
"fst = new Object(); fst.__proto__ = o;"
"snd = new Object(); snd.__proto__ = fst;"
"var result1 = 0;"
@@ -686,7 +737,7 @@ THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
" result = snd.x;"
"}"
"result + result1");
- CHECK_EQ(239 + 42, value->Int32Value());
+ CHECK_EQ(239 + 42, value->Int32Value(context.local()).FromJust());
}
@@ -695,17 +746,23 @@ THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("p"),
+ templ_p->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ v8::Local<Value> value = CompileRun(
"o.__proto__ = p;"
"for (var i = 0; i < 7; i++) {"
" o.x;"
@@ -716,7 +773,7 @@ THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
" result += o.x;"
"}"
"result");
- CHECK_EQ(42 * 7, value->Int32Value());
+ CHECK_EQ(42 * 7, value->Int32Value(context.local()).FromJust());
}
@@ -725,17 +782,23 @@ THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("p"),
+ templ_p->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ v8::Local<Value> value = CompileRun(
"inbetween = new Object();"
"o.__proto__ = inbetween;"
"inbetween.__proto__ = p;"
@@ -749,7 +812,7 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
" result += o.y;"
"}"
"result");
- CHECK_EQ(42 * 10, value->Int32Value());
+ CHECK_EQ(42 * 10, value->Int32Value(context.local()).FromJust());
}
@@ -759,17 +822,23 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
- v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
- context->Global()->Set(v8_str("p"), templ_p->NewInstance());
-
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("p"),
+ templ_p->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+
+ v8::Local<Value> value = CompileRun(
"o.__proto__ = this;"
"this.__proto__ = p;"
"for (var i = 0; i < 10; i++) {"
@@ -782,14 +851,16 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
" result += o.y;"
"}"
"result");
- CHECK_EQ(42 * 10, value->Int32Value());
+ CHECK_EQ(42 * 10, value->Int32Value(context.local()).FromJust());
}
static void InterceptorLoadICGetter0(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(v8_str("x")->Equals(name));
+ CHECK(v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust());
info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 0));
}
@@ -803,8 +874,9 @@ THREADED_TEST(InterceptorReturningZero) {
static void InterceptorStoreICSetter(
Local<Name> key, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(v8_str("x")->Equals(key));
- CHECK_EQ(42, value->Int32Value());
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(v8_str("x")->Equals(context, key).FromJust());
+ CHECK_EQ(42, value->Int32Value(context).FromJust());
info.GetReturnValue().Set(value);
}
@@ -813,12 +885,15 @@ static void InterceptorStoreICSetter(
THREADED_TEST(InterceptorStoreIC) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorLoadICGetter, InterceptorStoreICSetter, 0, 0, 0,
v8_str("data")));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"for (var i = 0; i < 1000; i++) {"
" o.x = 42;"
@@ -829,30 +904,36 @@ THREADED_TEST(InterceptorStoreIC) {
THREADED_TEST(InterceptorStoreICWithNoSetter) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"for (var i = 0; i < 1000; i++) {"
" o.y = 239;"
"}"
"42 + o.y");
- CHECK_EQ(239 + 42, value->Int32Value());
+ CHECK_EQ(239 + 42, value->Int32Value(context.local()).FromJust());
}
THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddAccessor(parent, v8_str("age"), SimpleAccessorGetter,
SimpleAccessorSetter);
AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"child.age = 10;");
@@ -866,16 +947,19 @@ THREADED_TEST(LegacyInterceptorDoesNotSeeSymbols) {
LocalContext env;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> child = FunctionTemplate::New(isolate);
v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
child->Inherit(parent);
AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
AddInterceptor(child, StringInterceptorGetter, StringInterceptorSetter);
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- env->Global()->Set(v8_str("age"), age);
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
+ env->Global()->Set(env.local(), v8_str("age"), age).FromJust();
CompileRun(
"var child = new Child;"
"child[age] = 10;");
@@ -889,8 +973,8 @@ THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
LocalContext env;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> child = FunctionTemplate::New(isolate);
v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
v8::Local<v8::Symbol> anon = v8::Symbol::New(isolate);
@@ -898,9 +982,12 @@ THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
AddInterceptor(child, GenericInterceptorGetter, GenericInterceptorSetter);
- env->Global()->Set(v8_str("Child"), child->GetFunction());
- env->Global()->Set(v8_str("age"), age);
- env->Global()->Set(v8_str("anon"), anon);
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
+ env->Global()->Set(env.local(), v8_str("age"), age).FromJust();
+ env->Global()->Set(env.local(), v8_str("anon"), anon).FromJust();
CompileRun(
"var child = new Child;"
"child[age] = 10;");
@@ -921,23 +1008,40 @@ THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
THREADED_TEST(NamedPropertyHandlerGetter) {
echo_named_call_count = 0;
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::FunctionTemplate> templ =
+ v8::Local<v8::FunctionTemplate> templ =
v8::FunctionTemplate::New(CcTest::isolate());
templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
EchoNamedProperty, 0, 0, 0, 0, v8_str("data")));
LocalContext env;
- env->Global()->Set(v8_str("obj"), templ->GetFunction()->NewInstance());
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
CHECK_EQ(echo_named_call_count, 0);
- v8_compile("obj.x")->Run();
+ v8_compile("obj.x")->Run(env.local()).ToLocalChecked();
CHECK_EQ(echo_named_call_count, 1);
const char* code = "var str = 'oddle'; obj[str] + obj.poddle;";
- v8::Handle<Value> str = CompileRun(code);
+ v8::Local<Value> str = CompileRun(code);
String::Utf8Value value(str);
CHECK_EQ(0, strcmp(*value, "oddlepoddle"));
// Check default behavior
- CHECK_EQ(10, v8_compile("obj.flob = 10;")->Run()->Int32Value());
- CHECK(v8_compile("'myProperty' in obj")->Run()->BooleanValue());
- CHECK(v8_compile("delete obj.myProperty")->Run()->BooleanValue());
+ CHECK_EQ(10, v8_compile("obj.flob = 10;")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(v8_compile("'myProperty' in obj")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("delete obj.myProperty")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
}
@@ -947,7 +1051,9 @@ int echo_indexed_call_count = 0;
static void EchoIndexedProperty(
uint32_t index, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(v8_num(637)->Equals(info.Data()));
+ CHECK(v8_num(637)
+ ->Equals(info.GetIsolate()->GetCurrentContext(), info.Data())
+ .FromJust());
echo_indexed_call_count++;
info.GetReturnValue().Set(v8_num(index));
}
@@ -956,13 +1062,22 @@ static void EchoIndexedProperty(
THREADED_TEST(IndexedPropertyHandlerGetter) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
EchoIndexedProperty, 0, 0, 0, 0, v8_num(637)));
LocalContext env;
- env->Global()->Set(v8_str("obj"), templ->GetFunction()->NewInstance());
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
Local<Script> script = v8_compile("obj[900]");
- CHECK_EQ(script->Run()->Int32Value(), 900);
+ CHECK_EQ(script->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust(),
+ 900);
}
@@ -972,7 +1087,7 @@ THREADED_TEST(PropertyHandlerInPrototype) {
v8::HandleScope scope(isolate);
// Set up a prototype chain with three interceptors.
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
CheckThisIndexedPropertyHandler, CheckThisIndexedPropertySetter,
CheckThisIndexedPropertyQuery, CheckThisIndexedPropertyDeleter,
@@ -983,13 +1098,22 @@ THREADED_TEST(PropertyHandlerInPrototype) {
CheckThisNamedPropertyQuery, CheckThisNamedPropertyDeleter,
CheckThisNamedPropertyEnumerator));
- bottom = templ->GetFunction()->NewInstance();
- Local<v8::Object> top = templ->GetFunction()->NewInstance();
- Local<v8::Object> middle = templ->GetFunction()->NewInstance();
-
- bottom->SetPrototype(middle);
- middle->SetPrototype(top);
- env->Global()->Set(v8_str("obj"), bottom);
+ bottom = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ Local<v8::Object> top = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ Local<v8::Object> middle = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+
+ bottom->SetPrototype(env.local(), middle).FromJust();
+ middle->SetPrototype(env.local(), top).FromJust();
+ env->Global()->Set(env.local(), v8_str("obj"), bottom).FromJust();
// Indexed and named get.
CompileRun("obj[0]");
@@ -1016,7 +1140,10 @@ bool is_bootstrapping = false;
static void PrePropertyHandlerGet(
Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- if (!is_bootstrapping && v8_str("pre")->Equals(key)) {
+ if (!is_bootstrapping &&
+ v8_str("pre")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), key)
+ .FromJust()) {
info.GetReturnValue().Set(v8_str("PrePropertyHandler: pre"));
}
}
@@ -1024,7 +1151,10 @@ static void PrePropertyHandlerGet(
static void PrePropertyHandlerQuery(
Local<Name> key, const v8::PropertyCallbackInfo<v8::Integer>& info) {
- if (!is_bootstrapping && v8_str("pre")->Equals(key)) {
+ if (!is_bootstrapping &&
+ v8_str("pre")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), key)
+ .FromJust()) {
info.GetReturnValue().Set(static_cast<int32_t>(v8::None));
}
}
@@ -1033,28 +1163,33 @@ static void PrePropertyHandlerQuery(
THREADED_TEST(PrePropertyHandler) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
PrePropertyHandlerGet, 0, PrePropertyHandlerQuery));
is_bootstrapping = true;
LocalContext env(NULL, desc->InstanceTemplate());
is_bootstrapping = false;
CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
- v8::Handle<Value> result_pre = CompileRun("pre");
- CHECK(v8_str("PrePropertyHandler: pre")->Equals(result_pre));
- v8::Handle<Value> result_on = CompileRun("on");
- CHECK(v8_str("Object: on")->Equals(result_on));
- v8::Handle<Value> result_post = CompileRun("post");
+ v8::Local<Value> result_pre = CompileRun("pre");
+ CHECK(v8_str("PrePropertyHandler: pre")
+ ->Equals(env.local(), result_pre)
+ .FromJust());
+ v8::Local<Value> result_on = CompileRun("on");
+ CHECK(v8_str("Object: on")->Equals(env.local(), result_on).FromJust());
+ v8::Local<Value> result_post = CompileRun("post");
CHECK(result_post.IsEmpty());
}
THREADED_TEST(EmptyInterceptorBreakTransitions) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Constructor"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Constructor"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var o1 = new Constructor;"
"o1.a = 1;" // Ensure a and x share the descriptor array.
@@ -1069,12 +1204,15 @@ THREADED_TEST(EmptyInterceptorBreakTransitions) {
THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> child = FunctionTemplate::New(isolate);
child->Inherit(parent);
AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"var parent = child.__proto__;"
@@ -1092,14 +1230,17 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
THREADED_TEST(EmptyInterceptorDoesNotShadowApiAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> parent = FunctionTemplate::New(isolate);
auto returns_42 = FunctionTemplate::New(isolate, Returns42);
parent->PrototypeTemplate()->SetAccessorProperty(v8_str("age"), returns_42);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> child = FunctionTemplate::New(isolate);
child->Inherit(parent);
AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"var parent = child.__proto__;");
@@ -1119,12 +1260,15 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowApiAccessors) {
THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
- Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> child = FunctionTemplate::New(isolate);
child->Inherit(parent);
AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"var parent = child.__proto__;"
@@ -1140,11 +1284,14 @@ THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
THREADED_TEST(SwitchFromInterceptorToAccessor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddAccessor(templ, v8_str("age"), SimpleAccessorGetter, SimpleAccessorSetter);
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
"function setAge(i){ obj.age = i; };"
@@ -1158,11 +1305,14 @@ THREADED_TEST(SwitchFromInterceptorToAccessor) {
THREADED_TEST(SwitchFromAccessorToInterceptor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddAccessor(templ, v8_str("age"), SimpleAccessorGetter, SimpleAccessorSetter);
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
"function setAge(i){ obj.age = i; };"
@@ -1176,14 +1326,17 @@ THREADED_TEST(SwitchFromAccessorToInterceptor) {
THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddAccessor(parent, v8_str("age"), SimpleAccessorGetter,
SimpleAccessorSetter);
AddInterceptor(child, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"function setAge(i){ child.age = i; };"
@@ -1197,14 +1350,17 @@ THREADED_TEST(SwitchFromInterceptorToAccessorWithInheritance) {
THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddAccessor(parent, v8_str("age"), SimpleAccessorGetter,
SimpleAccessorSetter);
AddInterceptor(child, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"function setAge(i){ child.age = i; };"
@@ -1218,10 +1374,13 @@ THREADED_TEST(SwitchFromAccessorToInterceptorWithInheritance) {
THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
"function setter(i) { this.accessor_age = i; };"
@@ -1244,10 +1403,13 @@ THREADED_TEST(SwitchFromInterceptorToJSAccessor) {
THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
"function setter(i) { this.accessor_age = i; };"
@@ -1270,12 +1432,15 @@ THREADED_TEST(SwitchFromJSAccessorToInterceptor) {
THREADED_TEST(SwitchFromInterceptorToProperty) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddInterceptor(child, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"function setAge(i){ child.age = i; };"
@@ -1289,12 +1454,15 @@ THREADED_TEST(SwitchFromInterceptorToProperty) {
THREADED_TEST(SwitchFromPropertyToInterceptor) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
- Handle<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> child = FunctionTemplate::New(CcTest::isolate());
child->Inherit(parent);
AddInterceptor(child, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var child = new Child;"
"function setAge(i){ child.age = i; };"
@@ -1328,13 +1496,16 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) {
Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
instance_templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorForHiddenProperties));
- Local<v8::Function> function = fun_templ->GetFunction();
- Local<v8::Object> obj = function->NewInstance();
+ Local<v8::Function> function =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
+ Local<v8::Object> obj =
+ function->NewInstance(context.local()).ToLocalChecked();
CHECK(obj->SetPrivate(context.local(), key, v8::Integer::New(isolate, 2302))
.FromJust());
- CHECK_EQ(
- 2302,
- obj->GetPrivate(context.local(), key).ToLocalChecked()->Int32Value());
+ CHECK_EQ(2302, obj->GetPrivate(context.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK(!interceptor_for_hidden_properties_called);
}
@@ -1353,11 +1524,14 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
- Local<Value> result = script->Run();
- CHECK(result->Equals(v8_str("x")));
+ Local<Value> result = script->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_str("x")).FromJust());
}
}
@@ -1369,11 +1543,14 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
LocalContext context;
// Create an object with a named interceptor.
- context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("interceptor_obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
Local<Script> script = v8_compile("interceptor_obj.x");
for (int i = 0; i < 10; i++) {
- Local<Value> result = script->Run();
- CHECK(result->Equals(v8_str("x")));
+ Local<Value> result = script->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_str("x")).FromJust());
}
// Create a slow case object and a function accessing a property in
// that slow case object (with dictionary probing in generated
@@ -1389,7 +1566,7 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
"interceptor_obj.y = 10;"
"delete interceptor_obj.y;"
"get_x(interceptor_obj)");
- CHECK(result->Equals(v8_str("x")));
+ CHECK(result->Equals(context.local(), v8_str("x")).FromJust());
}
@@ -1402,8 +1579,10 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
// Create an object with a named interceptor.
- v8::Local<v8::Object> object = templ->NewInstance();
- context1->Global()->Set(v8_str("interceptor_obj"), object);
+ v8::Local<v8::Object> object = templ->NewInstance(context1).ToLocalChecked();
+ context1->Global()
+ ->Set(context1, v8_str("interceptor_obj"), object)
+ .FromJust();
// Force the object into the slow case.
CompileRun(
@@ -1415,7 +1594,9 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
// Introduce the object into a different context.
// Repeat named loads to exercise ICs.
LocalContext context2;
- context2->Global()->Set(v8_str("interceptor_obj"), object);
+ context2->Global()
+ ->Set(context2.local(), v8_str("interceptor_obj"), object)
+ .FromJust();
Local<Value> result = CompileRun(
"function get_x(o) { return o.x; }"
"interceptor_obj.x = 42;"
@@ -1424,7 +1605,7 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
"}"
"get_x(interceptor_obj)");
// Check that the interceptor was actually invoked.
- CHECK(result->Equals(v8_str("x")));
+ CHECK(result->Equals(context2.local(), v8_str("x")).FromJust());
}
// Return to the original context and force some object to the slow case
@@ -1438,9 +1619,11 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
static void SetXOnPrototypeGetter(
Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
// Set x on the prototype object and do not handle the get request.
- v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
- proto.As<v8::Object>()->Set(v8_str("x"),
- v8::Integer::New(info.GetIsolate(), 23));
+ v8::Local<v8::Value> proto = info.Holder()->GetPrototype();
+ proto.As<v8::Object>()
+ ->Set(info.GetIsolate()->GetCurrentContext(), v8_str("x"),
+ v8::Integer::New(info.GetIsolate(), 23))
+ .FromJust();
}
@@ -1456,12 +1639,15 @@ THREADED_TEST(NamedInterceptorMapTransitionRead) {
instance_template->SetHandler(
v8::NamedPropertyHandlerConfiguration(SetXOnPrototypeGetter));
LocalContext context;
- context->Global()->Set(v8_str("F"), function_template->GetFunction());
+ context->Global()
+ ->Set(context.local(), v8_str("F"),
+ function_template->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
// Create an instance of F and introduce a map transition for x.
CompileRun("var o = new F(); o.x = 23;");
// Create an instance of F and invoke the getter. The result should be 23.
Local<Value> result = CompileRun("o = new F(); o.x");
- CHECK_EQ(result->Int32Value(), 23);
+ CHECK_EQ(result->Int32Value(context.local()).FromJust(), 23);
}
@@ -1491,7 +1677,10 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
IndexedPropertyGetter, IndexedPropertySetter));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
Local<Script> getter_script =
v8_compile("obj.__defineGetter__(\"3\", function(){return 5;});obj[3];");
Local<Script> setter_script = v8_compile(
@@ -1503,14 +1692,14 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
"obj[39] = 47;"
"obj.foo;"); // This setter should not run, due to the interceptor.
Local<Script> interceptor_getter_script = v8_compile("obj[37];");
- Local<Value> result = getter_script->Run();
- CHECK(v8_num(5)->Equals(result));
- result = setter_script->Run();
- CHECK(v8_num(23)->Equals(result));
- result = interceptor_setter_script->Run();
- CHECK(v8_num(23)->Equals(result));
- result = interceptor_getter_script->Run();
- CHECK(v8_num(625)->Equals(result));
+ Local<Value> result = getter_script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(5)->Equals(context.local(), result).FromJust());
+ result = setter_script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(23)->Equals(context.local(), result).FromJust());
+ result = interceptor_setter_script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(23)->Equals(context.local(), result).FromJust());
+ result = interceptor_getter_script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(625)->Equals(context.local(), result).FromJust());
}
@@ -1540,7 +1729,9 @@ void UnboxedDoubleIndexedPropertyEnumerator(
"keys = new Array(); keys[125000] = 1;"
"for(i = 0; i < 80000; i++) { keys[i] = i; };"
"keys.length = 25; keys;");
- Local<Value> result = indexed_property_names_script->Run();
+ Local<Value> result =
+ indexed_property_names_script->Run(info.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked();
info.GetReturnValue().Set(Local<v8::Array>::Cast(result));
}
@@ -1555,18 +1746,25 @@ THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
UnboxedDoubleIndexedPropertyGetter, UnboxedDoubleIndexedPropertySetter, 0,
0, UnboxedDoubleIndexedPropertyEnumerator));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
// When obj is created, force it to be Stored in a FastDoubleArray.
Local<Script> create_unboxed_double_script = v8_compile(
"obj[125000] = 1; for(i = 0; i < 80000; i+=2) { obj[i] = i; } "
"key_count = 0; "
"for (x in obj) {key_count++;};"
"obj;");
- Local<Value> result = create_unboxed_double_script->Run();
- CHECK(result->ToObject(isolate)->HasRealIndexedProperty(2000));
+ Local<Value> result =
+ create_unboxed_double_script->Run(context.local()).ToLocalChecked();
+ CHECK(result->ToObject(context.local())
+ .ToLocalChecked()
+ ->HasRealIndexedProperty(context.local(), 2000)
+ .FromJust());
Local<Script> key_count_check = v8_compile("key_count;");
- result = key_count_check->Run();
- CHECK(v8_num(40013)->Equals(result));
+ result = key_count_check->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(40013)->Equals(context.local(), result).FromJust());
}
@@ -1579,10 +1777,12 @@ void SloppyArgsIndexedPropertyEnumerator(
"}"
"keys = f(0, 1, 2, 3);"
"keys;");
- Local<Object> result =
- Local<Object>::Cast(indexed_property_names_script->Run());
+ Local<Object> result = Local<Object>::Cast(
+ indexed_property_names_script->Run(info.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked());
// Have to populate the handle manually, as it's not Cast-able.
- i::Handle<i::JSObject> o = v8::Utils::OpenHandle<Object, i::JSObject>(result);
+ i::Handle<i::JSReceiver> o =
+ v8::Utils::OpenHandle<Object, i::JSReceiver>(result);
i::Handle<i::JSArray> array(reinterpret_cast<i::JSArray*>(*o));
info.GetReturnValue().Set(v8::Utils::ToLocal(array));
}
@@ -1607,12 +1807,16 @@ THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
SloppyIndexedPropertyGetter, 0, 0, 0,
SloppyArgsIndexedPropertyEnumerator));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
Local<Script> create_args_script = v8_compile(
"var key_count = 0;"
"for (x in obj) {key_count++;} key_count;");
- Local<Value> result = create_args_script->Run();
- CHECK(v8_num(4)->Equals(result));
+ Local<Value> result =
+ create_args_script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(4)->Equals(context.local(), result).FromJust());
}
@@ -1630,7 +1834,10 @@ THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
// Check fast object case.
const char* fast_case_code =
@@ -1653,7 +1860,10 @@ THREADED_TEST(IndexedInterceptorWithNoSetter) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
const char* code =
"try {"
@@ -1686,8 +1896,8 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
templ->SetAccessCheckCallback(AccessAlwaysBlocked);
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"var result = 'PASSED';"
@@ -1713,8 +1923,8 @@ THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"try {"
@@ -1738,8 +1948,8 @@ THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"try {"
@@ -1779,8 +1989,8 @@ THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"try {"
@@ -1810,8 +2020,8 @@ THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"var original = obj;"
@@ -1842,8 +2052,8 @@ THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"var original = obj;"
@@ -1874,8 +2084,8 @@ THREADED_TEST(IndexedInterceptorOnProto) {
v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
const char* code =
"var o = {__proto__: obj};"
@@ -1902,7 +2112,8 @@ static void NoBlockGetterI(uint32_t index,
static void PDeleter(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
- if (!name->Equals(v8_str("foo"))) {
+ if (!name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust()) {
return; // not intercepted
}
@@ -1923,37 +2134,66 @@ static void IDeleter(uint32_t index,
THREADED_TEST(Deleter) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX, NULL,
NULL, PDeleter, NULL));
obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
NoBlockGetterI, NULL, NULL, IDeleter, NULL));
LocalContext context;
- context->Global()->Set(v8_str("k"), obj->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("k"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"k.foo = 'foo';"
"k.bar = 'bar';"
"k[2] = 2;"
"k[4] = 4;");
- CHECK(v8_compile("delete k.foo")->Run()->IsFalse());
- CHECK(v8_compile("delete k.bar")->Run()->IsTrue());
-
- CHECK(v8_compile("k.foo")->Run()->Equals(v8_str("foo")));
- CHECK(v8_compile("k.bar")->Run()->IsUndefined());
-
- CHECK(v8_compile("delete k[2]")->Run()->IsFalse());
- CHECK(v8_compile("delete k[4]")->Run()->IsTrue());
-
- CHECK(v8_compile("k[2]")->Run()->Equals(v8_num(2)));
- CHECK(v8_compile("k[4]")->Run()->IsUndefined());
+ CHECK(v8_compile("delete k.foo")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->IsFalse());
+ CHECK(v8_compile("delete k.bar")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->IsTrue());
+
+ CHECK(v8_compile("k.foo")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->Equals(context.local(), v8_str("foo"))
+ .FromJust());
+ CHECK(v8_compile("k.bar")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->IsUndefined());
+
+ CHECK(v8_compile("delete k[2]")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->IsFalse());
+ CHECK(v8_compile("delete k[4]")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->IsTrue());
+
+ CHECK(v8_compile("k[2]")
+ ->Run(context.local())
+ .ToLocalChecked()
+ ->Equals(context.local(), v8_num(2))
+ .FromJust());
+ CHECK(
+ v8_compile("k[4]")->Run(context.local()).ToLocalChecked()->IsUndefined());
}
static void GetK(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- if (name->Equals(v8_str("foo")) || name->Equals(v8_str("bar")) ||
- name->Equals(v8_str("baz"))) {
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ if (name->Equals(context, v8_str("foo")).FromJust() ||
+ name->Equals(context, v8_str("bar")).FromJust() ||
+ name->Equals(context, v8_str("baz")).FromJust()) {
info.GetReturnValue().SetUndefined();
}
}
@@ -1968,19 +2208,26 @@ static void IndexedGetK(uint32_t index,
static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
- result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"));
- result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"));
- result->Set(v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"));
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("foo"))
+ .FromJust();
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("bar"))
+ .FromJust();
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 2), v8_str("baz"))
+ .FromJust();
info.GetReturnValue().Set(result);
}
static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
ApiTestFuzzer::Fuzz();
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
- result->Set(v8::Integer::New(info.GetIsolate(), 0), v8_str("0"));
- result->Set(v8::Integer::New(info.GetIsolate(), 1), v8_str("1"));
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 0), v8_str("0"))
+ .FromJust();
+ result->Set(context, v8::Integer::New(info.GetIsolate(), 1), v8_str("1"))
+ .FromJust();
info.GetReturnValue().Set(result);
}
@@ -1988,31 +2235,34 @@ static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
THREADED_TEST(Enumerators) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(
v8::NamedPropertyHandlerConfiguration(GetK, NULL, NULL, NULL, NamedEnum));
obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
IndexedGetK, NULL, NULL, NULL, IndexedEnum));
LocalContext context;
- context->Global()->Set(v8_str("k"), obj->NewInstance());
- v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
- "k[10] = 0;"
- "k.a = 0;"
- "k[5] = 0;"
- "k.b = 0;"
- "k[4294967294] = 0;"
- "k.c = 0;"
- "k[4294967295] = 0;"
- "k.d = 0;"
- "k[140000] = 0;"
- "k.e = 0;"
- "k[30000000000] = 0;"
- "k.f = 0;"
- "var result = [];"
- "for (var prop in k) {"
- " result.push(prop);"
- "}"
- "result"));
+ context->Global()
+ ->Set(context.local(), v8_str("k"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<v8::Array> result =
+ v8::Local<v8::Array>::Cast(CompileRun("k[10] = 0;"
+ "k.a = 0;"
+ "k[5] = 0;"
+ "k.b = 0;"
+ "k[4294967294] = 0;"
+ "k.c = 0;"
+ "k[4294967295] = 0;"
+ "k.d = 0;"
+ "k[140000] = 0;"
+ "k.e = 0;"
+ "k[30000000000] = 0;"
+ "k.f = 0;"
+ "var result = [];"
+ "for (var prop in k) {"
+ " result.push(prop);"
+ "}"
+ "result"));
// Check that we get all the property names returned including the
// ones from the enumerators in the right order: indexed properties
// in numerical order, indexed interceptor properties, named
@@ -2021,39 +2271,106 @@ THREADED_TEST(Enumerators) {
// documenting our behavior.
CHECK_EQ(17u, result->Length());
// Indexed properties + indexed interceptor properties in numerical order.
- CHECK(v8_str("0")->Equals(result->Get(v8::Integer::New(isolate, 0))));
- CHECK(v8_str("1")->Equals(result->Get(v8::Integer::New(isolate, 1))));
- CHECK(v8_str("5")->Equals(result->Get(v8::Integer::New(isolate, 2))));
- CHECK(v8_str("10")->Equals(result->Get(v8::Integer::New(isolate, 3))));
- CHECK(v8_str("140000")->Equals(result->Get(v8::Integer::New(isolate, 4))));
- CHECK(
- v8_str("4294967294")->Equals(result->Get(v8::Integer::New(isolate, 5))));
+ CHECK(v8_str("0")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("1")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("5")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("10")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 3))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("140000")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 4))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("4294967294")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 5))
+ .ToLocalChecked())
+ .FromJust());
// Named properties in insertion order.
- CHECK(v8_str("a")->Equals(result->Get(v8::Integer::New(isolate, 6))));
- CHECK(v8_str("b")->Equals(result->Get(v8::Integer::New(isolate, 7))));
- CHECK(v8_str("c")->Equals(result->Get(v8::Integer::New(isolate, 8))));
- CHECK(
- v8_str("4294967295")->Equals(result->Get(v8::Integer::New(isolate, 9))));
- CHECK(v8_str("d")->Equals(result->Get(v8::Integer::New(isolate, 10))));
- CHECK(v8_str("e")->Equals(result->Get(v8::Integer::New(isolate, 11))));
+ CHECK(v8_str("a")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 6))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("b")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 7))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("c")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 8))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("4294967295")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 9))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("d")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 10))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("e")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 11))
+ .ToLocalChecked())
+ .FromJust());
CHECK(v8_str("30000000000")
- ->Equals(result->Get(v8::Integer::New(isolate, 12))));
- CHECK(v8_str("f")->Equals(result->Get(v8::Integer::New(isolate, 13))));
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 12))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("f")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 13))
+ .ToLocalChecked())
+ .FromJust());
// Named interceptor properties.
- CHECK(v8_str("foo")->Equals(result->Get(v8::Integer::New(isolate, 14))));
- CHECK(v8_str("bar")->Equals(result->Get(v8::Integer::New(isolate, 15))));
- CHECK(v8_str("baz")->Equals(result->Get(v8::Integer::New(isolate, 16))));
+ CHECK(v8_str("foo")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 14))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("bar")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 15))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("baz")
+ ->Equals(context.local(),
+ result->Get(context.local(), v8::Integer::New(isolate, 16))
+ .ToLocalChecked())
+ .FromJust());
}
-v8::Handle<Value> call_ic_function;
-v8::Handle<Value> call_ic_function2;
-v8::Handle<Value> call_ic_function3;
+v8::Local<Value> call_ic_function;
+v8::Local<Value> call_ic_function2;
+v8::Local<Value> call_ic_function3;
static void InterceptorCallICGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(v8_str("x")->Equals(name));
+ CHECK(v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust());
info.GetReturnValue().Set(call_ic_function);
}
@@ -2062,18 +2379,23 @@ static void InterceptorCallICGetter(
THREADED_TEST(InterceptorCallIC) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function = v8_compile("function f(x) { return x + 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ call_ic_function = v8_compile("function f(x) { return x + 1; }; f")
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<Value> value = CompileRun(
"var result = 0;"
"for (var i = 0; i < 1000; i++) {"
" result = o.x(41);"
"}");
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
@@ -2082,25 +2404,30 @@ THREADED_TEST(InterceptorCallIC) {
THREADED_TEST(InterceptorCallICSeesOthers) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"o.x = function f(x) { return x + 1; };"
"var result = 0;"
"for (var i = 0; i < 7; i++) {"
" result = o.x(41);"
"}");
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
-static v8::Handle<Value> call_ic_function4;
+static v8::Local<Value> call_ic_function4;
static void InterceptorCallICGetter4(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(v8_str("x")->Equals(name));
+ CHECK(v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust());
info.GetReturnValue().Set(call_ic_function4);
}
@@ -2111,19 +2438,24 @@ static void InterceptorCallICGetter4(
THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter4));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function4 = v8_compile("function f(x) { return x - 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ call_ic_function4 = v8_compile("function f(x) { return x - 1; }; f")
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<Value> value = CompileRun(
"Object.getPrototypeOf(o).x = function(x) { return x + 1; };"
"var result = 0;"
"for (var i = 0; i < 1000; i++) {"
" result = o.x(42);"
"}");
- CHECK_EQ(41, value->Int32Value());
+ CHECK_EQ(41, value->Int32Value(context.local()).FromJust());
}
@@ -2132,11 +2464,14 @@ THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"proto1 = new Object();"
"proto2 = new Object();"
"o.__proto__ = proto1;"
@@ -2151,7 +2486,7 @@ THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
"for (var i = 0; i < 7; i++) {"
" result += o.y(42);"
"}");
- CHECK_EQ(41 * 7, value->Int32Value());
+ CHECK_EQ(41 * 7, value->Int32Value(context.local()).FromJust());
}
@@ -2160,11 +2495,14 @@ THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"o.x = inc;"
@@ -2172,15 +2510,18 @@ THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
"for (var i = 0; i < 1000; i++) {"
" result = o.x(42);"
"}");
- CHECK_EQ(43, value->Int32Value());
+ CHECK_EQ(43, value->Int32Value(context.local()).FromJust());
}
-static v8::Handle<Value> call_ic_function5;
+static v8::Local<Value> call_ic_function5;
static void InterceptorCallICGetter5(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name)) info.GetReturnValue().Set(call_ic_function5);
+ if (v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust())
+ info.GetReturnValue().Set(call_ic_function5);
}
@@ -2190,13 +2531,18 @@ static void InterceptorCallICGetter5(
THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter5));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function5 = v8_compile("function f(x) { return x - 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ call_ic_function5 = v8_compile("function f(x) { return x - 1; }; f")
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<Value> value = CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"o.x = inc;"
@@ -2204,15 +2550,18 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
"for (var i = 0; i < 1000; i++) {"
" result = o.x(42);"
"}");
- CHECK_EQ(41, value->Int32Value());
+ CHECK_EQ(41, value->Int32Value(context.local()).FromJust());
}
-static v8::Handle<Value> call_ic_function6;
+static v8::Local<Value> call_ic_function6;
static void InterceptorCallICGetter6(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name)) info.GetReturnValue().Set(call_ic_function6);
+ if (v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust())
+ info.GetReturnValue().Set(call_ic_function6);
}
@@ -2222,13 +2571,18 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter6));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- call_ic_function6 = v8_compile("function f(x) { return x - 1; }; f")->Run();
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ call_ic_function6 = v8_compile("function f(x) { return x - 1; }; f")
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<Value> value = CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"o.x = inc;"
@@ -2244,7 +2598,7 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
"test();"
"%OptimizeFunctionOnNextCall(test);"
"test()");
- CHECK_EQ(41, value->Int32Value());
+ CHECK_EQ(41, value->Int32Value(context.local()).FromJust());
}
@@ -2253,11 +2607,14 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"proto1 = new Object();"
@@ -2274,7 +2631,7 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
"for (var i = 0; i < 7; i++) {"
" result += o.y(42);"
"}");
- CHECK_EQ(41 * 7, value->Int32Value());
+ CHECK_EQ(41 * 7, value->Int32Value(context.local()).FromJust());
}
@@ -2284,11 +2641,14 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
- v8::Handle<Value> value = CompileRun(
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<Value> value = CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"o.__proto__ = this;"
@@ -2302,7 +2662,7 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
"for (var i = 0; i < 7; i++) {"
" result += o.y(42);"
"}");
- CHECK_EQ(41 * 7, value->Int32Value());
+ CHECK_EQ(41 * 7, value->Int32Value(context.local()).FromJust());
}
@@ -2310,13 +2670,16 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
THREADED_TEST(InterceptorCallICCachedFromGlobal) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
- v8::Handle<Value> value = CompileRun(
+ v8::Local<Value> value = CompileRun(
"try {"
" o.__proto__ = this;"
" for (var i = 0; i < 10; i++) {"
@@ -2332,16 +2695,18 @@ THREADED_TEST(InterceptorCallICCachedFromGlobal) {
"} catch(e) {"
" e"
"};");
- CHECK_EQ(239 * 10, value->Int32Value());
+ CHECK_EQ(239 * 10, value->Int32Value(context.local()).FromJust());
}
-v8::Handle<Value> keyed_call_ic_function;
+v8::Local<Value> keyed_call_ic_function;
static void InterceptorKeyedCallICGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- if (v8_str("x")->Equals(name)) {
+ if (v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust()) {
info.GetReturnValue().Set(keyed_call_ic_function);
}
}
@@ -2352,10 +2717,13 @@ static void InterceptorKeyedCallICGetter(
THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"proto = new Object();"
"proto.y = function(x) { return x + 1; };"
@@ -2367,8 +2735,11 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
" if (i == 5) { method = 'z'; };"
" result += o[method](41);"
"}");
- CHECK_EQ(42 * 5 + 40 * 5,
- context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42 * 5 + 40 * 5, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -2378,13 +2749,17 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorKeyedCallICGetter));
LocalContext context;
- context->Global()->Set(v8_str("proto1"), templ->NewInstance());
- keyed_call_ic_function =
- v8_compile("function f(x) { return x - 1; }; f")->Run();
+ context->Global()
+ ->Set(context.local(), v8_str("proto1"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ keyed_call_ic_function = v8_compile("function f(x) { return x - 1; }; f")
+ ->Run(context.local())
+ .ToLocalChecked();
CompileRun(
"o = new Object();"
"proto2 = new Object();"
@@ -2398,8 +2773,11 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
" if (i == 5) { method = 'y'; };"
" result += o[method](41);"
"}");
- CHECK_EQ(42 * 5 + 40 * 5,
- context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42 * 5 + 40 * 5, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -2408,10 +2786,13 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
@@ -2426,8 +2807,11 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
" if (i == 5) { method = 'y'; };"
" result += o[method](41);"
"}");
- CHECK_EQ(42 * 5 + 40 * 5,
- context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42 * 5 + 40 * 5, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -2435,10 +2819,13 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"function len(x) { return x.length; };"
@@ -2452,8 +2839,16 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
" };"
" result = o[m]('239');"
"}");
- CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_EQ(3, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(239, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -2461,10 +2856,13 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("proto"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var o = new Object();"
@@ -2476,8 +2874,11 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
" if (i == 5) { o.method = function(x) { return x - 1; }; };"
" result += o[m](41);"
"}");
- CHECK_EQ(42 * 5 + 40 * 5,
- context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42 * 5 + 40 * 5, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -2485,10 +2886,13 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
LocalContext context;
- context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ_o->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var proto = new Object();"
@@ -2500,8 +2904,11 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
" if (i == 5) { proto.method = function(x) { return x - 1; }; };"
" result += o[m](41);"
"}");
- CHECK_EQ(42 * 5 + 40 * 5,
- context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42 * 5 + 40 * 5, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -2510,7 +2917,10 @@ static int interceptor_call_count = 0;
static void InterceptorICRefErrorGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- if (!is_bootstrapping && v8_str("x")->Equals(name) &&
+ if (!is_bootstrapping &&
+ v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust() &&
interceptor_call_count++ < 20) {
info.GetReturnValue().Set(call_ic_function2);
}
@@ -2523,14 +2933,16 @@ static void InterceptorICRefErrorGetter(
THREADED_TEST(InterceptorICReferenceErrors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorICRefErrorGetter));
is_bootstrapping = true;
- LocalContext context(0, templ, v8::Handle<Value>());
+ LocalContext context(0, templ, v8::Local<Value>());
is_bootstrapping = false;
- call_ic_function2 = v8_compile("function h(x) { return x; }; h")->Run();
- v8::Handle<Value> value = CompileRun(
+ call_ic_function2 = v8_compile("function h(x) { return x; }; h")
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<Value> value = CompileRun(
"function f() {"
" for (var i = 0; i < 1000; i++) {"
" try { x; } catch(e) { return true; }"
@@ -2538,7 +2950,7 @@ THREADED_TEST(InterceptorICReferenceErrors) {
" return false;"
"};"
"f();");
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
interceptor_call_count = 0;
value = CompileRun(
"function g() {"
@@ -2548,7 +2960,7 @@ THREADED_TEST(InterceptorICReferenceErrors) {
" return false;"
"};"
"g();");
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
}
@@ -2558,7 +2970,10 @@ static void InterceptorICExceptionGetter(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
if (is_bootstrapping) return;
- if (v8_str("x")->Equals(name) && ++interceptor_ic_exception_get_count < 20) {
+ if (v8_str("x")
+ ->Equals(info.GetIsolate()->GetCurrentContext(), name)
+ .FromJust() &&
+ ++interceptor_ic_exception_get_count < 20) {
info.GetReturnValue().Set(call_ic_function3);
}
if (interceptor_ic_exception_get_count == 20) {
@@ -2574,14 +2989,16 @@ THREADED_TEST(InterceptorICGetterExceptions) {
interceptor_ic_exception_get_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorICExceptionGetter));
is_bootstrapping = true;
- LocalContext context(0, templ, v8::Handle<Value>());
+ LocalContext context(0, templ, v8::Local<Value>());
is_bootstrapping = false;
- call_ic_function3 = v8_compile("function h(x) { return x; }; h")->Run();
- v8::Handle<Value> value = CompileRun(
+ call_ic_function3 = v8_compile("function h(x) { return x; }; h")
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<Value> value = CompileRun(
"function f() {"
" for (var i = 0; i < 100; i++) {"
" try { x; } catch(e) { return true; }"
@@ -2589,7 +3006,7 @@ THREADED_TEST(InterceptorICGetterExceptions) {
" return false;"
"};"
"f();");
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
interceptor_ic_exception_get_count = 0;
value = CompileRun(
"function f() {"
@@ -2599,7 +3016,7 @@ THREADED_TEST(InterceptorICGetterExceptions) {
" return false;"
"};"
"f();");
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
}
@@ -2621,11 +3038,11 @@ THREADED_TEST(InterceptorICSetterExceptions) {
interceptor_ic_exception_set_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(
v8::NamedPropertyHandlerConfiguration(0, InterceptorICExceptionSetter));
- LocalContext context(0, templ, v8::Handle<Value>());
- v8::Handle<Value> value = CompileRun(
+ LocalContext context(0, templ, v8::Local<Value>());
+ v8::Local<Value> value = CompileRun(
"function f() {"
" for (var i = 0; i < 100; i++) {"
" try { x = 42; } catch(e) { return true; }"
@@ -2633,7 +3050,7 @@ THREADED_TEST(InterceptorICSetterExceptions) {
" return false;"
"};"
"f();");
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
}
@@ -2641,16 +3058,17 @@ THREADED_TEST(InterceptorICSetterExceptions) {
THREADED_TEST(NullNamedInterceptor) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
static_cast<v8::GenericNamedPropertyGetterCallback>(0)));
LocalContext context;
templ->Set(CcTest::isolate(), "x", v8_num(42));
- v8::Handle<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
- v8::Handle<Value> value = CompileRun("obj.x");
+ v8::Local<v8::Object> obj =
+ templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
+ v8::Local<Value> value = CompileRun("obj.x");
CHECK(value->IsInt32());
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
@@ -2658,27 +3076,33 @@ THREADED_TEST(NullNamedInterceptor) {
THREADED_TEST(NullIndexedInterceptor) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
static_cast<v8::IndexedPropertyGetterCallback>(0)));
LocalContext context;
templ->Set(CcTest::isolate(), "42", v8_num(42));
- v8::Handle<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
- v8::Handle<Value> value = CompileRun("obj[42]");
+ v8::Local<v8::Object> obj =
+ templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust();
+ v8::Local<Value> value = CompileRun("obj[42]");
CHECK(value->IsInt32());
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->InstanceTemplate()->SetHandler(
v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
LocalContext env;
- env->Global()->Set(v8_str("obj"), templ->GetFunction()->NewInstance());
+ env->Global()
+ ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust();
ExpectTrue("obj.x === 42");
ExpectTrue("!obj.propertyIsEnumerable('x')");
}
@@ -2688,9 +3112,12 @@ THREADED_TEST(Regress256330) {
i::FLAG_allow_natives_syntax = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
- context->Global()->Set(v8_str("Bug"), templ->GetFunction());
+ context->Global()
+ ->Set(context.local(), v8_str("Bug"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"\"use strict\"; var o = new Bug;"
"function f(o) { o.x = 10; };"
@@ -2704,10 +3131,13 @@ THREADED_TEST(Regress256330) {
THREADED_TEST(CrankshaftInterceptorSetter) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
// Initialize fields to avoid transitions later.
@@ -2731,10 +3161,13 @@ THREADED_TEST(CrankshaftInterceptorSetter) {
THREADED_TEST(CrankshaftInterceptorGetter) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
// Initialize fields to avoid transitions later.
@@ -2755,10 +3188,13 @@ THREADED_TEST(CrankshaftInterceptorGetter) {
THREADED_TEST(CrankshaftInterceptorFieldRead) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
"obj.__proto__.interceptor_age = 42;"
@@ -2776,10 +3212,13 @@ THREADED_TEST(CrankshaftInterceptorFieldRead) {
THREADED_TEST(CrankshaftInterceptorFieldWrite) {
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(templ, InterceptorGetter, InterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Obj"), templ->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Obj"),
+ templ->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var obj = new Obj;"
"obj.age = 100000;"
@@ -2797,19 +3236,25 @@ THREADED_TEST(CrankshaftInterceptorFieldWrite) {
THREADED_TEST(Regress149912) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Handle<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
- context->Global()->Set(v8_str("Bug"), templ->GetFunction());
+ context->Global()
+ ->Set(context.local(), v8_str("Bug"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun("Number.prototype.__proto__ = new Bug; var x = 0; x.foo();");
}
THREADED_TEST(Regress125988) {
v8::HandleScope scope(CcTest::isolate());
- Handle<FunctionTemplate> intercept = FunctionTemplate::New(CcTest::isolate());
+ Local<FunctionTemplate> intercept = FunctionTemplate::New(CcTest::isolate());
AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
LocalContext env;
- env->Global()->Set(v8_str("Intercept"), intercept->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("Intercept"),
+ intercept->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
CompileRun(
"var a = new Object();"
"var b = new Intercept();"
@@ -2832,17 +3277,21 @@ THREADED_TEST(Regress125988) {
static void IndexedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 1);
- result->Set(0, v8::Integer::New(info.GetIsolate(), 7));
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 1);
+ result->Set(info.GetIsolate()->GetCurrentContext(), 0,
+ v8::Integer::New(info.GetIsolate(), 7))
+ .FromJust();
info.GetReturnValue().Set(result);
}
static void NamedPropertyEnumerator(
const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
- result->Set(0, v8_str("x"));
- result->Set(1, v8::Symbol::GetIterator(info.GetIsolate()));
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ result->Set(context, 0, v8_str("x")).FromJust();
+ result->Set(context, 1, v8::Symbol::GetIterator(info.GetIsolate()))
+ .FromJust();
info.GetReturnValue().Set(result);
}
@@ -2850,8 +3299,7 @@ static void NamedPropertyEnumerator(
THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
@@ -2861,33 +3309,50 @@ THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
NULL, NULL, NULL, NULL, NamedPropertyEnumerator));
LocalContext context;
- v8::Handle<v8::Object> global = context->Global();
- global->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global = context->Global();
+ global->Set(context.local(), v8_str("object"),
+ obj_template->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
- v8::Handle<v8::Value> result =
+ v8::Local<v8::Value> result =
CompileRun("Object.getOwnPropertyNames(object)");
CHECK(result->IsArray());
- v8::Handle<v8::Array> result_array = v8::Handle<v8::Array>::Cast(result);
+ v8::Local<v8::Array> result_array = v8::Local<v8::Array>::Cast(result);
CHECK_EQ(2u, result_array->Length());
- CHECK(result_array->Get(0)->IsString());
- CHECK(result_array->Get(1)->IsString());
- CHECK(v8_str("7")->Equals(result_array->Get(0)));
- CHECK(v8_str("x")->Equals(result_array->Get(1)));
+ CHECK(result_array->Get(context.local(), 0).ToLocalChecked()->IsString());
+ CHECK(result_array->Get(context.local(), 1).ToLocalChecked()->IsString());
+ CHECK(v8_str("7")
+ ->Equals(context.local(),
+ result_array->Get(context.local(), 0).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("x")
+ ->Equals(context.local(),
+ result_array->Get(context.local(), 1).ToLocalChecked())
+ .FromJust());
result = CompileRun("var ret = []; for (var k in object) ret.push(k); ret");
CHECK(result->IsArray());
- result_array = v8::Handle<v8::Array>::Cast(result);
+ result_array = v8::Local<v8::Array>::Cast(result);
CHECK_EQ(2u, result_array->Length());
- CHECK(result_array->Get(0)->IsString());
- CHECK(result_array->Get(1)->IsString());
- CHECK(v8_str("7")->Equals(result_array->Get(0)));
- CHECK(v8_str("x")->Equals(result_array->Get(1)));
+ CHECK(result_array->Get(context.local(), 0).ToLocalChecked()->IsString());
+ CHECK(result_array->Get(context.local(), 1).ToLocalChecked()->IsString());
+ CHECK(v8_str("7")
+ ->Equals(context.local(),
+ result_array->Get(context.local(), 0).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("x")
+ ->Equals(context.local(),
+ result_array->Get(context.local(), 1).ToLocalChecked())
+ .FromJust());
result = CompileRun("Object.getOwnPropertySymbols(object)");
CHECK(result->IsArray());
- result_array = v8::Handle<v8::Array>::Cast(result);
+ result_array = v8::Local<v8::Array>::Cast(result);
CHECK_EQ(1u, result_array->Length());
- CHECK(result_array->Get(0)->Equals(v8::Symbol::GetIterator(isolate)));
+ CHECK(result_array->Get(context.local(), 0)
+ .ToLocalChecked()
+ ->Equals(context.local(), v8::Symbol::GetIterator(isolate))
+ .FromJust());
}
@@ -2900,8 +3365,7 @@ static void IndexedPropertyEnumeratorException(
THREADED_TEST(GetOwnPropertyNamesWithIndexedInterceptorExceptions_regress4026) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
@@ -2910,9 +3374,11 @@ THREADED_TEST(GetOwnPropertyNamesWithIndexedInterceptorExceptions_regress4026) {
NULL, NULL, NULL, NULL, IndexedPropertyEnumeratorException));
LocalContext context;
- v8::Handle<v8::Object> global = context->Global();
- global->Set(v8_str("object"), obj_template->NewInstance());
- v8::Handle<v8::Value> result = CompileRun(
+ v8::Local<v8::Object> global = context->Global();
+ global->Set(context.local(), v8_str("object"),
+ obj_template->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<v8::Value> result = CompileRun(
"var result = []; "
"try { "
" for (var k in object) result .push(k);"
@@ -2921,7 +3387,7 @@ THREADED_TEST(GetOwnPropertyNamesWithIndexedInterceptorExceptions_regress4026) {
"}"
"result ");
CHECK(!result->IsArray());
- CHECK(v8_num(42)->Equals(result));
+ CHECK(v8_num(42)->Equals(context.local(), result).FromJust());
result = CompileRun(
"var result = [];"
@@ -2932,7 +3398,7 @@ THREADED_TEST(GetOwnPropertyNamesWithIndexedInterceptorExceptions_regress4026) {
"}"
"result");
CHECK(!result->IsArray());
- CHECK(v8_num(42)->Equals(result));
+ CHECK(v8_num(42)->Equals(context.local(), result).FromJust());
}
@@ -2945,8 +3411,7 @@ static void NamedPropertyEnumeratorException(
THREADED_TEST(GetOwnPropertyNamesWithNamedInterceptorExceptions_regress4026) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
@@ -2955,10 +3420,12 @@ THREADED_TEST(GetOwnPropertyNamesWithNamedInterceptorExceptions_regress4026) {
NULL, NULL, NULL, NULL, NamedPropertyEnumeratorException));
LocalContext context;
- v8::Handle<v8::Object> global = context->Global();
- global->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global = context->Global();
+ global->Set(context.local(), v8_str("object"),
+ obj_template->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
- v8::Handle<v8::Value> result = CompileRun(
+ v8::Local<v8::Value> result = CompileRun(
"var result = []; "
"try { "
" for (var k in object) result.push(k);"
@@ -2967,7 +3434,7 @@ THREADED_TEST(GetOwnPropertyNamesWithNamedInterceptorExceptions_regress4026) {
"}"
"result");
CHECK(!result->IsArray());
- CHECK(v8_num(43)->Equals(result));
+ CHECK(v8_num(43)->Equals(context.local(), result).FromJust());
result = CompileRun(
"var result = [];"
@@ -2978,7 +3445,7 @@ THREADED_TEST(GetOwnPropertyNamesWithNamedInterceptorExceptions_regress4026) {
"}"
"result");
CHECK(!result->IsArray());
- CHECK(v8_num(43)->Equals(result));
+ CHECK(v8_num(43)->Equals(context.local(), result).FromJust());
}
namespace {
@@ -2987,7 +3454,8 @@ template <typename T>
Local<Object> BuildWrappedObject(v8::Isolate* isolate, T* data) {
auto templ = v8::ObjectTemplate::New(isolate);
templ->SetInternalFieldCount(1);
- auto instance = templ->NewInstance();
+ auto instance =
+ templ->NewInstance(isolate->GetCurrentContext()).ToLocalChecked();
instance->SetAlignedPointerInInternalField(0, data);
return instance;
}
@@ -3083,11 +3551,21 @@ TEST(NamedAllCanReadInterceptor) {
auto checked = v8::ObjectTemplate::New(isolate);
checked->SetAccessCheckCallback(SimpleAccessChecker);
- context->Global()->Set(v8_str("intercepted_0"), intercepted_0->NewInstance());
- context->Global()->Set(v8_str("intercepted_1"), intercepted_1->NewInstance());
- auto checked_instance = checked->NewInstance();
- checked_instance->Set(v8_str("whatever"), v8_num(17));
- context->Global()->Set(v8_str("checked"), checked_instance);
+ context->Global()
+ ->Set(context.local(), v8_str("intercepted_0"),
+ intercepted_0->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("intercepted_1"),
+ intercepted_1->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ auto checked_instance =
+ checked->NewInstance(context.local()).ToLocalChecked();
+ checked_instance->Set(context.local(), v8_str("whatever"), v8_num(17))
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("checked"), checked_instance)
+ .FromJust();
CompileRun(
"checked.__proto__ = intercepted_1;"
"intercepted_1.__proto__ = intercepted_0;");
@@ -3160,11 +3638,20 @@ TEST(IndexedAllCanReadInterceptor) {
auto checked = v8::ObjectTemplate::New(isolate);
checked->SetAccessCheckCallback(SimpleAccessChecker);
- context->Global()->Set(v8_str("intercepted_0"), intercepted_0->NewInstance());
- context->Global()->Set(v8_str("intercepted_1"), intercepted_1->NewInstance());
- auto checked_instance = checked->NewInstance();
- context->Global()->Set(v8_str("checked"), checked_instance);
- checked_instance->Set(15, v8_num(17));
+ context->Global()
+ ->Set(context.local(), v8_str("intercepted_0"),
+ intercepted_0->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("intercepted_1"),
+ intercepted_1->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ auto checked_instance =
+ checked->NewInstance(context.local()).ToLocalChecked();
+ context->Global()
+ ->Set(context.local(), v8_str("checked"), checked_instance)
+ .FromJust();
+ checked_instance->Set(context.local(), 15, v8_num(17)).FromJust();
CompileRun(
"checked.__proto__ = intercepted_1;"
"intercepted_1.__proto__ = intercepted_0;");
@@ -3214,8 +3701,11 @@ THREADED_TEST(NonMaskingInterceptorOwnProperty) {
conf.data = BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data);
interceptor_templ->SetHandler(conf);
- auto interceptor = interceptor_templ->NewInstance();
- context->Global()->Set(v8_str("obj"), interceptor);
+ auto interceptor =
+ interceptor_templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()
+ ->Set(context.local(), v8_str("obj"), interceptor)
+ .FromJust();
ExpectInt32("obj.whatever", 239);
@@ -3242,8 +3732,11 @@ THREADED_TEST(NonMaskingInterceptorPrototypeProperty) {
conf.data = BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data);
interceptor_templ->SetHandler(conf);
- auto interceptor = interceptor_templ->NewInstance();
- context->Global()->Set(v8_str("obj"), interceptor);
+ auto interceptor =
+ interceptor_templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()
+ ->Set(context.local(), v8_str("obj"), interceptor)
+ .FromJust();
ExpectInt32("obj.whatever", 239);
@@ -3270,8 +3763,11 @@ THREADED_TEST(NonMaskingInterceptorPrototypePropertyIC) {
conf.data = BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data);
interceptor_templ->SetHandler(conf);
- auto interceptor = interceptor_templ->NewInstance();
- context->Global()->Set(v8_str("obj"), interceptor);
+ auto interceptor =
+ interceptor_templ->NewInstance(context.local()).ToLocalChecked();
+ context->Global()
+ ->Set(context.local(), v8_str("obj"), interceptor)
+ .FromJust();
CompileRun(
"outer = {};"
@@ -3345,7 +3841,7 @@ void DatabaseSetter(Local<Name> name, Local<Value> value,
const v8::PropertyCallbackInfo<Value>& info) {
ApiTestFuzzer::Fuzz();
auto context = info.GetIsolate()->GetCurrentContext();
- if (name->Equals(v8_str("db"))) return;
+ if (name->Equals(context, v8_str("db")).FromJust()) return;
Local<v8::Object> db = info.Holder()
->GetRealNamedProperty(context, v8_str("db"))
.ToLocalChecked()
@@ -3367,10 +3863,14 @@ THREADED_TEST(NonMaskingInterceptorGlobalEvalRegression) {
conf.flags = v8::PropertyHandlerFlags::kNonMasking;
interceptor_templ->SetHandler(conf);
- context->Global()->Set(v8_str("intercepted_1"),
- interceptor_templ->NewInstance());
- context->Global()->Set(v8_str("intercepted_2"),
- interceptor_templ->NewInstance());
+ context->Global()
+ ->Set(context.local(), v8_str("intercepted_1"),
+ interceptor_templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
+ context->Global()
+ ->Set(context.local(), v8_str("intercepted_2"),
+ interceptor_templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust();
// Init dbs.
CompileRun(
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index aa2863638c..ca3f446a41 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -46,11 +46,12 @@
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/objects.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
-#include "test/cctest/heap-tester.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
static const bool kLogThreading = false;
@@ -60,7 +61,6 @@ using ::v8::Context;
using ::v8::Extension;
using ::v8::Function;
using ::v8::FunctionTemplate;
-using ::v8::Handle;
using ::v8::HandleScope;
using ::v8::Local;
using ::v8::Maybe;
@@ -94,8 +94,7 @@ using ::v8::Value;
void RunWithProfiler(void (*test)()) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Local<v8::String> profile_name =
- v8::String::NewFromUtf8(env->GetIsolate(), "my_profile1");
+ v8::Local<v8::String> profile_name = v8_str("my_profile1");
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
cpu_profiler->StartProfiling(profile_name);
@@ -110,12 +109,21 @@ static void IncrementingSignatureCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
signature_callback_count++;
- CHECK(signature_expected_receiver->Equals(args.Holder()));
- CHECK(signature_expected_receiver->Equals(args.This()));
- v8::Handle<v8::Array> result =
+ CHECK(signature_expected_receiver->Equals(
+ args.GetIsolate()->GetCurrentContext(),
+ args.Holder())
+ .FromJust());
+ CHECK(signature_expected_receiver->Equals(
+ args.GetIsolate()->GetCurrentContext(),
+ args.This())
+ .FromJust());
+ v8::Local<v8::Array> result =
v8::Array::New(args.GetIsolate(), args.Length());
- for (int i = 0; i < args.Length(); i++)
- result->Set(v8::Integer::New(args.GetIsolate(), i), args[i]);
+ for (int i = 0; i < args.Length(); i++) {
+ CHECK(result->Set(args.GetIsolate()->GetCurrentContext(),
+ v8::Integer::New(args.GetIsolate(), i), args[i])
+ .FromJust());
+ }
args.GetReturnValue().Set(result);
}
@@ -154,13 +162,13 @@ THREADED_TEST(Handles) {
CHECK(!local_env.IsEmpty());
local_env->Enter();
- v8::Handle<v8::Primitive> undef = v8::Undefined(CcTest::isolate());
+ v8::Local<v8::Primitive> undef = v8::Undefined(CcTest::isolate());
CHECK(!undef.IsEmpty());
CHECK(undef->IsUndefined());
const char* source = "1 + 2 + 3";
Local<Script> script = v8_compile(source);
- CHECK_EQ(6, script->Run()->Int32Value());
+ CHECK_EQ(6, v8_run_int32value(script));
local_env->Exit();
}
@@ -168,7 +176,7 @@ THREADED_TEST(Handles) {
THREADED_TEST(IsolateOfContext) {
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<Context> env = Context::New(CcTest::isolate());
+ v8::Local<Context> env = Context::New(CcTest::isolate());
CHECK(!env->GetIsolate()->InContext());
CHECK(env->GetIsolate() == CcTest::isolate());
@@ -199,7 +207,11 @@ static void TestSignature(const char* loop_js, Local<Value> receiver,
CHECK_EQ(10, signature_callback_count);
} else {
CHECK(v8_str("TypeError: Illegal invocation")
- ->Equals(try_catch.Exception()->ToString(isolate)));
+ ->Equals(isolate->GetCurrentContext(),
+ try_catch.Exception()
+ ->ToString(isolate->GetCurrentContext())
+ .ToLocalChecked())
+ .FromJust());
}
}
@@ -209,32 +221,43 @@ THREADED_TEST(ReceiverSignature) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
// Setup templates.
- v8::Handle<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::Signature> sig = v8::Signature::New(isolate, fun);
- v8::Handle<v8::FunctionTemplate> callback_sig =
- v8::FunctionTemplate::New(
- isolate, IncrementingSignatureCallback, Local<Value>(), sig);
- v8::Handle<v8::FunctionTemplate> callback =
+ v8::Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::Signature> sig = v8::Signature::New(isolate, fun);
+ v8::Local<v8::FunctionTemplate> callback_sig = v8::FunctionTemplate::New(
+ isolate, IncrementingSignatureCallback, Local<Value>(), sig);
+ v8::Local<v8::FunctionTemplate> callback =
v8::FunctionTemplate::New(isolate, IncrementingSignatureCallback);
- v8::Handle<v8::FunctionTemplate> sub_fun = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> sub_fun = v8::FunctionTemplate::New(isolate);
sub_fun->Inherit(fun);
- v8::Handle<v8::FunctionTemplate> unrel_fun =
+ v8::Local<v8::FunctionTemplate> unrel_fun =
v8::FunctionTemplate::New(isolate);
// Install properties.
- v8::Handle<v8::ObjectTemplate> fun_proto = fun->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> fun_proto = fun->PrototypeTemplate();
fun_proto->Set(v8_str("prop_sig"), callback_sig);
fun_proto->Set(v8_str("prop"), callback);
fun_proto->SetAccessorProperty(
v8_str("accessor_sig"), callback_sig, callback_sig);
fun_proto->SetAccessorProperty(v8_str("accessor"), callback, callback);
// Instantiate templates.
- Local<Value> fun_instance = fun->InstanceTemplate()->NewInstance();
- Local<Value> sub_fun_instance = sub_fun->InstanceTemplate()->NewInstance();
+ Local<Value> fun_instance =
+ fun->InstanceTemplate()->NewInstance(env.local()).ToLocalChecked();
+ Local<Value> sub_fun_instance =
+ sub_fun->InstanceTemplate()->NewInstance(env.local()).ToLocalChecked();
// Setup global variables.
- env->Global()->Set(v8_str("Fun"), fun->GetFunction());
- env->Global()->Set(v8_str("UnrelFun"), unrel_fun->GetFunction());
- env->Global()->Set(v8_str("fun_instance"), fun_instance);
- env->Global()->Set(v8_str("sub_fun_instance"), sub_fun_instance);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("Fun"),
+ fun->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("UnrelFun"),
+ unrel_fun->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("fun_instance"), fun_instance)
+ .FromJust());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("sub_fun_instance"), sub_fun_instance)
+ .FromJust());
CompileRun(
"var accessor_sig_key = 'accessor_sig';"
"var accessor_key = 'accessor';"
@@ -283,8 +306,8 @@ THREADED_TEST(HulIgennem) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
- Local<String> undef_str = undef->ToString(isolate);
+ v8::Local<v8::Primitive> undef = v8::Undefined(isolate);
+ Local<String> undef_str = undef->ToString(env.local()).ToLocalChecked();
char* value = i::NewArray<char>(undef_str->Utf8Length() + 1);
undef_str->WriteUtf8(value);
CHECK_EQ(0, strcmp(value, "undefined"));
@@ -297,14 +320,16 @@ THREADED_TEST(Access) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
Local<v8::Object> obj = v8::Object::New(isolate);
- Local<Value> foo_before = obj->Get(v8_str("foo"));
+ Local<Value> foo_before =
+ obj->Get(env.local(), v8_str("foo")).ToLocalChecked();
CHECK(foo_before->IsUndefined());
Local<String> bar_str = v8_str("bar");
- obj->Set(v8_str("foo"), bar_str);
- Local<Value> foo_after = obj->Get(v8_str("foo"));
+ CHECK(obj->Set(env.local(), v8_str("foo"), bar_str).FromJust());
+ Local<Value> foo_after =
+ obj->Get(env.local(), v8_str("foo")).ToLocalChecked();
CHECK(!foo_after->IsUndefined());
CHECK(foo_after->IsString());
- CHECK(bar_str->Equals(foo_after));
+ CHECK(bar_str->Equals(env.local(), foo_after).FromJust());
}
@@ -312,18 +337,22 @@ THREADED_TEST(AccessElement) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<v8::Object> obj = v8::Object::New(env->GetIsolate());
- Local<Value> before = obj->Get(1);
+ Local<Value> before = obj->Get(env.local(), 1).ToLocalChecked();
CHECK(before->IsUndefined());
Local<String> bar_str = v8_str("bar");
- obj->Set(1, bar_str);
- Local<Value> after = obj->Get(1);
+ CHECK(obj->Set(env.local(), 1, bar_str).FromJust());
+ Local<Value> after = obj->Get(env.local(), 1).ToLocalChecked();
CHECK(!after->IsUndefined());
CHECK(after->IsString());
- CHECK(bar_str->Equals(after));
+ CHECK(bar_str->Equals(env.local(), after).FromJust());
Local<v8::Array> value = CompileRun("[\"a\", \"b\"]").As<v8::Array>();
- CHECK(v8_str("a")->Equals(value->Get(0)));
- CHECK(v8_str("b")->Equals(value->Get(1)));
+ CHECK(v8_str("a")
+ ->Equals(env.local(), value->Get(env.local(), 0).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("b")
+ ->Equals(env.local(), value->Get(env.local(), 1).ToLocalChecked())
+ .FromJust());
}
@@ -332,7 +361,7 @@ THREADED_TEST(Script) {
v8::HandleScope scope(env->GetIsolate());
const char* source = "1 + 2 + 3";
Local<Script> script = v8_compile(source);
- CHECK_EQ(6, script->Run()->Int32Value());
+ CHECK_EQ(6, v8_run_int32value(script));
}
@@ -403,11 +432,13 @@ THREADED_TEST(ScriptUsingStringResource) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
TestResource* resource = new TestResource(two_byte_source, &dispose_count);
- Local<String> source = String::NewExternal(env->GetIsolate(), resource);
+ Local<String> source =
+ String::NewExternalTwoByte(env->GetIsolate(), resource)
+ .ToLocalChecked();
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(7, value->Int32Value());
+ CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
@@ -432,7 +463,9 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
v8::HandleScope scope(env->GetIsolate());
TestOneByteResource* resource =
new TestOneByteResource(i::StrDup(c_source), &dispose_count);
- Local<String> source = String::NewExternal(env->GetIsolate(), resource);
+ Local<String> source =
+ String::NewExternalOneByte(env->GetIsolate(), resource)
+ .ToLocalChecked();
CHECK(source->IsExternalOneByte());
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalOneByteStringResource());
@@ -441,9 +474,9 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(7, value->Int32Value());
+ CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
@@ -460,7 +493,9 @@ THREADED_TEST(ScriptMakingExternalString) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<String> source =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_source);
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
@@ -473,9 +508,9 @@ THREADED_TEST(ScriptMakingExternalString) {
&dispose_count));
CHECK(success);
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(7, value->Int32Value());
+ CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
@@ -499,9 +534,9 @@ THREADED_TEST(ScriptMakingExternalOneByteString) {
new TestOneByteResource(i::StrDup(c_source), &dispose_count));
CHECK(success);
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(7, value->Int32Value());
+ CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
@@ -521,7 +556,9 @@ TEST(MakingExternalStringConditions) {
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
Local<String> small_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
i::DeleteArray(two_byte_string);
// We should refuse to externalize small strings.
@@ -533,7 +570,9 @@ TEST(MakingExternalStringConditions) {
CHECK(small_string->CanMakeExternal());
two_byte_string = AsciiToTwoByteString("small string 2");
- small_string = String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
+ small_string = String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
i::DeleteArray(two_byte_string);
const int buf_size = 10 * 1024;
@@ -543,7 +582,9 @@ TEST(MakingExternalStringConditions) {
two_byte_string = AsciiToTwoByteString(buf);
Local<String> large_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string);
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
i::DeleteArray(buf);
i::DeleteArray(two_byte_string);
// Large strings should be immediately accepted.
@@ -559,7 +600,7 @@ TEST(MakingExternalOneByteStringConditions) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
- Local<String> small_string = String::NewFromUtf8(env->GetIsolate(), "s1");
+ Local<String> small_string = v8_str("s1");
// We should refuse to externalize small strings.
CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
@@ -572,7 +613,7 @@ TEST(MakingExternalOneByteStringConditions) {
char* buf = i::NewArray<char>(buf_size);
memset(buf, 'a', buf_size);
buf[buf_size - 1] = '\0';
- Local<String> large_string = String::NewFromUtf8(env->GetIsolate(), buf);
+ Local<String> large_string = v8_str(buf);
i::DeleteArray(buf);
// Large strings should be immediately accepted.
CHECK(large_string->CanMakeExternal());
@@ -618,8 +659,10 @@ THREADED_TEST(UsingExternalString) {
{
v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
- Local<String> string = String::NewExternal(
- CcTest::isolate(), new TestResource(two_byte_string));
+ Local<String> string =
+ String::NewExternalTwoByte(CcTest::isolate(),
+ new TestResource(two_byte_string))
+ .ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -638,8 +681,11 @@ THREADED_TEST(UsingExternalOneByteString) {
{
v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
- Local<String> string = String::NewExternal(
- CcTest::isolate(), new TestOneByteResource(i::StrDup(one_byte_string)));
+ Local<String> string =
+ String::NewExternalOneByte(
+ CcTest::isolate(),
+ new TestOneByteResource(i::StrDup(one_byte_string)))
+ .ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
@@ -684,8 +730,9 @@ THREADED_TEST(NewExternalForVeryLongString) {
v8::HandleScope scope(isolate);
v8::TryCatch try_catch(isolate);
RandomLengthOneByteResource r(1 << 30);
- v8::Local<v8::String> str = v8::String::NewExternal(isolate, &r);
- CHECK(str.IsEmpty());
+ v8::MaybeLocal<v8::String> maybe_str =
+ v8::String::NewExternalOneByte(isolate, &r);
+ CHECK(maybe_str.IsEmpty());
CHECK(!try_catch.HasCaught());
}
@@ -693,8 +740,9 @@ THREADED_TEST(NewExternalForVeryLongString) {
v8::HandleScope scope(isolate);
v8::TryCatch try_catch(isolate);
RandomLengthResource r(1 << 30);
- v8::Local<v8::String> str = v8::String::NewExternal(isolate, &r);
- CHECK(str.IsEmpty());
+ v8::MaybeLocal<v8::String> maybe_str =
+ v8::String::NewExternalTwoByte(isolate, &r);
+ CHECK(maybe_str.IsEmpty());
CHECK(!try_catch.HasCaught());
}
}
@@ -708,8 +756,11 @@ THREADED_TEST(ScavengeExternalString) {
{
v8::HandleScope scope(CcTest::isolate());
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
- Local<String> string = String::NewExternal(
- CcTest::isolate(), new TestResource(two_byte_string, &dispose_count));
+ Local<String> string =
+ String::NewExternalTwoByte(
+ CcTest::isolate(),
+ new TestResource(two_byte_string, &dispose_count))
+ .ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
in_new_space = CcTest::heap()->InNewSpace(*istring);
@@ -729,9 +780,11 @@ THREADED_TEST(ScavengeExternalOneByteString) {
{
v8::HandleScope scope(CcTest::isolate());
const char* one_byte_string = "test string";
- Local<String> string = String::NewExternal(
- CcTest::isolate(),
- new TestOneByteResource(i::StrDup(one_byte_string), &dispose_count));
+ Local<String> string =
+ String::NewExternalOneByte(
+ CcTest::isolate(),
+ new TestOneByteResource(i::StrDup(one_byte_string), &dispose_count))
+ .ToLocalChecked();
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
in_new_space = CcTest::heap()->InNewSpace(*istring);
@@ -775,11 +828,13 @@ TEST(ExternalStringWithDisposeHandling) {
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> source = String::NewExternal(env->GetIsolate(), &res_stack);
+ Local<String> source =
+ String::NewExternalOneByte(env->GetIsolate(), &res_stack)
+ .ToLocalChecked();
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(7, value->Int32Value());
+ CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
}
@@ -796,11 +851,13 @@ TEST(ExternalStringWithDisposeHandling) {
{
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- Local<String> source = String::NewExternal(env->GetIsolate(), res_heap);
+ Local<String> source =
+ String::NewExternalOneByte(env->GetIsolate(), res_heap)
+ .ToLocalChecked();
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(7, value->Int32Value());
+ CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
CcTest::heap()->CollectAllAvailableGarbage();
CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
}
@@ -826,34 +883,41 @@ THREADED_TEST(StringConcat) {
uint16_t* two_byte_source = AsciiToTwoByteString(two_byte_string_1);
Local<String> right =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_source);
+ String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
i::DeleteArray(two_byte_source);
Local<String> source = String::Concat(left, right);
- right = String::NewExternal(
- env->GetIsolate(),
- new TestOneByteResource(i::StrDup(one_byte_extern_1)));
+ right = String::NewExternalOneByte(
+ env->GetIsolate(),
+ new TestOneByteResource(i::StrDup(one_byte_extern_1)))
+ .ToLocalChecked();
source = String::Concat(source, right);
- right = String::NewExternal(
- env->GetIsolate(),
- new TestResource(AsciiToTwoByteString(two_byte_extern_1)));
+ right = String::NewExternalTwoByte(
+ env->GetIsolate(),
+ new TestResource(AsciiToTwoByteString(two_byte_extern_1)))
+ .ToLocalChecked();
source = String::Concat(source, right);
right = v8_str(one_byte_string_2);
source = String::Concat(source, right);
two_byte_source = AsciiToTwoByteString(two_byte_string_2);
- right = String::NewFromTwoByte(env->GetIsolate(), two_byte_source);
+ right = String::NewFromTwoByte(env->GetIsolate(), two_byte_source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
i::DeleteArray(two_byte_source);
source = String::Concat(source, right);
- right = String::NewExternal(
- env->GetIsolate(),
- new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
+ right = String::NewExternalTwoByte(
+ env->GetIsolate(),
+ new TestResource(AsciiToTwoByteString(two_byte_extern_2)))
+ .ToLocalChecked();
source = String::Concat(source, right);
Local<Script> script = v8_compile(source);
- Local<Value> value = script->Run();
+ Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(68, value->Int32Value());
+ CHECK_EQ(68, value->Int32Value(env.local()).FromJust());
}
CcTest::i_isolate()->compilation_cache()->Clear();
CcTest::heap()->CollectAllGarbage();
@@ -864,10 +928,10 @@ THREADED_TEST(StringConcat) {
THREADED_TEST(GlobalProperties) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::Object> global = env->Global();
- global->Set(v8_str("pi"), v8_num(3.1415926));
- Local<Value> pi = global->Get(v8_str("pi"));
- CHECK_EQ(3.1415926, pi->NumberValue());
+ v8::Local<v8::Object> global = env->Global();
+ CHECK(global->Set(env.local(), v8_str("pi"), v8_num(3.1415926)).FromJust());
+ Local<Value> pi = global->Get(env.local(), v8_str("pi")).ToLocalChecked();
+ CHECK_EQ(3.1415926, pi->NumberValue(env.local()).FromJust());
}
@@ -893,8 +957,14 @@ static void construct_callback(
const v8::FunctionCallbackInfo<Value>& info) {
ApiTestFuzzer::Fuzz();
CheckReturnValue(info, FUNCTION_ADDR(construct_callback));
- info.This()->Set(v8_str("x"), v8_num(1));
- info.This()->Set(v8_str("y"), v8_num(2));
+ CHECK(
+ info.This()
+ ->Set(info.GetIsolate()->GetCurrentContext(), v8_str("x"), v8_num(1))
+ .FromJust());
+ CHECK(
+ info.This()
+ ->Set(info.GetIsolate()->GetCurrentContext(), v8_str("y"), v8_num(2))
+ .FromJust());
info.GetReturnValue().Set(v8_str("bad value"));
info.GetReturnValue().Set(info.This());
}
@@ -920,11 +990,11 @@ static void TestFunctionTemplateInitializer(Handler handler,
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate, handler);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
Local<Script> script = v8_compile("obj()");
for (int i = 0; i < 30; i++) {
- CHECK_EQ(102, script->Run()->Int32Value());
+ CHECK_EQ(102, v8_run_int32value(script));
}
}
// Use SetCallHandler to initialize a function template, should work like
@@ -936,11 +1006,11 @@ static void TestFunctionTemplateInitializer(Handler handler,
Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
fun_templ->SetCallHandler(handler_2);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
Local<Script> script = v8_compile("obj()");
for (int i = 0; i < 30; i++) {
- CHECK_EQ(102, script->Run()->Int32Value());
+ CHECK_EQ(102, v8_run_int32value(script));
}
}
}
@@ -956,19 +1026,20 @@ static void TestFunctionTemplateAccessor(Constructor constructor,
v8::FunctionTemplate::New(env->GetIsolate(), constructor);
fun_templ->SetClassName(v8_str("funky"));
fun_templ->InstanceTemplate()->SetAccessor(v8_str("m"), accessor);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
- Local<Value> result = v8_compile("(new obj()).toString()")->Run();
- CHECK(v8_str("[object funky]")->Equals(result));
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
+ Local<Value> result =
+ v8_compile("(new obj()).toString()")->Run(env.local()).ToLocalChecked();
+ CHECK(v8_str("[object funky]")->Equals(env.local(), result).FromJust());
CompileRun("var obj_instance = new obj();");
Local<Script> script;
script = v8_compile("obj_instance.x");
for (int i = 0; i < 30; i++) {
- CHECK_EQ(1, script->Run()->Int32Value());
+ CHECK_EQ(1, v8_run_int32value(script));
}
script = v8_compile("obj_instance.m");
for (int i = 0; i < 30; i++) {
- CHECK_EQ(239, script->Run()->Int32Value());
+ CHECK_EQ(239, v8_run_int32value(script));
}
}
@@ -992,20 +1063,24 @@ static void TestSimpleCallback(Callback callback) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
object_template->Set(isolate, "callback",
v8::FunctionTemplate::New(isolate, callback));
- v8::Local<v8::Object> object = object_template->NewInstance();
- (*env)->Global()->Set(v8_str("callback_object"), object);
- v8::Handle<v8::Script> script;
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(env.local()).ToLocalChecked();
+ CHECK((*env)
+ ->Global()
+ ->Set(env.local(), v8_str("callback_object"), object)
+ .FromJust());
+ v8::Local<v8::Script> script;
script = v8_compile("callback_object.callback(17)");
for (int i = 0; i < 30; i++) {
- CHECK_EQ(51424, script->Run()->Int32Value());
+ CHECK_EQ(51424, v8_run_int32value(script));
}
script = v8_compile("callback_object.callback(17, 24)");
for (int i = 0; i < 30; i++) {
- CHECK_EQ(51425, script->Run()->Int32Value());
+ CHECK_EQ(51425, v8_run_int32value(script));
}
}
@@ -1086,25 +1161,29 @@ void FastReturnValueCallback<void>(
template<>
void FastReturnValueCallback<Object>(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- v8::Handle<v8::Object> object;
+ v8::Local<v8::Object> object;
if (!fast_return_value_object_is_empty) {
object = Object::New(info.GetIsolate());
}
info.GetReturnValue().Set(object);
}
-template<typename T>
-Handle<Value> TestFastReturnValues() {
+template <typename T>
+Local<Value> TestFastReturnValues() {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::EscapableHandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
v8::FunctionCallback callback = &FastReturnValueCallback<T>;
object_template->Set(isolate, "callback",
v8::FunctionTemplate::New(isolate, callback));
- v8::Local<v8::Object> object = object_template->NewInstance();
- (*env)->Global()->Set(v8_str("callback_object"), object);
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(env.local()).ToLocalChecked();
+ CHECK((*env)
+ ->Global()
+ ->Set(env.local(), v8_str("callback_object"), object)
+ .FromJust());
return scope.Escape(CompileRun("callback_object.callback()"));
}
@@ -1113,7 +1192,7 @@ THREADED_PROFILED_TEST(FastReturnValues) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> value;
+ v8::Local<v8::Value> value;
// check int32_t and uint32_t
int32_t int_values[] = {
0, 234, -723,
@@ -1126,24 +1205,28 @@ THREADED_PROFILED_TEST(FastReturnValues) {
fast_return_value_int32 = int_value;
value = TestFastReturnValues<int32_t>();
CHECK(value->IsInt32());
- CHECK(fast_return_value_int32 == value->Int32Value());
+ CHECK_EQ(fast_return_value_int32,
+ value->Int32Value(env.local()).FromJust());
// check uint32_t
fast_return_value_uint32 = static_cast<uint32_t>(int_value);
value = TestFastReturnValues<uint32_t>();
CHECK(value->IsUint32());
- CHECK(fast_return_value_uint32 == value->Uint32Value());
+ CHECK_EQ(fast_return_value_uint32,
+ value->Uint32Value(env.local()).FromJust());
}
}
// check double
value = TestFastReturnValues<double>();
CHECK(value->IsNumber());
- CHECK_EQ(kFastReturnValueDouble, value->ToNumber(isolate)->Value());
+ CHECK_EQ(kFastReturnValueDouble,
+ value->ToNumber(env.local()).ToLocalChecked()->Value());
// check bool values
for (int i = 0; i < 2; i++) {
fast_return_value_bool = i == 0;
value = TestFastReturnValues<bool>();
CHECK(value->IsBoolean());
- CHECK_EQ(fast_return_value_bool, value->ToBoolean(isolate)->Value());
+ CHECK_EQ(fast_return_value_bool,
+ value->ToBoolean(env.local()).ToLocalChecked()->Value());
}
// check oddballs
ReturnValueOddball oddballs[] = {
@@ -1183,33 +1266,30 @@ THREADED_TEST(FunctionTemplateSetLength) {
v8::HandleScope scope(isolate);
{
Local<v8::FunctionTemplate> fun_templ =
- v8::FunctionTemplate::New(isolate,
- handle_callback,
- Handle<v8::Value>(),
- Handle<v8::Signature>(),
- 23);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
+ v8::FunctionTemplate::New(isolate, handle_callback, Local<v8::Value>(),
+ Local<v8::Signature>(), 23);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
Local<Script> script = v8_compile("obj.length");
- CHECK_EQ(23, script->Run()->Int32Value());
+ CHECK_EQ(23, v8_run_int32value(script));
}
{
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate, handle_callback);
fun_templ->SetLength(22);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
Local<Script> script = v8_compile("obj.length");
- CHECK_EQ(22, script->Run()->Int32Value());
+ CHECK_EQ(22, v8_run_int32value(script));
}
{
// Without setting length it defaults to 0.
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate, handle_callback);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("obj"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), fun).FromJust());
Local<Script> script = v8_compile("obj.length");
- CHECK_EQ(0, script->Run()->Int32Value());
+ CHECK_EQ(0, v8_run_int32value(script));
}
}
@@ -1227,19 +1307,22 @@ static void TestExternalPointerWrapping() {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> data =
- v8::External::New(isolate, expected_ptr);
+ v8::Local<v8::Value> data = v8::External::New(isolate, expected_ptr);
- v8::Handle<v8::Object> obj = v8::Object::New(isolate);
- obj->Set(v8_str("func"),
- v8::FunctionTemplate::New(isolate, callback, data)->GetFunction());
- env->Global()->Set(v8_str("obj"), obj);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ CHECK(obj->Set(env.local(), v8_str("func"),
+ v8::FunctionTemplate::New(isolate, callback, data)
+ ->GetFunction(env.local())
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
- CHECK(CompileRun(
- "function foo() {\n"
- " for (var i = 0; i < 13; i++) obj.func();\n"
- "}\n"
- "foo(), true")->BooleanValue());
+ CHECK(CompileRun("function foo() {\n"
+ " for (var i = 0; i < 13; i++) obj.func();\n"
+ "}\n"
+ "foo(), true")
+ ->BooleanValue(env.local())
+ .FromJust());
}
@@ -1299,28 +1382,43 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
Local<v8::FunctionTemplate> other = v8::FunctionTemplate::New(isolate);
derived->Inherit(base);
- Local<v8::Function> base_function = base->GetFunction();
- Local<v8::Function> derived_function = derived->GetFunction();
- Local<v8::Function> other_function = other->GetFunction();
-
- Local<v8::Object> base_instance = base_function->NewInstance();
- Local<v8::Object> derived_instance = derived_function->NewInstance();
- Local<v8::Object> derived_instance2 = derived_function->NewInstance();
- Local<v8::Object> other_instance = other_function->NewInstance();
- derived_instance2->Set(v8_str("__proto__"), derived_instance);
- other_instance->Set(v8_str("__proto__"), derived_instance2);
+ Local<v8::Function> base_function =
+ base->GetFunction(env.local()).ToLocalChecked();
+ Local<v8::Function> derived_function =
+ derived->GetFunction(env.local()).ToLocalChecked();
+ Local<v8::Function> other_function =
+ other->GetFunction(env.local()).ToLocalChecked();
+
+ Local<v8::Object> base_instance =
+ base_function->NewInstance(env.local()).ToLocalChecked();
+ Local<v8::Object> derived_instance =
+ derived_function->NewInstance(env.local()).ToLocalChecked();
+ Local<v8::Object> derived_instance2 =
+ derived_function->NewInstance(env.local()).ToLocalChecked();
+ Local<v8::Object> other_instance =
+ other_function->NewInstance(env.local()).ToLocalChecked();
+ CHECK(
+ derived_instance2->Set(env.local(), v8_str("__proto__"), derived_instance)
+ .FromJust());
+ CHECK(other_instance->Set(env.local(), v8_str("__proto__"), derived_instance2)
+ .FromJust());
// base_instance is only an instance of base.
- CHECK(
- base_instance->Equals(base_instance->FindInstanceInPrototypeChain(base)));
+ CHECK(base_instance->Equals(env.local(),
+ base_instance->FindInstanceInPrototypeChain(base))
+ .FromJust());
CHECK(base_instance->FindInstanceInPrototypeChain(derived).IsEmpty());
CHECK(base_instance->FindInstanceInPrototypeChain(other).IsEmpty());
// derived_instance is an instance of base and derived.
- CHECK(derived_instance->Equals(
- derived_instance->FindInstanceInPrototypeChain(base)));
- CHECK(derived_instance->Equals(
- derived_instance->FindInstanceInPrototypeChain(derived)));
+ CHECK(derived_instance->Equals(env.local(),
+ derived_instance->FindInstanceInPrototypeChain(
+ base))
+ .FromJust());
+ CHECK(derived_instance->Equals(env.local(),
+ derived_instance->FindInstanceInPrototypeChain(
+ derived))
+ .FromJust());
CHECK(derived_instance->FindInstanceInPrototypeChain(other).IsEmpty());
// other_instance is an instance of other and its immediate
@@ -1329,11 +1427,17 @@ THREADED_TEST(FindInstanceInPrototypeChain) {
// but it comes after derived_instance2 in the prototype chain of
// other_instance.
CHECK(derived_instance2->Equals(
- other_instance->FindInstanceInPrototypeChain(base)));
- CHECK(derived_instance2->Equals(
- other_instance->FindInstanceInPrototypeChain(derived)));
+ env.local(),
+ other_instance->FindInstanceInPrototypeChain(base))
+ .FromJust());
+ CHECK(derived_instance2->Equals(env.local(),
+ other_instance->FindInstanceInPrototypeChain(
+ derived))
+ .FromJust());
CHECK(other_instance->Equals(
- other_instance->FindInstanceInPrototypeChain(other)));
+ env.local(),
+ other_instance->FindInstanceInPrototypeChain(other))
+ .FromJust());
}
@@ -1463,12 +1567,12 @@ THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
THREADED_TEST(IsNativeError) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> syntax_error = CompileRun(
+ v8::Local<Value> syntax_error = CompileRun(
"var out = 0; try { eval(\"#\"); } catch(x) { out = x; } out; ");
CHECK(syntax_error->IsNativeError());
- v8::Handle<Value> not_error = CompileRun("{a:42}");
+ v8::Local<Value> not_error = CompileRun("{a:42}");
CHECK(!not_error->IsNativeError());
- v8::Handle<Value> not_object = CompileRun("42");
+ v8::Local<Value> not_object = CompileRun("42");
CHECK(!not_object->IsNativeError());
}
@@ -1478,10 +1582,10 @@ THREADED_TEST(IsGeneratorFunctionOrObject) {
v8::HandleScope scope(env->GetIsolate());
CompileRun("function *gen() { yield 1; }\nfunction func() {}");
- v8::Handle<Value> gen = CompileRun("gen");
- v8::Handle<Value> genObj = CompileRun("gen()");
- v8::Handle<Value> object = CompileRun("{a:42}");
- v8::Handle<Value> func = CompileRun("func");
+ v8::Local<Value> gen = CompileRun("gen");
+ v8::Local<Value> genObj = CompileRun("gen()");
+ v8::Local<Value> object = CompileRun("{a:42}");
+ v8::Local<Value> func = CompileRun("func");
CHECK(gen->IsGeneratorFunction());
CHECK(gen->IsFunction());
@@ -1504,12 +1608,12 @@ THREADED_TEST(IsGeneratorFunctionOrObject) {
THREADED_TEST(ArgumentsObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> arguments_object =
+ v8::Local<Value> arguments_object =
CompileRun("var out = 0; (function(){ out = arguments; })(1,2,3); out;");
CHECK(arguments_object->IsArgumentsObject());
- v8::Handle<Value> array = CompileRun("[1,2,3]");
+ v8::Local<Value> array = CompileRun("[1,2,3]");
CHECK(!array->IsArgumentsObject());
- v8::Handle<Value> object = CompileRun("{a:42}");
+ v8::Local<Value> object = CompileRun("{a:42}");
CHECK(!object->IsArgumentsObject());
}
@@ -1517,10 +1621,10 @@ THREADED_TEST(ArgumentsObject) {
THREADED_TEST(IsMapOrSet) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> map = CompileRun("new Map()");
- v8::Handle<Value> set = CompileRun("new Set()");
- v8::Handle<Value> weak_map = CompileRun("new WeakMap()");
- v8::Handle<Value> weak_set = CompileRun("new WeakSet()");
+ v8::Local<Value> map = CompileRun("new Map()");
+ v8::Local<Value> set = CompileRun("new Set()");
+ v8::Local<Value> weak_map = CompileRun("new WeakMap()");
+ v8::Local<Value> weak_set = CompileRun("new WeakSet()");
CHECK(map->IsMap());
CHECK(set->IsSet());
CHECK(weak_map->IsWeakMap());
@@ -1542,7 +1646,7 @@ THREADED_TEST(IsMapOrSet) {
CHECK(!weak_set->IsSet());
CHECK(!weak_set->IsWeakMap());
- v8::Handle<Value> object = CompileRun("{a:42}");
+ v8::Local<Value> object = CompileRun("{a:42}");
CHECK(!object->IsMap());
CHECK(!object->IsSet());
CHECK(!object->IsWeakMap());
@@ -1553,20 +1657,20 @@ THREADED_TEST(IsMapOrSet) {
THREADED_TEST(StringObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> boxed_string = CompileRun("new String(\"test\")");
+ v8::Local<Value> boxed_string = CompileRun("new String(\"test\")");
CHECK(boxed_string->IsStringObject());
- v8::Handle<Value> unboxed_string = CompileRun("\"test\"");
+ v8::Local<Value> unboxed_string = CompileRun("\"test\"");
CHECK(!unboxed_string->IsStringObject());
- v8::Handle<Value> boxed_not_string = CompileRun("new Number(42)");
+ v8::Local<Value> boxed_not_string = CompileRun("new Number(42)");
CHECK(!boxed_not_string->IsStringObject());
- v8::Handle<Value> not_object = CompileRun("0");
+ v8::Local<Value> not_object = CompileRun("0");
CHECK(!not_object->IsStringObject());
- v8::Handle<v8::StringObject> as_boxed = boxed_string.As<v8::StringObject>();
+ v8::Local<v8::StringObject> as_boxed = boxed_string.As<v8::StringObject>();
CHECK(!as_boxed.IsEmpty());
Local<v8::String> the_string = as_boxed->ValueOf();
CHECK(!the_string.IsEmpty());
ExpectObject("\"test\"", the_string);
- v8::Handle<v8::Value> new_boxed_string = v8::StringObject::New(the_string);
+ v8::Local<v8::Value> new_boxed_string = v8::StringObject::New(the_string);
CHECK(new_boxed_string->IsStringObject());
as_boxed = new_boxed_string.As<v8::StringObject>();
the_string = as_boxed->ValueOf();
@@ -1578,28 +1682,28 @@ THREADED_TEST(StringObject) {
TEST(StringObjectDelete) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<Value> boxed_string = CompileRun("new String(\"test\")");
+ v8::Local<Value> boxed_string = CompileRun("new String(\"test\")");
CHECK(boxed_string->IsStringObject());
- v8::Handle<v8::Object> str_obj = boxed_string.As<v8::Object>();
- CHECK(!str_obj->Delete(2));
- CHECK(!str_obj->Delete(v8_num(2)));
+ v8::Local<v8::Object> str_obj = boxed_string.As<v8::Object>();
+ CHECK(!str_obj->Delete(context.local(), 2).FromJust());
+ CHECK(!str_obj->Delete(context.local(), v8_num(2)).FromJust());
}
THREADED_TEST(NumberObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> boxed_number = CompileRun("new Number(42)");
+ v8::Local<Value> boxed_number = CompileRun("new Number(42)");
CHECK(boxed_number->IsNumberObject());
- v8::Handle<Value> unboxed_number = CompileRun("42");
+ v8::Local<Value> unboxed_number = CompileRun("42");
CHECK(!unboxed_number->IsNumberObject());
- v8::Handle<Value> boxed_not_number = CompileRun("new Boolean(false)");
+ v8::Local<Value> boxed_not_number = CompileRun("new Boolean(false)");
CHECK(!boxed_not_number->IsNumberObject());
- v8::Handle<v8::NumberObject> as_boxed = boxed_number.As<v8::NumberObject>();
+ v8::Local<v8::NumberObject> as_boxed = boxed_number.As<v8::NumberObject>();
CHECK(!as_boxed.IsEmpty());
double the_number = as_boxed->ValueOf();
CHECK_EQ(42.0, the_number);
- v8::Handle<v8::Value> new_boxed_number =
+ v8::Local<v8::Value> new_boxed_number =
v8::NumberObject::New(env->GetIsolate(), 43);
CHECK(new_boxed_number->IsNumberObject());
as_boxed = new_boxed_number.As<v8::NumberObject>();
@@ -1611,19 +1715,20 @@ THREADED_TEST(NumberObject) {
THREADED_TEST(BooleanObject) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> boxed_boolean = CompileRun("new Boolean(true)");
+ v8::Local<Value> boxed_boolean = CompileRun("new Boolean(true)");
CHECK(boxed_boolean->IsBooleanObject());
- v8::Handle<Value> unboxed_boolean = CompileRun("true");
+ v8::Local<Value> unboxed_boolean = CompileRun("true");
CHECK(!unboxed_boolean->IsBooleanObject());
- v8::Handle<Value> boxed_not_boolean = CompileRun("new Number(42)");
+ v8::Local<Value> boxed_not_boolean = CompileRun("new Number(42)");
CHECK(!boxed_not_boolean->IsBooleanObject());
- v8::Handle<v8::BooleanObject> as_boxed =
- boxed_boolean.As<v8::BooleanObject>();
+ v8::Local<v8::BooleanObject> as_boxed = boxed_boolean.As<v8::BooleanObject>();
CHECK(!as_boxed.IsEmpty());
bool the_boolean = as_boxed->ValueOf();
CHECK_EQ(true, the_boolean);
- v8::Handle<v8::Value> boxed_true = v8::BooleanObject::New(true);
- v8::Handle<v8::Value> boxed_false = v8::BooleanObject::New(false);
+ v8::Local<v8::Value> boxed_true =
+ v8::BooleanObject::New(env->GetIsolate(), true);
+ v8::Local<v8::Value> boxed_false =
+ v8::BooleanObject::New(env->GetIsolate(), false);
CHECK(boxed_true->IsBooleanObject());
CHECK(boxed_false->IsBooleanObject());
as_boxed = boxed_true.As<v8::BooleanObject>();
@@ -1640,22 +1745,21 @@ THREADED_TEST(PrimitiveAndWrappedBooleans) {
Local<Value> primitive_false = Boolean::New(env->GetIsolate(), false);
CHECK(primitive_false->IsBoolean());
CHECK(!primitive_false->IsBooleanObject());
- CHECK(!primitive_false->BooleanValue());
+ CHECK(!primitive_false->BooleanValue(env.local()).FromJust());
CHECK(!primitive_false->IsTrue());
CHECK(primitive_false->IsFalse());
- Local<Value> false_value = BooleanObject::New(false);
+ Local<Value> false_value = BooleanObject::New(env->GetIsolate(), false);
CHECK(!false_value->IsBoolean());
CHECK(false_value->IsBooleanObject());
- CHECK(false_value->BooleanValue());
+ CHECK(false_value->BooleanValue(env.local()).FromJust());
CHECK(!false_value->IsTrue());
CHECK(!false_value->IsFalse());
Local<BooleanObject> false_boolean_object = false_value.As<BooleanObject>();
CHECK(!false_boolean_object->IsBoolean());
CHECK(false_boolean_object->IsBooleanObject());
- // TODO(svenpanne) Uncomment when BooleanObject::BooleanValue() is deleted.
- // CHECK(false_boolean_object->BooleanValue());
+ CHECK(false_boolean_object->BooleanValue(env.local()).FromJust());
CHECK(!false_boolean_object->ValueOf());
CHECK(!false_boolean_object->IsTrue());
CHECK(!false_boolean_object->IsFalse());
@@ -1663,22 +1767,21 @@ THREADED_TEST(PrimitiveAndWrappedBooleans) {
Local<Value> primitive_true = Boolean::New(env->GetIsolate(), true);
CHECK(primitive_true->IsBoolean());
CHECK(!primitive_true->IsBooleanObject());
- CHECK(primitive_true->BooleanValue());
+ CHECK(primitive_true->BooleanValue(env.local()).FromJust());
CHECK(primitive_true->IsTrue());
CHECK(!primitive_true->IsFalse());
- Local<Value> true_value = BooleanObject::New(true);
+ Local<Value> true_value = BooleanObject::New(env->GetIsolate(), true);
CHECK(!true_value->IsBoolean());
CHECK(true_value->IsBooleanObject());
- CHECK(true_value->BooleanValue());
+ CHECK(true_value->BooleanValue(env.local()).FromJust());
CHECK(!true_value->IsTrue());
CHECK(!true_value->IsFalse());
Local<BooleanObject> true_boolean_object = true_value.As<BooleanObject>();
CHECK(!true_boolean_object->IsBoolean());
CHECK(true_boolean_object->IsBooleanObject());
- // TODO(svenpanne) Uncomment when BooleanObject::BooleanValue() is deleted.
- // CHECK(true_boolean_object->BooleanValue());
+ CHECK(true_boolean_object->BooleanValue(env.local()).FromJust());
CHECK(true_boolean_object->ValueOf());
CHECK(!true_boolean_object->IsTrue());
CHECK(!true_boolean_object->IsFalse());
@@ -1690,7 +1793,7 @@ THREADED_TEST(Number) {
v8::HandleScope scope(env->GetIsolate());
double PI = 3.1415926;
Local<v8::Number> pi_obj = v8::Number::New(env->GetIsolate(), PI);
- CHECK_EQ(PI, pi_obj->NumberValue());
+ CHECK_EQ(PI, pi_obj->NumberValue(env.local()).FromJust());
}
@@ -1699,11 +1802,11 @@ THREADED_TEST(ToNumber) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<String> str = v8_str("3.1415926");
- CHECK_EQ(3.1415926, str->NumberValue());
- v8::Handle<v8::Boolean> t = v8::True(isolate);
- CHECK_EQ(1.0, t->NumberValue());
- v8::Handle<v8::Boolean> f = v8::False(isolate);
- CHECK_EQ(0.0, f->NumberValue());
+ CHECK_EQ(3.1415926, str->NumberValue(env.local()).FromJust());
+ v8::Local<v8::Boolean> t = v8::True(isolate);
+ CHECK_EQ(1.0, t->NumberValue(env.local()).FromJust());
+ v8::Local<v8::Boolean> f = v8::False(isolate);
+ CHECK_EQ(0.0, f->NumberValue(env.local()).FromJust());
}
@@ -1711,11 +1814,17 @@ THREADED_TEST(Date) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
double PI = 3.1415926;
- Local<Value> date = v8::Date::New(env->GetIsolate(), PI);
- CHECK_EQ(3.0, date->NumberValue());
- date.As<v8::Date>()->Set(v8_str("property"),
- v8::Integer::New(env->GetIsolate(), 42));
- CHECK_EQ(42, date.As<v8::Date>()->Get(v8_str("property"))->Int32Value());
+ Local<Value> date = v8::Date::New(env.local(), PI).ToLocalChecked();
+ CHECK_EQ(3.0, date->NumberValue(env.local()).FromJust());
+ CHECK(date.As<v8::Date>()
+ ->Set(env.local(), v8_str("property"),
+ v8::Integer::New(env->GetIsolate(), 42))
+ .FromJust());
+ CHECK_EQ(42, date.As<v8::Date>()
+ ->Get(env.local(), v8_str("property"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
@@ -1723,23 +1832,27 @@ THREADED_TEST(Boolean) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Boolean> t = v8::True(isolate);
+ v8::Local<v8::Boolean> t = v8::True(isolate);
CHECK(t->Value());
- v8::Handle<v8::Boolean> f = v8::False(isolate);
+ v8::Local<v8::Boolean> f = v8::False(isolate);
CHECK(!f->Value());
- v8::Handle<v8::Primitive> u = v8::Undefined(isolate);
- CHECK(!u->BooleanValue());
- v8::Handle<v8::Primitive> n = v8::Null(isolate);
- CHECK(!n->BooleanValue());
- v8::Handle<String> str1 = v8_str("");
- CHECK(!str1->BooleanValue());
- v8::Handle<String> str2 = v8_str("x");
- CHECK(str2->BooleanValue());
- CHECK(!v8::Number::New(isolate, 0)->BooleanValue());
- CHECK(v8::Number::New(isolate, -1)->BooleanValue());
- CHECK(v8::Number::New(isolate, 1)->BooleanValue());
- CHECK(v8::Number::New(isolate, 42)->BooleanValue());
- CHECK(!v8_compile("NaN")->Run()->BooleanValue());
+ v8::Local<v8::Primitive> u = v8::Undefined(isolate);
+ CHECK(!u->BooleanValue(env.local()).FromJust());
+ v8::Local<v8::Primitive> n = v8::Null(isolate);
+ CHECK(!n->BooleanValue(env.local()).FromJust());
+ v8::Local<String> str1 = v8_str("");
+ CHECK(!str1->BooleanValue(env.local()).FromJust());
+ v8::Local<String> str2 = v8_str("x");
+ CHECK(str2->BooleanValue(env.local()).FromJust());
+ CHECK(!v8::Number::New(isolate, 0)->BooleanValue(env.local()).FromJust());
+ CHECK(v8::Number::New(isolate, -1)->BooleanValue(env.local()).FromJust());
+ CHECK(v8::Number::New(isolate, 1)->BooleanValue(env.local()).FromJust());
+ CHECK(v8::Number::New(isolate, 42)->BooleanValue(env.local()).FromJust());
+ CHECK(!v8_compile("NaN")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
}
@@ -1759,19 +1872,19 @@ static void GetM(Local<String> name,
THREADED_TEST(GlobalPrototype) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> func_templ =
+ v8::Local<v8::FunctionTemplate> func_templ =
v8::FunctionTemplate::New(isolate);
func_templ->PrototypeTemplate()->Set(
isolate, "dummy", v8::FunctionTemplate::New(isolate, DummyCallHandler));
- v8::Handle<ObjectTemplate> templ = func_templ->InstanceTemplate();
+ v8::Local<ObjectTemplate> templ = func_templ->InstanceTemplate();
templ->Set(isolate, "x", v8_num(200));
templ->SetAccessor(v8_str("m"), GetM);
LocalContext env(0, templ);
- v8::Handle<Script> script(v8_compile("dummy()"));
- v8::Handle<Value> result(script->Run());
- CHECK_EQ(13.4, result->NumberValue());
- CHECK_EQ(200, v8_compile("x")->Run()->Int32Value());
- CHECK_EQ(876, v8_compile("m")->Run()->Int32Value());
+ v8::Local<Script> script(v8_compile("dummy()"));
+ v8::Local<Value> result(script->Run(env.local()).ToLocalChecked());
+ CHECK_EQ(13.4, result->NumberValue(env.local()).FromJust());
+ CHECK_EQ(200, v8_run_int32value(v8_compile("x")));
+ CHECK_EQ(876, v8_run_int32value(v8_compile("m")));
}
@@ -1779,29 +1892,54 @@ THREADED_TEST(ObjectTemplate) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
- v8::Local<v8::String> class_name =
- v8::String::NewFromUtf8(isolate, "the_class_name");
+ v8::Local<v8::String> class_name = v8_str("the_class_name");
fun->SetClassName(class_name);
Local<ObjectTemplate> templ1 = ObjectTemplate::New(isolate, fun);
templ1->Set(isolate, "x", v8_num(10));
templ1->Set(isolate, "y", v8_num(13));
LocalContext env;
- Local<v8::Object> instance1 = templ1->NewInstance();
+ Local<v8::Object> instance1 =
+ templ1->NewInstance(env.local()).ToLocalChecked();
CHECK(class_name->StrictEquals(instance1->GetConstructorName()));
- env->Global()->Set(v8_str("p"), instance1);
- CHECK(v8_compile("(p.x == 10)")->Run()->BooleanValue());
- CHECK(v8_compile("(p.y == 13)")->Run()->BooleanValue());
+ CHECK(env->Global()->Set(env.local(), v8_str("p"), instance1).FromJust());
+ CHECK(v8_compile("(p.x == 10)")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("(p.y == 13)")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
Local<v8::FunctionTemplate> fun2 = v8::FunctionTemplate::New(isolate);
fun2->PrototypeTemplate()->Set(isolate, "nirk", v8_num(123));
Local<ObjectTemplate> templ2 = fun2->InstanceTemplate();
templ2->Set(isolate, "a", v8_num(12));
templ2->Set(isolate, "b", templ1);
- Local<v8::Object> instance2 = templ2->NewInstance();
- env->Global()->Set(v8_str("q"), instance2);
- CHECK(v8_compile("(q.nirk == 123)")->Run()->BooleanValue());
- CHECK(v8_compile("(q.a == 12)")->Run()->BooleanValue());
- CHECK(v8_compile("(q.b.x == 10)")->Run()->BooleanValue());
- CHECK(v8_compile("(q.b.y == 13)")->Run()->BooleanValue());
+ Local<v8::Object> instance2 =
+ templ2->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("q"), instance2).FromJust());
+ CHECK(v8_compile("(q.nirk == 123)")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("(q.a == 12)")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("(q.b.x == 10)")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("(q.b.y == 13)")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
}
@@ -1821,7 +1959,7 @@ static void GetKnurd(Local<String> property,
THREADED_TEST(DescriptorInheritance) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> super = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> super = v8::FunctionTemplate::New(isolate);
super->PrototypeTemplate()->Set(isolate, "flabby",
v8::FunctionTemplate::New(isolate,
GetFlabby));
@@ -1829,50 +1967,121 @@ THREADED_TEST(DescriptorInheritance) {
super->InstanceTemplate()->SetAccessor(v8_str("knurd"), GetKnurd);
- v8::Handle<v8::FunctionTemplate> base1 = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> base1 = v8::FunctionTemplate::New(isolate);
base1->Inherit(super);
base1->PrototypeTemplate()->Set(isolate, "v1", v8_num(20.1));
- v8::Handle<v8::FunctionTemplate> base2 = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> base2 = v8::FunctionTemplate::New(isolate);
base2->Inherit(super);
base2->PrototypeTemplate()->Set(isolate, "v2", v8_num(10.1));
LocalContext env;
- env->Global()->Set(v8_str("s"), super->GetFunction());
- env->Global()->Set(v8_str("base1"), base1->GetFunction());
- env->Global()->Set(v8_str("base2"), base2->GetFunction());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("s"),
+ super->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("base1"),
+ base1->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("base2"),
+ base2->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
// Checks right __proto__ chain.
- CHECK(CompileRun("base1.prototype.__proto__ == s.prototype")->BooleanValue());
- CHECK(CompileRun("base2.prototype.__proto__ == s.prototype")->BooleanValue());
+ CHECK(CompileRun("base1.prototype.__proto__ == s.prototype")
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(CompileRun("base2.prototype.__proto__ == s.prototype")
+ ->BooleanValue(env.local())
+ .FromJust());
- CHECK(v8_compile("s.prototype.PI == 3.14")->Run()->BooleanValue());
+ CHECK(v8_compile("s.prototype.PI == 3.14")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
// Instance accessor should not be visible on function object or its prototype
- CHECK(CompileRun("s.knurd == undefined")->BooleanValue());
- CHECK(CompileRun("s.prototype.knurd == undefined")->BooleanValue());
- CHECK(CompileRun("base1.prototype.knurd == undefined")->BooleanValue());
-
- env->Global()->Set(v8_str("obj"),
- base1->GetFunction()->NewInstance());
- CHECK_EQ(17.2, v8_compile("obj.flabby()")->Run()->NumberValue());
- CHECK(v8_compile("'flabby' in obj")->Run()->BooleanValue());
- CHECK_EQ(15.2, v8_compile("obj.knurd")->Run()->NumberValue());
- CHECK(v8_compile("'knurd' in obj")->Run()->BooleanValue());
- CHECK_EQ(20.1, v8_compile("obj.v1")->Run()->NumberValue());
-
- env->Global()->Set(v8_str("obj2"),
- base2->GetFunction()->NewInstance());
- CHECK_EQ(17.2, v8_compile("obj2.flabby()")->Run()->NumberValue());
- CHECK(v8_compile("'flabby' in obj2")->Run()->BooleanValue());
- CHECK_EQ(15.2, v8_compile("obj2.knurd")->Run()->NumberValue());
- CHECK(v8_compile("'knurd' in obj2")->Run()->BooleanValue());
- CHECK_EQ(10.1, v8_compile("obj2.v2")->Run()->NumberValue());
+ CHECK(
+ CompileRun("s.knurd == undefined")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("s.prototype.knurd == undefined")
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK(CompileRun("base1.prototype.knurd == undefined")
+ ->BooleanValue(env.local())
+ .FromJust());
+
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("obj"), base1->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(17.2, v8_compile("obj.flabby()")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->NumberValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("'flabby' in obj")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(15.2, v8_compile("obj.knurd")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->NumberValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("'knurd' in obj")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(20.1, v8_compile("obj.v1")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->NumberValue(env.local())
+ .FromJust());
+
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("obj2"), base2->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(17.2, v8_compile("obj2.flabby()")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->NumberValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("'flabby' in obj2")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(15.2, v8_compile("obj2.knurd")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->NumberValue(env.local())
+ .FromJust());
+ CHECK(v8_compile("'knurd' in obj2")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
+ CHECK_EQ(10.1, v8_compile("obj2.v2")
+ ->Run(env.local())
+ .ToLocalChecked()
+ ->NumberValue(env.local())
+ .FromJust());
// base1 and base2 cannot cross reference to each's prototype
- CHECK(v8_compile("obj.v2")->Run()->IsUndefined());
- CHECK(v8_compile("obj2.v1")->Run()->IsUndefined());
+ CHECK(v8_compile("obj.v2")->Run(env.local()).ToLocalChecked()->IsUndefined());
+ CHECK(
+ v8_compile("obj2.v1")->Run(env.local()).ToLocalChecked()->IsUndefined());
}
@@ -1880,15 +2089,18 @@ THREADED_TEST(DescriptorInheritance) {
void SimpleAccessorGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- Handle<Object> self = Handle<Object>::Cast(info.This());
- info.GetReturnValue().Set(
- self->Get(String::Concat(v8_str("accessor_"), name)));
+ Local<Object> self = Local<Object>::Cast(info.This());
+ info.GetReturnValue().Set(self->Get(info.GetIsolate()->GetCurrentContext(),
+ String::Concat(v8_str("accessor_"), name))
+ .ToLocalChecked());
}
void SimpleAccessorSetter(Local<String> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- Handle<Object> self = Handle<Object>::Cast(info.This());
- self->Set(String::Concat(v8_str("accessor_"), name), value);
+ Local<Object> self = Local<Object>::Cast(info.This());
+ CHECK(self->Set(info.GetIsolate()->GetCurrentContext(),
+ String::Concat(v8_str("accessor_"), name), value)
+ .FromJust());
}
void SymbolAccessorGetter(Local<Name> name,
@@ -1928,7 +2140,7 @@ THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
v8::HandleScope scope(isolate);
LocalContext env;
v8::Local<v8::Value> res = CompileRun("var a = []; a;");
- i::Handle<i::JSObject> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
+ i::Handle<i::JSReceiver> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
CHECK(a->map()->instance_descriptors()->IsFixedArray());
CHECK_GT(i::FixedArray::cast(a->map()->instance_descriptors())->length(), 0);
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
@@ -1944,59 +2156,81 @@ THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
THREADED_TEST(UndefinedIsNotEnumerable) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<Value> result = CompileRun("this.propertyIsEnumerable(undefined)");
+ v8::Local<Value> result = CompileRun("this.propertyIsEnumerable(undefined)");
CHECK(result->IsFalse());
}
-v8::Handle<Script> call_recursively_script;
+v8::Local<Script> call_recursively_script;
static const int kTargetRecursionDepth = 150; // near maximum
static void CallScriptRecursivelyCall(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
- int depth = args.This()->Get(v8_str("depth"))->Int32Value();
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ int depth = args.This()
+ ->Get(context, v8_str("depth"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust();
if (depth == kTargetRecursionDepth) return;
- args.This()->Set(v8_str("depth"),
- v8::Integer::New(args.GetIsolate(), depth + 1));
- args.GetReturnValue().Set(call_recursively_script->Run());
+ CHECK(args.This()
+ ->Set(context, v8_str("depth"),
+ v8::Integer::New(args.GetIsolate(), depth + 1))
+ .FromJust());
+ args.GetReturnValue().Set(
+ call_recursively_script->Run(context).ToLocalChecked());
}
static void CallFunctionRecursivelyCall(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
- int depth = args.This()->Get(v8_str("depth"))->Int32Value();
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ int depth = args.This()
+ ->Get(context, v8_str("depth"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust();
if (depth == kTargetRecursionDepth) {
printf("[depth = %d]\n", depth);
return;
}
- args.This()->Set(v8_str("depth"),
- v8::Integer::New(args.GetIsolate(), depth + 1));
- v8::Handle<Value> function =
- args.This()->Get(v8_str("callFunctionRecursively"));
- args.GetReturnValue().Set(
- function.As<Function>()->Call(args.This(), 0, NULL));
+ CHECK(args.This()
+ ->Set(context, v8_str("depth"),
+ v8::Integer::New(args.GetIsolate(), depth + 1))
+ .FromJust());
+ v8::Local<Value> function =
+ args.This()
+ ->Get(context, v8_str("callFunctionRecursively"))
+ .ToLocalChecked();
+ args.GetReturnValue().Set(function.As<Function>()
+ ->Call(context, args.This(), 0, NULL)
+ .ToLocalChecked());
}
THREADED_TEST(DeepCrossLanguageRecursion) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
global->Set(v8_str("callScriptRecursively"),
v8::FunctionTemplate::New(isolate, CallScriptRecursivelyCall));
global->Set(v8_str("callFunctionRecursively"),
v8::FunctionTemplate::New(isolate, CallFunctionRecursivelyCall));
LocalContext env(NULL, global);
- env->Global()->Set(v8_str("depth"), v8::Integer::New(isolate, 0));
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("depth"), v8::Integer::New(isolate, 0))
+ .FromJust());
call_recursively_script = v8_compile("callScriptRecursively()");
- call_recursively_script->Run();
- call_recursively_script = v8::Handle<Script>();
+ call_recursively_script->Run(env.local()).ToLocalChecked();
+ call_recursively_script = v8::Local<Script>();
- env->Global()->Set(v8_str("depth"), v8::Integer::New(isolate, 0));
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("depth"), v8::Integer::New(isolate, 0))
+ .FromJust());
CompileRun("callFunctionRecursively()");
}
@@ -2022,17 +2256,20 @@ static void ThrowingPropertyHandlerSet(
THREADED_TEST(CallbackExceptionRegression) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
ThrowingPropertyHandlerGet, ThrowingPropertyHandlerSet));
LocalContext env;
- env->Global()->Set(v8_str("obj"), obj->NewInstance());
- v8::Handle<Value> otto =
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("obj"),
+ obj->NewInstance(env.local()).ToLocalChecked())
+ .FromJust());
+ v8::Local<Value> otto =
CompileRun("try { with (obj) { otto; } } catch (e) { e; }");
- CHECK(v8_str("otto")->Equals(otto));
- v8::Handle<Value> netto =
+ CHECK(v8_str("otto")->Equals(env.local(), otto).FromJust());
+ v8::Local<Value> netto =
CompileRun("try { with (obj) { netto = 4; } } catch (e) { e; }");
- CHECK(v8_str("netto")->Equals(netto));
+ CHECK(v8_str("netto")->Equals(env.local(), netto).FromJust());
}
@@ -2042,9 +2279,12 @@ THREADED_TEST(FunctionPrototype) {
Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New(isolate);
Foo->PrototypeTemplate()->Set(v8_str("plak"), v8_num(321));
LocalContext env;
- env->Global()->Set(v8_str("Foo"), Foo->GetFunction());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("Foo"),
+ Foo->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
Local<Script> script = v8_compile("Foo.prototype.plak");
- CHECK_EQ(script->Run()->Int32Value(), 321);
+ CHECK_EQ(v8_run_int32value(script), 321);
}
@@ -2056,11 +2296,14 @@ THREADED_TEST(InternalFields) {
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
instance_templ->SetInternalFieldCount(1);
- Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+ Local<v8::Object> obj = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
CHECK_EQ(1, obj->InternalFieldCount());
CHECK(obj->GetInternalField(0)->IsUndefined());
obj->SetInternalField(0, v8_num(17));
- CHECK_EQ(17, obj->GetInternalField(0)->Int32Value());
+ CHECK_EQ(17, obj->GetInternalField(0)->Int32Value(env.local()).FromJust());
}
@@ -2070,12 +2313,12 @@ THREADED_TEST(GlobalObjectInternalFields) {
Local<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(1);
LocalContext env(NULL, global_template);
- v8::Handle<v8::Object> global_proxy = env->Global();
- v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
+ v8::Local<v8::Object> global_proxy = env->Global();
+ v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(1, global->InternalFieldCount());
CHECK(global->GetInternalField(0)->IsUndefined());
global->SetInternalField(0, v8_num(17));
- CHECK_EQ(17, global->GetInternalField(0)->Int32Value());
+ CHECK_EQ(17, global->GetInternalField(0)->Int32Value(env.local()).FromJust());
}
@@ -2084,12 +2327,12 @@ THREADED_TEST(GlobalObjectHasRealIndexedProperty) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Object> global = env->Global();
- global->Set(0, v8::String::NewFromUtf8(CcTest::isolate(), "value"));
- CHECK(global->HasRealIndexedProperty(0));
+ CHECK(global->Set(env.local(), 0, v8_str("value")).FromJust());
+ CHECK(global->HasRealIndexedProperty(env.local(), 0).FromJust());
}
-static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
+static void CheckAlignedPointerInInternalField(Local<v8::Object> obj,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
obj->SetAlignedPointerInInternalField(0, value);
@@ -2106,7 +2349,10 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
instance_templ->SetInternalFieldCount(1);
- Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+ Local<v8::Object> obj = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
CHECK_EQ(1, obj->InternalFieldCount());
CheckAlignedPointerInInternalField(obj, NULL);
@@ -2169,7 +2415,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
static void CheckEmbedderData(LocalContext* env, int index,
- v8::Handle<Value> data) {
+ v8::Local<Value> data) {
(*env)->SetEmbedderData(index, data);
CHECK((*env)->GetEmbedderData(index)->StrictEquals(data));
}
@@ -2180,25 +2426,13 @@ THREADED_TEST(EmbedderData) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- CheckEmbedderData(
- &env, 3, v8::String::NewFromUtf8(isolate, "The quick brown fox jumps"));
- CheckEmbedderData(&env, 2,
- v8::String::NewFromUtf8(isolate, "over the lazy dog."));
+ CheckEmbedderData(&env, 3, v8_str("The quick brown fox jumps"));
+ CheckEmbedderData(&env, 2, v8_str("over the lazy dog."));
CheckEmbedderData(&env, 1, v8::Number::New(isolate, 1.2345));
CheckEmbedderData(&env, 0, v8::Boolean::New(isolate, true));
}
-THREADED_TEST(GetIsolate) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<v8::Object> obj = v8::Object::New(isolate);
- CHECK_EQ(isolate, obj->GetIsolate());
- CHECK_EQ(isolate, CcTest::global()->GetIsolate());
-}
-
-
THREADED_TEST(IdentityHash) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -2249,9 +2483,11 @@ void GlobalProxyIdentityHash(bool set_in_js) {
v8::Isolate* isolate = env->GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
v8::HandleScope scope(isolate);
- Handle<Object> global_proxy = env->Global();
+ Local<Object> global_proxy = env->Global();
i::Handle<i::Object> i_global_proxy = v8::Utils::OpenHandle(*global_proxy);
- env->Global()->Set(v8_str("global"), global_proxy);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("global"), global_proxy)
+ .FromJust());
i::Handle<i::Object> original_hash;
if (set_in_js) {
CompileRun("var m = new Set(); m.add(global);");
@@ -2268,7 +2504,7 @@ void GlobalProxyIdentityHash(bool set_in_js) {
CHECK_EQ(hash1, hash2);
{
// Re-attach global proxy to a new context, hash should stay the same.
- LocalContext env2(NULL, Handle<ObjectTemplate>(), global_proxy);
+ LocalContext env2(NULL, Local<ObjectTemplate>(), global_proxy);
int hash3 = global_proxy->GetIdentityHash();
CHECK_EQ(hash1, hash3);
}
@@ -2297,7 +2533,7 @@ TEST(SymbolIdentityHash) {
}
{
- v8::Handle<v8::Symbol> js_symbol =
+ v8::Local<v8::Symbol> js_symbol =
CompileRun("Symbol('foo')").As<v8::Symbol>();
int hash = js_symbol->GetIdentityHash();
int hash1 = js_symbol->GetIdentityHash();
@@ -2314,7 +2550,7 @@ TEST(StringIdentityHash) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- Local<v8::String> str = v8::String::NewFromUtf8(isolate, "str1");
+ Local<v8::String> str = v8_str("str1");
int hash = str->GetIdentityHash();
int hash1 = str->GetIdentityHash();
CHECK_EQ(hash, hash1);
@@ -2322,7 +2558,7 @@ TEST(StringIdentityHash) {
int hash3 = str->GetIdentityHash();
CHECK_EQ(hash, hash3);
- Local<v8::String> str2 = v8::String::NewFromUtf8(isolate, "str1");
+ Local<v8::String> str2 = v8_str("str1");
int hash4 = str2->GetIdentityHash();
CHECK_EQ(hash, hash4);
}
@@ -2345,92 +2581,145 @@ THREADED_TEST(SymbolProperties) {
CHECK(sym2->IsSymbol());
CHECK(!obj->IsSymbol());
- CHECK(sym1->Equals(sym1));
- CHECK(sym2->Equals(sym2));
- CHECK(!sym1->Equals(sym2));
- CHECK(!sym2->Equals(sym1));
+ CHECK(sym1->Equals(env.local(), sym1).FromJust());
+ CHECK(sym2->Equals(env.local(), sym2).FromJust());
+ CHECK(!sym1->Equals(env.local(), sym2).FromJust());
+ CHECK(!sym2->Equals(env.local(), sym1).FromJust());
CHECK(sym1->StrictEquals(sym1));
CHECK(sym2->StrictEquals(sym2));
CHECK(!sym1->StrictEquals(sym2));
CHECK(!sym2->StrictEquals(sym1));
- CHECK(sym2->Name()->Equals(v8_str("my-symbol")));
+ CHECK(sym2->Name()->Equals(env.local(), v8_str("my-symbol")).FromJust());
v8::Local<v8::Value> sym_val = sym2;
CHECK(sym_val->IsSymbol());
- CHECK(sym_val->Equals(sym2));
+ CHECK(sym_val->Equals(env.local(), sym2).FromJust());
CHECK(sym_val->StrictEquals(sym2));
- CHECK(v8::Symbol::Cast(*sym_val)->Equals(sym2));
+ CHECK(v8::Symbol::Cast(*sym_val)->Equals(env.local(), sym2).FromJust());
v8::Local<v8::Value> sym_obj = v8::SymbolObject::New(isolate, sym2);
CHECK(sym_obj->IsSymbolObject());
CHECK(!sym2->IsSymbolObject());
CHECK(!obj->IsSymbolObject());
- CHECK(sym_obj->Equals(sym2));
+ CHECK(sym_obj->Equals(env.local(), sym2).FromJust());
CHECK(!sym_obj->StrictEquals(sym2));
- CHECK(v8::SymbolObject::Cast(*sym_obj)->Equals(sym_obj));
- CHECK(v8::SymbolObject::Cast(*sym_obj)->ValueOf()->Equals(sym2));
+ CHECK(v8::SymbolObject::Cast(*sym_obj)
+ ->Equals(env.local(), sym_obj)
+ .FromJust());
+ CHECK(v8::SymbolObject::Cast(*sym_obj)
+ ->ValueOf()
+ ->Equals(env.local(), sym2)
+ .FromJust());
// Make sure delete of a non-existent symbol property works.
- CHECK(obj->Delete(sym1));
- CHECK(!obj->Has(sym1));
-
- CHECK(obj->Set(sym1, v8::Integer::New(isolate, 1503)));
- CHECK(obj->Has(sym1));
- CHECK_EQ(1503, obj->Get(sym1)->Int32Value());
- CHECK(obj->Set(sym1, v8::Integer::New(isolate, 2002)));
- CHECK(obj->Has(sym1));
- CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK_EQ(v8::None, obj->GetPropertyAttributes(sym1));
-
- CHECK_EQ(0u, obj->GetOwnPropertyNames()->Length());
- unsigned num_props = obj->GetPropertyNames()->Length();
- CHECK(obj->Set(v8::String::NewFromUtf8(isolate, "bla"),
- v8::Integer::New(isolate, 20)));
- CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
- CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
+ CHECK(obj->Delete(env.local(), sym1).FromJust());
+ CHECK(!obj->Has(env.local(), sym1).FromJust());
+
+ CHECK(
+ obj->Set(env.local(), sym1, v8::Integer::New(isolate, 1503)).FromJust());
+ CHECK(obj->Has(env.local(), sym1).FromJust());
+ CHECK_EQ(1503, obj->Get(env.local(), sym1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(
+ obj->Set(env.local(), sym1, v8::Integer::New(isolate, 2002)).FromJust());
+ CHECK(obj->Has(env.local(), sym1).FromJust());
+ CHECK_EQ(2002, obj->Get(env.local(), sym1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(v8::None, obj->GetPropertyAttributes(env.local(), sym1).FromJust());
+
+ CHECK_EQ(0u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+ unsigned num_props =
+ obj->GetPropertyNames(env.local()).ToLocalChecked()->Length();
+ CHECK(obj->Set(env.local(), v8_str("bla"), v8::Integer::New(isolate, 20))
+ .FromJust());
+ CHECK_EQ(1u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+ CHECK_EQ(num_props + 1,
+ obj->GetPropertyNames(env.local()).ToLocalChecked()->Length());
CcTest::heap()->CollectAllGarbage();
- CHECK(obj->SetAccessor(sym3, SymbolAccessorGetter, SymbolAccessorSetter));
- CHECK(obj->Get(sym3)->IsUndefined());
- CHECK(obj->Set(sym3, v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))
- ->Equals(v8::Integer::New(isolate, 42)));
+ CHECK(obj->SetAccessor(env.local(), sym3, SymbolAccessorGetter,
+ SymbolAccessorSetter)
+ .FromJust());
+ CHECK(obj->Get(env.local(), sym3).ToLocalChecked()->IsUndefined());
+ CHECK(obj->Set(env.local(), sym3, v8::Integer::New(isolate, 42)).FromJust());
+ CHECK(obj->Get(env.local(), sym3)
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 42))
+ .FromJust());
+ CHECK(obj->Get(env.local(), v8_str("accessor_sym3"))
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 42))
+ .FromJust());
// Add another property and delete it afterwards to force the object in
// slow case.
- CHECK(obj->Set(sym2, v8::Integer::New(isolate, 2008)));
- CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK_EQ(2008, obj->Get(sym2)->Int32Value());
- CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK_EQ(2u, obj->GetOwnPropertyNames()->Length());
-
- CHECK(obj->Has(sym1));
- CHECK(obj->Has(sym2));
- CHECK(obj->Has(sym3));
- CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
- CHECK(obj->Delete(sym2));
- CHECK(obj->Has(sym1));
- CHECK(!obj->Has(sym2));
- CHECK(obj->Has(sym3));
- CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
- CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
- CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))
- ->Equals(v8::Integer::New(isolate, 42)));
- CHECK_EQ(2u, obj->GetOwnPropertyNames()->Length());
+ CHECK(
+ obj->Set(env.local(), sym2, v8::Integer::New(isolate, 2008)).FromJust());
+ CHECK_EQ(2002, obj->Get(env.local(), sym1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2008, obj->Get(env.local(), sym2)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2002, obj->Get(env.local(), sym1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+
+ CHECK(obj->Has(env.local(), sym1).FromJust());
+ CHECK(obj->Has(env.local(), sym2).FromJust());
+ CHECK(obj->Has(env.local(), sym3).FromJust());
+ CHECK(obj->Has(env.local(), v8_str("accessor_sym3")).FromJust());
+ CHECK(obj->Delete(env.local(), sym2).FromJust());
+ CHECK(obj->Has(env.local(), sym1).FromJust());
+ CHECK(!obj->Has(env.local(), sym2).FromJust());
+ CHECK(obj->Has(env.local(), sym3).FromJust());
+ CHECK(obj->Has(env.local(), v8_str("accessor_sym3")).FromJust());
+ CHECK_EQ(2002, obj->Get(env.local(), sym1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(obj->Get(env.local(), sym3)
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 42))
+ .FromJust());
+ CHECK(obj->Get(env.local(), v8_str("accessor_sym3"))
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 42))
+ .FromJust());
+ CHECK_EQ(2u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
// Symbol properties are inherited.
v8::Local<v8::Object> child = v8::Object::New(isolate);
- child->SetPrototype(obj);
- CHECK(child->Has(sym1));
- CHECK_EQ(2002, child->Get(sym1)->Int32Value());
- CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
- CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))
- ->Equals(v8::Integer::New(isolate, 42)));
- CHECK_EQ(0u, child->GetOwnPropertyNames()->Length());
+ CHECK(child->SetPrototype(env.local(), obj).FromJust());
+ CHECK(child->Has(env.local(), sym1).FromJust());
+ CHECK_EQ(2002, child->Get(env.local(), sym1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(obj->Get(env.local(), sym3)
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 42))
+ .FromJust());
+ CHECK(obj->Get(env.local(), v8_str("accessor_sym3"))
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 42))
+ .FromJust());
+ CHECK_EQ(0u,
+ child->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
}
@@ -2442,9 +2731,110 @@ THREADED_TEST(SymbolTemplateProperties) {
v8::Local<v8::Name> name = v8::Symbol::New(isolate);
CHECK(!name.IsEmpty());
foo->PrototypeTemplate()->Set(name, v8::FunctionTemplate::New(isolate));
- v8::Local<v8::Object> new_instance = foo->InstanceTemplate()->NewInstance();
+ v8::Local<v8::Object> new_instance =
+ foo->InstanceTemplate()->NewInstance(env.local()).ToLocalChecked();
CHECK(!new_instance.IsEmpty());
- CHECK(new_instance->Has(name));
+ CHECK(new_instance->Has(env.local(), name).FromJust());
+}
+
+
+THREADED_TEST(PrivatePropertiesOnProxies) {
+ i::FLAG_harmony_proxies = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> target = CompileRun("({})").As<v8::Object>();
+ v8::Local<v8::Object> handler = CompileRun("({})").As<v8::Object>();
+
+ v8::Local<v8::Proxy> proxy =
+ v8::Proxy::New(env.local(), target, handler).ToLocalChecked();
+
+ v8::Local<v8::Private> priv1 = v8::Private::New(isolate);
+ v8::Local<v8::Private> priv2 =
+ v8::Private::New(isolate, v8_str("my-private"));
+
+ CcTest::heap()->CollectAllGarbage();
+
+ CHECK(priv2->Name()
+ ->Equals(env.local(),
+ v8::String::NewFromUtf8(isolate, "my-private",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust());
+
+ // Make sure delete of a non-existent private symbol property works.
+ proxy->DeletePrivate(env.local(), priv1).FromJust();
+ CHECK(!proxy->HasPrivate(env.local(), priv1).FromJust());
+
+ CHECK(proxy->SetPrivate(env.local(), priv1, v8::Integer::New(isolate, 1503))
+ .FromJust());
+ CHECK(proxy->HasPrivate(env.local(), priv1).FromJust());
+ CHECK_EQ(1503, proxy->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(proxy->SetPrivate(env.local(), priv1, v8::Integer::New(isolate, 2002))
+ .FromJust());
+ CHECK(proxy->HasPrivate(env.local(), priv1).FromJust());
+ CHECK_EQ(2002, proxy->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+
+ CHECK_EQ(0u,
+ proxy->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+ unsigned num_props =
+ proxy->GetPropertyNames(env.local()).ToLocalChecked()->Length();
+ CHECK(proxy->Set(env.local(), v8::String::NewFromUtf8(
+ isolate, "bla", v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Integer::New(isolate, 20))
+ .FromJust());
+ CHECK_EQ(1u,
+ proxy->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+ CHECK_EQ(num_props + 1,
+ proxy->GetPropertyNames(env.local()).ToLocalChecked()->Length());
+
+ CcTest::heap()->CollectAllGarbage();
+
+ // Add another property and delete it afterwards to force the object in
+ // slow case.
+ CHECK(proxy->SetPrivate(env.local(), priv2, v8::Integer::New(isolate, 2008))
+ .FromJust());
+ CHECK_EQ(2002, proxy->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2008, proxy->GetPrivate(env.local(), priv2)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2002, proxy->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(1u,
+ proxy->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+
+ CHECK(proxy->HasPrivate(env.local(), priv1).FromJust());
+ CHECK(proxy->HasPrivate(env.local(), priv2).FromJust());
+ CHECK(proxy->DeletePrivate(env.local(), priv2).FromJust());
+ CHECK(proxy->HasPrivate(env.local(), priv1).FromJust());
+ CHECK(!proxy->HasPrivate(env.local(), priv2).FromJust());
+ CHECK_EQ(2002, proxy->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(1u,
+ proxy->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+
+ // Private properties are not inherited (for the time being).
+ v8::Local<v8::Object> child = v8::Object::New(isolate);
+ CHECK(child->SetPrototype(env.local(), proxy).FromJust());
+ CHECK(!child->HasPrivate(env.local(), priv1).FromJust());
+ CHECK_EQ(0u,
+ child->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
}
@@ -2460,7 +2850,12 @@ THREADED_TEST(PrivateProperties) {
CcTest::heap()->CollectAllGarbage();
- CHECK(priv2->Name()->Equals(v8::String::NewFromUtf8(isolate, "my-private")));
+ CHECK(priv2->Name()
+ ->Equals(env.local(),
+ v8::String::NewFromUtf8(isolate, "my-private",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust());
// Make sure delete of a non-existent private symbol property works.
obj->DeletePrivate(env.local(), priv1).FromJust();
@@ -2469,20 +2864,31 @@ THREADED_TEST(PrivateProperties) {
CHECK(obj->SetPrivate(env.local(), priv1, v8::Integer::New(isolate, 1503))
.FromJust());
CHECK(obj->HasPrivate(env.local(), priv1).FromJust());
- CHECK_EQ(1503,
- obj->GetPrivate(env.local(), priv1).ToLocalChecked()->Int32Value());
+ CHECK_EQ(1503, obj->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK(obj->SetPrivate(env.local(), priv1, v8::Integer::New(isolate, 2002))
.FromJust());
CHECK(obj->HasPrivate(env.local(), priv1).FromJust());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), priv1).ToLocalChecked()->Int32Value());
-
- CHECK_EQ(0u, obj->GetOwnPropertyNames()->Length());
- unsigned num_props = obj->GetPropertyNames()->Length();
- CHECK(obj->Set(v8::String::NewFromUtf8(isolate, "bla"),
- v8::Integer::New(isolate, 20)));
- CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
- CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+
+ CHECK_EQ(0u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+ unsigned num_props =
+ obj->GetPropertyNames(env.local()).ToLocalChecked()->Length();
+ CHECK(obj->Set(env.local(), v8::String::NewFromUtf8(
+ isolate, "bla", v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Integer::New(isolate, 20))
+ .FromJust());
+ CHECK_EQ(1u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
+ CHECK_EQ(num_props + 1,
+ obj->GetPropertyNames(env.local()).ToLocalChecked()->Length());
CcTest::heap()->CollectAllGarbage();
@@ -2490,28 +2896,39 @@ THREADED_TEST(PrivateProperties) {
// slow case.
CHECK(obj->SetPrivate(env.local(), priv2, v8::Integer::New(isolate, 2008))
.FromJust());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), priv1).ToLocalChecked()->Int32Value());
- CHECK_EQ(2008,
- obj->GetPrivate(env.local(), priv2).ToLocalChecked()->Int32Value());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), priv1).ToLocalChecked()->Int32Value());
- CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2008, obj->GetPrivate(env.local(), priv2)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(1u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
CHECK(obj->HasPrivate(env.local(), priv1).FromJust());
CHECK(obj->HasPrivate(env.local(), priv2).FromJust());
CHECK(obj->DeletePrivate(env.local(), priv2).FromJust());
CHECK(obj->HasPrivate(env.local(), priv1).FromJust());
CHECK(!obj->HasPrivate(env.local(), priv2).FromJust());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), priv1).ToLocalChecked()->Int32Value());
- CHECK_EQ(1u, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), priv1)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(1u,
+ obj->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
// Private properties are not inherited (for the time being).
v8::Local<v8::Object> child = v8::Object::New(isolate);
- child->SetPrototype(obj);
+ CHECK(child->SetPrototype(env.local(), obj).FromJust());
CHECK(!child->HasPrivate(env.local(), priv1).FromJust());
- CHECK_EQ(0u, child->GetOwnPropertyNames()->Length());
+ CHECK_EQ(0u,
+ child->GetOwnPropertyNames(env.local()).ToLocalChecked()->Length());
}
@@ -2534,7 +2951,8 @@ THREADED_TEST(GlobalSymbols) {
CHECK(!sym->SameValue(glob));
CompileRun("var sym2 = Symbol.for('my-symbol')");
- v8::Local<Value> sym2 = env->Global()->Get(v8_str("sym2"));
+ v8::Local<Value> sym2 =
+ env->Global()->Get(env.local(), v8_str("sym2")).ToLocalChecked();
CHECK(sym2->SameValue(glob));
CHECK(!sym2->SameValue(glob_api));
}
@@ -2549,7 +2967,8 @@ static void CheckWellKnownSymbol(v8::Local<v8::Symbol>(*getter)(v8::Isolate*),
v8::Local<v8::Symbol> symbol = getter(isolate);
std::string script = std::string("var sym = ") + name;
CompileRun(script.c_str());
- v8::Local<Value> value = env->Global()->Get(v8_str("sym"));
+ v8::Local<Value> value =
+ env->Global()->Get(env.local(), v8_str("sym")).ToLocalChecked();
CHECK(!value.IsEmpty());
CHECK(!symbol.IsEmpty());
@@ -2582,8 +3001,9 @@ THREADED_TEST(GlobalPrivates) {
CHECK(!obj->HasPrivate(env.local(), priv).FromJust());
CompileRun("var intern = %CreatePrivateSymbol('my-private')");
- v8::Local<Value> intern = env->Global()->Get(v8_str("intern"));
- CHECK(!obj->Has(intern));
+ v8::Local<Value> intern =
+ env->Global()->Get(env.local(), v8_str("intern")).ToLocalChecked();
+ CHECK(!obj->Has(env.local(), intern).FromJust());
}
@@ -2600,10 +3020,12 @@ class ScopedArrayBufferContents {
};
template <typename T>
-static void CheckInternalFieldsAreZero(v8::Handle<T> value) {
+static void CheckInternalFieldsAreZero(v8::Local<T> value) {
CHECK_EQ(T::kInternalFieldCount, value->InternalFieldCount());
for (int i = 0; i < value->InternalFieldCount(); i++) {
- CHECK_EQ(0, value->GetInternalField(i)->Int32Value());
+ CHECK_EQ(0, value->GetInternalField(i)
+ ->Int32Value(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
}
}
@@ -2624,24 +3046,24 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
- DCHECK(data != NULL);
- env->Global()->Set(v8_str("ab"), ab);
+ CHECK(data != NULL);
+ CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
- v8::Handle<v8::Value> result = CompileRun("ab.byteLength");
- CHECK_EQ(1024, result->Int32Value());
+ v8::Local<v8::Value> result = CompileRun("ab.byteLength");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
result = CompileRun(
"var u8 = new Uint8Array(ab);"
"u8[0] = 0xFF;"
"u8[1] = 0xAA;"
"u8.length");
- CHECK_EQ(1024, result->Int32Value());
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
CHECK_EQ(0xFF, data[0]);
CHECK_EQ(0xAA, data[1]);
data[0] = 0xCC;
data[1] = 0x11;
result = CompileRun("u8[0] + u8[1]");
- CHECK_EQ(0xDD, result->Int32Value());
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
@@ -2664,18 +3086,18 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
CHECK(ab1->IsExternal());
result = CompileRun("ab1.byteLength");
- CHECK_EQ(2, result->Int32Value());
+ CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
result = CompileRun("u8_a[0]");
- CHECK_EQ(0xAA, result->Int32Value());
+ CHECK_EQ(0xAA, result->Int32Value(env.local()).FromJust());
result = CompileRun("u8_a[1]");
- CHECK_EQ(0xFF, result->Int32Value());
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
result = CompileRun(
"var u8_b = new Uint8Array(ab1);"
"u8_b[0] = 0xBB;"
"u8_a[0]");
- CHECK_EQ(0xBB, result->Int32Value());
+ CHECK_EQ(0xBB, result->Int32Value(env.local()).FromJust());
result = CompileRun("u8_b[1]");
- CHECK_EQ(0xFF, result->Int32Value());
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
@@ -2684,7 +3106,7 @@ THREADED_TEST(ArrayBuffer_JSInternalToExternal) {
ab1_data[0] = 0xCC;
ab1_data[1] = 0x11;
result = CompileRun("u8_a[0] + u8_a[1]");
- CHECK_EQ(0xDD, result->Int32Value());
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
@@ -2701,23 +3123,23 @@ THREADED_TEST(ArrayBuffer_External) {
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
CHECK(ab3->IsExternal());
- env->Global()->Set(v8_str("ab3"), ab3);
+ CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
- v8::Handle<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value());
+ v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
result = CompileRun(
"var u8_b = new Uint8Array(ab3);"
"u8_b[0] = 0xBB;"
"u8_b[1] = 0xCC;"
"u8_b.length");
- CHECK_EQ(100, result->Int32Value());
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
CHECK_EQ(0xBB, my_data[0]);
CHECK_EQ(0xCC, my_data[1]);
my_data[0] = 0xCC;
my_data[1] = 0x11;
result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value());
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
@@ -2739,13 +3161,13 @@ THREADED_TEST(ArrayBuffer_DisableNeuter) {
}
-static void CheckDataViewIsNeutered(v8::Handle<v8::DataView> dv) {
+static void CheckDataViewIsNeutered(v8::Local<v8::DataView> dv) {
CHECK_EQ(0, static_cast<int>(dv->ByteLength()));
CHECK_EQ(0, static_cast<int>(dv->ByteOffset()));
}
-static void CheckIsNeutered(v8::Handle<v8::TypedArray> ta) {
+static void CheckIsNeutered(v8::Local<v8::TypedArray> ta) {
CHECK_EQ(0, static_cast<int>(ta->ByteLength()));
CHECK_EQ(0, static_cast<int>(ta->Length()));
CHECK_EQ(0, static_cast<int>(ta->ByteOffset()));
@@ -2758,16 +3180,16 @@ static void CheckIsTypedArrayVarNeutered(const char* name) {
"%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
name, name, name);
CHECK(CompileRun(source.start())->IsTrue());
- v8::Handle<v8::TypedArray> ta =
- v8::Handle<v8::TypedArray>::Cast(CompileRun(name));
+ v8::Local<v8::TypedArray> ta =
+ v8::Local<v8::TypedArray>::Cast(CompileRun(name));
CheckIsNeutered(ta);
}
template <typename TypedArray, int kElementSize>
-static Handle<TypedArray> CreateAndCheck(Handle<v8::ArrayBuffer> ab,
- int byteOffset, int length) {
- v8::Handle<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
+static Local<TypedArray> CreateAndCheck(Local<v8::ArrayBuffer> ab,
+ int byteOffset, int length) {
+ v8::Local<TypedArray> ta = TypedArray::New(ab, byteOffset, length);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(ta);
CHECK_EQ(byteOffset, static_cast<int>(ta->ByteOffset()));
CHECK_EQ(length, static_cast<int>(ta->Length()));
@@ -2781,31 +3203,31 @@ THREADED_TEST(ArrayBuffer_NeuteringApi) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1024);
- v8::Handle<v8::Uint8Array> u8a =
+ v8::Local<v8::Uint8Array> u8a =
CreateAndCheck<v8::Uint8Array, 1>(buffer, 1, 1023);
- v8::Handle<v8::Uint8ClampedArray> u8c =
+ v8::Local<v8::Uint8ClampedArray> u8c =
CreateAndCheck<v8::Uint8ClampedArray, 1>(buffer, 1, 1023);
- v8::Handle<v8::Int8Array> i8a =
+ v8::Local<v8::Int8Array> i8a =
CreateAndCheck<v8::Int8Array, 1>(buffer, 1, 1023);
- v8::Handle<v8::Uint16Array> u16a =
+ v8::Local<v8::Uint16Array> u16a =
CreateAndCheck<v8::Uint16Array, 2>(buffer, 2, 511);
- v8::Handle<v8::Int16Array> i16a =
+ v8::Local<v8::Int16Array> i16a =
CreateAndCheck<v8::Int16Array, 2>(buffer, 2, 511);
- v8::Handle<v8::Uint32Array> u32a =
+ v8::Local<v8::Uint32Array> u32a =
CreateAndCheck<v8::Uint32Array, 4>(buffer, 4, 255);
- v8::Handle<v8::Int32Array> i32a =
+ v8::Local<v8::Int32Array> i32a =
CreateAndCheck<v8::Int32Array, 4>(buffer, 4, 255);
- v8::Handle<v8::Float32Array> f32a =
+ v8::Local<v8::Float32Array> f32a =
CreateAndCheck<v8::Float32Array, 4>(buffer, 4, 255);
- v8::Handle<v8::Float64Array> f64a =
+ v8::Local<v8::Float64Array> f64a =
CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
- v8::Handle<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
+ v8::Local<v8::DataView> dv = v8::DataView::New(buffer, 1, 1023);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
CHECK_EQ(1, static_cast<int>(dv->ByteOffset()));
CHECK_EQ(1023, static_cast<int>(dv->ByteLength()));
@@ -2844,16 +3266,15 @@ THREADED_TEST(ArrayBuffer_NeuteringScript) {
"var f64a = new Float64Array(ab, 8, 127);"
"var dv = new DataView(ab, 1, 1023);");
- v8::Handle<v8::ArrayBuffer> ab =
+ v8::Local<v8::ArrayBuffer> ab =
Local<v8::ArrayBuffer>::Cast(CompileRun("ab"));
- v8::Handle<v8::DataView> dv =
- v8::Handle<v8::DataView>::Cast(CompileRun("dv"));
+ v8::Local<v8::DataView> dv = v8::Local<v8::DataView>::Cast(CompileRun("dv"));
ScopedArrayBufferContents contents(ab->Externalize());
ab->Neuter();
CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
- CHECK_EQ(0, CompileRun("ab.byteLength")->Int32Value());
+ CHECK_EQ(0, v8_run_int32value(v8_compile("ab.byteLength")));
CheckIsTypedArrayVarNeutered("u8a");
CheckIsTypedArrayVarNeutered("u8c");
@@ -2901,24 +3322,24 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
- DCHECK(data != NULL);
- env->Global()->Set(v8_str("ab"), ab);
+ CHECK(data != NULL);
+ CHECK(env->Global()->Set(env.local(), v8_str("ab"), ab).FromJust());
- v8::Handle<v8::Value> result = CompileRun("ab.byteLength");
- CHECK_EQ(1024, result->Int32Value());
+ v8::Local<v8::Value> result = CompileRun("ab.byteLength");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
result = CompileRun(
"var u8 = new Uint8Array(ab);"
"u8[0] = 0xFF;"
"u8[1] = 0xAA;"
"u8.length");
- CHECK_EQ(1024, result->Int32Value());
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
CHECK_EQ(0xFF, data[0]);
CHECK_EQ(0xAA, data[1]);
data[0] = 0xCC;
data[1] = 0x11;
result = CompileRun("u8[0] + u8[1]");
- CHECK_EQ(0xDD, result->Int32Value());
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
@@ -2942,18 +3363,18 @@ THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
CHECK(ab1->IsExternal());
result = CompileRun("ab1.byteLength");
- CHECK_EQ(2, result->Int32Value());
+ CHECK_EQ(2, result->Int32Value(env.local()).FromJust());
result = CompileRun("u8_a[0]");
- CHECK_EQ(0xAA, result->Int32Value());
+ CHECK_EQ(0xAA, result->Int32Value(env.local()).FromJust());
result = CompileRun("u8_a[1]");
- CHECK_EQ(0xFF, result->Int32Value());
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
result = CompileRun(
"var u8_b = new Uint8Array(ab1);"
"u8_b[0] = 0xBB;"
"u8_a[0]");
- CHECK_EQ(0xBB, result->Int32Value());
+ CHECK_EQ(0xBB, result->Int32Value(env.local()).FromJust());
result = CompileRun("u8_b[1]");
- CHECK_EQ(0xFF, result->Int32Value());
+ CHECK_EQ(0xFF, result->Int32Value(env.local()).FromJust());
CHECK_EQ(2, static_cast<int>(ab1_contents.ByteLength()));
uint8_t* ab1_data = static_cast<uint8_t*>(ab1_contents.Data());
@@ -2962,7 +3383,7 @@ THREADED_TEST(SharedArrayBuffer_JSInternalToExternal) {
ab1_data[0] = 0xCC;
ab1_data[1] = 0x11;
result = CompileRun("u8_a[0] + u8_a[1]");
- CHECK_EQ(0xDD, result->Int32Value());
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
@@ -2980,23 +3401,23 @@ THREADED_TEST(SharedArrayBuffer_External) {
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
CHECK(ab3->IsExternal());
- env->Global()->Set(v8_str("ab3"), ab3);
+ CHECK(env->Global()->Set(env.local(), v8_str("ab3"), ab3).FromJust());
- v8::Handle<v8::Value> result = CompileRun("ab3.byteLength");
- CHECK_EQ(100, result->Int32Value());
+ v8::Local<v8::Value> result = CompileRun("ab3.byteLength");
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
result = CompileRun(
"var u8_b = new Uint8Array(ab3);"
"u8_b[0] = 0xBB;"
"u8_b[1] = 0xCC;"
"u8_b.length");
- CHECK_EQ(100, result->Int32Value());
+ CHECK_EQ(100, result->Int32Value(env.local()).FromJust());
CHECK_EQ(0xBB, my_data[0]);
CHECK_EQ(0xCC, my_data[1]);
my_data[0] = 0xCC;
my_data[1] = 0x11;
result = CompileRun("u8_b[0] + u8_b[1]");
- CHECK_EQ(0xDD, result->Int32Value());
+ CHECK_EQ(0xDD, result->Int32Value(env.local()).FromJust());
}
@@ -3018,40 +3439,64 @@ THREADED_TEST(HiddenProperties) {
CHECK(obj->SetPrivate(env.local(), key, v8::Integer::New(isolate, 1503))
.FromJust());
- CHECK_EQ(1503,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
+ CHECK_EQ(1503, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK(obj->SetPrivate(env.local(), key, v8::Integer::New(isolate, 2002))
.FromJust());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CcTest::heap()->CollectAllGarbage();
// Make sure we do not find the hidden property.
- CHECK(!obj->Has(empty));
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
- CHECK(obj->Get(empty)->IsUndefined());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
- CHECK(obj->Set(empty, v8::Integer::New(isolate, 2003)));
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
- CHECK_EQ(2003, obj->Get(empty)->Int32Value());
+ CHECK(!obj->Has(env.local(), empty).FromJust());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(obj->Get(env.local(), empty).ToLocalChecked()->IsUndefined());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(
+ obj->Set(env.local(), empty, v8::Integer::New(isolate, 2003)).FromJust());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2003, obj->Get(env.local(), empty)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CcTest::heap()->CollectAllGarbage();
// Add another property and delete it afterwards to force the object in
// slow case.
- CHECK(obj->Set(prop_name, v8::Integer::New(isolate, 2008)));
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
- CHECK_EQ(2008, obj->Get(prop_name)->Int32Value());
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
- CHECK(obj->Delete(prop_name));
- CHECK_EQ(2002,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
+ CHECK(obj->Set(env.local(), prop_name, v8::Integer::New(isolate, 2008))
+ .FromJust());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2008, obj->Get(env.local(), prop_name)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(obj->Delete(env.local(), prop_name).FromJust());
+ CHECK_EQ(2002, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CcTest::heap()->CollectAllGarbage();
@@ -3089,8 +3534,10 @@ THREADED_TEST(Regress97784) {
obj->SetPrivate(env.local(), key, v8::Integer::New(env->GetIsolate(), 42))
.FromJust());
ExpectFalse("set_called");
- CHECK_EQ(42,
- obj->GetPrivate(env.local(), key).ToLocalChecked()->Int32Value());
+ CHECK_EQ(42, obj->GetPrivate(env.local(), key)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
@@ -3099,9 +3546,9 @@ THREADED_TEST(External) {
int x = 3;
Local<v8::External> ext = v8::External::New(CcTest::isolate(), &x);
LocalContext env;
- env->Global()->Set(v8_str("ext"), ext);
+ CHECK(env->Global()->Set(env.local(), v8_str("ext"), ext).FromJust());
Local<Value> reext_obj = CompileRun("this.ext");
- v8::Handle<v8::External> reext = reext_obj.As<v8::External>();
+ v8::Local<v8::External> reext = reext_obj.As<v8::External>();
int* ptr = static_cast<int*>(reext->Value());
CHECK_EQ(x, 3);
*ptr = 10;
@@ -3397,7 +3844,7 @@ Local<v8::Object> NewObjectForIntKey(
v8::Isolate* isolate, const v8::Global<v8::ObjectTemplate>& templ,
int key) {
auto local = Local<v8::ObjectTemplate>::New(isolate, templ);
- auto obj = local->NewInstance();
+ auto obj = local->NewInstance(isolate->GetCurrentContext()).ToLocalChecked();
obj->SetAlignedPointerInInternalField(0, IntKeyToVoidPointer(key));
return obj;
}
@@ -3468,10 +3915,10 @@ void TestGlobalValueMap() {
map.Set(7, expected);
CHECK_EQ(1, static_cast<int>(map.Size()));
obj = map.Get(7);
- CHECK(expected->Equals(obj));
+ CHECK(expected->Equals(env.local(), obj).FromJust());
{
typename Map::PersistentValueReference ref = map.GetReference(7);
- CHECK(expected->Equals(ref.NewLocal(isolate)));
+ CHECK(expected->Equals(env.local(), ref.NewLocal(isolate)).FromJust());
}
v8::Global<v8::Object> removed = map.Remove(7);
CHECK_EQ(0, static_cast<int>(map.Size()));
@@ -3488,7 +3935,7 @@ void TestGlobalValueMap() {
removed = map.Set(8, v8::Global<v8::Object>(isolate, expected2), &ref);
CHECK_EQ(1, static_cast<int>(map.Size()));
CHECK(expected == removed);
- CHECK(expected2->Equals(ref.NewLocal(isolate)));
+ CHECK(expected2->Equals(env.local(), ref.NewLocal(isolate)).FromJust());
}
}
CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
@@ -3553,10 +4000,10 @@ TEST(PersistentValueVector) {
CHECK(!vector.IsEmpty());
CHECK_EQ(5, static_cast<int>(vector.Size()));
CHECK(obj3.IsEmpty());
- CHECK(obj1->Equals(vector.Get(0)));
- CHECK(obj1->Equals(vector.Get(2)));
- CHECK(obj1->Equals(vector.Get(4)));
- CHECK(obj2->Equals(vector.Get(1)));
+ CHECK(obj1->Equals(env.local(), vector.Get(0)).FromJust());
+ CHECK(obj1->Equals(env.local(), vector.Get(2)).FromJust());
+ CHECK(obj1->Equals(env.local(), vector.Get(4)).FromJust());
+ CHECK(obj2->Equals(env.local(), vector.Get(1)).FromJust());
CHECK_EQ(5 + handle_count, global_handles->global_handles_count());
@@ -3707,9 +4154,11 @@ THREADED_TEST(ApiObjectGroups) {
{
HandleScope scope(iso);
CHECK(Local<Object>::New(iso, g1s2.handle.As<Object>())
- ->Set(0, Local<Value>::New(iso, g2s2.handle)));
+ ->Set(env.local(), 0, Local<Value>::New(iso, g2s2.handle))
+ .FromJust());
CHECK(Local<Object>::New(iso, g2s1.handle.As<Object>())
- ->Set(0, Local<Value>::New(iso, g1s1.handle)));
+ ->Set(env.local(), 0, Local<Value>::New(iso, g1s1.handle))
+ .FromJust());
}
{
@@ -3783,8 +4232,8 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
{
HandleScope scope(iso);
g1s1.handle.Reset(iso, Object::New(iso));
- g1s2.handle.Reset(iso, String::NewFromUtf8(iso, "foo1"));
- g1c1.handle.Reset(iso, String::NewFromUtf8(iso, "foo2"));
+ g1s2.handle.Reset(iso, v8_str("foo1"));
+ g1c1.handle.Reset(iso, v8_str("foo2"));
g1s1.handle.SetWeak(&g1s1, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
g1s2.handle.SetWeak(&g1s2, &WeakPointerCallback,
@@ -3793,8 +4242,8 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
v8::WeakCallbackType::kParameter);
g2s1.handle.Reset(iso, Object::New(iso));
- g2s2.handle.Reset(iso, String::NewFromUtf8(iso, "foo3"));
- g2c1.handle.Reset(iso, String::NewFromUtf8(iso, "foo4"));
+ g2s2.handle.Reset(iso, v8_str("foo3"));
+ g2c1.handle.Reset(iso, v8_str("foo4"));
g2s1.handle.SetWeak(&g2s1, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
g2s2.handle.SetWeak(&g2s2, &WeakPointerCallback,
@@ -3810,9 +4259,11 @@ THREADED_TEST(ApiObjectGroupsForSubtypes) {
{
HandleScope scope(iso);
CHECK(Local<Object>::New(iso, g1s1.handle)
- ->Set(0, Local<Object>::New(iso, g2s1.handle)));
+ ->Set(env.local(), 0, Local<Object>::New(iso, g2s1.handle))
+ .FromJust());
CHECK(Local<Object>::New(iso, g2s1.handle)
- ->Set(0, Local<Object>::New(iso, g1s1.handle)));
+ ->Set(env.local(), 0, Local<Object>::New(iso, g1s1.handle))
+ .FromJust());
}
{
@@ -4048,15 +4499,18 @@ TEST(ApiObjectGroupsCycleForScavenger) {
iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
Local<Object>::New(iso, g1s1.handle.As<Object>())
- ->Set(v8_str("x"), Local<Value>::New(iso, g2s1.handle));
+ ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g2s1.handle))
+ .FromJust();
iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
Local<Object>::New(iso, g2s1.handle.As<Object>())
- ->Set(v8_str("x"), Local<Value>::New(iso, g3s1.handle));
+ ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g3s1.handle))
+ .FromJust();
iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
Local<Object>::New(iso, g3s1.handle.As<Object>())
- ->Set(v8_str("x"), Local<Value>::New(iso, g1s1.handle));
+ ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g1s1.handle))
+ .FromJust();
}
v8::internal::Heap* heap =
@@ -4083,15 +4537,18 @@ TEST(ApiObjectGroupsCycleForScavenger) {
iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
Local<Object>::New(iso, g1s1.handle.As<Object>())
- ->Set(v8_str("x"), Local<Value>::New(iso, g2s1.handle));
+ ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g2s1.handle))
+ .FromJust();
iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
Local<Object>::New(iso, g2s1.handle.As<Object>())
- ->Set(v8_str("x"), Local<Value>::New(iso, g3s1.handle));
+ ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g3s1.handle))
+ .FromJust();
iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
Local<Object>::New(iso, g3s1.handle.As<Object>())
- ->Set(v8_str("x"), Local<Value>::New(iso, g1s1.handle));
+ ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g1s1.handle))
+ .FromJust();
}
heap->CollectAllGarbage();
@@ -4106,7 +4563,7 @@ THREADED_TEST(ScriptException) {
v8::HandleScope scope(env->GetIsolate());
Local<Script> script = v8_compile("throw 'panama!';");
v8::TryCatch try_catch(env->GetIsolate());
- Local<Value> result = script->Run();
+ v8::MaybeLocal<Value> result = script->Run(env.local());
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
@@ -4124,19 +4581,26 @@ TEST(TryCatchCustomException) {
"(function f() { throw new CustomError(); })();");
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()
- ->ToObject(isolate)
- ->Get(v8_str("a"))
- ->Equals(v8_str("b")));
+ ->ToObject(env.local())
+ .ToLocalChecked()
+ ->Get(env.local(), v8_str("a"))
+ .ToLocalChecked()
+ ->Equals(env.local(), v8_str("b"))
+ .FromJust());
}
bool message_received;
-static void check_message_0(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
- CHECK_EQ(5.76, data->NumberValue());
- CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
+static void check_message_0(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
+ CHECK_EQ(5.76, data->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
+ CHECK_EQ(6.75, message->GetScriptOrigin()
+ .ResourceName()
+ ->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
CHECK(!message->IsSharedCrossOrigin());
message_received = true;
}
@@ -4147,19 +4611,20 @@ THREADED_TEST(MessageHandler0) {
v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
LocalContext context;
- v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
- v8::Handle<v8::Script> script = CompileWithOrigin("throw 'error'", "6.75");
- script->Run();
+ CcTest::isolate()->AddMessageListener(check_message_0, v8_num(5.76));
+ v8::Local<v8::Script> script = CompileWithOrigin("throw 'error'", "6.75");
+ CHECK(script->Run(context.local()).IsEmpty());
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_0);
+ CcTest::isolate()->RemoveMessageListeners(check_message_0);
}
-static void check_message_1(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void check_message_1(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
CHECK(data->IsNumber());
- CHECK_EQ(1337, data->Int32Value());
+ CHECK_EQ(1337,
+ data->Int32Value(CcTest::isolate()->GetCurrentContext()).FromJust());
CHECK(!message->IsSharedCrossOrigin());
message_received = true;
}
@@ -4169,17 +4634,17 @@ TEST(MessageHandler1) {
message_received = false;
v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_1);
+ CcTest::isolate()->AddMessageListener(check_message_1);
LocalContext context;
CompileRun("throw 1337;");
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_1);
+ CcTest::isolate()->RemoveMessageListeners(check_message_1);
}
-static void check_message_2(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void check_message_2(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
LocalContext context;
CHECK(data->IsObject());
v8::Local<v8::Value> hidden_property =
@@ -4188,7 +4653,9 @@ static void check_message_2(v8::Handle<v8::Message> message,
context.local(),
v8::Private::ForApi(CcTest::isolate(), v8_str("hidden key")))
.ToLocalChecked();
- CHECK(v8_str("hidden value")->Equals(hidden_property));
+ CHECK(v8_str("hidden value")
+ ->Equals(context.local(), hidden_property)
+ .FromJust());
CHECK(!message->IsSharedCrossOrigin());
message_received = true;
}
@@ -4198,7 +4665,7 @@ TEST(MessageHandler2) {
message_received = false;
v8::HandleScope scope(CcTest::isolate());
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_2);
+ CcTest::isolate()->AddMessageListener(check_message_2);
LocalContext context;
v8::Local<v8::Value> error = v8::Exception::Error(v8_str("custom error"));
v8::Object::Cast(*error)
@@ -4206,22 +4673,30 @@ TEST(MessageHandler2) {
v8::Private::ForApi(CcTest::isolate(), v8_str("hidden key")),
v8_str("hidden value"))
.FromJust();
- context->Global()->Set(v8_str("error"), error);
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("error"), error)
+ .FromJust());
CompileRun("throw error;");
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_2);
+ CcTest::isolate()->RemoveMessageListeners(check_message_2);
}
-static void check_message_3(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void check_message_3(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
CHECK(message->IsSharedCrossOrigin());
CHECK(message->GetScriptOrigin().Options().IsSharedCrossOrigin());
CHECK(message->GetScriptOrigin().Options().IsEmbedderDebugScript());
CHECK(message->GetScriptOrigin().Options().IsOpaque());
- CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
- CHECK_EQ(7.40, message->GetScriptOrigin().SourceMapUrl()->NumberValue());
+ CHECK_EQ(6.75, message->GetScriptOrigin()
+ .ResourceName()
+ ->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
+ CHECK_EQ(7.40, message->GetScriptOrigin()
+ .SourceMapUrl()
+ ->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
message_received = true;
}
@@ -4231,25 +4706,29 @@ TEST(MessageHandler3) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_3);
+ isolate->AddMessageListener(check_message_3);
LocalContext context;
v8::ScriptOrigin origin = v8::ScriptOrigin(
v8_str("6.75"), v8::Integer::New(isolate, 1),
- v8::Integer::New(isolate, 2), v8::True(isolate), Handle<v8::Integer>(),
+ v8::Integer::New(isolate, 2), v8::True(isolate), Local<v8::Integer>(),
v8::True(isolate), v8_str("7.40"), v8::True(isolate));
- v8::Handle<v8::Script> script =
- Script::Compile(v8_str("throw 'error'"), &origin);
- script->Run();
+ v8::Local<v8::Script> script =
+ Script::Compile(context.local(), v8_str("throw 'error'"), &origin)
+ .ToLocalChecked();
+ CHECK(script->Run(context.local()).IsEmpty());
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_3);
+ isolate->RemoveMessageListeners(check_message_3);
}
-static void check_message_4(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void check_message_4(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
CHECK(!message->IsSharedCrossOrigin());
- CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
+ CHECK_EQ(6.75, message->GetScriptOrigin()
+ .ResourceName()
+ ->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
message_received = true;
}
@@ -4259,32 +4738,39 @@ TEST(MessageHandler4) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_4);
+ isolate->AddMessageListener(check_message_4);
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
v8::Integer::New(isolate, 2), v8::False(isolate));
- v8::Handle<v8::Script> script =
- Script::Compile(v8_str("throw 'error'"), &origin);
- script->Run();
+ v8::Local<v8::Script> script =
+ Script::Compile(context.local(), v8_str("throw 'error'"), &origin)
+ .ToLocalChecked();
+ CHECK(script->Run(context.local()).IsEmpty());
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_4);
+ isolate->RemoveMessageListeners(check_message_4);
}
-static void check_message_5a(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void check_message_5a(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
CHECK(message->IsSharedCrossOrigin());
- CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
+ CHECK_EQ(6.75, message->GetScriptOrigin()
+ .ResourceName()
+ ->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
message_received = true;
}
-static void check_message_5b(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void check_message_5b(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
CHECK(!message->IsSharedCrossOrigin());
- CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
+ CHECK_EQ(6.75, message->GetScriptOrigin()
+ .ResourceName()
+ ->NumberValue(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
message_received = true;
}
@@ -4294,28 +4780,30 @@ TEST(MessageHandler5) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_5a);
+ isolate->AddMessageListener(check_message_5a);
LocalContext context;
v8::ScriptOrigin origin1 =
v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
v8::Integer::New(isolate, 2), v8::True(isolate));
- v8::Handle<v8::Script> script =
- Script::Compile(v8_str("throw 'error'"), &origin1);
- script->Run();
+ v8::Local<v8::Script> script =
+ Script::Compile(context.local(), v8_str("throw 'error'"), &origin1)
+ .ToLocalChecked();
+ CHECK(script->Run(context.local()).IsEmpty());
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_5a);
+ isolate->RemoveMessageListeners(check_message_5a);
message_received = false;
- v8::V8::AddMessageListener(check_message_5b);
+ isolate->AddMessageListener(check_message_5b);
v8::ScriptOrigin origin2 =
v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
v8::Integer::New(isolate, 2), v8::False(isolate));
- script = Script::Compile(v8_str("throw 'error'"), &origin2);
- script->Run();
+ script = Script::Compile(context.local(), v8_str("throw 'error'"), &origin2)
+ .ToLocalChecked();
+ CHECK(script->Run(context.local()).IsEmpty());
CHECK(message_received);
// clear out the message listener
- v8::V8::RemoveMessageListeners(check_message_5b);
+ isolate->RemoveMessageListeners(check_message_5b);
}
@@ -4333,7 +4821,7 @@ TEST(NativeWeakMap) {
CHECK(weak_map->Get(local1)->IsUndefined());
weak_map->Set(local1, value);
CHECK(weak_map->Has(local1));
- CHECK(value->Equals(weak_map->Get(local1)));
+ CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
WeakCallCounter counter(1234);
WeakCallCounterAndPersistent<Value> o1(&counter);
@@ -4358,18 +4846,24 @@ TEST(NativeWeakMap) {
CHECK(weak_map->Has(obj2));
CHECK(weak_map->Has(sym1));
- CHECK(value->Equals(weak_map->Get(local1)));
- CHECK(value->Equals(weak_map->Get(obj1)));
- CHECK(value->Equals(weak_map->Get(obj2)));
- CHECK(value->Equals(weak_map->Get(sym1)));
+ CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
+ CHECK(value->Equals(env.local(), weak_map->Get(obj1)).FromJust());
+ CHECK(value->Equals(env.local(), weak_map->Get(obj2)).FromJust());
+ CHECK(value->Equals(env.local(), weak_map->Get(sym1)).FromJust());
}
CcTest::heap()->CollectAllGarbage();
{
HandleScope scope(isolate);
- CHECK(value->Equals(weak_map->Get(local1)));
- CHECK(value->Equals(weak_map->Get(Local<Value>::New(isolate, o1.handle))));
- CHECK(value->Equals(weak_map->Get(Local<Value>::New(isolate, o2.handle))));
- CHECK(value->Equals(weak_map->Get(Local<Value>::New(isolate, s1.handle))));
+ CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
+ CHECK(value->Equals(env.local(),
+ weak_map->Get(Local<Value>::New(isolate, o1.handle)))
+ .FromJust());
+ CHECK(value->Equals(env.local(),
+ weak_map->Get(Local<Value>::New(isolate, o2.handle)))
+ .FromJust());
+ CHECK(value->Equals(env.local(),
+ weak_map->Get(Local<Value>::New(isolate, s1.handle)))
+ .FromJust());
}
o1.handle.SetWeak(&o1, &WeakPointerCallback,
@@ -4386,7 +4880,7 @@ TEST(NativeWeakMap) {
CHECK(o2.handle.IsEmpty());
CHECK(s1.handle.IsEmpty());
- CHECK(value->Equals(weak_map->Get(local1)));
+ CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
CHECK(weak_map->Delete(local1));
CHECK(!weak_map->Has(local1));
CHECK(weak_map->Get(local1)->IsUndefined());
@@ -4397,30 +4891,71 @@ THREADED_TEST(GetSetProperty) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- context->Global()->Set(v8_str("foo"), v8_num(14));
- context->Global()->Set(v8_str("12"), v8_num(92));
- context->Global()->Set(v8::Integer::New(isolate, 16), v8_num(32));
- context->Global()->Set(v8_num(13), v8_num(56));
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("foo"), v8_num(14))
+ .FromJust());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("12"), v8_num(92))
+ .FromJust());
+ CHECK(context->Global()
+ ->Set(context.local(), v8::Integer::New(isolate, 16), v8_num(32))
+ .FromJust());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_num(13), v8_num(56))
+ .FromJust());
Local<Value> foo = CompileRun("this.foo");
- CHECK_EQ(14, foo->Int32Value());
+ CHECK_EQ(14, foo->Int32Value(context.local()).FromJust());
Local<Value> twelve = CompileRun("this[12]");
- CHECK_EQ(92, twelve->Int32Value());
+ CHECK_EQ(92, twelve->Int32Value(context.local()).FromJust());
Local<Value> sixteen = CompileRun("this[16]");
- CHECK_EQ(32, sixteen->Int32Value());
+ CHECK_EQ(32, sixteen->Int32Value(context.local()).FromJust());
Local<Value> thirteen = CompileRun("this[13]");
- CHECK_EQ(56, thirteen->Int32Value());
- CHECK_EQ(92,
- context->Global()->Get(v8::Integer::New(isolate, 12))->Int32Value());
- CHECK_EQ(92, context->Global()->Get(v8_str("12"))->Int32Value());
- CHECK_EQ(92, context->Global()->Get(v8_num(12))->Int32Value());
- CHECK_EQ(32,
- context->Global()->Get(v8::Integer::New(isolate, 16))->Int32Value());
- CHECK_EQ(32, context->Global()->Get(v8_str("16"))->Int32Value());
- CHECK_EQ(32, context->Global()->Get(v8_num(16))->Int32Value());
- CHECK_EQ(56,
- context->Global()->Get(v8::Integer::New(isolate, 13))->Int32Value());
- CHECK_EQ(56, context->Global()->Get(v8_str("13"))->Int32Value());
- CHECK_EQ(56, context->Global()->Get(v8_num(13))->Int32Value());
+ CHECK_EQ(56, thirteen->Int32Value(context.local()).FromJust());
+ CHECK_EQ(92, context->Global()
+ ->Get(context.local(), v8::Integer::New(isolate, 12))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(92, context->Global()
+ ->Get(context.local(), v8_str("12"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(92, context->Global()
+ ->Get(context.local(), v8_num(12))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(32, context->Global()
+ ->Get(context.local(), v8::Integer::New(isolate, 16))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(32, context->Global()
+ ->Get(context.local(), v8_str("16"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(32, context->Global()
+ ->Get(context.local(), v8_num(16))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(56, context->Global()
+ ->Get(context.local(), v8::Integer::New(isolate, 13))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(56, context->Global()
+ ->Get(context.local(), v8_str("13"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(56, context->Global()
+ ->Get(context.local(), v8_num(13))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -4429,38 +4964,78 @@ THREADED_TEST(PropertyAttributes) {
v8::HandleScope scope(context->GetIsolate());
// none
Local<String> prop = v8_str("none");
- context->Global()->Set(prop, v8_num(7));
- CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(prop));
+ CHECK(context->Global()->Set(context.local(), prop, v8_num(7)).FromJust());
+ CHECK_EQ(v8::None, context->Global()
+ ->GetPropertyAttributes(context.local(), prop)
+ .FromJust());
// read-only
prop = v8_str("read_only");
- context->Global()->ForceSet(prop, v8_num(7), v8::ReadOnly);
- CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
- CHECK_EQ(v8::ReadOnly, context->Global()->GetPropertyAttributes(prop));
+ context->Global()
+ ->DefineOwnProperty(context.local(), prop, v8_num(7), v8::ReadOnly)
+ .FromJust();
+ CHECK_EQ(7, context->Global()
+ ->Get(context.local(), prop)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(v8::ReadOnly, context->Global()
+ ->GetPropertyAttributes(context.local(), prop)
+ .FromJust());
CompileRun("read_only = 9");
- CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
- context->Global()->Set(prop, v8_num(10));
- CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
+ CHECK_EQ(7, context->Global()
+ ->Get(context.local(), prop)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(context->Global()->Set(context.local(), prop, v8_num(10)).FromJust());
+ CHECK_EQ(7, context->Global()
+ ->Get(context.local(), prop)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
// dont-delete
prop = v8_str("dont_delete");
- context->Global()->ForceSet(prop, v8_num(13), v8::DontDelete);
- CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
+ context->Global()
+ ->DefineOwnProperty(context.local(), prop, v8_num(13), v8::DontDelete)
+ .FromJust();
+ CHECK_EQ(13, context->Global()
+ ->Get(context.local(), prop)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CompileRun("delete dont_delete");
- CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
- CHECK_EQ(v8::DontDelete, context->Global()->GetPropertyAttributes(prop));
+ CHECK_EQ(13, context->Global()
+ ->Get(context.local(), prop)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(v8::DontDelete, context->Global()
+ ->GetPropertyAttributes(context.local(), prop)
+ .FromJust());
// dont-enum
prop = v8_str("dont_enum");
- context->Global()->ForceSet(prop, v8_num(28), v8::DontEnum);
- CHECK_EQ(v8::DontEnum, context->Global()->GetPropertyAttributes(prop));
+ context->Global()
+ ->DefineOwnProperty(context.local(), prop, v8_num(28), v8::DontEnum)
+ .FromJust();
+ CHECK_EQ(v8::DontEnum, context->Global()
+ ->GetPropertyAttributes(context.local(), prop)
+ .FromJust());
// absent
prop = v8_str("absent");
- CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(prop));
+ CHECK_EQ(v8::None, context->Global()
+ ->GetPropertyAttributes(context.local(), prop)
+ .FromJust());
Local<Value> fake_prop = v8_num(1);
- CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(fake_prop));
+ CHECK_EQ(v8::None, context->Global()
+ ->GetPropertyAttributes(context.local(), fake_prop)
+ .FromJust());
// exception
TryCatch try_catch(context->GetIsolate());
Local<Value> exception =
CompileRun("({ toString: function() { throw 'exception';} })");
- CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(exception));
+ CHECK(context->Global()
+ ->GetPropertyAttributes(context.local(), exception)
+ .IsNothing());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(0, strcmp("exception", *exception_value));
@@ -4473,22 +5048,34 @@ THREADED_TEST(Array) {
v8::HandleScope scope(context->GetIsolate());
Local<v8::Array> array = v8::Array::New(context->GetIsolate());
CHECK_EQ(0u, array->Length());
- CHECK(array->Get(0)->IsUndefined());
- CHECK(!array->Has(0));
- CHECK(array->Get(100)->IsUndefined());
- CHECK(!array->Has(100));
- array->Set(2, v8_num(7));
+ CHECK(array->Get(context.local(), 0).ToLocalChecked()->IsUndefined());
+ CHECK(!array->Has(context.local(), 0).FromJust());
+ CHECK(array->Get(context.local(), 100).ToLocalChecked()->IsUndefined());
+ CHECK(!array->Has(context.local(), 100).FromJust());
+ CHECK(array->Set(context.local(), 2, v8_num(7)).FromJust());
CHECK_EQ(3u, array->Length());
- CHECK(!array->Has(0));
- CHECK(!array->Has(1));
- CHECK(array->Has(2));
- CHECK_EQ(7, array->Get(2)->Int32Value());
+ CHECK(!array->Has(context.local(), 0).FromJust());
+ CHECK(!array->Has(context.local(), 1).FromJust());
+ CHECK(array->Has(context.local(), 2).FromJust());
+ CHECK_EQ(7, array->Get(context.local(), 2)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
Local<Value> obj = CompileRun("[1, 2, 3]");
Local<v8::Array> arr = obj.As<v8::Array>();
CHECK_EQ(3u, arr->Length());
- CHECK_EQ(1, arr->Get(0)->Int32Value());
- CHECK_EQ(2, arr->Get(1)->Int32Value());
- CHECK_EQ(3, arr->Get(2)->Int32Value());
+ CHECK_EQ(1, arr->Get(context.local(), 0)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(2, arr->Get(context.local(), 1)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(3, arr->Get(context.local(), 2)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
array = v8::Array::New(context->GetIsolate(), 27);
CHECK_EQ(27u, array->Length());
array = v8::Array::New(context->GetIsolate(), -27);
@@ -4500,7 +5087,10 @@ void HandleF(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::EscapableHandleScope scope(args.GetIsolate());
ApiTestFuzzer::Fuzz();
Local<v8::Array> result = v8::Array::New(args.GetIsolate(), args.Length());
- for (int i = 0; i < args.Length(); i++) result->Set(i, args[i]);
+ for (int i = 0; i < args.Length(); i++) {
+ CHECK(result->Set(CcTest::isolate()->GetCurrentContext(), i, args[i])
+ .FromJust());
+ }
args.GetReturnValue().Set(scope.Escape(result));
}
@@ -4519,28 +5109,58 @@ THREADED_TEST(Vector) {
const char* fun2 = "f(11)";
Local<v8::Array> a1 = CompileRun(fun2).As<v8::Array>();
CHECK_EQ(1u, a1->Length());
- CHECK_EQ(11, a1->Get(0)->Int32Value());
+ CHECK_EQ(11, a1->Get(context.local(), 0)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
const char* fun3 = "f(12, 13)";
Local<v8::Array> a2 = CompileRun(fun3).As<v8::Array>();
CHECK_EQ(2u, a2->Length());
- CHECK_EQ(12, a2->Get(0)->Int32Value());
- CHECK_EQ(13, a2->Get(1)->Int32Value());
+ CHECK_EQ(12, a2->Get(context.local(), 0)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(13, a2->Get(context.local(), 1)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
const char* fun4 = "f(14, 15, 16)";
Local<v8::Array> a3 = CompileRun(fun4).As<v8::Array>();
CHECK_EQ(3u, a3->Length());
- CHECK_EQ(14, a3->Get(0)->Int32Value());
- CHECK_EQ(15, a3->Get(1)->Int32Value());
- CHECK_EQ(16, a3->Get(2)->Int32Value());
+ CHECK_EQ(14, a3->Get(context.local(), 0)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(15, a3->Get(context.local(), 1)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(16, a3->Get(context.local(), 2)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
const char* fun5 = "f(17, 18, 19, 20)";
Local<v8::Array> a4 = CompileRun(fun5).As<v8::Array>();
CHECK_EQ(4u, a4->Length());
- CHECK_EQ(17, a4->Get(0)->Int32Value());
- CHECK_EQ(18, a4->Get(1)->Int32Value());
- CHECK_EQ(19, a4->Get(2)->Int32Value());
- CHECK_EQ(20, a4->Get(3)->Int32Value());
+ CHECK_EQ(17, a4->Get(context.local(), 0)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(18, a4->Get(context.local(), 1)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(19, a4->Get(context.local(), 2)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(20, a4->Get(context.local(), 3)
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -4563,67 +5183,126 @@ THREADED_TEST(FunctionCall) {
" 'use strict';"
" return this;"
"}");
- Local<Function> Foo =
- Local<Function>::Cast(context->Global()->Get(v8_str("Foo")));
- Local<Function> ReturnThisSloppy =
- Local<Function>::Cast(context->Global()->Get(v8_str("ReturnThisSloppy")));
- Local<Function> ReturnThisStrict =
- Local<Function>::Cast(context->Global()->Get(v8_str("ReturnThisStrict")));
-
- v8::Handle<Value>* args0 = NULL;
- Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->Call(Foo, 0, args0));
+ Local<Function> Foo = Local<Function>::Cast(
+ context->Global()->Get(context.local(), v8_str("Foo")).ToLocalChecked());
+ Local<Function> ReturnThisSloppy = Local<Function>::Cast(
+ context->Global()
+ ->Get(context.local(), v8_str("ReturnThisSloppy"))
+ .ToLocalChecked());
+ Local<Function> ReturnThisStrict = Local<Function>::Cast(
+ context->Global()
+ ->Get(context.local(), v8_str("ReturnThisStrict"))
+ .ToLocalChecked());
+
+ v8::Local<Value>* args0 = NULL;
+ Local<v8::Array> a0 = Local<v8::Array>::Cast(
+ Foo->Call(context.local(), Foo, 0, args0).ToLocalChecked());
CHECK_EQ(0u, a0->Length());
- v8::Handle<Value> args1[] = {v8_num(1.1)};
- Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->Call(Foo, 1, args1));
+ v8::Local<Value> args1[] = {v8_num(1.1)};
+ Local<v8::Array> a1 = Local<v8::Array>::Cast(
+ Foo->Call(context.local(), Foo, 1, args1).ToLocalChecked());
CHECK_EQ(1u, a1->Length());
- CHECK_EQ(1.1, a1->Get(v8::Integer::New(isolate, 0))->NumberValue());
-
- v8::Handle<Value> args2[] = {v8_num(2.2), v8_num(3.3)};
- Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->Call(Foo, 2, args2));
+ CHECK_EQ(1.1, a1->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ v8::Local<Value> args2[] = {v8_num(2.2), v8_num(3.3)};
+ Local<v8::Array> a2 = Local<v8::Array>::Cast(
+ Foo->Call(context.local(), Foo, 2, args2).ToLocalChecked());
CHECK_EQ(2u, a2->Length());
- CHECK_EQ(2.2, a2->Get(v8::Integer::New(isolate, 0))->NumberValue());
- CHECK_EQ(3.3, a2->Get(v8::Integer::New(isolate, 1))->NumberValue());
-
- v8::Handle<Value> args3[] = {v8_num(4.4), v8_num(5.5), v8_num(6.6)};
- Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->Call(Foo, 3, args3));
+ CHECK_EQ(2.2, a2->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(3.3, a2->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ v8::Local<Value> args3[] = {v8_num(4.4), v8_num(5.5), v8_num(6.6)};
+ Local<v8::Array> a3 = Local<v8::Array>::Cast(
+ Foo->Call(context.local(), Foo, 3, args3).ToLocalChecked());
CHECK_EQ(3u, a3->Length());
- CHECK_EQ(4.4, a3->Get(v8::Integer::New(isolate, 0))->NumberValue());
- CHECK_EQ(5.5, a3->Get(v8::Integer::New(isolate, 1))->NumberValue());
- CHECK_EQ(6.6, a3->Get(v8::Integer::New(isolate, 2))->NumberValue());
-
- v8::Handle<Value> args4[] = {v8_num(7.7), v8_num(8.8), v8_num(9.9),
- v8_num(10.11)};
- Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->Call(Foo, 4, args4));
+ CHECK_EQ(4.4, a3->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(5.5, a3->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(6.6, a3->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ v8::Local<Value> args4[] = {v8_num(7.7), v8_num(8.8), v8_num(9.9),
+ v8_num(10.11)};
+ Local<v8::Array> a4 = Local<v8::Array>::Cast(
+ Foo->Call(context.local(), Foo, 4, args4).ToLocalChecked());
CHECK_EQ(4u, a4->Length());
- CHECK_EQ(7.7, a4->Get(v8::Integer::New(isolate, 0))->NumberValue());
- CHECK_EQ(8.8, a4->Get(v8::Integer::New(isolate, 1))->NumberValue());
- CHECK_EQ(9.9, a4->Get(v8::Integer::New(isolate, 2))->NumberValue());
- CHECK_EQ(10.11, a4->Get(v8::Integer::New(isolate, 3))->NumberValue());
-
- Local<v8::Value> r1 = ReturnThisSloppy->Call(v8::Undefined(isolate), 0, NULL);
+ CHECK_EQ(7.7, a4->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(8.8, a4->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(9.9, a4->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(10.11, a4->Get(context.local(), v8::Integer::New(isolate, 3))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ Local<v8::Value> r1 =
+ ReturnThisSloppy->Call(context.local(), v8::Undefined(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(r1->StrictEquals(context->Global()));
- Local<v8::Value> r2 = ReturnThisSloppy->Call(v8::Null(isolate), 0, NULL);
+ Local<v8::Value> r2 =
+ ReturnThisSloppy->Call(context.local(), v8::Null(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(r2->StrictEquals(context->Global()));
- Local<v8::Value> r3 = ReturnThisSloppy->Call(v8_num(42), 0, NULL);
+ Local<v8::Value> r3 =
+ ReturnThisSloppy->Call(context.local(), v8_num(42), 0, NULL)
+ .ToLocalChecked();
CHECK(r3->IsNumberObject());
CHECK_EQ(42.0, r3.As<v8::NumberObject>()->ValueOf());
- Local<v8::Value> r4 = ReturnThisSloppy->Call(v8_str("hello"), 0, NULL);
+ Local<v8::Value> r4 =
+ ReturnThisSloppy->Call(context.local(), v8_str("hello"), 0, NULL)
+ .ToLocalChecked();
CHECK(r4->IsStringObject());
CHECK(r4.As<v8::StringObject>()->ValueOf()->StrictEquals(v8_str("hello")));
- Local<v8::Value> r5 = ReturnThisSloppy->Call(v8::True(isolate), 0, NULL);
+ Local<v8::Value> r5 =
+ ReturnThisSloppy->Call(context.local(), v8::True(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(r5->IsBooleanObject());
CHECK(r5.As<v8::BooleanObject>()->ValueOf());
- Local<v8::Value> r6 = ReturnThisStrict->Call(v8::Undefined(isolate), 0, NULL);
+ Local<v8::Value> r6 =
+ ReturnThisStrict->Call(context.local(), v8::Undefined(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(r6->IsUndefined());
- Local<v8::Value> r7 = ReturnThisStrict->Call(v8::Null(isolate), 0, NULL);
+ Local<v8::Value> r7 =
+ ReturnThisStrict->Call(context.local(), v8::Null(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(r7->IsNull());
- Local<v8::Value> r8 = ReturnThisStrict->Call(v8_num(42), 0, NULL);
+ Local<v8::Value> r8 =
+ ReturnThisStrict->Call(context.local(), v8_num(42), 0, NULL)
+ .ToLocalChecked();
CHECK(r8->StrictEquals(v8_num(42)));
- Local<v8::Value> r9 = ReturnThisStrict->Call(v8_str("hello"), 0, NULL);
+ Local<v8::Value> r9 =
+ ReturnThisStrict->Call(context.local(), v8_str("hello"), 0, NULL)
+ .ToLocalChecked();
CHECK(r9->StrictEquals(v8_str("hello")));
- Local<v8::Value> r10 = ReturnThisStrict->Call(v8::True(isolate), 0, NULL);
+ Local<v8::Value> r10 =
+ ReturnThisStrict->Call(context.local(), v8::True(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(r10->StrictEquals(v8::True(isolate)));
}
@@ -4640,47 +5319,74 @@ THREADED_TEST(ConstructCall) {
" }"
" return result;"
"}");
- Local<Function> Foo =
- Local<Function>::Cast(context->Global()->Get(v8_str("Foo")));
+ Local<Function> Foo = Local<Function>::Cast(
+ context->Global()->Get(context.local(), v8_str("Foo")).ToLocalChecked());
- v8::Handle<Value>* args0 = NULL;
- Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->NewInstance(0, args0));
+ v8::Local<Value>* args0 = NULL;
+ Local<v8::Array> a0 = Local<v8::Array>::Cast(
+ Foo->NewInstance(context.local(), 0, args0).ToLocalChecked());
CHECK_EQ(0u, a0->Length());
- v8::Handle<Value> args1[] = {v8_num(1.1)};
- Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->NewInstance(1, args1));
+ v8::Local<Value> args1[] = {v8_num(1.1)};
+ Local<v8::Array> a1 = Local<v8::Array>::Cast(
+ Foo->NewInstance(context.local(), 1, args1).ToLocalChecked());
CHECK_EQ(1u, a1->Length());
- CHECK_EQ(1.1, a1->Get(v8::Integer::New(isolate, 0))->NumberValue());
-
- v8::Handle<Value> args2[] = {v8_num(2.2), v8_num(3.3)};
- Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->NewInstance(2, args2));
+ CHECK_EQ(1.1, a1->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ v8::Local<Value> args2[] = {v8_num(2.2), v8_num(3.3)};
+ Local<v8::Array> a2 = Local<v8::Array>::Cast(
+ Foo->NewInstance(context.local(), 2, args2).ToLocalChecked());
CHECK_EQ(2u, a2->Length());
- CHECK_EQ(2.2, a2->Get(v8::Integer::New(isolate, 0))->NumberValue());
- CHECK_EQ(3.3, a2->Get(v8::Integer::New(isolate, 1))->NumberValue());
-
- v8::Handle<Value> args3[] = {v8_num(4.4), v8_num(5.5), v8_num(6.6)};
- Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->NewInstance(3, args3));
+ CHECK_EQ(2.2, a2->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(3.3, a2->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ v8::Local<Value> args3[] = {v8_num(4.4), v8_num(5.5), v8_num(6.6)};
+ Local<v8::Array> a3 = Local<v8::Array>::Cast(
+ Foo->NewInstance(context.local(), 3, args3).ToLocalChecked());
CHECK_EQ(3u, a3->Length());
- CHECK_EQ(4.4, a3->Get(v8::Integer::New(isolate, 0))->NumberValue());
- CHECK_EQ(5.5, a3->Get(v8::Integer::New(isolate, 1))->NumberValue());
- CHECK_EQ(6.6, a3->Get(v8::Integer::New(isolate, 2))->NumberValue());
-
- v8::Handle<Value> args4[] = {v8_num(7.7), v8_num(8.8), v8_num(9.9),
- v8_num(10.11)};
- Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->NewInstance(4, args4));
+ CHECK_EQ(4.4, a3->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(5.5, a3->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(6.6, a3->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+
+ v8::Local<Value> args4[] = {v8_num(7.7), v8_num(8.8), v8_num(9.9),
+ v8_num(10.11)};
+ Local<v8::Array> a4 = Local<v8::Array>::Cast(
+ Foo->NewInstance(context.local(), 4, args4).ToLocalChecked());
CHECK_EQ(4u, a4->Length());
- CHECK_EQ(7.7, a4->Get(v8::Integer::New(isolate, 0))->NumberValue());
- CHECK_EQ(8.8, a4->Get(v8::Integer::New(isolate, 1))->NumberValue());
- CHECK_EQ(9.9, a4->Get(v8::Integer::New(isolate, 2))->NumberValue());
- CHECK_EQ(10.11, a4->Get(v8::Integer::New(isolate, 3))->NumberValue());
-}
-
-
-static void CheckUncle(v8::TryCatch* try_catch) {
- CHECK(try_catch->HasCaught());
- String::Utf8Value str_value(try_catch->Exception());
- CHECK_EQ(0, strcmp(*str_value, "uncle?"));
- try_catch->Reset();
+ CHECK_EQ(7.7, a4->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(8.8, a4->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(9.9, a4->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
+ CHECK_EQ(10.11, a4->Get(context.local(), v8::Integer::New(isolate, 3))
+ .ToLocalChecked()
+ ->NumberValue(context.local())
+ .FromJust());
}
@@ -4690,47 +5396,57 @@ THREADED_TEST(ConversionNumber) {
v8::HandleScope scope(isolate);
// Very large number.
CompileRun("var obj = Math.pow(2,32) * 1237;");
- Local<Value> obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(5312874545152.0, obj->ToNumber(isolate)->Value());
- CHECK_EQ(0, obj->ToInt32(isolate)->Value());
+ Local<Value> obj =
+ env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(5312874545152.0,
+ obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(0, obj->ToInt32(env.local()).ToLocalChecked()->Value());
CHECK(0u ==
- obj->ToUint32(isolate)->Value()); // NOLINT - no CHECK_EQ for unsigned.
+ obj->ToUint32(env.local())
+ .ToLocalChecked()
+ ->Value()); // NOLINT - no CHECK_EQ for unsigned.
// Large number.
CompileRun("var obj = -1234567890123;");
- obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(-1234567890123.0, obj->ToNumber(isolate)->Value());
- CHECK_EQ(-1912276171, obj->ToInt32(isolate)->Value());
- CHECK(2382691125u == obj->ToUint32(isolate)->Value()); // NOLINT
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(-1234567890123.0,
+ obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(-1912276171, obj->ToInt32(env.local()).ToLocalChecked()->Value());
+ CHECK(2382691125u ==
+ obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
// Small positive integer.
CompileRun("var obj = 42;");
- obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(42.0, obj->ToNumber(isolate)->Value());
- CHECK_EQ(42, obj->ToInt32(isolate)->Value());
- CHECK(42u == obj->ToUint32(isolate)->Value()); // NOLINT
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(42.0, obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(42, obj->ToInt32(env.local()).ToLocalChecked()->Value());
+ CHECK(42u == obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
// Negative integer.
CompileRun("var obj = -37;");
- obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(-37.0, obj->ToNumber(isolate)->Value());
- CHECK_EQ(-37, obj->ToInt32(isolate)->Value());
- CHECK(4294967259u == obj->ToUint32(isolate)->Value()); // NOLINT
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(-37.0, obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(-37, obj->ToInt32(env.local()).ToLocalChecked()->Value());
+ CHECK(4294967259u ==
+ obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
// Positive non-int32 integer.
CompileRun("var obj = 0x81234567;");
- obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(2166572391.0, obj->ToNumber(isolate)->Value());
- CHECK_EQ(-2128394905, obj->ToInt32(isolate)->Value());
- CHECK(2166572391u == obj->ToUint32(isolate)->Value()); // NOLINT
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(2166572391.0, obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(-2128394905, obj->ToInt32(env.local()).ToLocalChecked()->Value());
+ CHECK(2166572391u ==
+ obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
// Fraction.
CompileRun("var obj = 42.3;");
- obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(42.3, obj->ToNumber(isolate)->Value());
- CHECK_EQ(42, obj->ToInt32(isolate)->Value());
- CHECK(42u == obj->ToUint32(isolate)->Value()); // NOLINT
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(42.3, obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(42, obj->ToInt32(env.local()).ToLocalChecked()->Value());
+ CHECK(42u == obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
// Large negative fraction.
CompileRun("var obj = -5726623061.75;");
- obj = env->Global()->Get(v8_str("obj"));
- CHECK_EQ(-5726623061.75, obj->ToNumber(isolate)->Value());
- CHECK_EQ(-1431655765, obj->ToInt32(isolate)->Value());
- CHECK(2863311531u == obj->ToUint32(isolate)->Value()); // NOLINT
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
+ CHECK_EQ(-5726623061.75,
+ obj->ToNumber(env.local()).ToLocalChecked()->Value());
+ CHECK_EQ(-1431655765, obj->ToInt32(env.local()).ToLocalChecked()->Value());
+ CHECK(2863311531u ==
+ obj->ToUint32(env.local()).ToLocalChecked()->Value()); // NOLINT
}
@@ -4739,52 +5455,61 @@ THREADED_TEST(isNumberType) {
v8::HandleScope scope(env->GetIsolate());
// Very large number.
CompileRun("var obj = Math.pow(2,32) * 1237;");
- Local<Value> obj = env->Global()->Get(v8_str("obj"));
+ Local<Value> obj =
+ env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(!obj->IsUint32());
// Large negative number.
CompileRun("var obj = -1234567890123;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(!obj->IsUint32());
// Small positive integer.
CompileRun("var obj = 42;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(obj->IsInt32());
CHECK(obj->IsUint32());
// Negative integer.
CompileRun("var obj = -37;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(obj->IsInt32());
CHECK(!obj->IsUint32());
// Positive non-int32 integer.
CompileRun("var obj = 0x81234567;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(obj->IsUint32());
// Fraction.
CompileRun("var obj = 42.3;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(!obj->IsUint32());
// Large negative fraction.
CompileRun("var obj = -5726623061.75;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(!obj->IsUint32());
// Positive zero
CompileRun("var obj = 0.0;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(obj->IsInt32());
CHECK(obj->IsUint32());
// Positive zero
CompileRun("var obj = -0.0;");
- obj = env->Global()->Get(v8_str("obj"));
+ obj = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
CHECK(!obj->IsInt32());
CHECK(!obj->IsUint32());
}
+static void CheckUncle(v8::TryCatch* try_catch) {
+ CHECK(try_catch->HasCaught());
+ String::Utf8Value str_value(try_catch->Exception());
+ CHECK_EQ(0, strcmp(*str_value, "uncle?"));
+ try_catch->Reset();
+}
+
+
THREADED_TEST(ConversionException) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4793,49 +5518,40 @@ THREADED_TEST(ConversionException) {
"function TestClass() { };"
"TestClass.prototype.toString = function () { throw 'uncle?'; };"
"var obj = new TestClass();");
- Local<Value> obj = env->Global()->Get(v8_str("obj"));
+ Local<Value> obj =
+ env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
v8::TryCatch try_catch(isolate);
- Local<Value> to_string_result = obj->ToString(isolate);
- CHECK(to_string_result.IsEmpty());
+ CHECK(obj->ToString(env.local()).IsEmpty());
CheckUncle(&try_catch);
- Local<Value> to_number_result = obj->ToNumber(isolate);
- CHECK(to_number_result.IsEmpty());
+ CHECK(obj->ToNumber(env.local()).IsEmpty());
CheckUncle(&try_catch);
- Local<Value> to_integer_result = obj->ToInteger(isolate);
- CHECK(to_integer_result.IsEmpty());
+ CHECK(obj->ToInteger(env.local()).IsEmpty());
CheckUncle(&try_catch);
- Local<Value> to_uint32_result = obj->ToUint32(isolate);
- CHECK(to_uint32_result.IsEmpty());
+ CHECK(obj->ToUint32(env.local()).IsEmpty());
CheckUncle(&try_catch);
- Local<Value> to_int32_result = obj->ToInt32(isolate);
- CHECK(to_int32_result.IsEmpty());
+ CHECK(obj->ToInt32(env.local()).IsEmpty());
CheckUncle(&try_catch);
- Local<Value> to_object_result = v8::Undefined(isolate)->ToObject(isolate);
- CHECK(to_object_result.IsEmpty());
+ CHECK(v8::Undefined(isolate)->ToObject(env.local()).IsEmpty());
CHECK(try_catch.HasCaught());
try_catch.Reset();
- int32_t int32_value = obj->Int32Value();
- CHECK_EQ(0, int32_value);
+ CHECK(obj->Int32Value(env.local()).IsNothing());
CheckUncle(&try_catch);
- uint32_t uint32_value = obj->Uint32Value();
- CHECK_EQ(0u, uint32_value);
+ CHECK(obj->Uint32Value(env.local()).IsNothing());
CheckUncle(&try_catch);
- double number_value = obj->NumberValue();
- CHECK(std::isnan(number_value));
+ CHECK(obj->NumberValue(env.local()).IsNothing());
CheckUncle(&try_catch);
- int64_t integer_value = obj->IntegerValue();
- CHECK_EQ(0, integer_value);
+ CHECK(obj->IntegerValue(env.local()).IsNothing());
CheckUncle(&try_catch);
}
@@ -4853,7 +5569,10 @@ void CCatcher(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
v8::HandleScope scope(args.GetIsolate());
v8::TryCatch try_catch(args.GetIsolate());
- Local<Value> result = CompileRun(args[0]->ToString(args.GetIsolate()));
+ Local<Value> result =
+ CompileRun(args[0]
+ ->ToString(args.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked());
CHECK(!try_catch.HasCaught() || result.IsEmpty());
args.GetReturnValue().Set(try_catch.HasCaught());
}
@@ -4873,8 +5592,10 @@ THREADED_TEST(APICatch) {
"} catch (e) {"
" thrown = true;"
"}");
- Local<Value> thrown = context->Global()->Get(v8_str("thrown"));
- CHECK(thrown->BooleanValue());
+ Local<Value> thrown = context->Global()
+ ->Get(context.local(), v8_str("thrown"))
+ .ToLocalChecked();
+ CHECK(thrown->BooleanValue(context.local()).FromJust());
}
@@ -4916,75 +5637,20 @@ TEST(TryCatchInTryFinally) {
}
-static void check_reference_error_message(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
- const char* reference_error = "Uncaught ReferenceError: asdf is not defined";
- CHECK(message->Get()->Equals(v8_str(reference_error)));
-}
-
-
-static void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
- ApiTestFuzzer::Fuzz();
- CHECK(false);
-}
-
-
-// Test that overwritten methods are not invoked on uncaught exception
-// formatting. However, they are invoked when performing normal error
-// string conversions.
-TEST(APIThrowMessageOverwrittenToString) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::V8::AddMessageListener(check_reference_error_message);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("fail"), v8::FunctionTemplate::New(isolate, Fail));
- LocalContext context(NULL, templ);
- CompileRun("asdf;");
- CompileRun(
- "var limit = {};"
- "limit.valueOf = fail;"
- "Error.stackTraceLimit = limit;");
- CompileRun("asdf");
- CompileRun("Array.prototype.pop = fail;");
- CompileRun("Object.prototype.hasOwnProperty = fail;");
- CompileRun("Object.prototype.toString = function f() { return 'Yikes'; }");
- CompileRun("Number.prototype.toString = function f() { return 'Yikes'; }");
- CompileRun("String.prototype.toString = function f() { return 'Yikes'; }");
- CompileRun(
- "ReferenceError.prototype.toString ="
- " function() { return 'Whoops' }");
- CompileRun("asdf;");
- CompileRun("ReferenceError.prototype.constructor.name = void 0;");
- CompileRun("asdf;");
- CompileRun("ReferenceError.prototype.constructor = void 0;");
- CompileRun("asdf;");
- CompileRun("ReferenceError.prototype.__proto__ = new Object();");
- CompileRun("asdf;");
- CompileRun("ReferenceError.prototype = new Object();");
- CompileRun("asdf;");
- v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
- CHECK(string->Equals(v8_str("Whoops")));
- CompileRun(
- "ReferenceError.prototype.constructor = new Object();"
- "ReferenceError.prototype.constructor.name = 1;"
- "Number.prototype.toString = function() { return 'Whoops'; };"
- "ReferenceError.prototype.toString = Object.prototype.toString;");
- CompileRun("asdf;");
- v8::V8::RemoveMessageListeners(check_reference_error_message);
-}
-
-
-static void check_custom_error_tostring(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void check_custom_error_tostring(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
const char* uncaught_error = "Uncaught MyError toString";
- CHECK(message->Get()->Equals(v8_str(uncaught_error)));
+ CHECK(message->Get()
+ ->Equals(CcTest::isolate()->GetCurrentContext(),
+ v8_str(uncaught_error))
+ .FromJust());
}
TEST(CustomErrorToString) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::V8::AddMessageListener(check_custom_error_tostring);
+ context->GetIsolate()->AddMessageListener(check_custom_error_tostring);
CompileRun(
"function MyError(name, message) { "
" this.name = name; "
@@ -4995,22 +5661,25 @@ TEST(CustomErrorToString) {
" return 'MyError toString'; "
"}; "
"throw new MyError('my name', 'my message'); ");
- v8::V8::RemoveMessageListeners(check_custom_error_tostring);
+ context->GetIsolate()->RemoveMessageListeners(check_custom_error_tostring);
}
-static void check_custom_error_message(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void check_custom_error_message(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
const char* uncaught_error = "Uncaught MyError: my message";
printf("%s\n", *v8::String::Utf8Value(message->Get()));
- CHECK(message->Get()->Equals(v8_str(uncaught_error)));
+ CHECK(message->Get()
+ ->Equals(CcTest::isolate()->GetCurrentContext(),
+ v8_str(uncaught_error))
+ .FromJust());
}
TEST(CustomErrorMessage) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::V8::AddMessageListener(check_custom_error_message);
+ context->GetIsolate()->AddMessageListener(check_custom_error_message);
// Handlebars.
CompileRun(
@@ -5046,32 +5715,36 @@ TEST(CustomErrorMessage) {
"MyError.prototype = Object.create(Error.prototype); "
"throw new MyError('my message'); ");
- v8::V8::RemoveMessageListeners(check_custom_error_message);
+ context->GetIsolate()->RemoveMessageListeners(check_custom_error_message);
}
-static void check_custom_rethrowing_message(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void check_custom_rethrowing_message(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
const char* uncaught_error = "Uncaught exception";
- CHECK(message->Get()->Equals(v8_str(uncaught_error)));
+ CHECK(message->Get()
+ ->Equals(CcTest::isolate()->GetCurrentContext(),
+ v8_str(uncaught_error))
+ .FromJust());
}
TEST(CustomErrorRethrowsOnToString) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::V8::AddMessageListener(check_custom_rethrowing_message);
+ context->GetIsolate()->AddMessageListener(check_custom_rethrowing_message);
CompileRun(
"var e = { toString: function() { throw e; } };"
"try { throw e; } finally {}");
- v8::V8::RemoveMessageListeners(check_custom_rethrowing_message);
+ context->GetIsolate()->RemoveMessageListeners(
+ check_custom_rethrowing_message);
}
-static void receive_message(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void receive_message(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
message->Get();
message_received = true;
}
@@ -5081,14 +5754,14 @@ TEST(APIThrowMessage) {
message_received = false;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::V8::AddMessageListener(receive_message);
+ isolate->AddMessageListener(receive_message);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
LocalContext context(0, templ);
CompileRun("ThrowFromC();");
CHECK(message_received);
- v8::V8::RemoveMessageListeners(receive_message);
+ isolate->RemoveMessageListeners(receive_message);
}
@@ -5096,7 +5769,7 @@ TEST(APIThrowMessageAndVerboseTryCatch) {
message_received = false;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::V8::AddMessageListener(receive_message);
+ isolate->AddMessageListener(receive_message);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("ThrowFromC"),
v8::FunctionTemplate::New(isolate, ThrowFromC));
@@ -5107,7 +5780,7 @@ TEST(APIThrowMessageAndVerboseTryCatch) {
CHECK(try_catch.HasCaught());
CHECK(result.IsEmpty());
CHECK(message_received);
- v8::V8::RemoveMessageListeners(receive_message);
+ isolate->RemoveMessageListeners(receive_message);
}
@@ -5115,14 +5788,14 @@ TEST(APIStackOverflowAndVerboseTryCatch) {
message_received = false;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::V8::AddMessageListener(receive_message);
+ context->GetIsolate()->AddMessageListener(receive_message);
v8::TryCatch try_catch(context->GetIsolate());
try_catch.SetVerbose(true);
Local<Value> result = CompileRun("function foo() { foo(); } foo();");
CHECK(try_catch.HasCaught());
CHECK(result.IsEmpty());
CHECK(message_received);
- v8::V8::RemoveMessageListeners(receive_message);
+ context->GetIsolate()->RemoveMessageListeners(receive_message);
}
@@ -5146,19 +5819,23 @@ THREADED_TEST(ExternalScriptException) {
void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(4, args.Length());
- int count = args[0]->Int32Value();
- int cInterval = args[2]->Int32Value();
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ int count = args[0]->Int32Value(context).FromJust();
+ int cInterval = args[2]->Int32Value(context).FromJust();
if (count == 0) {
args.GetIsolate()->ThrowException(v8_str("FromC"));
return;
} else {
- Local<v8::Object> global = args.GetIsolate()->GetCurrentContext()->Global();
- Local<Value> fun = global->Get(v8_str("JSThrowCountDown"));
- v8::Handle<Value> argv[] = {v8_num(count - 1), args[1], args[2], args[3]};
+ Local<v8::Object> global = context->Global();
+ Local<Value> fun =
+ global->Get(context, v8_str("JSThrowCountDown")).ToLocalChecked();
+ v8::Local<Value> argv[] = {v8_num(count - 1), args[1], args[2], args[3]};
if (count % cInterval == 0) {
v8::TryCatch try_catch(args.GetIsolate());
- Local<Value> result = fun.As<Function>()->Call(global, 4, argv);
- int expected = args[3]->Int32Value();
+ Local<Value> result = fun.As<Function>()
+ ->Call(context, global, 4, argv)
+ .FromMaybe(Local<Value>());
+ int expected = args[3]->Int32Value(context).FromJust();
if (try_catch.HasCaught()) {
CHECK_EQ(expected, count);
CHECK(result.IsEmpty());
@@ -5169,7 +5846,9 @@ void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(result);
return;
} else {
- args.GetReturnValue().Set(fun.As<Function>()->Call(global, 4, argv));
+ args.GetReturnValue().Set(fun.As<Function>()
+ ->Call(context, global, 4, argv)
+ .FromMaybe(v8::Local<v8::Value>()));
return;
}
}
@@ -5179,9 +5858,10 @@ void CThrowCountDown(const v8::FunctionCallbackInfo<v8::Value>& args) {
void JSCheck(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
CHECK_EQ(3, args.Length());
- bool equality = args[0]->BooleanValue();
- int count = args[1]->Int32Value();
- int expected = args[2]->Int32Value();
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ bool equality = args[0]->BooleanValue(context).FromJust();
+ int count = args[1]->Int32Value(context).FromJust();
+ int expected = args[2]->Int32Value(context).FromJust();
if (equality) {
CHECK_EQ(count, expected);
} else {
@@ -5252,35 +5932,37 @@ TEST(ExceptionOrder) {
" return CThrowCountDown(count - 1, jsInterval, cInterval, expected);"
" }"
"}");
- Local<Function> fun =
- Local<Function>::Cast(context->Global()->Get(v8_str("JSThrowCountDown")));
+ Local<Function> fun = Local<Function>::Cast(
+ context->Global()
+ ->Get(context.local(), v8_str("JSThrowCountDown"))
+ .ToLocalChecked());
const int argc = 4;
// count jsInterval cInterval expected
// *JS[4] *C[3] @JS[2] C[1] JS[0]
- v8::Handle<Value> a0[argc] = {v8_num(4), v8_num(2), v8_num(3), v8_num(2)};
- fun->Call(fun, argc, a0);
+ v8::Local<Value> a0[argc] = {v8_num(4), v8_num(2), v8_num(3), v8_num(2)};
+ fun->Call(context.local(), fun, argc, a0).ToLocalChecked();
// JS[5] *C[4] JS[3] @C[2] JS[1] C[0]
- v8::Handle<Value> a1[argc] = {v8_num(5), v8_num(6), v8_num(1), v8_num(2)};
- fun->Call(fun, argc, a1);
+ v8::Local<Value> a1[argc] = {v8_num(5), v8_num(6), v8_num(1), v8_num(2)};
+ fun->Call(context.local(), fun, argc, a1).ToLocalChecked();
// JS[6] @C[5] JS[4] C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a2[argc] = {v8_num(6), v8_num(7), v8_num(5), v8_num(5)};
- fun->Call(fun, argc, a2);
+ v8::Local<Value> a2[argc] = {v8_num(6), v8_num(7), v8_num(5), v8_num(5)};
+ fun->Call(context.local(), fun, argc, a2).ToLocalChecked();
// @JS[6] C[5] JS[4] C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a3[argc] = {v8_num(6), v8_num(6), v8_num(7), v8_num(6)};
- fun->Call(fun, argc, a3);
+ v8::Local<Value> a3[argc] = {v8_num(6), v8_num(6), v8_num(7), v8_num(6)};
+ fun->Call(context.local(), fun, argc, a3).ToLocalChecked();
// JS[6] *C[5] @JS[4] C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a4[argc] = {v8_num(6), v8_num(4), v8_num(5), v8_num(4)};
- fun->Call(fun, argc, a4);
+ v8::Local<Value> a4[argc] = {v8_num(6), v8_num(4), v8_num(5), v8_num(4)};
+ fun->Call(context.local(), fun, argc, a4).ToLocalChecked();
// JS[6] C[5] *JS[4] @C[3] JS[2] C[1] JS[0]
- v8::Handle<Value> a5[argc] = {v8_num(6), v8_num(4), v8_num(3), v8_num(3)};
- fun->Call(fun, argc, a5);
+ v8::Local<Value> a5[argc] = {v8_num(6), v8_num(4), v8_num(3), v8_num(3)};
+ fun->Call(context.local(), fun, argc, a5).ToLocalChecked();
}
@@ -5297,24 +5979,40 @@ THREADED_TEST(ThrowValues) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(isolate, ThrowValue));
LocalContext context(0, templ);
- v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
- "function Run(obj) {"
- " try {"
- " Throw(obj);"
- " } catch (e) {"
- " return e;"
- " }"
- " return 'no exception';"
- "}"
- "[Run('str'), Run(1), Run(0), Run(null), Run(void 0)];"));
+ v8::Local<v8::Array> result = v8::Local<v8::Array>::Cast(
+ CompileRun("function Run(obj) {"
+ " try {"
+ " Throw(obj);"
+ " } catch (e) {"
+ " return e;"
+ " }"
+ " return 'no exception';"
+ "}"
+ "[Run('str'), Run(1), Run(0), Run(null), Run(void 0)];"));
CHECK_EQ(5u, result->Length());
- CHECK(result->Get(v8::Integer::New(isolate, 0))->IsString());
- CHECK(result->Get(v8::Integer::New(isolate, 1))->IsNumber());
- CHECK_EQ(1, result->Get(v8::Integer::New(isolate, 1))->Int32Value());
- CHECK(result->Get(v8::Integer::New(isolate, 2))->IsNumber());
- CHECK_EQ(0, result->Get(v8::Integer::New(isolate, 2))->Int32Value());
- CHECK(result->Get(v8::Integer::New(isolate, 3))->IsNull());
- CHECK(result->Get(v8::Integer::New(isolate, 4))->IsUndefined());
+ CHECK(result->Get(context.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked()
+ ->IsString());
+ CHECK(result->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->IsNumber());
+ CHECK_EQ(1, result->Get(context.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(result->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked()
+ ->IsNumber());
+ CHECK_EQ(0, result->Get(context.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(result->Get(context.local(), v8::Integer::New(isolate, 3))
+ .ToLocalChecked()
+ ->IsNull());
+ CHECK(result->Get(context.local(), v8::Integer::New(isolate, 4))
+ .ToLocalChecked()
+ ->IsUndefined());
}
@@ -5325,12 +6023,12 @@ THREADED_TEST(CatchZero) {
CHECK(!try_catch.HasCaught());
CompileRun("throw 10");
CHECK(try_catch.HasCaught());
- CHECK_EQ(10, try_catch.Exception()->Int32Value());
+ CHECK_EQ(10, try_catch.Exception()->Int32Value(context.local()).FromJust());
try_catch.Reset();
CHECK(!try_catch.HasCaught());
CompileRun("throw 0");
CHECK(try_catch.HasCaught());
- CHECK_EQ(0, try_catch.Exception()->Int32Value());
+ CHECK_EQ(0, try_catch.Exception()->Int32Value(context.local()).FromJust());
}
@@ -5364,9 +6062,12 @@ THREADED_TEST(TryCatchAndFinally) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- context->Global()->Set(
- v8_str("native_with_try_catch"),
- v8::FunctionTemplate::New(isolate, WithTryCatch)->GetFunction());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("native_with_try_catch"),
+ v8::FunctionTemplate::New(isolate, WithTryCatch)
+ ->GetFunction(context.local())
+ .ToLocalChecked())
+ .FromJust());
v8::TryCatch try_catch(isolate);
CHECK(!try_catch.HasCaught());
CompileRun(
@@ -5430,13 +6131,15 @@ TEST(TryCatchNested) {
void TryCatchMixedNestingCheck(v8::TryCatch* try_catch) {
CHECK(try_catch->HasCaught());
- Handle<Message> message = try_catch->Message();
- Handle<Value> resource = message->GetScriptOrigin().ResourceName();
+ Local<Message> message = try_catch->Message();
+ Local<Value> resource = message->GetScriptOrigin().ResourceName();
CHECK_EQ(0, strcmp(*v8::String::Utf8Value(resource), "inner"));
CHECK_EQ(0,
strcmp(*v8::String::Utf8Value(message->Get()), "Uncaught Error: a"));
- CHECK_EQ(1, message->GetLineNumber());
- CHECK_EQ(0, message->GetStartColumn());
+ CHECK_EQ(1, message->GetLineNumber(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
+ CHECK_EQ(0, message->GetStartColumn(CcTest::isolate()->GetCurrentContext())
+ .FromJust());
}
@@ -5522,14 +6225,14 @@ THREADED_TEST(Equality) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(context->GetIsolate());
// Check that equality works at all before relying on CHECK_EQ
- CHECK(v8_str("a")->Equals(v8_str("a")));
- CHECK(!v8_str("a")->Equals(v8_str("b")));
+ CHECK(v8_str("a")->Equals(context.local(), v8_str("a")).FromJust());
+ CHECK(!v8_str("a")->Equals(context.local(), v8_str("b")).FromJust());
- CHECK(v8_str("a")->Equals(v8_str("a")));
- CHECK(!v8_str("a")->Equals(v8_str("b")));
- CHECK(v8_num(1)->Equals(v8_num(1)));
- CHECK(v8_num(1.00)->Equals(v8_num(1)));
- CHECK(!v8_num(1)->Equals(v8_num(2)));
+ CHECK(v8_str("a")->Equals(context.local(), v8_str("a")).FromJust());
+ CHECK(!v8_str("a")->Equals(context.local(), v8_str("b")).FromJust());
+ CHECK(v8_num(1)->Equals(context.local(), v8_num(1)).FromJust());
+ CHECK(v8_num(1.00)->Equals(context.local(), v8_num(1)).FromJust());
+ CHECK(!v8_num(1)->Equals(context.local(), v8_num(2)).FromJust());
// Assume String is not internalized.
CHECK(v8_str("a")->StrictEquals(v8_str("a")));
@@ -5543,7 +6246,7 @@ THREADED_TEST(Equality) {
CHECK(v8::False(isolate)->StrictEquals(v8::False(isolate)));
CHECK(!v8::False(isolate)->StrictEquals(v8::Undefined(isolate)));
- v8::Handle<v8::Object> obj = v8::Object::New(isolate);
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
v8::Persistent<v8::Object> alias(isolate, obj);
CHECK(v8::Local<v8::Object>::New(isolate, alias)->StrictEquals(obj));
alias.Reset();
@@ -5564,15 +6267,20 @@ THREADED_TEST(MultiRun) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Local<Script> script = v8_compile("x");
- for (int i = 0; i < 10; i++) script->Run();
+ for (int i = 0; i < 10; i++) {
+ script->Run(context.local()).IsEmpty();
+ }
}
-static void GetXValue(Local<String> name,
+static void GetXValue(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(info.Data()->Equals(v8_str("donut")));
- CHECK(name->Equals(v8_str("x")));
+ CHECK(info.Data()
+ ->Equals(CcTest::isolate()->GetCurrentContext(), v8_str("donut"))
+ .FromJust());
+ CHECK(name->Equals(CcTest::isolate()->GetCurrentContext(), v8_str("x"))
+ .FromJust());
info.GetReturnValue().Set(name);
}
@@ -5583,11 +6291,14 @@ THREADED_TEST(SimplePropertyRead) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
Local<Script> script = v8_compile("obj.x");
for (int i = 0; i < 10; i++) {
- Local<Value> result = script->Run();
- CHECK(result->Equals(v8_str("x")));
+ Local<Value> result = script->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_str("x")).FromJust());
}
}
@@ -5598,15 +6309,18 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
// Uses getOwnPropertyDescriptor to check the configurable status
Local<Script> script_desc = v8_compile(
"var prop = Object.getOwnPropertyDescriptor( "
"obj, 'x');"
"prop.configurable;");
- Local<Value> result = script_desc->Run();
- CHECK_EQ(result->BooleanValue(), true);
+ Local<Value> result = script_desc->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(result->BooleanValue(context.local()).FromJust(), true);
// Redefine get - but still configurable
Local<Script> script_define = v8_compile(
@@ -5614,12 +6328,12 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
" configurable: true };"
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
- result = script_define->Run();
- CHECK(result->Equals(v8_num(42)));
+ result = script_define->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_num(42)).FromJust());
// Check that the accessor is still configurable
- result = script_desc->Run();
- CHECK_EQ(result->BooleanValue(), true);
+ result = script_desc->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(result->BooleanValue(context.local()).FromJust(), true);
// Redefine to a non-configurable
script_define = v8_compile(
@@ -5627,14 +6341,14 @@ THREADED_TEST(DefinePropertyOnAPIAccessor) {
" configurable: false };"
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
- result = script_define->Run();
- CHECK(result->Equals(v8_num(43)));
- result = script_desc->Run();
- CHECK_EQ(result->BooleanValue(), false);
+ result = script_define->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_num(43)).FromJust());
+ result = script_desc->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(result->BooleanValue(context.local()).FromJust(), false);
// Make sure that it is not possible to redefine again
v8::TryCatch try_catch(isolate);
- result = script_define->Run();
+ CHECK(script_define->Run(context.local()).IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(0,
@@ -5648,42 +6362,43 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
Local<Script> script_desc = v8_compile(
"var prop ="
"Object.getOwnPropertyDescriptor( "
"obj, 'x');"
"prop.configurable;");
- Local<Value> result = script_desc->Run();
- CHECK_EQ(result->BooleanValue(), true);
+ Local<Value> result = script_desc->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(result->BooleanValue(context.local()).FromJust(), true);
Local<Script> script_define = v8_compile(
"var desc = {get: function(){return 42; },"
" configurable: true };"
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
- result = script_define->Run();
- CHECK(result->Equals(v8_num(42)));
-
-
- result = script_desc->Run();
- CHECK_EQ(result->BooleanValue(), true);
+ result = script_define->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_num(42)).FromJust());
+ result = script_desc->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(result->BooleanValue(context.local()).FromJust(), true);
script_define = v8_compile(
"var desc = {get: function(){return 43; },"
" configurable: false };"
"Object.defineProperty(obj, 'x', desc);"
"obj.x");
- result = script_define->Run();
- CHECK(result->Equals(v8_num(43)));
- result = script_desc->Run();
+ result = script_define->Run(context.local()).ToLocalChecked();
+ CHECK(result->Equals(context.local(), v8_num(43)).FromJust());
- CHECK_EQ(result->BooleanValue(), false);
+ result = script_desc->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(result->BooleanValue(context.local()).FromJust(), false);
v8::TryCatch try_catch(isolate);
- result = script_define->Run();
+ CHECK(script_define->Run(context.local()).IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value(try_catch.Exception());
CHECK_EQ(0,
@@ -5691,9 +6406,13 @@ THREADED_TEST(DefinePropertyOnDefineGetterSetter) {
}
-static v8::Handle<v8::Object> GetGlobalProperty(LocalContext* context,
- char const* name) {
- return v8::Handle<v8::Object>::Cast((*context)->Global()->Get(v8_str(name)));
+static v8::Local<v8::Object> GetGlobalProperty(LocalContext* context,
+ char const* name) {
+ return v8::Local<v8::Object>::Cast(
+ (*context)
+ ->Global()
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(name))
+ .ToLocalChecked());
}
@@ -5703,20 +6422,27 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
- context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj1"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj2 = {};");
CHECK(CompileRun("obj1.x")->IsUndefined());
CHECK(CompileRun("obj2.x")->IsUndefined());
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .FromJust());
ExpectString("obj1.x", "x");
CHECK(CompileRun("obj2.x")->IsUndefined());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .FromJust());
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
@@ -5742,9 +6468,13 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .FromJust());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .FromJust());
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
@@ -5759,17 +6489,20 @@ THREADED_TEST(DefineAPIAccessorOnObject) {
CompileRun(
"Object.defineProperty(obj2, 'x',"
"{ get: function() { return 'z'; }, configurable: false })");
-
ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
- CHECK(!GetGlobalProperty(&context, "obj1")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
- CHECK(!GetGlobalProperty(&context, "obj2")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .IsNothing());
+ CHECK(GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .IsNothing());
ExpectString("obj1.x", "z");
ExpectString("obj2.x", "z");
@@ -5782,15 +6515,20 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
- context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj1"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj2 = {};");
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"),
- v8::DEFAULT, v8::DontDelete));
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"), v8::DEFAULT, v8::DontDelete)
+ .FromJust());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"),
- v8::DEFAULT, v8::DontDelete));
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"), v8::DEFAULT, v8::DontDelete)
+ .FromJust());
ExpectString("obj1.x", "x");
ExpectString("obj2.x", "x");
@@ -5798,10 +6536,14 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
- CHECK(!GetGlobalProperty(&context, "obj1")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
- CHECK(!GetGlobalProperty(&context, "obj2")
- ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+ CHECK(GetGlobalProperty(&context, "obj1")
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .IsNothing());
+ CHECK(GetGlobalProperty(&context, "obj2")
+ ->SetAccessor(context.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("donut"))
+ .IsNothing());
{
v8::TryCatch try_catch(isolate);
@@ -5826,11 +6568,14 @@ THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
}
-static void Get239Value(Local<String> name,
+static void Get239Value(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CHECK(info.Data()->Equals(v8_str("donut")));
- CHECK(name->Equals(v8_str("239")));
+ CHECK(info.Data()
+ ->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("donut"))
+ .FromJust());
+ CHECK(name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("239"))
+ .FromJust());
info.GetReturnValue().Set(name);
}
@@ -5841,13 +6586,20 @@ THREADED_TEST(ElementAPIAccessor) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
- context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj1"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj2 = {};");
CHECK(GetGlobalProperty(&context, "obj1")
- ->SetAccessor(v8_str("239"), Get239Value, NULL, v8_str("donut")));
+ ->SetAccessor(context.local(), v8_str("239"), Get239Value, NULL,
+ v8_str("donut"))
+ .FromJust());
CHECK(GetGlobalProperty(&context, "obj2")
- ->SetAccessor(v8_str("239"), Get239Value, NULL, v8_str("donut")));
+ ->SetAccessor(context.local(), v8_str("239"), Get239Value, NULL,
+ v8_str("donut"))
+ .FromJust());
ExpectString("obj1[239]", "239");
ExpectString("obj2[239]", "239");
@@ -5859,11 +6611,12 @@ THREADED_TEST(ElementAPIAccessor) {
v8::Persistent<Value> xValue;
-static void SetXValue(Local<String> name, Local<Value> value,
+static void SetXValue(Local<Name> name, Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- CHECK(value->Equals(v8_num(4)));
- CHECK(info.Data()->Equals(v8_str("donut")));
- CHECK(name->Equals(v8_str("x")));
+ Local<Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(value->Equals(context, v8_num(4)).FromJust());
+ CHECK(info.Data()->Equals(context, v8_str("donut")).FromJust());
+ CHECK(name->Equals(context, v8_str("x")).FromJust());
CHECK(xValue.IsEmpty());
xValue.Reset(info.GetIsolate(), value);
}
@@ -5875,12 +6628,18 @@ THREADED_TEST(SimplePropertyWrite) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetXValue, SetXValue, v8_str("donut"));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
Local<Script> script = v8_compile("obj.x = 4");
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
- script->Run();
- CHECK(v8_num(4)->Equals(Local<Value>::New(CcTest::isolate(), xValue)));
+ script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(4)
+ ->Equals(context.local(),
+ Local<Value>::New(CcTest::isolate(), xValue))
+ .FromJust());
xValue.Reset();
}
}
@@ -5892,12 +6651,18 @@ THREADED_TEST(SetterOnly) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), NULL, SetXValue, v8_str("donut"));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
Local<Script> script = v8_compile("obj.x = 4; obj.x");
for (int i = 0; i < 10; i++) {
CHECK(xValue.IsEmpty());
- script->Run();
- CHECK(v8_num(4)->Equals(Local<Value>::New(CcTest::isolate(), xValue)));
+ script->Run(context.local()).ToLocalChecked();
+ CHECK(v8_num(4)
+ ->Equals(context.local(),
+ Local<Value>::New(CcTest::isolate(), xValue))
+ .FromJust());
xValue.Reset();
}
}
@@ -5910,10 +6675,13 @@ THREADED_TEST(NoAccessors) {
templ->SetAccessor(v8_str("x"), static_cast<v8::AccessorGetterCallback>(NULL),
NULL, v8_str("donut"));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
Local<Script> script = v8_compile("obj.x = 4; obj.x");
for (int i = 0; i < 10; i++) {
- script->Run();
+ script->Run(context.local()).ToLocalChecked();
}
}
@@ -5921,7 +6689,7 @@ THREADED_TEST(NoAccessors) {
THREADED_TEST(MultiContexts) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("dummy"),
v8::FunctionTemplate::New(isolate, DummyCallHandler));
@@ -5930,26 +6698,43 @@ THREADED_TEST(MultiContexts) {
// Create an environment
LocalContext context0(0, templ);
context0->SetSecurityToken(password);
- v8::Handle<v8::Object> global0 = context0->Global();
- global0->Set(v8_str("custom"), v8_num(1234));
- CHECK_EQ(1234, global0->Get(v8_str("custom"))->Int32Value());
+ v8::Local<v8::Object> global0 = context0->Global();
+ CHECK(global0->Set(context0.local(), v8_str("custom"), v8_num(1234))
+ .FromJust());
+ CHECK_EQ(1234, global0->Get(context0.local(), v8_str("custom"))
+ .ToLocalChecked()
+ ->Int32Value(context0.local())
+ .FromJust());
// Create an independent environment
LocalContext context1(0, templ);
context1->SetSecurityToken(password);
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("custom"), v8_num(1234));
- CHECK(!global0->Equals(global1));
- CHECK_EQ(1234, global0->Get(v8_str("custom"))->Int32Value());
- CHECK_EQ(1234, global1->Get(v8_str("custom"))->Int32Value());
+ v8::Local<v8::Object> global1 = context1->Global();
+ CHECK(global1->Set(context1.local(), v8_str("custom"), v8_num(1234))
+ .FromJust());
+ CHECK(!global0->Equals(context1.local(), global1).FromJust());
+ CHECK_EQ(1234, global0->Get(context1.local(), v8_str("custom"))
+ .ToLocalChecked()
+ ->Int32Value(context0.local())
+ .FromJust());
+ CHECK_EQ(1234, global1->Get(context1.local(), v8_str("custom"))
+ .ToLocalChecked()
+ ->Int32Value(context1.local())
+ .FromJust());
// Now create a new context with the old global
LocalContext context2(0, templ, global1);
context2->SetSecurityToken(password);
- v8::Handle<v8::Object> global2 = context2->Global();
- CHECK(global1->Equals(global2));
- CHECK_EQ(0, global1->Get(v8_str("custom"))->Int32Value());
- CHECK_EQ(0, global2->Get(v8_str("custom"))->Int32Value());
+ v8::Local<v8::Object> global2 = context2->Global();
+ CHECK(global1->Equals(context2.local(), global2).FromJust());
+ CHECK_EQ(0, global1->Get(context2.local(), v8_str("custom"))
+ .ToLocalChecked()
+ ->Int32Value(context1.local())
+ .FromJust());
+ CHECK_EQ(0, global2->Get(context2.local(), v8_str("custom"))
+ .ToLocalChecked()
+ ->Int32Value(context2.local())
+ .FromJust());
}
@@ -5960,24 +6745,34 @@ THREADED_TEST(FunctionPrototypeAcrossContexts) {
v8::HandleScope scope(CcTest::isolate());
LocalContext env0;
- v8::Handle<v8::Object> global0 = env0->Global();
- v8::Handle<v8::Object> object0 =
- global0->Get(v8_str("Object")).As<v8::Object>();
- v8::Handle<v8::Object> tostring0 =
- object0->Get(v8_str("toString")).As<v8::Object>();
- v8::Handle<v8::Object> proto0 =
- tostring0->Get(v8_str("__proto__")).As<v8::Object>();
- proto0->Set(v8_str("custom"), v8_num(1234));
+ v8::Local<v8::Object> global0 = env0->Global();
+ v8::Local<v8::Object> object0 = global0->Get(env0.local(), v8_str("Object"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ v8::Local<v8::Object> tostring0 =
+ object0->Get(env0.local(), v8_str("toString"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ v8::Local<v8::Object> proto0 =
+ tostring0->Get(env0.local(), v8_str("__proto__"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ CHECK(proto0->Set(env0.local(), v8_str("custom"), v8_num(1234)).FromJust());
LocalContext env1;
- v8::Handle<v8::Object> global1 = env1->Global();
- v8::Handle<v8::Object> object1 =
- global1->Get(v8_str("Object")).As<v8::Object>();
- v8::Handle<v8::Object> tostring1 =
- object1->Get(v8_str("toString")).As<v8::Object>();
- v8::Handle<v8::Object> proto1 =
- tostring1->Get(v8_str("__proto__")).As<v8::Object>();
- CHECK(!proto1->Has(v8_str("custom")));
+ v8::Local<v8::Object> global1 = env1->Global();
+ v8::Local<v8::Object> object1 = global1->Get(env1.local(), v8_str("Object"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ v8::Local<v8::Object> tostring1 =
+ object1->Get(env1.local(), v8_str("toString"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ v8::Local<v8::Object> proto1 =
+ tostring1->Get(env1.local(), v8_str("__proto__"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ CHECK(!proto1->Has(env1.local(), v8_str("custom")).FromJust());
}
@@ -5997,11 +6792,17 @@ THREADED_TEST(Regress892105) {
LocalContext env0;
Local<Script> script0 = v8_compile(source);
- CHECK_EQ(8901.0, script0->Run()->NumberValue());
+ CHECK_EQ(8901.0, script0->Run(env0.local())
+ .ToLocalChecked()
+ ->NumberValue(env0.local())
+ .FromJust());
LocalContext env1;
Local<Script> script1 = v8_compile(source);
- CHECK_EQ(8901.0, script1->Run()->NumberValue());
+ CHECK_EQ(8901.0, script1->Run(env1.local())
+ .ToLocalChecked()
+ ->NumberValue(env1.local())
+ .FromJust());
}
@@ -6013,8 +6814,12 @@ THREADED_TEST(UndetectableObject) {
v8::FunctionTemplate::New(env->GetIsolate());
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
- Local<v8::Object> obj = desc->GetFunction()->NewInstance();
- env->Global()->Set(v8_str("undetectable"), obj);
+ Local<v8::Object> obj = desc->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ CHECK(
+ env->Global()->Set(env.local(), v8_str("undetectable"), obj).FromJust());
ExpectString("undetectable.toString()", "[object Object]");
ExpectString("typeof undetectable", "undefined");
@@ -6057,8 +6862,12 @@ THREADED_TEST(VoidLiteral) {
Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
- Local<v8::Object> obj = desc->GetFunction()->NewInstance();
- env->Global()->Set(v8_str("undetectable"), obj);
+ Local<v8::Object> obj = desc->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ CHECK(
+ env->Global()->Set(env.local(), v8_str("undetectable"), obj).FromJust());
ExpectBoolean("undefined == void 0", true);
ExpectBoolean("undetectable == void 0", true);
@@ -6103,8 +6912,12 @@ THREADED_TEST(ExtensibleOnUndetectable) {
Local<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
desc->InstanceTemplate()->MarkAsUndetectable(); // undetectable
- Local<v8::Object> obj = desc->GetFunction()->NewInstance();
- env->Global()->Set(v8_str("undetectable"), obj);
+ Local<v8::Object> obj = desc->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ CHECK(
+ env->Global()->Set(env.local(), v8_str("undetectable"), obj).FromJust());
Local<String> source = v8_str(
"undetectable.x = 42;"
@@ -6112,18 +6925,20 @@ THREADED_TEST(ExtensibleOnUndetectable) {
Local<Script> script = v8_compile(source);
- CHECK(v8::Integer::New(isolate, 42)->Equals(script->Run()));
+ CHECK(v8::Integer::New(isolate, 42)
+ ->Equals(env.local(), script->Run(env.local()).ToLocalChecked())
+ .FromJust());
ExpectBoolean("Object.isExtensible(undetectable)", true);
source = v8_str("Object.preventExtensions(undetectable);");
script = v8_compile(source);
- script->Run();
+ script->Run(env.local()).ToLocalChecked();
ExpectBoolean("Object.isExtensible(undetectable)", false);
source = v8_str("undetectable.y = 2000;");
script = v8_compile(source);
- script->Run();
+ script->Run(env.local()).ToLocalChecked();
ExpectBoolean("undetectable.y == undefined", true);
}
@@ -6175,10 +6990,11 @@ TEST(SimpleExtensions) {
v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
const char* extension_names[] = {"simpletest"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("Foo()");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 4)));
+ v8::Local<Value> result = CompileRun("Foo()");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 4))
+ .FromJust());
}
@@ -6197,15 +7013,15 @@ TEST(StackTraceInExtension) {
new Extension("stacktracetest", kStackTraceFromExtensionSource));
const char* extension_names[] = {"stacktracetest"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
CompileRun(
"function user() { bar(); }"
"var error;"
"try{ user(); } catch (e) { error = e; }");
- CHECK_EQ(-1, CompileRun("error.stack.indexOf('foo')")->Int32Value());
- CHECK_EQ(-1, CompileRun("error.stack.indexOf('bar')")->Int32Value());
- CHECK_NE(-1, CompileRun("error.stack.indexOf('user')")->Int32Value());
+ CHECK_EQ(-1, v8_run_int32value(v8_compile("error.stack.indexOf('foo')")));
+ CHECK_EQ(-1, v8_run_int32value(v8_compile("error.stack.indexOf('bar')")));
+ CHECK_NE(-1, v8_run_int32value(v8_compile("error.stack.indexOf('user')")));
}
@@ -6214,10 +7030,11 @@ TEST(NullExtensions) {
v8::RegisterExtension(new Extension("nulltest", NULL));
const char* extension_names[] = {"nulltest"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("1+3");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 4)));
+ v8::Local<Value> result = CompileRun("1+3");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 4))
+ .FromJust());
}
@@ -6233,7 +7050,7 @@ TEST(ExtensionMissingSourceLength) {
new Extension("srclentest_fail", kEmbeddedExtensionSource));
const char* extension_names[] = {"srclentest_fail"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(0 == *context);
}
@@ -6248,11 +7065,13 @@ TEST(ExtensionWithSourceLength) {
extension_name.start(), kEmbeddedExtensionSource, 0, 0, source_len));
const char* extension_names[1] = {extension_name.start()};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("Ret54321()");
- CHECK(v8::Integer::New(CcTest::isolate(), 54321)->Equals(result));
+ v8::Local<Value> result = CompileRun("Ret54321()");
+ CHECK(v8::Integer::New(CcTest::isolate(), 54321)
+ ->Equals(context, result)
+ .FromJust());
} else {
// Anything but exactly the right length should fail to compile.
CHECK(0 == *context);
@@ -6284,12 +7103,14 @@ TEST(UseEvalFromExtension) {
v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
const char* extension_names[] = {"evaltest1", "evaltest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("UseEval1()");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 42)));
+ v8::Local<Value> result = CompileRun("UseEval1()");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 42))
+ .FromJust());
result = CompileRun("UseEval2()");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 42)));
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 42))
+ .FromJust());
}
@@ -6316,12 +7137,14 @@ TEST(UseWithFromExtension) {
v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
const char* extension_names[] = {"withtest1", "withtest2"};
v8::ExtensionConfiguration extensions(2, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("UseWith1()");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 87)));
+ v8::Local<Value> result = CompileRun("UseWith1()");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 87))
+ .FromJust());
result = CompileRun("UseWith2()");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 87)));
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 87))
+ .FromJust());
}
@@ -6330,10 +7153,11 @@ TEST(AutoExtensions) {
Extension* extension = new Extension("autotest", kSimpleExtensionSource);
extension->set_auto_enable(true);
v8::RegisterExtension(extension);
- v8::Handle<Context> context = Context::New(CcTest::isolate());
+ v8::Local<Context> context = Context::New(CcTest::isolate());
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("Foo()");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 4)));
+ v8::Local<Value> result = CompileRun("Foo()");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 4))
+ .FromJust());
}
@@ -6348,7 +7172,7 @@ TEST(SyntaxErrorExtensions) {
new Extension("syntaxerror", kSyntaxErrorInExtensionSource));
const char* extension_names[] = {"syntaxerror"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -6364,7 +7188,7 @@ TEST(ExceptionExtensions) {
new Extension("exception", kExceptionInExtensionSource));
const char* extension_names[] = {"exception"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -6385,10 +7209,11 @@ TEST(NativeCallInExtensions) {
new Extension("nativecall", kNativeCallInExtensionSource));
const char* extension_names[] = {"nativecall"};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun(kNativeCallTest);
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 3)));
+ v8::Local<Value> result = CompileRun(kNativeCallTest);
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 3))
+ .FromJust());
}
@@ -6398,8 +7223,8 @@ class NativeFunctionExtension : public Extension {
v8::FunctionCallback fun = &Echo)
: Extension(name, source), function_(fun) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Handle<v8::String> name) {
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
return v8::FunctionTemplate::New(isolate, function_);
}
@@ -6419,10 +7244,11 @@ TEST(NativeFunctionDeclaration) {
new NativeFunctionExtension(name, "native function foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = CompileRun("foo(42);");
- CHECK(result->Equals(v8::Integer::New(CcTest::isolate(), 42)));
+ v8::Local<Value> result = CompileRun("foo(42);");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 42))
+ .FromJust());
}
@@ -6434,7 +7260,7 @@ TEST(NativeFunctionDeclarationError) {
new NativeFunctionExtension(name, "native\nfunction foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -6448,7 +7274,7 @@ TEST(NativeFunctionDeclarationErrorEscape) {
new NativeFunctionExtension(name, "nativ\\u0065 function foo();"));
const char* extension_names[] = {name};
v8::ExtensionConfiguration extensions(1, extension_names);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &extensions);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
CHECK(context.IsEmpty());
}
@@ -6457,8 +7283,12 @@ static void CheckDependencies(const char* name, const char* expected) {
v8::HandleScope handle_scope(CcTest::isolate());
v8::ExtensionConfiguration config(1, &name);
LocalContext context(&config);
- CHECK(String::NewFromUtf8(CcTest::isolate(), expected)
- ->Equals(context->Global()->Get(v8_str("loaded"))));
+ CHECK(
+ v8_str(expected)
+ ->Equals(context.local(), context->Global()
+ ->Get(context.local(), v8_str("loaded"))
+ .ToLocalChecked())
+ .FromJust());
}
@@ -6487,8 +7317,12 @@ THREADED_TEST(ExtensionDependency) {
static const char* exts[2] = {"C", "E"};
v8::ExtensionConfiguration config(2, exts);
LocalContext context(&config);
- CHECK(v8_str("undefinedACBDE")
- ->Equals(context->Global()->Get(v8_str("loaded"))));
+ CHECK(
+ v8_str("undefinedACBDE")
+ ->Equals(context.local(), context->Global()
+ ->Get(context.local(), v8_str("loaded"))
+ .ToLocalChecked())
+ .FromJust());
}
@@ -6506,7 +7340,10 @@ static const char* kExtensionTestScript =
static void CallFun(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
if (args.IsConstructCall()) {
- args.This()->Set(v8_str("data"), args.Data());
+ CHECK(args.This()
+ ->Set(args.GetIsolate()->GetCurrentContext(), v8_str("data"),
+ args.Data())
+ .FromJust());
args.GetReturnValue().SetNull();
return;
}
@@ -6517,26 +7354,28 @@ static void CallFun(const v8::FunctionCallbackInfo<v8::Value>& args) {
class FunctionExtension : public Extension {
public:
FunctionExtension() : Extension("functiontest", kExtensionTestScript) {}
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Handle<String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<String> name);
};
static int lookup_count = 0;
-v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Handle<String> name) {
+v8::Local<v8::FunctionTemplate> FunctionExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<String> name) {
lookup_count++;
- if (name->Equals(v8_str("A"))) {
+ if (name->Equals(isolate->GetCurrentContext(), v8_str("A")).FromJust()) {
return v8::FunctionTemplate::New(isolate, CallFun,
v8::Integer::New(isolate, 8));
- } else if (name->Equals(v8_str("B"))) {
+ } else if (name->Equals(isolate->GetCurrentContext(), v8_str("B"))
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate, CallFun,
v8::Integer::New(isolate, 7));
- } else if (name->Equals(v8_str("C"))) {
+ } else if (name->Equals(isolate->GetCurrentContext(), v8_str("C"))
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate, CallFun,
v8::Integer::New(isolate, 6));
} else {
- return v8::Handle<v8::FunctionTemplate>();
+ return v8::Local<v8::FunctionTemplate>();
}
}
@@ -6548,9 +7387,15 @@ THREADED_TEST(FunctionLookup) {
v8::ExtensionConfiguration config(1, exts);
LocalContext context(&config);
CHECK_EQ(3, lookup_count);
- CHECK(v8::Integer::New(CcTest::isolate(), 8)->Equals(CompileRun("Foo(0)")));
- CHECK(v8::Integer::New(CcTest::isolate(), 7)->Equals(CompileRun("Foo(1)")));
- CHECK(v8::Integer::New(CcTest::isolate(), 6)->Equals(CompileRun("Foo(2)")));
+ CHECK(v8::Integer::New(CcTest::isolate(), 8)
+ ->Equals(context.local(), CompileRun("Foo(0)"))
+ .FromJust());
+ CHECK(v8::Integer::New(CcTest::isolate(), 7)
+ ->Equals(context.local(), CompileRun("Foo(1)"))
+ .FromJust());
+ CHECK(v8::Integer::New(CcTest::isolate(), 6)
+ ->Equals(context.local(), CompileRun("Foo(2)"))
+ .FromJust());
}
@@ -6564,11 +7409,14 @@ THREADED_TEST(NativeFunctionConstructCall) {
// Run a few times to ensure that allocation of objects doesn't
// change behavior of a constructor function.
CHECK(v8::Integer::New(CcTest::isolate(), 8)
- ->Equals(CompileRun("(new A()).data")));
+ ->Equals(context.local(), CompileRun("(new A()).data"))
+ .FromJust());
CHECK(v8::Integer::New(CcTest::isolate(), 7)
- ->Equals(CompileRun("(new B()).data")));
+ ->Equals(context.local(), CompileRun("(new B()).data"))
+ .FromJust());
CHECK(v8::Integer::New(CcTest::isolate(), 6)
- ->Equals(CompileRun("(new C()).data")));
+ ->Equals(context.local(), CompileRun("(new C()).data"))
+ .FromJust());
}
}
@@ -6587,35 +7435,39 @@ void StoringErrorCallback(const char* location, const char* message) {
// tests that the fatal error handler gets called. This renders V8
// unusable and therefore this test cannot be run in parallel.
TEST(ErrorReporting) {
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ CcTest::isolate()->SetFatalErrorHandler(StoringErrorCallback);
static const char* aDeps[] = {"B"};
v8::RegisterExtension(new Extension("A", "", 1, aDeps));
static const char* bDeps[] = {"A"};
v8::RegisterExtension(new Extension("B", "", 1, bDeps));
last_location = NULL;
v8::ExtensionConfiguration config(1, bDeps);
- v8::Handle<Context> context = Context::New(CcTest::isolate(), &config);
+ v8::Local<Context> context = Context::New(CcTest::isolate(), &config);
CHECK(context.IsEmpty());
CHECK(last_location);
}
-static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
- v8::Handle<Value> data) {
+static void MissingScriptInfoMessageListener(v8::Local<v8::Message> message,
+ v8::Local<Value> data) {
+ v8::Isolate* isolate = CcTest::isolate();
+ Local<Context> context = isolate->GetCurrentContext();
CHECK(message->GetScriptOrigin().ResourceName()->IsUndefined());
- CHECK(v8::Undefined(CcTest::isolate())
- ->Equals(message->GetScriptOrigin().ResourceName()));
- message->GetLineNumber();
- message->GetSourceLine();
+ CHECK(v8::Undefined(isolate)
+ ->Equals(context, message->GetScriptOrigin().ResourceName())
+ .FromJust());
+ message->GetLineNumber(context).FromJust();
+ message->GetSourceLine(context).ToLocalChecked();
}
THREADED_TEST(ErrorWithMissingScriptInfo) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::V8::AddMessageListener(MissingScriptInfoMessageListener);
+ context->GetIsolate()->AddMessageListener(MissingScriptInfoMessageListener);
CompileRun("throw Error()");
- v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener);
+ context->GetIsolate()->RemoveMessageListeners(
+ MissingScriptInfoMessageListener);
}
@@ -6634,7 +7486,7 @@ static void SetFlag(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
static void IndependentWeakHandle(bool global_gc, bool interlinked) {
v8::Isolate* iso = CcTest::isolate();
v8::HandleScope scope(iso);
- v8::Handle<Context> context = Context::New(iso);
+ v8::Local<Context> context = Context::New(iso);
Context::Scope context_scope(context);
FlagAndPersistent object_a, object_b;
@@ -6648,8 +7500,8 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
object_a.handle.Reset(iso, a);
object_b.handle.Reset(iso, b);
if (interlinked) {
- a->Set(v8_str("x"), b);
- b->Set(v8_str("x"), a);
+ a->Set(context, v8_str("x"), b).FromJust();
+ b->Set(context, v8_str("x"), a).FromJust();
}
if (global_gc) {
CcTest::heap()->CollectAllGarbage();
@@ -6658,8 +7510,8 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
}
// We are relying on this creating a big flag array and reserving the space
// up front.
- v8::Handle<Value> big_array = CompileRun("new Array(5000)");
- a->Set(v8_str("y"), big_array);
+ v8::Local<Value> big_array = CompileRun("new Array(5000)");
+ a->Set(context, v8_str("y"), big_array).FromJust();
big_heap_size = CcTest::heap()->SizeOfObjects();
}
@@ -6726,8 +7578,8 @@ void CheckInternalFields(
const v8::WeakCallbackInfo<v8::Persistent<v8::Object>>& data) {
v8::Persistent<v8::Object>* handle = data.GetParameter();
handle->Reset();
- Trivial* t1 = reinterpret_cast<Trivial*>(data.GetInternalField1());
- Trivial2* t2 = reinterpret_cast<Trivial2*>(data.GetInternalField2());
+ Trivial* t1 = reinterpret_cast<Trivial*>(data.GetInternalField(0));
+ Trivial2* t2 = reinterpret_cast<Trivial2*>(data.GetInternalField(1));
CHECK_EQ(42, t1->x());
CHECK_EQ(103, t2->x());
t1->set_x(1729);
@@ -6747,7 +7599,10 @@ void InternalFieldCallback(bool global_gc) {
instance_templ->SetInternalFieldCount(2);
{
v8::HandleScope scope(isolate);
- Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+ Local<v8::Object> obj = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
v8::Persistent<v8::Object> handle(isolate, obj);
CHECK_EQ(2, obj->InternalFieldCount());
CHECK(obj->GetInternalField(0)->IsUndefined());
@@ -6805,7 +7660,7 @@ void v8::internal::HeapTester::ResetWeakHandle(bool global_gc) {
v8::Isolate* iso = CcTest::isolate();
v8::HandleScope scope(iso);
- v8::Handle<Context> context = Context::New(iso);
+ v8::Local<Context> context = Context::New(iso);
Context::Scope context_scope(context);
FlagAndPersistent object_a, object_b;
@@ -6886,7 +7741,7 @@ THREADED_TEST(GCFromWeakCallbacks) {
v8::Isolate* isolate = CcTest::isolate();
v8::Locker locker(CcTest::isolate());
v8::HandleScope scope(isolate);
- v8::Handle<Context> context = Context::New(isolate);
+ v8::Local<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
static const int kNumberOfGCTypes = 2;
@@ -6916,19 +7771,20 @@ THREADED_TEST(GCFromWeakCallbacks) {
}
-v8::Handle<Function> args_fun;
+v8::Local<Function> args_fun;
static void ArgumentsTestCallback(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = args.GetIsolate();
- CHECK(args_fun->Equals(args.Callee()));
+ Local<Context> context = isolate->GetCurrentContext();
+ CHECK(args_fun->Equals(context, args.Callee()).FromJust());
CHECK_EQ(3, args.Length());
- CHECK(v8::Integer::New(isolate, 1)->Equals(args[0]));
- CHECK(v8::Integer::New(isolate, 2)->Equals(args[1]));
- CHECK(v8::Integer::New(isolate, 3)->Equals(args[2]));
- CHECK(v8::Undefined(isolate)->Equals(args[3]));
+ CHECK(v8::Integer::New(isolate, 1)->Equals(context, args[0]).FromJust());
+ CHECK(v8::Integer::New(isolate, 2)->Equals(context, args[1]).FromJust());
+ CHECK(v8::Integer::New(isolate, 3)->Equals(context, args[2]).FromJust());
+ CHECK(v8::Undefined(isolate)->Equals(context, args[3]).FromJust());
v8::HandleScope scope(args.GetIsolate());
CcTest::heap()->CollectAllGarbage();
}
@@ -6937,12 +7793,15 @@ static void ArgumentsTestCallback(
THREADED_TEST(Arguments) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> global = ObjectTemplate::New(isolate);
global->Set(v8_str("f"),
v8::FunctionTemplate::New(isolate, ArgumentsTestCallback));
LocalContext context(NULL, global);
- args_fun = context->Global()->Get(v8_str("f")).As<Function>();
- v8_compile("f(1, 2, 3)")->Run();
+ args_fun = context->Global()
+ ->Get(context.local(), v8_str("f"))
+ .ToLocalChecked()
+ .As<Function>();
+ v8_compile("f(1, 2, 3)")->Run(context.local()).ToLocalChecked();
}
@@ -6950,29 +7809,47 @@ static int p_getter_count;
static int p_getter_count2;
-static void PGetter(Local<String> name,
+static void PGetter(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
p_getter_count++;
- v8::Handle<v8::Object> global =
- info.GetIsolate()->GetCurrentContext()->Global();
- CHECK(info.Holder()->Equals(global->Get(v8_str("o1"))));
- if (name->Equals(v8_str("p1"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o1"))));
- } else if (name->Equals(v8_str("p2"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o2"))));
- } else if (name->Equals(v8_str("p3"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o3"))));
- } else if (name->Equals(v8_str("p4"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o4"))));
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ v8::Local<v8::Object> global = context->Global();
+ CHECK(
+ info.Holder()
+ ->Equals(context, global->Get(context, v8_str("o1")).ToLocalChecked())
+ .FromJust());
+ if (name->Equals(context, v8_str("p1")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o1")).ToLocalChecked())
+ .FromJust());
+ } else if (name->Equals(context, v8_str("p2")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o2")).ToLocalChecked())
+ .FromJust());
+ } else if (name->Equals(context, v8_str("p3")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o3")).ToLocalChecked())
+ .FromJust());
+ } else if (name->Equals(context, v8_str("p4")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o4")).ToLocalChecked())
+ .FromJust());
}
}
-static void RunHolderTest(v8::Handle<v8::ObjectTemplate> obj) {
+static void RunHolderTest(v8::Local<v8::ObjectTemplate> obj) {
ApiTestFuzzer::Fuzz();
LocalContext context;
- context->Global()->Set(v8_str("o1"), obj->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o1"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"o1.__proto__ = { };"
"var o2 = { __proto__: o1 };"
@@ -6989,17 +7866,32 @@ static void PGetter2(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
p_getter_count2++;
- v8::Handle<v8::Object> global =
- info.GetIsolate()->GetCurrentContext()->Global();
- CHECK(info.Holder()->Equals(global->Get(v8_str("o1"))));
- if (name->Equals(v8_str("p1"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o1"))));
- } else if (name->Equals(v8_str("p2"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o2"))));
- } else if (name->Equals(v8_str("p3"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o3"))));
- } else if (name->Equals(v8_str("p4"))) {
- CHECK(info.This()->Equals(global->Get(v8_str("o4"))));
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ v8::Local<v8::Object> global = context->Global();
+ CHECK(
+ info.Holder()
+ ->Equals(context, global->Get(context, v8_str("o1")).ToLocalChecked())
+ .FromJust());
+ if (name->Equals(context, v8_str("p1")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o1")).ToLocalChecked())
+ .FromJust());
+ } else if (name->Equals(context, v8_str("p2")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o2")).ToLocalChecked())
+ .FromJust());
+ } else if (name->Equals(context, v8_str("p3")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o3")).ToLocalChecked())
+ .FromJust());
+ } else if (name->Equals(context, v8_str("p4")).FromJust()) {
+ CHECK(info.This()
+ ->Equals(context,
+ global->Get(context, v8_str("o4")).ToLocalChecked())
+ .FromJust());
}
}
@@ -7007,7 +7899,7 @@ static void PGetter2(Local<Name> name,
THREADED_TEST(GetterHolders) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), PGetter);
obj->SetAccessor(v8_str("p2"), PGetter);
obj->SetAccessor(v8_str("p3"), PGetter);
@@ -7021,7 +7913,7 @@ THREADED_TEST(GetterHolders) {
THREADED_TEST(PreInterceptorHolders) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetHandler(v8::NamedPropertyHandlerConfiguration(PGetter2));
p_getter_count2 = 0;
RunHolderTest(obj);
@@ -7032,19 +7924,26 @@ THREADED_TEST(PreInterceptorHolders) {
THREADED_TEST(ObjectInstantiation) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("t"), PGetter2);
LocalContext context;
- context->Global()->Set(v8_str("o"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
for (int i = 0; i < 100; i++) {
v8::HandleScope inner_scope(CcTest::isolate());
- v8::Handle<v8::Object> obj = templ->NewInstance();
- CHECK(!obj->Equals(context->Global()->Get(v8_str("o"))));
- context->Global()->Set(v8_str("o2"), obj);
- v8::Handle<Value> value =
- CompileRun("o.__proto__ === o2.__proto__");
- CHECK(v8::True(isolate)->Equals(value));
- context->Global()->Set(v8_str("o"), obj);
+ v8::Local<v8::Object> obj =
+ templ->NewInstance(context.local()).ToLocalChecked();
+ CHECK(!obj->Equals(context.local(), context->Global()
+ ->Get(context.local(), v8_str("o"))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(
+ context->Global()->Set(context.local(), v8_str("o2"), obj).FromJust());
+ v8::Local<Value> value = CompileRun("o.__proto__ === o2.__proto__");
+ CHECK(v8::True(isolate)->Equals(context.local(), value).FromJust());
+ CHECK(context->Global()->Set(context.local(), v8_str("o"), obj).FromJust());
}
}
@@ -7070,7 +7969,7 @@ static int StrNCmp16(uint16_t* a, uint16_t* b, int n) {
}
-int GetUtf8Length(Handle<String> str) {
+int GetUtf8Length(Local<String> str) {
int len = str->Utf8Length();
if (len < 0) {
i::Handle<i::String> istr(v8::Utils::OpenHandle(*str));
@@ -7084,27 +7983,37 @@ int GetUtf8Length(Handle<String> str) {
THREADED_TEST(StringWrite) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<String> str = v8_str("abcde");
+ v8::Local<String> str = v8_str("abcde");
// abc<Icelandic eth><Unicode snowman>.
- v8::Handle<String> str2 = v8_str("abc\303\260\342\230\203");
- v8::Handle<String> str3 = v8::String::NewFromUtf8(
- context->GetIsolate(), "abc\0def", v8::String::kNormalString, 7);
+ v8::Local<String> str2 = v8_str("abc\303\260\342\230\203");
+ v8::Local<String> str3 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "abc\0def",
+ v8::NewStringType::kNormal, 7)
+ .ToLocalChecked();
// "ab" + lead surrogate + "cd" + trail surrogate + "ef"
uint16_t orphans[8] = { 0x61, 0x62, 0xd800, 0x63, 0x64, 0xdc00, 0x65, 0x66 };
- v8::Handle<String> orphans_str = v8::String::NewFromTwoByte(
- context->GetIsolate(), orphans, v8::String::kNormalString, 8);
+ v8::Local<String> orphans_str =
+ v8::String::NewFromTwoByte(context->GetIsolate(), orphans,
+ v8::NewStringType::kNormal, 8)
+ .ToLocalChecked();
// single lead surrogate
uint16_t lead[1] = { 0xd800 };
- v8::Handle<String> lead_str = v8::String::NewFromTwoByte(
- context->GetIsolate(), lead, v8::String::kNormalString, 1);
+ v8::Local<String> lead_str =
+ v8::String::NewFromTwoByte(context->GetIsolate(), lead,
+ v8::NewStringType::kNormal, 1)
+ .ToLocalChecked();
// single trail surrogate
uint16_t trail[1] = { 0xdc00 };
- v8::Handle<String> trail_str = v8::String::NewFromTwoByte(
- context->GetIsolate(), trail, v8::String::kNormalString, 1);
+ v8::Local<String> trail_str =
+ v8::String::NewFromTwoByte(context->GetIsolate(), trail,
+ v8::NewStringType::kNormal, 1)
+ .ToLocalChecked();
// surrogate pair
uint16_t pair[2] = { 0xd800, 0xdc00 };
- v8::Handle<String> pair_str = v8::String::NewFromTwoByte(
- context->GetIsolate(), pair, v8::String::kNormalString, 2);
+ v8::Local<String> pair_str =
+ v8::String::NewFromTwoByte(context->GetIsolate(), pair,
+ v8::NewStringType::kNormal, 2)
+ .ToLocalChecked();
const int kStride = 4; // Must match stride in for loops in JS below.
CompileRun(
"var left = '';"
@@ -7116,9 +8025,13 @@ THREADED_TEST(StringWrite) {
"for (var i = 0; i < 0xd800; i += 4) {"
" right = String.fromCharCode(i) + right;"
"}");
- v8::Handle<v8::Object> global = context->Global();
- Handle<String> left_tree = global->Get(v8_str("left")).As<String>();
- Handle<String> right_tree = global->Get(v8_str("right")).As<String>();
+ v8::Local<v8::Object> global = context->Global();
+ Local<String> left_tree = global->Get(context.local(), v8_str("left"))
+ .ToLocalChecked()
+ .As<String>();
+ Local<String> right_tree = global->Get(context.local(), v8_str("right"))
+ .ToLocalChecked()
+ .As<String>();
CHECK_EQ(5, str2->Length());
CHECK_EQ(0xd800 / kStride, left_tree->Length());
@@ -7394,15 +8307,17 @@ static void Utf16Helper(
const char* name,
const char* lengths_name,
int len) {
- Local<v8::Array> a =
- Local<v8::Array>::Cast(context->Global()->Get(v8_str(name)));
+ Local<v8::Array> a = Local<v8::Array>::Cast(
+ context->Global()->Get(context.local(), v8_str(name)).ToLocalChecked());
Local<v8::Array> alens =
- Local<v8::Array>::Cast(context->Global()->Get(v8_str(lengths_name)));
+ Local<v8::Array>::Cast(context->Global()
+ ->Get(context.local(), v8_str(lengths_name))
+ .ToLocalChecked());
for (int i = 0; i < len; i++) {
Local<v8::String> string =
- Local<v8::String>::Cast(a->Get(i));
- Local<v8::Number> expected_len =
- Local<v8::Number>::Cast(alens->Get(i));
+ Local<v8::String>::Cast(a->Get(context.local(), i).ToLocalChecked());
+ Local<v8::Number> expected_len = Local<v8::Number>::Cast(
+ alens->Get(context.local(), i).ToLocalChecked());
int length = GetUtf8Length(string);
CHECK_EQ(static_cast<int>(expected_len->Value()), length);
}
@@ -7458,7 +8373,7 @@ THREADED_TEST(Utf16) {
}
-static bool SameSymbol(Handle<String> s1, Handle<String> s2) {
+static bool SameSymbol(Local<String> s1, Local<String> s2) {
i::Handle<i::String> is1(v8::Utils::OpenHandle(*s1));
i::Handle<i::String> is2(v8::Utils::OpenHandle(*s2));
return *is1 == *is2;
@@ -7469,10 +8384,14 @@ THREADED_TEST(Utf16Symbol) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- Handle<String> symbol1 = v8::String::NewFromUtf8(
- context->GetIsolate(), "abc", v8::String::kInternalizedString);
- Handle<String> symbol2 = v8::String::NewFromUtf8(
- context->GetIsolate(), "abc", v8::String::kInternalizedString);
+ Local<String> symbol1 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "abc",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> symbol2 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "abc",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
CHECK(SameSymbol(symbol1, symbol2));
CompileRun(
@@ -7490,35 +8409,49 @@ THREADED_TEST(Utf16Symbol) {
"if (sym3.charCodeAt(2) != 0xdc07) throw sym1.charCodeAt(2);"
"if (sym4.length != 3) throw sym4;"
"if (sym4.charCodeAt(2) != 0xdc08) throw sym2.charCodeAt(2);");
- Handle<String> sym0 = v8::String::NewFromUtf8(
- context->GetIsolate(), "benedictus", v8::String::kInternalizedString);
- Handle<String> sym0b = v8::String::NewFromUtf8(
- context->GetIsolate(), "S\303\270ren", v8::String::kInternalizedString);
- Handle<String> sym1 =
+ Local<String> sym0 =
+ v8::String::NewFromUtf8(context->GetIsolate(), "benedictus",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> sym0b =
+ v8::String::NewFromUtf8(context->GetIsolate(), "S\303\270ren",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> sym1 =
v8::String::NewFromUtf8(context->GetIsolate(), "\355\240\201\355\260\207",
- v8::String::kInternalizedString);
- Handle<String> sym2 =
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> sym2 =
v8::String::NewFromUtf8(context->GetIsolate(), "\360\220\220\210",
- v8::String::kInternalizedString);
- Handle<String> sym3 = v8::String::NewFromUtf8(
- context->GetIsolate(), "x\355\240\201\355\260\207",
- v8::String::kInternalizedString);
- Handle<String> sym4 =
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> sym3 = v8::String::NewFromUtf8(context->GetIsolate(),
+ "x\355\240\201\355\260\207",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> sym4 =
v8::String::NewFromUtf8(context->GetIsolate(), "x\360\220\220\210",
- v8::String::kInternalizedString);
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
v8::Local<v8::Object> global = context->Global();
- Local<Value> s0 = global->Get(v8_str("sym0"));
- Local<Value> s0b = global->Get(v8_str("sym0b"));
- Local<Value> s1 = global->Get(v8_str("sym1"));
- Local<Value> s2 = global->Get(v8_str("sym2"));
- Local<Value> s3 = global->Get(v8_str("sym3"));
- Local<Value> s4 = global->Get(v8_str("sym4"));
- CHECK(SameSymbol(sym0, Handle<String>::Cast(s0)));
- CHECK(SameSymbol(sym0b, Handle<String>::Cast(s0b)));
- CHECK(SameSymbol(sym1, Handle<String>::Cast(s1)));
- CHECK(SameSymbol(sym2, Handle<String>::Cast(s2)));
- CHECK(SameSymbol(sym3, Handle<String>::Cast(s3)));
- CHECK(SameSymbol(sym4, Handle<String>::Cast(s4)));
+ Local<Value> s0 =
+ global->Get(context.local(), v8_str("sym0")).ToLocalChecked();
+ Local<Value> s0b =
+ global->Get(context.local(), v8_str("sym0b")).ToLocalChecked();
+ Local<Value> s1 =
+ global->Get(context.local(), v8_str("sym1")).ToLocalChecked();
+ Local<Value> s2 =
+ global->Get(context.local(), v8_str("sym2")).ToLocalChecked();
+ Local<Value> s3 =
+ global->Get(context.local(), v8_str("sym3")).ToLocalChecked();
+ Local<Value> s4 =
+ global->Get(context.local(), v8_str("sym4")).ToLocalChecked();
+ CHECK(SameSymbol(sym0, Local<String>::Cast(s0)));
+ CHECK(SameSymbol(sym0b, Local<String>::Cast(s0b)));
+ CHECK(SameSymbol(sym1, Local<String>::Cast(s1)));
+ CHECK(SameSymbol(sym2, Local<String>::Cast(s2)));
+ CHECK(SameSymbol(sym3, Local<String>::Cast(s3)));
+ CHECK(SameSymbol(sym4, Local<String>::Cast(s4)));
}
@@ -7578,29 +8511,32 @@ THREADED_TEST(ToArrayIndex) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<String> str = v8_str("42");
- v8::Handle<v8::Uint32> index = str->ToArrayIndex();
+ v8::Local<String> str = v8_str("42");
+ v8::MaybeLocal<v8::Uint32> index = str->ToArrayIndex(context.local());
CHECK(!index.IsEmpty());
- CHECK_EQ(42.0, index->Uint32Value());
+ CHECK_EQ(42.0,
+ index.ToLocalChecked()->Uint32Value(context.local()).FromJust());
str = v8_str("42asdf");
- index = str->ToArrayIndex();
+ index = str->ToArrayIndex(context.local());
CHECK(index.IsEmpty());
str = v8_str("-42");
- index = str->ToArrayIndex();
+ index = str->ToArrayIndex(context.local());
CHECK(index.IsEmpty());
str = v8_str("4294967294");
- index = str->ToArrayIndex();
+ index = str->ToArrayIndex(context.local());
CHECK(!index.IsEmpty());
- CHECK_EQ(4294967294.0, index->Uint32Value());
- v8::Handle<v8::Number> num = v8::Number::New(isolate, 1);
- index = num->ToArrayIndex();
+ CHECK_EQ(4294967294.0,
+ index.ToLocalChecked()->Uint32Value(context.local()).FromJust());
+ v8::Local<v8::Number> num = v8::Number::New(isolate, 1);
+ index = num->ToArrayIndex(context.local());
CHECK(!index.IsEmpty());
- CHECK_EQ(1.0, index->Uint32Value());
+ CHECK_EQ(1.0,
+ index.ToLocalChecked()->Uint32Value(context.local()).FromJust());
num = v8::Number::New(isolate, -1);
- index = num->ToArrayIndex();
+ index = num->ToArrayIndex(context.local());
CHECK(index.IsEmpty());
- v8::Handle<v8::Object> obj = v8::Object::New(isolate);
- index = obj->ToArrayIndex();
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ index = obj->ToArrayIndex(context.local());
CHECK(index.IsEmpty());
}
@@ -7609,33 +8545,58 @@ THREADED_TEST(ErrorConstruction) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<String> foo = v8_str("foo");
- v8::Handle<String> message = v8_str("message");
- v8::Handle<Value> range_error = v8::Exception::RangeError(foo);
+ v8::Local<String> foo = v8_str("foo");
+ v8::Local<String> message = v8_str("message");
+ v8::Local<Value> range_error = v8::Exception::RangeError(foo);
CHECK(range_error->IsObject());
- CHECK(range_error.As<v8::Object>()->Get(message)->Equals(foo));
- v8::Handle<Value> reference_error = v8::Exception::ReferenceError(foo);
+ CHECK(range_error.As<v8::Object>()
+ ->Get(context.local(), message)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo)
+ .FromJust());
+ v8::Local<Value> reference_error = v8::Exception::ReferenceError(foo);
CHECK(reference_error->IsObject());
- CHECK(reference_error.As<v8::Object>()->Get(message)->Equals(foo));
- v8::Handle<Value> syntax_error = v8::Exception::SyntaxError(foo);
+ CHECK(reference_error.As<v8::Object>()
+ ->Get(context.local(), message)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo)
+ .FromJust());
+ v8::Local<Value> syntax_error = v8::Exception::SyntaxError(foo);
CHECK(syntax_error->IsObject());
- CHECK(syntax_error.As<v8::Object>()->Get(message)->Equals(foo));
- v8::Handle<Value> type_error = v8::Exception::TypeError(foo);
+ CHECK(syntax_error.As<v8::Object>()
+ ->Get(context.local(), message)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo)
+ .FromJust());
+ v8::Local<Value> type_error = v8::Exception::TypeError(foo);
CHECK(type_error->IsObject());
- CHECK(type_error.As<v8::Object>()->Get(message)->Equals(foo));
- v8::Handle<Value> error = v8::Exception::Error(foo);
+ CHECK(type_error.As<v8::Object>()
+ ->Get(context.local(), message)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo)
+ .FromJust());
+ v8::Local<Value> error = v8::Exception::Error(foo);
CHECK(error->IsObject());
- CHECK(error.As<v8::Object>()->Get(message)->Equals(foo));
+ CHECK(error.As<v8::Object>()
+ ->Get(context.local(), message)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo)
+ .FromJust());
}
static void ThrowV8Exception(const v8::FunctionCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- v8::Handle<String> foo = v8_str("foo");
- v8::Handle<String> message = v8_str("message");
- v8::Handle<Value> error = v8::Exception::Error(foo);
+ v8::Local<String> foo = v8_str("foo");
+ v8::Local<String> message = v8_str("message");
+ v8::Local<Value> error = v8::Exception::Error(foo);
CHECK(error->IsObject());
- CHECK(error.As<v8::Object>()->Get(message)->Equals(foo));
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(error.As<v8::Object>()
+ ->Get(context, message)
+ .ToLocalChecked()
+ ->Equals(context, foo)
+ .FromJust());
info.GetIsolate()->ThrowException(error);
info.GetReturnValue().SetUndefined();
}
@@ -7644,15 +8605,17 @@ static void ThrowV8Exception(const v8::FunctionCallbackInfo<v8::Value>& info) {
THREADED_TEST(ExceptionCreateMessage) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<String> foo_str = v8_str("foo");
- v8::Handle<String> message_str = v8_str("message");
+ v8::Local<String> foo_str = v8_str("foo");
+ v8::Local<String> message_str = v8_str("message");
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(true);
Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(context->GetIsolate(), ThrowV8Exception);
v8::Local<v8::Object> global = context->Global();
- global->Set(v8_str("throwV8Exception"), fun->GetFunction());
+ CHECK(global->Set(context.local(), v8_str("throwV8Exception"),
+ fun->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
TryCatch try_catch(context->GetIsolate());
CompileRun(
@@ -7662,16 +8625,21 @@ THREADED_TEST(ExceptionCreateMessage) {
"f1();");
CHECK(try_catch.HasCaught());
- v8::Handle<v8::Value> error = try_catch.Exception();
+ v8::Local<v8::Value> error = try_catch.Exception();
CHECK(error->IsObject());
- CHECK(error.As<v8::Object>()->Get(message_str)->Equals(foo_str));
+ CHECK(error.As<v8::Object>()
+ ->Get(context.local(), message_str)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo_str)
+ .FromJust());
- v8::Handle<v8::Message> message = v8::Exception::CreateMessage(error);
+ v8::Local<v8::Message> message =
+ v8::Exception::CreateMessage(context->GetIsolate(), error);
CHECK(!message.IsEmpty());
- CHECK_EQ(2, message->GetLineNumber());
- CHECK_EQ(2, message->GetStartColumn());
+ CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(2, message->GetStartColumn(context.local()).FromJust());
- v8::Handle<v8::StackTrace> stackTrace = message->GetStackTrace();
+ v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
CHECK(!stackTrace.IsEmpty());
CHECK_EQ(2, stackTrace->GetFrameCount());
@@ -7679,7 +8647,7 @@ THREADED_TEST(ExceptionCreateMessage) {
CHECK(!stackTrace.IsEmpty());
CHECK_EQ(2, stackTrace->GetFrameCount());
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+ context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(false);
// Now check message location when SetCaptureStackTraceForUncaughtExceptions
// is false.
@@ -7694,12 +8662,16 @@ THREADED_TEST(ExceptionCreateMessage) {
error = try_catch.Exception();
CHECK(error->IsObject());
- CHECK(error.As<v8::Object>()->Get(message_str)->Equals(foo_str));
+ CHECK(error.As<v8::Object>()
+ ->Get(context.local(), message_str)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo_str)
+ .FromJust());
- message = v8::Exception::CreateMessage(error);
+ message = v8::Exception::CreateMessage(context->GetIsolate(), error);
CHECK(!message.IsEmpty());
- CHECK_EQ(2, message->GetLineNumber());
- CHECK_EQ(9, message->GetStartColumn());
+ CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(9, message->GetStartColumn(context.local()).FromJust());
// Should be empty stack trace.
stackTrace = message->GetStackTrace();
@@ -7735,48 +8707,27 @@ static void YSetter(Local<String> name,
Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
Local<Object> this_obj = Local<Object>::Cast(info.This());
- if (this_obj->Has(name)) this_obj->Delete(name);
- this_obj->Set(name, value);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ if (this_obj->Has(context, name).FromJust())
+ this_obj->Delete(context, name).FromJust();
+ CHECK(this_obj->Set(context, name, value).FromJust());
}
THREADED_TEST(DeleteAccessor) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("y"), YGetter, YSetter);
LocalContext context;
- v8::Handle<v8::Object> holder = obj->NewInstance();
- context->Global()->Set(v8_str("holder"), holder);
- v8::Handle<Value> result = CompileRun(
- "holder.y = 11; holder.y = 12; holder.y");
- CHECK_EQ(12u, result->Uint32Value());
-}
-
-
-THREADED_TEST(TypeSwitch) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> templ1 = v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> templ3 = v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> templs[3] = { templ1, templ2, templ3 };
- v8::Handle<v8::TypeSwitch> type_switch = v8::TypeSwitch::New(3, templs);
- LocalContext context;
- v8::Handle<v8::Object> obj0 = v8::Object::New(isolate);
- v8::Handle<v8::Object> obj1 = templ1->GetFunction()->NewInstance();
- v8::Handle<v8::Object> obj2 = templ2->GetFunction()->NewInstance();
- v8::Handle<v8::Object> obj3 = templ3->GetFunction()->NewInstance();
- for (int i = 0; i < 10; i++) {
- CHECK_EQ(0, type_switch->match(obj0));
- CHECK_EQ(1, type_switch->match(obj1));
- CHECK_EQ(2, type_switch->match(obj2));
- CHECK_EQ(3, type_switch->match(obj3));
- CHECK_EQ(3, type_switch->match(obj3));
- CHECK_EQ(2, type_switch->match(obj2));
- CHECK_EQ(1, type_switch->match(obj1));
- CHECK_EQ(0, type_switch->match(obj0));
- }
+ v8::Local<v8::Object> holder =
+ obj->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("holder"), holder)
+ .FromJust());
+ v8::Local<Value> result =
+ CompileRun("holder.y = 11; holder.y = 12; holder.y");
+ CHECK_EQ(12u, result->Uint32Value(context.local()).FromJust());
}
@@ -7786,20 +8737,22 @@ static void TroubleCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
trouble_nesting++;
// Call a JS function that throws an uncaught exception.
- Local<v8::Object> arg_this =
- args.GetIsolate()->GetCurrentContext()->Global();
- Local<Value> trouble_callee = (trouble_nesting == 3) ?
- arg_this->Get(v8_str("trouble_callee")) :
- arg_this->Get(v8_str("trouble_caller"));
+ Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ Local<v8::Object> arg_this = context->Global();
+ Local<Value> trouble_callee =
+ (trouble_nesting == 3)
+ ? arg_this->Get(context, v8_str("trouble_callee")).ToLocalChecked()
+ : arg_this->Get(context, v8_str("trouble_caller")).ToLocalChecked();
CHECK(trouble_callee->IsFunction());
- args.GetReturnValue().Set(
- Function::Cast(*trouble_callee)->Call(arg_this, 0, NULL));
+ args.GetReturnValue().Set(Function::Cast(*trouble_callee)
+ ->Call(context, arg_this, 0, NULL)
+ .FromMaybe(v8::Local<v8::Value>()));
}
static int report_count = 0;
-static void ApiUncaughtExceptionTestListener(v8::Handle<v8::Message>,
- v8::Handle<Value>) {
+static void ApiUncaughtExceptionTestListener(v8::Local<v8::Message>,
+ v8::Local<Value>) {
report_count++;
}
@@ -7811,12 +8764,14 @@ TEST(ApiUncaughtException) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::V8::AddMessageListener(ApiUncaughtExceptionTestListener);
+ isolate->AddMessageListener(ApiUncaughtExceptionTestListener);
Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(isolate, TroubleCallback);
v8::Local<v8::Object> global = env->Global();
- global->Set(v8_str("trouble"), fun->GetFunction());
+ CHECK(global->Set(env.local(), v8_str("trouble"),
+ fun->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"function trouble_callee() {"
@@ -7826,25 +8781,31 @@ TEST(ApiUncaughtException) {
"function trouble_caller() {"
" trouble();"
"};");
- Local<Value> trouble = global->Get(v8_str("trouble"));
+ Local<Value> trouble =
+ global->Get(env.local(), v8_str("trouble")).ToLocalChecked();
CHECK(trouble->IsFunction());
- Local<Value> trouble_callee = global->Get(v8_str("trouble_callee"));
+ Local<Value> trouble_callee =
+ global->Get(env.local(), v8_str("trouble_callee")).ToLocalChecked();
CHECK(trouble_callee->IsFunction());
- Local<Value> trouble_caller = global->Get(v8_str("trouble_caller"));
+ Local<Value> trouble_caller =
+ global->Get(env.local(), v8_str("trouble_caller")).ToLocalChecked();
CHECK(trouble_caller->IsFunction());
- Function::Cast(*trouble_caller)->Call(global, 0, NULL);
+ Function::Cast(*trouble_caller)
+ ->Call(env.local(), global, 0, NULL)
+ .FromMaybe(v8::Local<v8::Value>());
CHECK_EQ(1, report_count);
- v8::V8::RemoveMessageListeners(ApiUncaughtExceptionTestListener);
+ isolate->RemoveMessageListeners(ApiUncaughtExceptionTestListener);
}
TEST(ApiUncaughtExceptionInObjectObserve) {
+ v8::internal::FLAG_harmony_object_observe = true;
v8::internal::FLAG_stack_size = 150;
report_count = 0;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::V8::AddMessageListener(ApiUncaughtExceptionTestListener);
+ isolate->AddMessageListener(ApiUncaughtExceptionTestListener);
CompileRun(
"var obj = {};"
"var observe_count = 0;"
@@ -7860,19 +8821,22 @@ TEST(ApiUncaughtExceptionInObjectObserve) {
"obj.foo = 'bar';");
CHECK_EQ(3, report_count);
ExpectInt32("observe_count", 2);
- v8::V8::RemoveMessageListeners(ApiUncaughtExceptionTestListener);
+ isolate->RemoveMessageListeners(ApiUncaughtExceptionTestListener);
}
static const char* script_resource_name = "ExceptionInNativeScript.js";
-static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
- v8::Handle<Value>) {
- v8::Handle<v8::Value> name_val = message->GetScriptOrigin().ResourceName();
+static void ExceptionInNativeScriptTestListener(v8::Local<v8::Message> message,
+ v8::Local<Value>) {
+ v8::Local<v8::Value> name_val = message->GetScriptOrigin().ResourceName();
CHECK(!name_val.IsEmpty() && name_val->IsString());
v8::String::Utf8Value name(message->GetScriptOrigin().ResourceName());
CHECK_EQ(0, strcmp(script_resource_name, *name));
- CHECK_EQ(3, message->GetLineNumber());
- v8::String::Utf8Value source_line(message->GetSourceLine());
+ v8::Local<v8::Context> context =
+ v8::Isolate::GetCurrent()->GetCurrentContext();
+ CHECK_EQ(3, message->GetLineNumber(context).FromJust());
+ v8::String::Utf8Value source_line(
+ message->GetSourceLine(context).ToLocalChecked());
CHECK_EQ(0, strcmp(" new o.foo();", *source_line));
}
@@ -7881,12 +8845,14 @@ TEST(ExceptionInNativeScript) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::V8::AddMessageListener(ExceptionInNativeScriptTestListener);
+ isolate->AddMessageListener(ExceptionInNativeScriptTestListener);
Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(isolate, TroubleCallback);
v8::Local<v8::Object> global = env->Global();
- global->Set(v8_str("trouble"), fun->GetFunction());
+ CHECK(global->Set(env.local(), v8_str("trouble"),
+ fun->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
CompileRunWithOrigin(
"function trouble() {\n"
@@ -7894,10 +8860,11 @@ TEST(ExceptionInNativeScript) {
" new o.foo();\n"
"};",
script_resource_name);
- Local<Value> trouble = global->Get(v8_str("trouble"));
+ Local<Value> trouble =
+ global->Get(env.local(), v8_str("trouble")).ToLocalChecked();
CHECK(trouble->IsFunction());
- Function::Cast(*trouble)->Call(global, 0, NULL);
- v8::V8::RemoveMessageListeners(ExceptionInNativeScriptTestListener);
+ CHECK(Function::Cast(*trouble)->Call(env.local(), global, 0, NULL).IsEmpty());
+ isolate->RemoveMessageListeners(ExceptionInNativeScriptTestListener);
}
@@ -7935,7 +8902,9 @@ TEST(TryCatchFinallyUsingTryCatchHandler) {
void CEvaluate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- CompileRun(args[0]->ToString(args.GetIsolate()));
+ CompileRun(args[0]
+ ->ToString(args.GetIsolate()->GetCurrentContext())
+ .ToLocalChecked());
}
@@ -7981,56 +8950,60 @@ static bool SecurityTestCallback(Local<v8::Context> accessing_context,
TEST(SecurityHandler) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope0(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(SecurityTestCallback);
// Create an environment
- v8::Handle<Context> context0 = Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
- v8::Handle<v8::Object> global0 = context0->Global();
- v8::Handle<Script> script0 = v8_compile("foo = 111");
- script0->Run();
- global0->Set(v8_str("0"), v8_num(999));
- v8::Handle<Value> foo0 = global0->Get(v8_str("foo"));
- CHECK_EQ(111, foo0->Int32Value());
- v8::Handle<Value> z0 = global0->Get(v8_str("0"));
- CHECK_EQ(999, z0->Int32Value());
+ v8::Local<v8::Object> global0 = context0->Global();
+ v8::Local<Script> script0 = v8_compile("foo = 111");
+ script0->Run(context0).ToLocalChecked();
+ CHECK(global0->Set(context0, v8_str("0"), v8_num(999)).FromJust());
+ v8::Local<Value> foo0 =
+ global0->Get(context0, v8_str("foo")).ToLocalChecked();
+ CHECK_EQ(111, foo0->Int32Value(context0).FromJust());
+ v8::Local<Value> z0 = global0->Get(context0, v8_str("0")).ToLocalChecked();
+ CHECK_EQ(999, z0->Int32Value(context0).FromJust());
// Create another environment, should fail security checks.
v8::HandleScope scope1(isolate);
- v8::Handle<Context> context1 =
- Context::New(isolate, NULL, global_template);
+ v8::Local<Context> context1 = Context::New(isolate, NULL, global_template);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("othercontext"), global0);
+ v8::Local<v8::Object> global1 = context1->Global();
+ global1->Set(context1, v8_str("othercontext"), global0).FromJust();
// This set will fail the security check.
- v8::Handle<Script> script1 =
- v8_compile("othercontext.foo = 222; othercontext[0] = 888;");
- script1->Run();
+ v8::Local<Script> script1 =
+ v8_compile("othercontext.foo = 222; othercontext[0] = 888;");
+ CHECK(script1->Run(context1).IsEmpty());
g_security_callback_result = true;
// This read will pass the security check.
- v8::Handle<Value> foo1 = global0->Get(v8_str("foo"));
- CHECK_EQ(111, foo1->Int32Value());
+ v8::Local<Value> foo1 =
+ global0->Get(context1, v8_str("foo")).ToLocalChecked();
+ CHECK_EQ(111, foo1->Int32Value(context0).FromJust());
// This read will pass the security check.
- v8::Handle<Value> z1 = global0->Get(v8_str("0"));
- CHECK_EQ(999, z1->Int32Value());
+ v8::Local<Value> z1 = global0->Get(context1, v8_str("0")).ToLocalChecked();
+ CHECK_EQ(999, z1->Int32Value(context1).FromJust());
// Create another environment, should pass security checks.
{
v8::HandleScope scope2(isolate);
LocalContext context2;
- v8::Handle<v8::Object> global2 = context2->Global();
- global2->Set(v8_str("othercontext"), global0);
- v8::Handle<Script> script2 =
+ v8::Local<v8::Object> global2 = context2->Global();
+ CHECK(global2->Set(context2.local(), v8_str("othercontext"), global0)
+ .FromJust());
+ v8::Local<Script> script2 =
v8_compile("othercontext.foo = 333; othercontext[0] = 888;");
- script2->Run();
- v8::Handle<Value> foo2 = global0->Get(v8_str("foo"));
- CHECK_EQ(333, foo2->Int32Value());
- v8::Handle<Value> z2 = global0->Get(v8_str("0"));
- CHECK_EQ(888, z2->Int32Value());
+ script2->Run(context2.local()).ToLocalChecked();
+ v8::Local<Value> foo2 =
+ global0->Get(context2.local(), v8_str("foo")).ToLocalChecked();
+ CHECK_EQ(333, foo2->Int32Value(context2.local()).FromJust());
+ v8::Local<Value> z2 =
+ global0->Get(context2.local(), v8_str("0")).ToLocalChecked();
+ CHECK_EQ(888, z2->Int32Value(context2.local()).FromJust());
}
context1->Exit();
@@ -8041,7 +9014,7 @@ TEST(SecurityHandler) {
THREADED_TEST(SecurityChecks) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -8051,12 +9024,14 @@ THREADED_TEST(SecurityChecks) {
// Create a function in env1.
CompileRun("spy=function(){return spy;}");
- Local<Value> spy = env1->Global()->Get(v8_str("spy"));
+ Local<Value> spy =
+ env1->Global()->Get(env1.local(), v8_str("spy")).ToLocalChecked();
CHECK(spy->IsFunction());
// Create another function accessing global objects.
CompileRun("spy2=function(){return new this.Array();}");
- Local<Value> spy2 = env1->Global()->Get(v8_str("spy2"));
+ Local<Value> spy2 =
+ env1->Global()->Get(env1.local(), v8_str("spy2")).ToLocalChecked();
CHECK(spy2->IsFunction());
// Switch to env2 in the same domain and invoke spy on env2.
@@ -8064,7 +9039,9 @@ THREADED_TEST(SecurityChecks) {
env2->SetSecurityToken(foo);
// Enter env2
Context::Scope scope_env2(env2);
- Local<Value> result = Function::Cast(*spy)->Call(env2->Global(), 0, NULL);
+ Local<Value> result = Function::Cast(*spy)
+ ->Call(env2, env2->Global(), 0, NULL)
+ .ToLocalChecked();
CHECK(result->IsFunction());
}
@@ -8074,7 +9051,7 @@ THREADED_TEST(SecurityChecks) {
// Call cross_domain_call, it should throw an exception
v8::TryCatch try_catch(env1->GetIsolate());
- Function::Cast(*spy2)->Call(env2->Global(), 0, NULL);
+ CHECK(Function::Cast(*spy2)->Call(env2, env2->Global(), 0, NULL).IsEmpty());
CHECK(try_catch.HasCaught());
}
}
@@ -8084,18 +9061,25 @@ THREADED_TEST(SecurityChecks) {
THREADED_TEST(SecurityChecksForPrototypeChain) {
LocalContext current;
v8::HandleScope scope(current->GetIsolate());
- v8::Handle<Context> other = Context::New(current->GetIsolate());
+ v8::Local<Context> other = Context::New(current->GetIsolate());
// Change context to be able to get to the Object function in the
// other context without hitting the security checks.
v8::Local<Value> other_object;
{ Context::Scope scope(other);
- other_object = other->Global()->Get(v8_str("Object"));
- other->Global()->Set(v8_num(42), v8_num(87));
+ other_object =
+ other->Global()->Get(other, v8_str("Object")).ToLocalChecked();
+ CHECK(other->Global()->Set(other, v8_num(42), v8_num(87)).FromJust());
}
- current->Global()->Set(v8_str("other"), other->Global());
- CHECK(v8_compile("other")->Run()->Equals(other->Global()));
+ CHECK(current->Global()
+ ->Set(current.local(), v8_str("other"), other->Global())
+ .FromJust());
+ CHECK(v8_compile("other")
+ ->Run(current.local())
+ .ToLocalChecked()
+ ->Equals(current.local(), other->Global())
+ .FromJust());
// Make sure the security check fails here and we get an undefined
// result instead of getting the Object function. Repeat in a loop
@@ -8103,41 +9087,50 @@ THREADED_TEST(SecurityChecksForPrototypeChain) {
v8::Local<Script> access_other0 = v8_compile("other.Object");
v8::Local<Script> access_other1 = v8_compile("other[42]");
for (int i = 0; i < 5; i++) {
- CHECK(access_other0->Run().IsEmpty());
- CHECK(access_other1->Run().IsEmpty());
+ CHECK(access_other0->Run(current.local()).IsEmpty());
+ CHECK(access_other1->Run(current.local()).IsEmpty());
}
// Create an object that has 'other' in its prototype chain and make
// sure we cannot access the Object function indirectly through
// that. Repeat in a loop to make sure to exercise the IC code.
- v8_compile("function F() { };"
- "F.prototype = other;"
- "var f = new F();")->Run();
+ v8_compile(
+ "function F() { };"
+ "F.prototype = other;"
+ "var f = new F();")
+ ->Run(current.local())
+ .ToLocalChecked();
v8::Local<Script> access_f0 = v8_compile("f.Object");
v8::Local<Script> access_f1 = v8_compile("f[42]");
for (int j = 0; j < 5; j++) {
- CHECK(access_f0->Run().IsEmpty());
- CHECK(access_f1->Run().IsEmpty());
+ CHECK(access_f0->Run(current.local()).IsEmpty());
+ CHECK(access_f1->Run(current.local()).IsEmpty());
}
// Now it gets hairy: Set the prototype for the other global object
// to be the current global object. The prototype chain for 'f' now
// goes through 'other' but ends up in the current global object.
{ Context::Scope scope(other);
- other->Global()->Set(v8_str("__proto__"), current->Global());
+ CHECK(other->Global()
+ ->Set(other, v8_str("__proto__"), current->Global())
+ .FromJust());
}
// Set a named and an index property on the current global
// object. To force the lookup to go through the other global object,
// the properties must not exist in the other global object.
- current->Global()->Set(v8_str("foo"), v8_num(100));
- current->Global()->Set(v8_num(99), v8_num(101));
+ CHECK(current->Global()
+ ->Set(current.local(), v8_str("foo"), v8_num(100))
+ .FromJust());
+ CHECK(current->Global()
+ ->Set(current.local(), v8_num(99), v8_num(101))
+ .FromJust());
// Try to read the properties from f and make sure that the access
// gets stopped by the security checks on the other global object.
Local<Script> access_f2 = v8_compile("f.foo");
Local<Script> access_f3 = v8_compile("f[99]");
for (int k = 0; k < 5; k++) {
- CHECK(access_f2->Run().IsEmpty());
- CHECK(access_f3->Run().IsEmpty());
+ CHECK(access_f2->Run(current.local()).IsEmpty());
+ CHECK(access_f3->Run(current.local()).IsEmpty());
}
}
@@ -8155,21 +9148,28 @@ static bool SecurityTestCallbackWithGC(Local<v8::Context> accessing_context,
TEST(SecurityTestGCAllowed) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallback(SecurityTestCallbackWithGC);
- v8::Handle<Context> context = Context::New(isolate);
+ v8::Local<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
- context->Global()->Set(v8_str("obj"), object_template->NewInstance());
+ CHECK(context->Global()
+ ->Set(context, v8_str("obj"),
+ object_template->NewInstance(context).ToLocalChecked())
+ .FromJust());
security_check_with_gc_called = false;
CompileRun("obj[0] = new String(1002);");
CHECK(security_check_with_gc_called);
security_check_with_gc_called = false;
- CHECK(CompileRun("obj[0]")->ToString(isolate)->Equals(v8_str("1002")));
+ CHECK(CompileRun("obj[0]")
+ ->ToString(context)
+ .ToLocalChecked()
+ ->Equals(context, v8_str("1002"))
+ .FromJust());
CHECK(security_check_with_gc_called);
}
@@ -8177,7 +9177,7 @@ TEST(SecurityTestGCAllowed) {
THREADED_TEST(CrossDomainDelete) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -8186,8 +9186,9 @@ THREADED_TEST(CrossDomainDelete) {
env1->SetSecurityToken(foo);
env2->SetSecurityToken(foo);
- env1->Global()->Set(v8_str("prop"), v8_num(3));
- env2->Global()->Set(v8_str("env1"), env1->Global());
+ CHECK(
+ env1->Global()->Set(env1.local(), v8_str("prop"), v8_num(3)).FromJust());
+ CHECK(env2->Global()->Set(env2, v8_str("env1"), env1->Global()).FromJust());
// Change env2 to a different domain and delete env1.prop.
env2->SetSecurityToken(bar);
@@ -8199,16 +9200,17 @@ THREADED_TEST(CrossDomainDelete) {
}
// Check that env1.prop still exists.
- Local<Value> v = env1->Global()->Get(v8_str("prop"));
+ Local<Value> v =
+ env1->Global()->Get(env1.local(), v8_str("prop")).ToLocalChecked();
CHECK(v->IsNumber());
- CHECK_EQ(3, v->Int32Value());
+ CHECK_EQ(3, v->Int32Value(env1.local()).FromJust());
}
-THREADED_TEST(CrossDomainIsPropertyEnumerable) {
+THREADED_TEST(CrossDomainPropertyIsEnumerable) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -8217,8 +9219,9 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
env1->SetSecurityToken(foo);
env2->SetSecurityToken(foo);
- env1->Global()->Set(v8_str("prop"), v8_num(3));
- env2->Global()->Set(v8_str("env1"), env1->Global());
+ CHECK(
+ env1->Global()->Set(env1.local(), v8_str("prop"), v8_num(3)).FromJust());
+ CHECK(env2->Global()->Set(env2, v8_str("env1"), env1->Global()).FromJust());
// env1.prop is enumerable in env2.
Local<String> test = v8_str("propertyIsEnumerable.call(env1, 'prop')");
@@ -8241,7 +9244,7 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
THREADED_TEST(CrossDomainFor) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -8250,13 +9253,14 @@ THREADED_TEST(CrossDomainFor) {
env1->SetSecurityToken(foo);
env2->SetSecurityToken(foo);
- env1->Global()->Set(v8_str("prop"), v8_num(3));
- env2->Global()->Set(v8_str("env1"), env1->Global());
+ CHECK(
+ env1->Global()->Set(env1.local(), v8_str("prop"), v8_num(3)).FromJust());
+ CHECK(env2->Global()->Set(env2, v8_str("env1"), env1->Global()).FromJust());
// Change env2 to a different domain and set env1's global object
// as the __proto__ of an object in env2 and enumerate properties
// in for-in. It shouldn't enumerate properties on env1's global
- // object.
+ // object. It shouldn't throw either, just silently ignore them.
env2->SetSecurityToken(bar);
{
Context::Scope scope_env2(env2);
@@ -8279,7 +9283,7 @@ THREADED_TEST(CrossDomainFor) {
THREADED_TEST(CrossDomainForInOnPrototype) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -8288,8 +9292,9 @@ THREADED_TEST(CrossDomainForInOnPrototype) {
env1->SetSecurityToken(foo);
env2->SetSecurityToken(foo);
- env1->Global()->Set(v8_str("prop"), v8_num(3));
- env2->Global()->Set(v8_str("env1"), env1->Global());
+ CHECK(
+ env1->Global()->Set(env1.local(), v8_str("prop"), v8_num(3)).FromJust());
+ CHECK(env2->Global()->Set(env2, v8_str("env1"), env1->Global()).FromJust());
// Change env2 to a different domain and set env1's global object
// as the __proto__ of an object in env2 and enumerate properties
@@ -8305,9 +9310,9 @@ THREADED_TEST(CrossDomainForInOnPrototype) {
" for (var p in obj) {"
" if (p == 'prop') return false;"
" }"
- " return false;"
- " } catch (e) {"
" return true;"
+ " } catch (e) {"
+ " return false;"
" }"
"})()");
CHECK(result->IsTrue());
@@ -8318,9 +9323,8 @@ THREADED_TEST(CrossDomainForInOnPrototype) {
TEST(ContextDetachGlobal) {
LocalContext env1;
v8::HandleScope handle_scope(env1->GetIsolate());
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
- Local<v8::Object> global1 = env1->Global();
Local<Value> foo = v8_str("foo");
@@ -8333,44 +9337,54 @@ TEST(ContextDetachGlobal) {
// Create a function in env2 and add a reference to it in env1.
Local<v8::Object> global2 = env2->Global();
- global2->Set(v8_str("prop"), v8::Integer::New(env2->GetIsolate(), 1));
+ CHECK(global2->Set(env2, v8_str("prop"),
+ v8::Integer::New(env2->GetIsolate(), 1))
+ .FromJust());
CompileRun("function getProp() {return prop;}");
- env1->Global()->Set(v8_str("getProp"),
- global2->Get(v8_str("getProp")));
+ CHECK(env1->Global()
+ ->Set(env1.local(), v8_str("getProp"),
+ global2->Get(env2, v8_str("getProp")).ToLocalChecked())
+ .FromJust());
// Detach env2's global, and reuse the global object of env2
env2->Exit();
env2->DetachGlobal();
- v8::Handle<Context> env3 = Context::New(env1->GetIsolate(),
- 0,
- v8::Handle<v8::ObjectTemplate>(),
- global2);
+ v8::Local<Context> env3 = Context::New(
+ env1->GetIsolate(), 0, v8::Local<v8::ObjectTemplate>(), global2);
env3->SetSecurityToken(v8_str("bar"));
- env3->Enter();
+ env3->Enter();
Local<v8::Object> global3 = env3->Global();
- CHECK(global2->Equals(global3));
- CHECK(global3->Get(v8_str("prop"))->IsUndefined());
- CHECK(global3->Get(v8_str("getProp"))->IsUndefined());
- global3->Set(v8_str("prop"), v8::Integer::New(env3->GetIsolate(), -1));
- global3->Set(v8_str("prop2"), v8::Integer::New(env3->GetIsolate(), 2));
+ CHECK(global2->Equals(env3, global3).FromJust());
+ CHECK(global3->Get(env3, v8_str("prop")).ToLocalChecked()->IsUndefined());
+ CHECK(global3->Get(env3, v8_str("getProp")).ToLocalChecked()->IsUndefined());
+ CHECK(global3->Set(env3, v8_str("prop"),
+ v8::Integer::New(env3->GetIsolate(), -1))
+ .FromJust());
+ CHECK(global3->Set(env3, v8_str("prop2"),
+ v8::Integer::New(env3->GetIsolate(), 2))
+ .FromJust());
env3->Exit();
// Call getProp in env1, and it should return the value 1
{
- Local<Value> get_prop = global1->Get(v8_str("getProp"));
+ Local<v8::Object> global1 = env1->Global();
+ Local<Value> get_prop =
+ global1->Get(env1.local(), v8_str("getProp")).ToLocalChecked();
CHECK(get_prop->IsFunction());
v8::TryCatch try_catch(env1->GetIsolate());
- Local<Value> r = Function::Cast(*get_prop)->Call(global1, 0, NULL);
+ Local<Value> r = Function::Cast(*get_prop)
+ ->Call(env1.local(), global1, 0, NULL)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
- CHECK_EQ(1, r->Int32Value());
+ CHECK_EQ(1, r->Int32Value(env1.local()).FromJust());
}
// Check that env3 is not accessible from env1
{
- Local<Value> r = global3->Get(v8_str("prop2"));
+ v8::MaybeLocal<Value> r = global3->Get(env1.local(), v8_str("prop2"));
CHECK(r.IsEmpty());
}
}
@@ -8381,7 +9395,7 @@ TEST(DetachGlobal) {
v8::HandleScope scope(env1->GetIsolate());
// Create second environment.
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> foo = v8_str("foo");
@@ -8392,16 +9406,20 @@ TEST(DetachGlobal) {
// Create a property on the global object in env2.
{
v8::Context::Scope scope(env2);
- env2->Global()->Set(v8_str("p"), v8::Integer::New(env2->GetIsolate(), 42));
+ CHECK(env2->Global()
+ ->Set(env2, v8_str("p"), v8::Integer::New(env2->GetIsolate(), 42))
+ .FromJust());
}
// Create a reference to env2 global from env1 global.
- env1->Global()->Set(v8_str("other"), env2->Global());
+ CHECK(env1->Global()
+ ->Set(env1.local(), v8_str("other"), env2->Global())
+ .FromJust());
// Check that we have access to other.p in env2 from env1.
Local<Value> result = CompileRun("other.p");
CHECK(result->IsInt32());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(env1.local()).FromJust());
// Hold on to global from env2 and detach global from env2.
Local<v8::Object> global2 = env2->Global();
@@ -8413,11 +9431,9 @@ TEST(DetachGlobal) {
CHECK(result.IsEmpty());
// Reuse global2 for env3.
- v8::Handle<Context> env3 = Context::New(env1->GetIsolate(),
- 0,
- v8::Handle<v8::ObjectTemplate>(),
- global2);
- CHECK(global2->Equals(env3->Global()));
+ v8::Local<Context> env3 = Context::New(
+ env1->GetIsolate(), 0, v8::Local<v8::ObjectTemplate>(), global2);
+ CHECK(global2->Equals(env1.local(), env3->Global()).FromJust());
// Start by using the same security token for env3 as for env1 and env2.
env3->SetSecurityToken(foo);
@@ -8425,13 +9441,15 @@ TEST(DetachGlobal) {
// Create a property on the global object in env3.
{
v8::Context::Scope scope(env3);
- env3->Global()->Set(v8_str("p"), v8::Integer::New(env3->GetIsolate(), 24));
+ CHECK(env3->Global()
+ ->Set(env3, v8_str("p"), v8::Integer::New(env3->GetIsolate(), 24))
+ .FromJust());
}
// Check that other.p is now the property in env3 and that we have access.
result = CompileRun("other.p");
CHECK(result->IsInt32());
- CHECK_EQ(24, result->Int32Value());
+ CHECK_EQ(24, result->Int32Value(env3).FromJust());
// Change security token for env3 to something different from env1 and env2.
env3->SetSecurityToken(v8_str("bar"));
@@ -8445,8 +9463,9 @@ TEST(DetachGlobal) {
void GetThisX(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
info.GetReturnValue().Set(
- info.GetIsolate()->GetCurrentContext()->Global()->Get(v8_str("x")));
+ context->Global()->Get(context, v8_str("x")).ToLocalChecked());
}
@@ -8468,21 +9487,30 @@ TEST(DetachedAccesses) {
env1->SetSecurityToken(foo);
env2->SetSecurityToken(foo);
- env1->Global()->Set(v8_str("x"), v8_str("env1_x"));
+ CHECK(env1->Global()
+ ->Set(env1.local(), v8_str("x"), v8_str("env1_x"))
+ .FromJust());
{
v8::Context::Scope scope(env2);
- env2->Global()->Set(v8_str("x"), v8_str("env2_x"));
+ CHECK(env2->Global()->Set(env2, v8_str("x"), v8_str("env2_x")).FromJust());
CompileRun(
"function bound_x() { return x; }"
"function get_x() { return this.x; }"
"function get_x_w() { return (function() {return this.x;})(); }");
- env1->Global()->Set(v8_str("bound_x"), CompileRun("bound_x"));
- env1->Global()->Set(v8_str("get_x"), CompileRun("get_x"));
- env1->Global()->Set(v8_str("get_x_w"), CompileRun("get_x_w"));
- env1->Global()->Set(
- v8_str("this_x"),
- CompileRun("Object.getOwnPropertyDescriptor(this, 'this_x').get"));
+ CHECK(env1->Global()
+ ->Set(env1.local(), v8_str("bound_x"), CompileRun("bound_x"))
+ .FromJust());
+ CHECK(env1->Global()
+ ->Set(env1.local(), v8_str("get_x"), CompileRun("get_x"))
+ .FromJust());
+ CHECK(env1->Global()
+ ->Set(env1.local(), v8_str("get_x_w"), CompileRun("get_x_w"))
+ .FromJust());
+ env1->Global()
+ ->Set(env1.local(), v8_str("this_x"),
+ CompileRun("Object.getOwnPropertyDescriptor(this, 'this_x').get"))
+ .FromJust();
}
Local<Object> env2_global = env2->Global();
@@ -8490,24 +9518,22 @@ TEST(DetachedAccesses) {
Local<Value> result;
result = CompileRun("bound_x()");
- CHECK(v8_str("env2_x")->Equals(result));
+ CHECK(v8_str("env2_x")->Equals(env1.local(), result).FromJust());
result = CompileRun("get_x()");
CHECK(result.IsEmpty());
result = CompileRun("get_x_w()");
CHECK(result.IsEmpty());
result = CompileRun("this_x()");
- CHECK(v8_str("env2_x")->Equals(result));
+ CHECK(v8_str("env2_x")->Equals(env1.local(), result).FromJust());
// Reattach env2's proxy
- env2 = Context::New(env1->GetIsolate(),
- 0,
- v8::Handle<v8::ObjectTemplate>(),
+ env2 = Context::New(env1->GetIsolate(), 0, v8::Local<v8::ObjectTemplate>(),
env2_global);
env2->SetSecurityToken(foo);
{
v8::Context::Scope scope(env2);
- env2->Global()->Set(v8_str("x"), v8_str("env3_x"));
- env2->Global()->Set(v8_str("env1"), env1->Global());
+ CHECK(env2->Global()->Set(env2, v8_str("x"), v8_str("env3_x")).FromJust());
+ CHECK(env2->Global()->Set(env2, v8_str("env1"), env1->Global()).FromJust());
result = CompileRun(
"results = [];"
"for (var i = 0; i < 4; i++ ) {"
@@ -8520,10 +9546,18 @@ TEST(DetachedAccesses) {
Local<v8::Array> results = Local<v8::Array>::Cast(result);
CHECK_EQ(16u, results->Length());
for (int i = 0; i < 16; i += 4) {
- CHECK(v8_str("env2_x")->Equals(results->Get(i + 0)));
- CHECK(v8_str("env1_x")->Equals(results->Get(i + 1)));
- CHECK(v8_str("env3_x")->Equals(results->Get(i + 2)));
- CHECK(v8_str("env2_x")->Equals(results->Get(i + 3)));
+ CHECK(v8_str("env2_x")
+ ->Equals(env2, results->Get(env2, i + 0).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env1_x")
+ ->Equals(env2, results->Get(env2, i + 1).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env3_x")
+ ->Equals(env2, results->Get(env2, i + 2).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env2_x")
+ ->Equals(env2, results->Get(env2, i + 3).ToLocalChecked())
+ .FromJust());
}
}
@@ -8539,10 +9573,22 @@ TEST(DetachedAccesses) {
Local<v8::Array> results = Local<v8::Array>::Cast(result);
CHECK_EQ(16u, results->Length());
for (int i = 0; i < 16; i += 4) {
- CHECK(v8_str("env2_x")->Equals(results->Get(i + 0)));
- CHECK(v8_str("env3_x")->Equals(results->Get(i + 1)));
- CHECK(v8_str("env3_x")->Equals(results->Get(i + 2)));
- CHECK(v8_str("env2_x")->Equals(results->Get(i + 3)));
+ CHECK(v8_str("env2_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 0).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env3_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 1).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env3_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 2).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env2_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 3).ToLocalChecked())
+ .FromJust());
}
result = CompileRun(
@@ -8557,10 +9603,22 @@ TEST(DetachedAccesses) {
results = Local<v8::Array>::Cast(result);
CHECK_EQ(16u, results->Length());
for (int i = 0; i < 16; i += 4) {
- CHECK(v8_str("env2_x")->Equals(results->Get(i + 0)));
- CHECK(v8_str("env1_x")->Equals(results->Get(i + 1)));
- CHECK(v8_str("env3_x")->Equals(results->Get(i + 2)));
- CHECK(v8_str("env2_x")->Equals(results->Get(i + 3)));
+ CHECK(v8_str("env2_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 0).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env1_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 1).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env3_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 2).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("env2_x")
+ ->Equals(env1.local(),
+ results->Get(env1.local(), i + 3).ToLocalChecked())
+ .FromJust());
}
}
@@ -8568,8 +9626,8 @@ TEST(DetachedAccesses) {
static bool allowed_access = false;
static bool AccessBlocker(Local<v8::Context> accessing_context,
Local<v8::Object> accessed_object) {
- return CcTest::isolate()->GetCurrentContext()->Global()->Equals(
- accessed_object) ||
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ return context->Global()->Equals(context, accessed_object).FromJust() ||
allowed_access;
}
@@ -8584,11 +9642,11 @@ static void EchoGetter(
}
-static void EchoSetter(Local<String> name,
- Local<Value> value,
- const v8::PropertyCallbackInfo<void>&) {
+static void EchoSetter(Local<String> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<void>& args) {
if (value->IsNumber())
- g_echo_value = value->Int32Value();
+ g_echo_value =
+ value->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust();
}
@@ -8615,23 +9673,20 @@ static void UnreachableFunction(
TEST(AccessControl) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(AccessBlocker);
// Add an accessor accessible by cross-domain JS code.
global_template->SetAccessor(
- v8_str("accessible_prop"),
- EchoGetter, EchoSetter,
- v8::Handle<Value>(),
+ v8_str("accessible_prop"), EchoGetter, EchoSetter, v8::Local<Value>(),
v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
// Add an accessor that is not accessible by cross-domain JS code.
- global_template->SetAccessor(v8_str("blocked_prop"),
- UnreachableGetter, UnreachableSetter,
- v8::Handle<Value>(),
+ global_template->SetAccessor(v8_str("blocked_prop"), UnreachableGetter,
+ UnreachableSetter, v8::Local<Value>(),
v8::DEFAULT);
global_template->SetAccessorProperty(
@@ -8645,7 +9700,7 @@ TEST(AccessControl) {
v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
- v8::Handle<v8::Object> global0 = context0->Global();
+ v8::Local<v8::Object> global0 = context0->Global();
// Define a property with JS getter and setter.
CompileRun(
@@ -8653,11 +9708,13 @@ TEST(AccessControl) {
"function setter() { return 'setter'; }\n"
"Object.defineProperty(this, 'js_accessor_p', {get:getter, set:setter})");
- Local<Value> getter = global0->Get(v8_str("getter"));
- Local<Value> setter = global0->Get(v8_str("setter"));
+ Local<Value> getter =
+ global0->Get(context0, v8_str("getter")).ToLocalChecked();
+ Local<Value> setter =
+ global0->Get(context0, v8_str("setter")).ToLocalChecked();
// And define normal element.
- global0->Set(239, v8_str("239"));
+ CHECK(global0->Set(context0, 239, v8_str("239")).FromJust());
// Define an element with JS getter and setter.
CompileRun(
@@ -8665,16 +9722,18 @@ TEST(AccessControl) {
"function el_setter() { return 'el_setter'; };\n"
"Object.defineProperty(this, '42', {get: el_getter, set: el_setter});");
- Local<Value> el_getter = global0->Get(v8_str("el_getter"));
- Local<Value> el_setter = global0->Get(v8_str("el_setter"));
+ Local<Value> el_getter =
+ global0->Get(context0, v8_str("el_getter")).ToLocalChecked();
+ Local<Value> el_setter =
+ global0->Get(context0, v8_str("el_setter")).ToLocalChecked();
v8::HandleScope scope1(isolate);
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("other"), global0);
+ v8::Local<v8::Object> global1 = context1->Global();
+ CHECK(global1->Set(context1, v8_str("other"), global0).FromJust());
// Access blocked property.
CompileRun("other.blocked_prop = 1");
@@ -8731,28 +9790,29 @@ TEST(AccessControl) {
allowed_access = false;
- v8::Handle<Value> value;
+ v8::Local<Value> value;
// Access accessible property
value = CompileRun("other.accessible_prop = 3");
CHECK(value->IsNumber());
- CHECK_EQ(3, value->Int32Value());
+ CHECK_EQ(3, value->Int32Value(context1).FromJust());
CHECK_EQ(3, g_echo_value);
value = CompileRun("other.accessible_prop");
CHECK(value->IsNumber());
- CHECK_EQ(3, value->Int32Value());
+ CHECK_EQ(3, value->Int32Value(context1).FromJust());
value = CompileRun(
"Object.getOwnPropertyDescriptor(other, 'accessible_prop').value");
CHECK(value->IsNumber());
- CHECK_EQ(3, value->Int32Value());
+ CHECK_EQ(3, value->Int32Value(context1).FromJust());
value = CompileRun("propertyIsEnumerable.call(other, 'accessible_prop')");
CHECK(value->IsTrue());
// Enumeration doesn't enumerate accessors from inaccessible objects in
// the prototype chain even if the accessors are in themselves accessible.
+ // Enumeration doesn't throw, it silently ignores what it can't access.
value = CompileRun(
"(function() {"
" var obj = { '__proto__': other };"
@@ -8764,16 +9824,18 @@ TEST(AccessControl) {
" return false;"
" }"
" }"
- " return false;"
- " } catch (e) {"
" return true;"
+ " } catch (e) {"
+ " return false;"
" }"
"})()");
CHECK(value->IsTrue());
// Test that preventExtensions fails on a non-accessible object even if that
// object is already non-extensible.
- global1->Set(v8_str("checked_object"), global_template->NewInstance());
+ CHECK(global1->Set(context1, v8_str("checked_object"),
+ global_template->NewInstance(context1).ToLocalChecked())
+ .FromJust());
allowed_access = true;
CompileRun("Object.preventExtensions(checked_object)");
ExpectFalse("Object.isExtensible(checked_object)");
@@ -8788,38 +9850,40 @@ TEST(AccessControl) {
TEST(AccessControlES5) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(AccessBlocker);
// Add accessible accessor.
global_template->SetAccessor(
- v8_str("accessible_prop"),
- EchoGetter, EchoSetter,
- v8::Handle<Value>(),
+ v8_str("accessible_prop"), EchoGetter, EchoSetter, v8::Local<Value>(),
v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
// Add an accessor that is not accessible by cross-domain JS code.
- global_template->SetAccessor(v8_str("blocked_prop"),
- UnreachableGetter, UnreachableSetter,
- v8::Handle<Value>(),
+ global_template->SetAccessor(v8_str("blocked_prop"), UnreachableGetter,
+ UnreachableSetter, v8::Local<Value>(),
v8::DEFAULT);
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
- v8::Handle<v8::Object> global0 = context0->Global();
+ v8::Local<v8::Object> global0 = context0->Global();
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("other"), global0);
+ v8::Local<v8::Object> global1 = context1->Global();
+ CHECK(global1->Set(context1, v8_str("other"), global0).FromJust());
// Regression test for issue 1154.
- CHECK(CompileRun("Object.keys(other).length == 0")->BooleanValue());
+ CHECK(CompileRun("Object.keys(other).length == 1")
+ ->BooleanValue(context1)
+ .FromJust());
+ CHECK(CompileRun("Object.keys(other)[0] == 'accessible_prop'")
+ ->BooleanValue(context1)
+ .FromJust());
CHECK(CompileRun("other.blocked_prop").IsEmpty());
// Regression test for issue 1027.
@@ -8866,33 +9930,34 @@ static bool AccessAlwaysBlocked(Local<v8::Context> accessing_context,
THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
obj_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Add an accessor accessible by cross-domain JS code.
obj_template->SetAccessor(
- v8_str("accessible_prop"), EchoGetter, EchoSetter, v8::Handle<Value>(),
+ v8_str("accessible_prop"), EchoGetter, EchoSetter, v8::Local<Value>(),
v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
context0->Enter();
- v8::Handle<v8::Object> global0 = context0->Global();
+ v8::Local<v8::Object> global0 = context0->Global();
v8::HandleScope scope1(CcTest::isolate());
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("other"), global0);
- global1->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global1 = context1->Global();
+ CHECK(global1->Set(context1, v8_str("other"), global0).FromJust());
+ CHECK(global1->Set(context1, v8_str("object"),
+ obj_template->NewInstance(context1).ToLocalChecked())
+ .FromJust());
- v8::Handle<Value> value;
+ v8::Local<Value> value;
// Attempt to get the property names of the other global object and
// of an object that requires access checks. Accessing the other
@@ -8902,12 +9967,12 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
value = CompileRun(
"var names = Object.getOwnPropertyNames(other);"
"names.length == 1 && names[0] == 'accessible_prop';");
- CHECK(value->BooleanValue());
+ CHECK(value->BooleanValue(context1).FromJust());
value = CompileRun(
"var names = Object.getOwnPropertyNames(object);"
"names.length == 1 && names[0] == 'accessible_prop';");
- CHECK(value->BooleanValue());
+ CHECK(value->BooleanValue(context1).FromJust());
context1->Exit();
context0->Exit();
@@ -8917,11 +9982,13 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
TEST(Regress470113) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->SetAccessCheckCallback(AccessAlwaysBlocked);
LocalContext env;
- env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("prohibited"),
+ obj_template->NewInstance(env.local()).ToLocalChecked())
+ .FromJust());
{
v8::TryCatch try_catch(isolate);
@@ -8948,49 +10015,47 @@ THREADED_TEST(CrossDomainAccessors) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::FunctionTemplate> func_template =
+ v8::Local<v8::FunctionTemplate> func_template =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
func_template->InstanceTemplate();
- v8::Handle<v8::ObjectTemplate> proto_template =
+ v8::Local<v8::ObjectTemplate> proto_template =
func_template->PrototypeTemplate();
// Add an accessor to proto that's accessible by cross-domain JS code.
- proto_template->SetAccessor(v8_str("accessible"),
- ConstTenGetter, 0,
- v8::Handle<Value>(),
- v8::ALL_CAN_READ);
+ proto_template->SetAccessor(v8_str("accessible"), ConstTenGetter, 0,
+ v8::Local<Value>(), v8::ALL_CAN_READ);
// Add an accessor that is not accessible by cross-domain JS code.
- global_template->SetAccessor(v8_str("unreachable"),
- UnreachableGetter, 0,
- v8::Handle<Value>(),
- v8::DEFAULT);
+ global_template->SetAccessor(v8_str("unreachable"), UnreachableGetter, 0,
+ v8::Local<Value>(), v8::DEFAULT);
v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
Local<v8::Object> global = context0->Global();
// Add a normal property that shadows 'accessible'
- global->Set(v8_str("accessible"), v8_num(11));
+ CHECK(global->Set(context0, v8_str("accessible"), v8_num(11)).FromJust());
// Enter a new context.
v8::HandleScope scope1(CcTest::isolate());
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("other"), global);
+ v8::Local<v8::Object> global1 = context1->Global();
+ CHECK(global1->Set(context1, v8_str("other"), global).FromJust());
// Should return 10, instead of 11
- v8::Handle<Value> value = v8_compile("other.accessible")->Run();
+ v8::Local<Value> value =
+ v8_compile("other.accessible")->Run(context1).ToLocalChecked();
CHECK(value->IsNumber());
- CHECK_EQ(10, value->Int32Value());
+ CHECK_EQ(10, value->Int32Value(context1).FromJust());
- value = v8_compile("other.unreachable")->Run();
- CHECK(value.IsEmpty());
+ v8::MaybeLocal<v8::Value> maybe_value =
+ v8_compile("other.unreachable")->Run(context1);
+ CHECK(maybe_value.IsEmpty());
context1->Exit();
context0->Exit();
@@ -9019,10 +10084,11 @@ TEST(AccessControlIC) {
// Create an object that requires access-check functions to be
// called for cross-domain access.
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallback(AccessCounter);
- Local<v8::Object> object = object_template->NewInstance();
+ Local<v8::Object> object =
+ object_template->NewInstance(context0).ToLocalChecked();
v8::HandleScope scope1(isolate);
@@ -9031,10 +10097,10 @@ TEST(AccessControlIC) {
context1->Enter();
// Make easy access to the object from the other environment.
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("obj"), object);
+ v8::Local<v8::Object> global1 = context1->Global();
+ CHECK(global1->Set(context1, v8_str("obj"), object).FromJust());
- v8::Handle<Value> value;
+ v8::Local<Value> value;
// Check that the named access-control function is called every time.
CompileRun("function testProp(obj) {"
@@ -9044,7 +10110,7 @@ TEST(AccessControlIC) {
"}");
value = CompileRun("testProp(obj)");
CHECK(value->IsNumber());
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(context1).FromJust());
CHECK_EQ(21, access_count);
// Check that the named access-control function is called every time.
@@ -9058,14 +10124,14 @@ TEST(AccessControlIC) {
// in that case.
value = CompileRun("testKeyed(obj)");
CHECK(value->IsNumber());
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(context1).FromJust());
CHECK_EQ(42, access_count);
// Force the inline caches into generic state and try again.
CompileRun("testKeyed({ a: 0 })");
CompileRun("testKeyed({ b: 0 })");
value = CompileRun("testKeyed(obj)");
CHECK(value->IsNumber());
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(context1).FromJust());
CHECK_EQ(63, access_count);
// Check that the indexed access-control function is called every time.
@@ -9078,14 +10144,14 @@ TEST(AccessControlIC) {
"}");
value = CompileRun("testIndexed(obj)");
CHECK(value->IsNumber());
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(context1).FromJust());
CHECK_EQ(21, access_count);
// Force the inline caches into generic state.
CompileRun("testIndexed(new Array(1))");
// Test that the indexed access check is called.
value = CompileRun("testIndexed(obj)");
CHECK(value->IsNumber());
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(context1).FromJust());
CHECK_EQ(42, access_count);
access_count = 0;
@@ -9101,13 +10167,13 @@ TEST(AccessControlIC) {
// Force obj into slow case.
value = CompileRun("delete obj.prop");
- CHECK(value->BooleanValue());
+ CHECK(value->BooleanValue(context1).FromJust());
// Force inline caches into dictionary probing mode.
CompileRun("var o = { x: 0 }; delete o.x; testProp(o);");
// Test that the named access check is called.
value = CompileRun("testProp(obj);");
CHECK(value->IsNumber());
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(context1).FromJust());
CHECK_EQ(33, access_count);
// Force the call inline cache into dictionary probing mode.
@@ -9144,14 +10210,17 @@ THREADED_TEST(InstanceProperties) {
instance->Set(v8_str("f"),
v8::FunctionTemplate::New(isolate, InstanceFunctionCallback));
- Local<Value> o = t->GetFunction()->NewInstance();
+ Local<Value> o = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
- context->Global()->Set(v8_str("i"), o);
+ CHECK(context->Global()->Set(context.local(), v8_str("i"), o).FromJust());
Local<Value> value = CompileRun("i.x");
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
value = CompileRun("i.f()");
- CHECK_EQ(12, value->Int32Value());
+ CHECK_EQ(12, value->Int32Value(context.local()).FromJust());
}
@@ -9199,22 +10268,22 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
global_object = env->Global();
Local<Value> value = CompileRun("x");
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(env.local()).FromJust());
value = CompileRun("f()");
- CHECK_EQ(12, value->Int32Value());
+ CHECK_EQ(12, value->Int32Value(env.local()).FromJust());
value = CompileRun(script);
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(env.local()).FromJust());
}
{
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
Local<Value> value = CompileRun("x");
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(env.local()).FromJust());
value = CompileRun("f()");
- CHECK_EQ(12, value->Int32Value());
+ CHECK_EQ(12, value->Int32Value(env.local()).FromJust());
value = CompileRun(script);
- CHECK_EQ(1, value->Int32Value());
+ CHECK_EQ(1, value->Int32Value(env.local()).FromJust());
}
}
@@ -9254,7 +10323,7 @@ THREADED_TEST(CallKnownGlobalReceiver) {
{
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
- env->Global()->Set(v8_str("foo"), foo);
+ CHECK(env->Global()->Set(env.local(), v8_str("foo"), foo).FromJust());
CompileRun("foo()");
}
}
@@ -9321,25 +10390,30 @@ THREADED_TEST(ShadowObject) {
instance->SetAccessor(v8_str("y"), ShadowYGetter, ShadowYSetter);
- Local<Value> o = t->GetFunction()->NewInstance();
- context->Global()->Set(v8_str("__proto__"), o);
+ Local<Value> o = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("__proto__"), o)
+ .FromJust());
Local<Value> value =
CompileRun("this.propertyIsEnumerable(0)");
CHECK(value->IsBoolean());
- CHECK(!value->BooleanValue());
+ CHECK(!value->BooleanValue(context.local()).FromJust());
value = CompileRun("x");
- CHECK_EQ(12, value->Int32Value());
+ CHECK_EQ(12, value->Int32Value(context.local()).FromJust());
value = CompileRun("f()");
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
CompileRun("y = 43");
CHECK_EQ(1, shadow_y_setter_call_count);
value = CompileRun("y");
CHECK_EQ(1, shadow_y_getter_call_count);
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
@@ -9359,32 +10433,78 @@ THREADED_TEST(HiddenPrototype) {
Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
- Local<v8::Object> o0 = t0->GetFunction()->NewInstance();
- Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
- Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
- Local<v8::Object> o3 = t3->GetFunction()->NewInstance();
+ Local<v8::Object> o0 = t0->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o1 = t1->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o2 = t2->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o3 = t3->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
// Setting the prototype on an object skips hidden prototypes.
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- o0->Set(v8_str("__proto__"), o1);
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
- o0->Set(v8_str("__proto__"), o2);
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
- CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value());
- o0->Set(v8_str("__proto__"), o3);
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
- CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value());
- CHECK_EQ(3, o0->Get(v8_str("u"))->Int32Value());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o0->Set(context.local(), v8_str("__proto__"), o1).FromJust());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o0->Set(context.local(), v8_str("__proto__"), o2).FromJust());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(2, o0->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o0->Set(context.local(), v8_str("__proto__"), o3).FromJust());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(2, o0->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(3, o0->Get(context.local(), v8_str("u"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
// Getting the prototype of o0 should get the first visible one
// which is o3. Therefore, z should not be defined on the prototype
// object.
- Local<Value> proto = o0->Get(v8_str("__proto__"));
+ Local<Value> proto =
+ o0->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
CHECK(proto->IsObject());
- CHECK(proto.As<v8::Object>()->Get(v8_str("z"))->IsUndefined());
+ CHECK(proto.As<v8::Object>()
+ ->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->IsUndefined());
}
@@ -9399,34 +10519,70 @@ THREADED_TEST(HiddenPrototypeSet) {
Local<v8::FunctionTemplate> pt = v8::FunctionTemplate::New(isolate);
ht->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
- Local<v8::Object> o = ot->GetFunction()->NewInstance();
- Local<v8::Object> h = ht->GetFunction()->NewInstance();
- Local<v8::Object> p = pt->GetFunction()->NewInstance();
- o->Set(v8_str("__proto__"), h);
- h->Set(v8_str("__proto__"), p);
+ Local<v8::Object> o = ot->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> h = ht->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> p = pt->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(o->Set(context.local(), v8_str("__proto__"), h).FromJust());
+ CHECK(h->Set(context.local(), v8_str("__proto__"), p).FromJust());
// Setting a property that exists on the hidden prototype goes there.
- o->Set(v8_str("x"), v8_num(7));
- CHECK_EQ(7, o->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(7, h->Get(v8_str("x"))->Int32Value());
- CHECK(p->Get(v8_str("x"))->IsUndefined());
+ CHECK(o->Set(context.local(), v8_str("x"), v8_num(7)).FromJust());
+ CHECK_EQ(7, o->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(7, h->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(p->Get(context.local(), v8_str("x")).ToLocalChecked()->IsUndefined());
// Setting a new property should not be forwarded to the hidden prototype.
- o->Set(v8_str("y"), v8_num(6));
- CHECK_EQ(6, o->Get(v8_str("y"))->Int32Value());
- CHECK(h->Get(v8_str("y"))->IsUndefined());
- CHECK(p->Get(v8_str("y"))->IsUndefined());
+ CHECK(o->Set(context.local(), v8_str("y"), v8_num(6)).FromJust());
+ CHECK_EQ(6, o->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(h->Get(context.local(), v8_str("y")).ToLocalChecked()->IsUndefined());
+ CHECK(p->Get(context.local(), v8_str("y")).ToLocalChecked()->IsUndefined());
// Setting a property that only exists on a prototype of the hidden prototype
// is treated normally again.
- p->Set(v8_str("z"), v8_num(8));
- CHECK_EQ(8, o->Get(v8_str("z"))->Int32Value());
- CHECK_EQ(8, h->Get(v8_str("z"))->Int32Value());
- CHECK_EQ(8, p->Get(v8_str("z"))->Int32Value());
- o->Set(v8_str("z"), v8_num(9));
- CHECK_EQ(9, o->Get(v8_str("z"))->Int32Value());
- CHECK_EQ(8, h->Get(v8_str("z"))->Int32Value());
- CHECK_EQ(8, p->Get(v8_str("z"))->Int32Value());
+ CHECK(p->Set(context.local(), v8_str("z"), v8_num(8)).FromJust());
+ CHECK_EQ(8, o->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(8, h->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(8, p->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o->Set(context.local(), v8_str("z"), v8_num(9)).FromJust());
+ CHECK_EQ(9, o->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(8, h->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(8, p->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -9435,17 +10591,20 @@ THREADED_TEST(HiddenPrototypeIdentityHash) {
LocalContext context;
v8::HandleScope handle_scope(context->GetIsolate());
- Handle<FunctionTemplate> t = FunctionTemplate::New(context->GetIsolate());
+ Local<FunctionTemplate> t = FunctionTemplate::New(context->GetIsolate());
t->SetHiddenPrototype(true);
t->InstanceTemplate()->Set(v8_str("foo"), v8_num(75));
- Handle<Object> p = t->GetFunction()->NewInstance();
- Handle<Object> o = Object::New(context->GetIsolate());
- o->SetPrototype(p);
+ Local<Object> p = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<Object> o = Object::New(context->GetIsolate());
+ CHECK(o->SetPrototype(context.local(), p).FromJust());
int hash = o->GetIdentityHash();
USE(hash);
- o->Set(v8_str("foo"), v8_num(42));
- DCHECK_EQ(hash, o->GetIdentityHash());
+ CHECK(o->Set(context.local(), v8_str("foo"), v8_num(42)).FromJust());
+ CHECK_EQ(hash, o->GetIdentityHash());
}
@@ -9465,45 +10624,88 @@ THREADED_TEST(SetPrototype) {
Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3));
- Local<v8::Object> o0 = t0->GetFunction()->NewInstance();
- Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
- Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
- Local<v8::Object> o3 = t3->GetFunction()->NewInstance();
+ Local<v8::Object> o0 = t0->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o1 = t1->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o2 = t2->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o3 = t3->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
// Setting the prototype on an object does not skip hidden prototypes.
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK(o0->SetPrototype(o1));
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
- CHECK(o1->SetPrototype(o2));
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
- CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value());
- CHECK(o2->SetPrototype(o3));
- CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value());
- CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value());
- CHECK_EQ(3, o0->Get(v8_str("u"))->Int32Value());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o0->SetPrototype(context.local(), o1).FromJust());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o1->SetPrototype(context.local(), o2).FromJust());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(2, o0->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK(o2->SetPrototype(context.local(), o3).FromJust());
+ CHECK_EQ(0, o0->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1, o0->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(2, o0->Get(context.local(), v8_str("z"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(3, o0->Get(context.local(), v8_str("u"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
// Getting the prototype of o0 should get the first visible one
// which is o3. Therefore, z should not be defined on the prototype
// object.
- Local<Value> proto = o0->Get(v8_str("__proto__"));
+ Local<Value> proto =
+ o0->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
CHECK(proto->IsObject());
- CHECK(proto.As<v8::Object>()->Equals(o3));
+ CHECK(proto.As<v8::Object>()->Equals(context.local(), o3).FromJust());
// However, Object::GetPrototype ignores hidden prototype.
Local<Value> proto0 = o0->GetPrototype();
CHECK(proto0->IsObject());
- CHECK(proto0.As<v8::Object>()->Equals(o1));
+ CHECK(proto0.As<v8::Object>()->Equals(context.local(), o1).FromJust());
Local<Value> proto1 = o1->GetPrototype();
CHECK(proto1->IsObject());
- CHECK(proto1.As<v8::Object>()->Equals(o2));
+ CHECK(proto1.As<v8::Object>()->Equals(context.local(), o2).FromJust());
Local<Value> proto2 = o2->GetPrototype();
CHECK(proto2->IsObject());
- CHECK(proto2.As<v8::Object>()->Equals(o3));
+ CHECK(proto2.As<v8::Object>()->Equals(context.local(), o3).FromJust());
}
@@ -9537,21 +10739,33 @@ THREADED_TEST(Regress91517) {
t2->InstanceTemplate()->Set(v8_str(name_buf.start()), v8_num(2));
}
- Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
- Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
- Local<v8::Object> o3 = t3->GetFunction()->NewInstance();
- Local<v8::Object> o4 = t4->GetFunction()->NewInstance();
+ Local<v8::Object> o1 = t1->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o2 = t2->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o3 = t3->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o4 = t4->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
// Create prototype chain of hidden prototypes.
- CHECK(o4->SetPrototype(o3));
- CHECK(o3->SetPrototype(o2));
- CHECK(o2->SetPrototype(o1));
+ CHECK(o4->SetPrototype(context.local(), o3).FromJust());
+ CHECK(o3->SetPrototype(context.local(), o2).FromJust());
+ CHECK(o2->SetPrototype(context.local(), o1).FromJust());
// Call the runtime version of GetOwnPropertyNames() on the natively
// created object through JavaScript.
- context->Global()->Set(v8_str("obj"), o4);
- // PROPERTY_ATTRIBUTES_NONE = 0
- CompileRun("var names = %GetOwnPropertyNames(obj, 0);");
+ CHECK(context->Global()->Set(context.local(), v8_str("obj"), o4).FromJust());
+ // PROPERTY_FILTER_NONE = 0
+ CompileRun("var names = %GetOwnPropertyKeys(obj, 0);");
ExpectInt32("names.length", 1006);
ExpectTrue("names.indexOf(\"baz\") >= 0");
@@ -9584,7 +10798,10 @@ THREADED_TEST(Regress269562) {
i1->Set(v8_str("n1"), v8_num(1));
i1->Set(v8_str("n2"), v8_num(2));
- Local<v8::Object> o1 = t1->GetFunction()->NewInstance();
+ Local<v8::Object> o1 = t1->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
Local<v8::FunctionTemplate> t2 =
v8::FunctionTemplate::New(context->GetIsolate());
t2->SetHiddenPrototype(true);
@@ -9593,12 +10810,15 @@ THREADED_TEST(Regress269562) {
t2->Inherit(t1);
t2->InstanceTemplate()->Set(v8_str("mine"), v8_num(4));
- Local<v8::Object> o2 = t2->GetFunction()->NewInstance();
- CHECK(o2->SetPrototype(o1));
+ Local<v8::Object> o2 = t2->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(o2->SetPrototype(context.local(), o1).FromJust());
v8::Local<v8::Symbol> sym =
v8::Symbol::New(context->GetIsolate(), v8_str("s1"));
- o1->Set(sym, v8_num(3));
+ CHECK(o1->Set(context.local(), sym, v8_num(3)).FromJust());
o1->SetPrivate(context.local(),
v8::Private::New(context->GetIsolate(), v8_str("h1")),
v8::Integer::New(context->GetIsolate(), 2013))
@@ -9606,10 +10826,10 @@ THREADED_TEST(Regress269562) {
// Call the runtime version of GetOwnPropertyNames() on
// the natively created object through JavaScript.
- context->Global()->Set(v8_str("obj"), o2);
- context->Global()->Set(v8_str("sym"), sym);
- // PROPERTY_ATTRIBUTES_NONE = 0
- CompileRun("var names = %GetOwnPropertyNames(obj, 0);");
+ CHECK(context->Global()->Set(context.local(), v8_str("obj"), o2).FromJust());
+ CHECK(context->Global()->Set(context.local(), v8_str("sym"), sym).FromJust());
+ // PROPERTY_FILTER_NONE = 0
+ CompileRun("var names = %GetOwnPropertyKeys(obj, 0);");
ExpectInt32("names.length", 7);
ExpectTrue("names.indexOf(\"foo\") >= 0");
@@ -9630,27 +10850,44 @@ THREADED_TEST(FunctionReadOnlyPrototype) {
Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(isolate, 42));
t1->ReadOnlyPrototype();
- context->Global()->Set(v8_str("func1"), t1->GetFunction());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("func1"),
+ t1->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
// Configured value of ReadOnly flag.
- CHECK(CompileRun(
- "(function() {"
- " descriptor = Object.getOwnPropertyDescriptor(func1, 'prototype');"
- " return (descriptor['writable'] == false);"
- "})()")->BooleanValue());
- CHECK_EQ(42, CompileRun("func1.prototype.x")->Int32Value());
- CHECK_EQ(42,
- CompileRun("func1.prototype = {}; func1.prototype.x")->Int32Value());
+ CHECK(
+ CompileRun(
+ "(function() {"
+ " descriptor = Object.getOwnPropertyDescriptor(func1, 'prototype');"
+ " return (descriptor['writable'] == false);"
+ "})()")
+ ->BooleanValue(context.local())
+ .FromJust());
+ CHECK_EQ(
+ 42,
+ CompileRun("func1.prototype.x")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(42, CompileRun("func1.prototype = {}; func1.prototype.x")
+ ->Int32Value(context.local())
+ .FromJust());
Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->PrototypeTemplate()->Set(v8_str("x"), v8::Integer::New(isolate, 42));
- context->Global()->Set(v8_str("func2"), t2->GetFunction());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("func2"),
+ t2->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
// Default value of ReadOnly flag.
- CHECK(CompileRun(
- "(function() {"
- " descriptor = Object.getOwnPropertyDescriptor(func2, 'prototype');"
- " return (descriptor['writable'] == true);"
- "})()")->BooleanValue());
- CHECK_EQ(42, CompileRun("func2.prototype.x")->Int32Value());
+ CHECK(
+ CompileRun(
+ "(function() {"
+ " descriptor = Object.getOwnPropertyDescriptor(func2, 'prototype');"
+ " return (descriptor['writable'] == true);"
+ "})()")
+ ->BooleanValue(context.local())
+ .FromJust());
+ CHECK_EQ(
+ 42,
+ CompileRun("func2.prototype.x")->Int32Value(context.local()).FromJust());
}
@@ -9661,18 +10898,26 @@ THREADED_TEST(SetPrototypeThrows) {
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
- Local<v8::Object> o0 = t->GetFunction()->NewInstance();
- Local<v8::Object> o1 = t->GetFunction()->NewInstance();
+ Local<v8::Object> o0 = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<v8::Object> o1 = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
- CHECK(o0->SetPrototype(o1));
+ CHECK(o0->SetPrototype(context.local(), o1).FromJust());
// If setting the prototype leads to the cycle, SetPrototype should
// return false and keep VM in sane state.
v8::TryCatch try_catch(isolate);
- CHECK(!o1->SetPrototype(o0));
+ CHECK(o1->SetPrototype(context.local(), o0).IsNothing());
CHECK(!try_catch.HasCaught());
- DCHECK(!CcTest::i_isolate()->has_pending_exception());
+ CHECK(!CcTest::i_isolate()->has_pending_exception());
- CHECK_EQ(42, CompileRun("function f() { return 42; }; f()")->Int32Value());
+ CHECK_EQ(42, CompileRun("function f() { return 42; }; f()")
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -9683,16 +10928,18 @@ THREADED_TEST(FunctionRemovePrototype) {
Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->RemovePrototype();
- Local<v8::Function> fun = t1->GetFunction();
- context->Global()->Set(v8_str("fun"), fun);
- CHECK(!CompileRun("'prototype' in fun")->BooleanValue());
+ Local<v8::Function> fun = t1->GetFunction(context.local()).ToLocalChecked();
+ CHECK(context->Global()->Set(context.local(), v8_str("fun"), fun).FromJust());
+ CHECK(!CompileRun("'prototype' in fun")
+ ->BooleanValue(context.local())
+ .FromJust());
v8::TryCatch try_catch(isolate);
CompileRun("new fun()");
CHECK(try_catch.HasCaught());
try_catch.Reset();
- fun->NewInstance();
+ CHECK(fun->NewInstance(context.local()).IsEmpty());
CHECK(try_catch.HasCaught());
}
@@ -9707,17 +10954,21 @@ THREADED_TEST(GetterSetterExceptions) {
"var x = { };"
"x.__defineSetter__('set', Throw);"
"x.__defineGetter__('get', Throw);");
- Local<v8::Object> x =
- Local<v8::Object>::Cast(context->Global()->Get(v8_str("x")));
+ Local<v8::Object> x = Local<v8::Object>::Cast(
+ context->Global()->Get(context.local(), v8_str("x")).ToLocalChecked());
v8::TryCatch try_catch(isolate);
- x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
- x->Get(v8_str("get"));
- x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
- x->Get(v8_str("get"));
- x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
- x->Get(v8_str("get"));
- x->Set(v8_str("set"), v8::Integer::New(isolate, 8));
- x->Get(v8_str("get"));
+ CHECK(x->Set(context.local(), v8_str("set"), v8::Integer::New(isolate, 8))
+ .IsNothing());
+ CHECK(x->Get(context.local(), v8_str("get")).IsEmpty());
+ CHECK(x->Set(context.local(), v8_str("set"), v8::Integer::New(isolate, 8))
+ .IsNothing());
+ CHECK(x->Get(context.local(), v8_str("get")).IsEmpty());
+ CHECK(x->Set(context.local(), v8_str("set"), v8::Integer::New(isolate, 8))
+ .IsNothing());
+ CHECK(x->Get(context.local(), v8_str("get")).IsEmpty());
+ CHECK(x->Set(context.local(), v8_str("set"), v8::Integer::New(isolate, 8))
+ .IsNothing());
+ CHECK(x->Get(context.local(), v8_str("get")).IsEmpty());
}
@@ -9727,13 +10978,14 @@ THREADED_TEST(Constructor) {
v8::HandleScope handle_scope(isolate);
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("Fun"));
- Local<Function> cons = templ->GetFunction();
- context->Global()->Set(v8_str("Fun"), cons);
- Local<v8::Object> inst = cons->NewInstance();
- i::Handle<i::JSObject> obj(v8::Utils::OpenHandle(*inst));
+ Local<Function> cons = templ->GetFunction(context.local()).ToLocalChecked();
+ CHECK(
+ context->Global()->Set(context.local(), v8_str("Fun"), cons).FromJust());
+ Local<v8::Object> inst = cons->NewInstance(context.local()).ToLocalChecked();
+ i::Handle<i::JSReceiver> obj(v8::Utils::OpenHandle(*inst));
CHECK(obj->IsJSObject());
Local<Value> value = CompileRun("(new Fun()).constructor === Fun");
- CHECK(value->BooleanValue());
+ CHECK(value->BooleanValue(context.local()).FromJust());
}
@@ -9742,18 +10994,19 @@ static void ConstructorCallback(
ApiTestFuzzer::Fuzz();
Local<Object> This;
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
if (args.IsConstructCall()) {
Local<Object> Holder = args.Holder();
This = Object::New(args.GetIsolate());
Local<Value> proto = Holder->GetPrototype();
if (proto->IsObject()) {
- This->SetPrototype(proto);
+ This->SetPrototype(context, proto).FromJust();
}
} else {
This = args.This();
}
- This->Set(v8_str("a"), args[0]);
+ This->Set(context, v8_str("a"), args[0]).FromJust();
args.GetReturnValue().Set(This);
}
@@ -9773,8 +11026,11 @@ THREADED_TEST(ConstructorForObject) {
{
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(ConstructorCallback);
- Local<Object> instance = instance_template->NewInstance();
- context->Global()->Set(v8_str("obj"), instance);
+ Local<Object> instance =
+ instance_template->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"), instance)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
@@ -9783,65 +11039,72 @@ THREADED_TEST(ConstructorForObject) {
value = CompileRun("(function() { var o = new obj(28); return o.a; })()");
CHECK(!try_catch.HasCaught());
CHECK(value->IsInt32());
- CHECK_EQ(28, value->Int32Value());
+ CHECK_EQ(28, value->Int32Value(context.local()).FromJust());
Local<Value> args1[] = {v8_num(28)};
- Local<Value> value_obj1 = instance->CallAsConstructor(1, args1);
+ Local<Value> value_obj1 =
+ instance->CallAsConstructor(context.local(), 1, args1).ToLocalChecked();
CHECK(value_obj1->IsObject());
Local<Object> object1 = Local<Object>::Cast(value_obj1);
- value = object1->Get(v8_str("a"));
+ value = object1->Get(context.local(), v8_str("a")).ToLocalChecked();
CHECK(value->IsInt32());
CHECK(!try_catch.HasCaught());
- CHECK_EQ(28, value->Int32Value());
+ CHECK_EQ(28, value->Int32Value(context.local()).FromJust());
// Call the Object's constructor with a String.
value =
CompileRun("(function() { var o = new obj('tipli'); return o.a; })()");
CHECK(!try_catch.HasCaught());
CHECK(value->IsString());
- String::Utf8Value string_value1(value->ToString(isolate));
+ String::Utf8Value string_value1(
+ value->ToString(context.local()).ToLocalChecked());
CHECK_EQ(0, strcmp("tipli", *string_value1));
Local<Value> args2[] = {v8_str("tipli")};
- Local<Value> value_obj2 = instance->CallAsConstructor(1, args2);
+ Local<Value> value_obj2 =
+ instance->CallAsConstructor(context.local(), 1, args2).ToLocalChecked();
CHECK(value_obj2->IsObject());
Local<Object> object2 = Local<Object>::Cast(value_obj2);
- value = object2->Get(v8_str("a"));
+ value = object2->Get(context.local(), v8_str("a")).ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK(value->IsString());
- String::Utf8Value string_value2(value->ToString(isolate));
+ String::Utf8Value string_value2(
+ value->ToString(context.local()).ToLocalChecked());
CHECK_EQ(0, strcmp("tipli", *string_value2));
// Call the Object's constructor with a Boolean.
value = CompileRun("(function() { var o = new obj(true); return o.a; })()");
CHECK(!try_catch.HasCaught());
CHECK(value->IsBoolean());
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
- Handle<Value> args3[] = {v8::True(isolate)};
- Local<Value> value_obj3 = instance->CallAsConstructor(1, args3);
+ Local<Value> args3[] = {v8::True(isolate)};
+ Local<Value> value_obj3 =
+ instance->CallAsConstructor(context.local(), 1, args3).ToLocalChecked();
CHECK(value_obj3->IsObject());
Local<Object> object3 = Local<Object>::Cast(value_obj3);
- value = object3->Get(v8_str("a"));
+ value = object3->Get(context.local(), v8_str("a")).ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK(value->IsBoolean());
- CHECK_EQ(true, value->BooleanValue());
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
// Call the Object's constructor with undefined.
- Handle<Value> args4[] = {v8::Undefined(isolate)};
- Local<Value> value_obj4 = instance->CallAsConstructor(1, args4);
+ Local<Value> args4[] = {v8::Undefined(isolate)};
+ Local<Value> value_obj4 =
+ instance->CallAsConstructor(context.local(), 1, args4).ToLocalChecked();
CHECK(value_obj4->IsObject());
Local<Object> object4 = Local<Object>::Cast(value_obj4);
- value = object4->Get(v8_str("a"));
+ value = object4->Get(context.local(), v8_str("a")).ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK(value->IsUndefined());
// Call the Object's constructor with null.
- Handle<Value> args5[] = {v8::Null(isolate)};
- Local<Value> value_obj5 = instance->CallAsConstructor(1, args5);
+ Local<Value> args5[] = {v8::Null(isolate)};
+ Local<Value> value_obj5 =
+ instance->CallAsConstructor(context.local(), 1, args5).ToLocalChecked();
CHECK(value_obj5->IsObject());
Local<Object> object5 = Local<Object>::Cast(value_obj5);
- value = object5->Get(v8_str("a"));
+ value = object5->Get(context.local(), v8_str("a")).ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK(value->IsNull());
}
@@ -9849,8 +11112,11 @@ THREADED_TEST(ConstructorForObject) {
// Check exception handling when there is no constructor set for the Object.
{
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
- Local<Object> instance = instance_template->NewInstance();
- context->Global()->Set(v8_str("obj2"), instance);
+ Local<Object> instance =
+ instance_template->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj2"), instance)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
@@ -9858,15 +11124,16 @@ THREADED_TEST(ConstructorForObject) {
value = CompileRun("new obj2(28)");
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value1(try_catch.Exception());
- CHECK_EQ(0, strcmp("TypeError: obj2 is not a function", *exception_value1));
+ CHECK_EQ(0,
+ strcmp("TypeError: obj2 is not a constructor", *exception_value1));
try_catch.Reset();
Local<Value> args[] = {v8_num(29)};
- value = instance->CallAsConstructor(1, args);
+ CHECK(instance->CallAsConstructor(context.local(), 1, args).IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ(0,
- strcmp("TypeError: object is not a function", *exception_value2));
+ CHECK_EQ(
+ 0, strcmp("TypeError: object is not a constructor", *exception_value2));
try_catch.Reset();
}
@@ -9874,8 +11141,11 @@ THREADED_TEST(ConstructorForObject) {
{
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(ThrowValue);
- Local<Object> instance = instance_template->NewInstance();
- context->Global()->Set(v8_str("obj3"), instance);
+ Local<Object> instance =
+ instance_template->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj3"), instance)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
@@ -9887,7 +11157,7 @@ THREADED_TEST(ConstructorForObject) {
try_catch.Reset();
Local<Value> args[] = {v8_num(23)};
- value = instance->CallAsConstructor(1, args);
+ CHECK(instance->CallAsConstructor(context.local(), 1, args).IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ(0, strcmp("23", *exception_value2));
@@ -9898,9 +11168,12 @@ THREADED_TEST(ConstructorForObject) {
{
Local<FunctionTemplate> function_template =
FunctionTemplate::New(isolate, FakeConstructorCallback);
- Local<Function> function = function_template->GetFunction();
+ Local<Function> function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
Local<Object> instance1 = function;
- context->Global()->Set(v8_str("obj4"), instance1);
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj4"), instance1)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
@@ -9913,14 +11186,18 @@ THREADED_TEST(ConstructorForObject) {
CHECK(value->IsObject());
Local<Value> args1[] = {v8_num(28)};
- value = instance1->CallAsConstructor(1, args1);
+ value = instance1->CallAsConstructor(context.local(), 1, args1)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK(value->IsObject());
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(FakeConstructorCallback);
- Local<Object> instance2 = instance_template->NewInstance();
- context->Global()->Set(v8_str("obj5"), instance2);
+ Local<Object> instance2 =
+ instance_template->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj5"), instance2)
+ .FromJust());
CHECK(!try_catch.HasCaught());
CHECK(instance2->IsObject());
@@ -9931,7 +11208,8 @@ THREADED_TEST(ConstructorForObject) {
CHECK(!value->IsObject());
Local<Value> args2[] = {v8_num(28)};
- value = instance2->CallAsConstructor(1, args2);
+ value = instance2->CallAsConstructor(context.local(), 1, args2)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
CHECK(!value->IsObject());
}
@@ -9944,8 +11222,9 @@ THREADED_TEST(FunctionDescriptorException) {
v8::HandleScope handle_scope(isolate);
Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
templ->SetClassName(v8_str("Fun"));
- Local<Function> cons = templ->GetFunction();
- context->Global()->Set(v8_str("Fun"), cons);
+ Local<Function> cons = templ->GetFunction(context.local()).ToLocalChecked();
+ CHECK(
+ context->Global()->Set(context.local(), v8_str("Fun"), cons).FromJust());
Local<Value> value = CompileRun(
"function test() {"
" try {"
@@ -9960,7 +11239,7 @@ THREADED_TEST(FunctionDescriptorException) {
" return 4;"
"}"
"test();");
- CHECK_EQ(0, value->Int32Value());
+ CHECK_EQ(0, value->Int32Value(context.local()).FromJust());
}
@@ -9980,10 +11259,22 @@ THREADED_TEST(EvalAliasedDynamic) {
"var x = new Object();"
"x.eval = function(x) { return 1; };"
"result3 = f(x);");
- script->Run();
- CHECK_EQ(2, current->Global()->Get(v8_str("result1"))->Int32Value());
- CHECK_EQ(0, current->Global()->Get(v8_str("result2"))->Int32Value());
- CHECK_EQ(1, current->Global()->Get(v8_str("result3"))->Int32Value());
+ script->Run(current.local()).ToLocalChecked();
+ CHECK_EQ(2, current->Global()
+ ->Get(current.local(), v8_str("result1"))
+ .ToLocalChecked()
+ ->Int32Value(current.local())
+ .FromJust());
+ CHECK_EQ(0, current->Global()
+ ->Get(current.local(), v8_str("result2"))
+ .ToLocalChecked()
+ ->Int32Value(current.local())
+ .FromJust());
+ CHECK_EQ(1, current->Global()
+ ->Get(current.local(), v8_str("result3"))
+ .ToLocalChecked()
+ ->Int32Value(current.local())
+ .FromJust());
v8::TryCatch try_catch(current->GetIsolate());
script = v8_compile(
@@ -9992,9 +11283,13 @@ THREADED_TEST(EvalAliasedDynamic) {
" with (x) { return eval('bar'); }"
"}"
"result4 = f(this)");
- script->Run();
+ script->Run(current.local()).ToLocalChecked();
CHECK(!try_catch.HasCaught());
- CHECK_EQ(2, current->Global()->Get(v8_str("result4"))->Int32Value());
+ CHECK_EQ(2, current->Global()
+ ->Get(current.local(), v8_str("result4"))
+ .ToLocalChecked()
+ ->Int32Value(current.local())
+ .FromJust());
try_catch.Reset();
}
@@ -10010,27 +11305,34 @@ THREADED_TEST(CrossEval) {
current->SetSecurityToken(token);
// Set up reference from current to other.
- current->Global()->Set(v8_str("other"), other->Global());
+ CHECK(current->Global()
+ ->Set(current.local(), v8_str("other"), other->Global())
+ .FromJust());
// Check that new variables are introduced in other context.
Local<Script> script = v8_compile("other.eval('var foo = 1234')");
- script->Run();
- Local<Value> foo = other->Global()->Get(v8_str("foo"));
- CHECK_EQ(1234, foo->Int32Value());
- CHECK(!current->Global()->Has(v8_str("foo")));
+ script->Run(current.local()).ToLocalChecked();
+ Local<Value> foo =
+ other->Global()->Get(current.local(), v8_str("foo")).ToLocalChecked();
+ CHECK_EQ(1234, foo->Int32Value(other.local()).FromJust());
+ CHECK(!current->Global()->Has(current.local(), v8_str("foo")).FromJust());
// Check that writing to non-existing properties introduces them in
// the other context.
script = v8_compile("other.eval('na = 1234')");
- script->Run();
- CHECK_EQ(1234, other->Global()->Get(v8_str("na"))->Int32Value());
- CHECK(!current->Global()->Has(v8_str("na")));
+ script->Run(current.local()).ToLocalChecked();
+ CHECK_EQ(1234, other->Global()
+ ->Get(current.local(), v8_str("na"))
+ .ToLocalChecked()
+ ->Int32Value(other.local())
+ .FromJust());
+ CHECK(!current->Global()->Has(current.local(), v8_str("na")).FromJust());
// Check that global variables in current context are not visible in other
// context.
v8::TryCatch try_catch(CcTest::isolate());
script = v8_compile("var bar = 42; other.eval('bar');");
- Local<Value> result = script->Run();
+ CHECK(script->Run(current.local()).IsEmpty());
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -10041,36 +11343,43 @@ THREADED_TEST(CrossEval) {
" var baz = 87;"
" return other.eval('baz');"
"})();");
- result = script->Run();
+ CHECK(script->Run(current.local()).IsEmpty());
CHECK(try_catch.HasCaught());
try_catch.Reset();
// Check that global variables in the other environment are visible
// when evaluting code.
- other->Global()->Set(v8_str("bis"), v8_num(1234));
+ CHECK(other->Global()
+ ->Set(other.local(), v8_str("bis"), v8_num(1234))
+ .FromJust());
script = v8_compile("other.eval('bis')");
- CHECK_EQ(1234, script->Run()->Int32Value());
+ CHECK_EQ(1234, script->Run(current.local())
+ .ToLocalChecked()
+ ->Int32Value(current.local())
+ .FromJust());
CHECK(!try_catch.HasCaught());
// Check that the 'this' pointer points to the global object evaluating
// code.
- other->Global()->Set(v8_str("t"), other->Global());
+ CHECK(other->Global()
+ ->Set(current.local(), v8_str("t"), other->Global())
+ .FromJust());
script = v8_compile("other.eval('this == t')");
- result = script->Run();
+ Local<Value> result = script->Run(current.local()).ToLocalChecked();
CHECK(result->IsTrue());
CHECK(!try_catch.HasCaught());
// Check that variables introduced in with-statement are not visible in
// other context.
script = v8_compile("with({x:2}){other.eval('x')}");
- result = script->Run();
+ CHECK(script->Run(current.local()).IsEmpty());
CHECK(try_catch.HasCaught());
try_catch.Reset();
// Check that you cannot use 'eval.call' with another object than the
// current global object.
script = v8_compile("other.y = 1; eval.call(other, 'y')");
- result = script->Run();
+ CHECK(script->Run(current.local()).IsEmpty());
CHECK(try_catch.HasCaught());
}
@@ -10086,7 +11395,7 @@ THREADED_TEST(EvalInDetachedGlobal) {
// Set up function in context0 that uses eval from context0.
context0->Enter();
- v8::Handle<v8::Value> fun = CompileRun(
+ v8::Local<v8::Value> fun = CompileRun(
"var x = 42;"
"(function() {"
" var e = eval;"
@@ -10098,13 +11407,13 @@ THREADED_TEST(EvalInDetachedGlobal) {
// detaching the global. Before detaching, the call succeeds and
// after detaching and exception is thrown.
context1->Enter();
- context1->Global()->Set(v8_str("fun"), fun);
- v8::Handle<v8::Value> x_value = CompileRun("fun('x')");
- CHECK_EQ(42, x_value->Int32Value());
+ CHECK(context1->Global()->Set(context1, v8_str("fun"), fun).FromJust());
+ v8::Local<v8::Value> x_value = CompileRun("fun('x')");
+ CHECK_EQ(42, x_value->Int32Value(context1).FromJust());
context0->DetachGlobal();
v8::TryCatch catcher(isolate);
x_value = CompileRun("fun('x')");
- CHECK_EQ(42, x_value->Int32Value());
+ CHECK_EQ(42, x_value->Int32Value(context1).FromJust());
context1->Exit();
}
@@ -10119,12 +11428,14 @@ THREADED_TEST(CrossLazyLoad) {
current->SetSecurityToken(token);
// Set up reference from current to other.
- current->Global()->Set(v8_str("other"), other->Global());
+ CHECK(current->Global()
+ ->Set(current.local(), v8_str("other"), other->Global())
+ .FromJust());
// Trigger lazy loading in other context.
Local<Script> script = v8_compile("other.eval('new Date(42)')");
- Local<Value> value = script->Run();
- CHECK_EQ(42.0, value->NumberValue());
+ Local<Value> value = script->Run(current.local()).ToLocalChecked();
+ CHECK_EQ(42.0, value->NumberValue(current.local()).FromJust());
}
@@ -10132,7 +11443,10 @@ static void call_as_function(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
if (args.IsConstructCall()) {
if (args[0]->IsInt32()) {
- args.GetReturnValue().Set(v8_num(-args[0]->Int32Value()));
+ args.GetReturnValue().Set(
+ v8_num(-args[0]
+ ->Int32Value(args.GetIsolate()->GetCurrentContext())
+ .FromJust()));
return;
}
}
@@ -10158,63 +11472,74 @@ THREADED_TEST(CallAsFunction) {
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(call_as_function);
- Local<v8::Object> instance = t->GetFunction()->NewInstance();
- context->Global()->Set(v8_str("obj"), instance);
+ Local<v8::Object> instance = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"), instance)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
value = CompileRun("obj(42)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
value = CompileRun("(function(o){return o(49)})(obj)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(49, value->Int32Value());
+ CHECK_EQ(49, value->Int32Value(context.local()).FromJust());
// test special case of call as function
value = CompileRun("[obj]['0'](45)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(45, value->Int32Value());
+ CHECK_EQ(45, value->Int32Value(context.local()).FromJust());
value = CompileRun(
"obj.call = Function.prototype.call;"
"obj.call(null, 87)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(87, value->Int32Value());
+ CHECK_EQ(87, value->Int32Value(context.local()).FromJust());
// Regression tests for bug #1116356: Calling call through call/apply
// must work for non-function receivers.
const char* apply_99 = "Function.prototype.call.apply(obj, [this, 99])";
value = CompileRun(apply_99);
CHECK(!try_catch.HasCaught());
- CHECK_EQ(99, value->Int32Value());
+ CHECK_EQ(99, value->Int32Value(context.local()).FromJust());
const char* call_17 = "Function.prototype.call.call(obj, this, 17)";
value = CompileRun(call_17);
CHECK(!try_catch.HasCaught());
- CHECK_EQ(17, value->Int32Value());
+ CHECK_EQ(17, value->Int32Value(context.local()).FromJust());
// Check that the call-as-function handler can be called through
// new.
value = CompileRun("new obj(43)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(-43, value->Int32Value());
+ CHECK_EQ(-43, value->Int32Value(context.local()).FromJust());
// Check that the call-as-function handler can be called through
// the API.
- v8::Handle<Value> args[] = {v8_num(28)};
- value = instance->CallAsFunction(instance, 1, args);
+ v8::Local<Value> args[] = {v8_num(28)};
+ value = instance->CallAsFunction(context.local(), instance, 1, args)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
- CHECK_EQ(28, value->Int32Value());
+ CHECK_EQ(28, value->Int32Value(context.local()).FromJust());
}
{
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template(t->InstanceTemplate());
USE(instance_template);
- Local<v8::Object> instance = t->GetFunction()->NewInstance();
- context->Global()->Set(v8_str("obj2"), instance);
+ Local<v8::Object> instance = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj2"), instance)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
@@ -10230,9 +11555,9 @@ THREADED_TEST(CallAsFunction) {
// Call an object without call-as-function handler through the API
value = CompileRun("obj2(28)");
- v8::Handle<Value> args[] = {v8_num(28)};
- value = instance->CallAsFunction(instance, 1, args);
- CHECK(value.IsEmpty());
+ v8::Local<Value> args[] = {v8_num(28)};
+ CHECK(
+ instance->CallAsFunction(context.local(), instance, 1, args).IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ(0,
@@ -10244,8 +11569,13 @@ THREADED_TEST(CallAsFunction) {
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(ThrowValue);
- Local<v8::Object> instance = t->GetFunction()->NewInstance();
- context->Global()->Set(v8_str("obj3"), instance);
+ Local<v8::Object> instance = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj3"), instance)
+ .FromJust());
v8::TryCatch try_catch(isolate);
Local<Value> value;
CHECK(!try_catch.HasCaught());
@@ -10257,8 +11587,9 @@ THREADED_TEST(CallAsFunction) {
CHECK_EQ(0, strcmp("22", *exception_value1));
try_catch.Reset();
- v8::Handle<Value> args[] = {v8_num(23)};
- value = instance->CallAsFunction(instance, 1, args);
+ v8::Local<Value> args[] = {v8_num(23)};
+ CHECK(
+ instance->CallAsFunction(context.local(), instance, 1, args).IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
CHECK_EQ(0, strcmp("23", *exception_value2));
@@ -10269,18 +11600,31 @@ THREADED_TEST(CallAsFunction) {
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetCallAsFunctionHandler(ReturnThis);
- Local<v8::Object> instance = t->GetFunction()->NewInstance();
+ Local<v8::Object> instance = t->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
Local<v8::Value> a1 =
- instance->CallAsFunction(v8::Undefined(isolate), 0, NULL);
+ instance->CallAsFunction(context.local(), v8::Undefined(isolate), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a1->StrictEquals(instance));
- Local<v8::Value> a2 = instance->CallAsFunction(v8::Null(isolate), 0, NULL);
+ Local<v8::Value> a2 =
+ instance->CallAsFunction(context.local(), v8::Null(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(a2->StrictEquals(instance));
- Local<v8::Value> a3 = instance->CallAsFunction(v8_num(42), 0, NULL);
+ Local<v8::Value> a3 =
+ instance->CallAsFunction(context.local(), v8_num(42), 0, NULL)
+ .ToLocalChecked();
CHECK(a3->StrictEquals(instance));
- Local<v8::Value> a4 = instance->CallAsFunction(v8_str("hello"), 0, NULL);
+ Local<v8::Value> a4 =
+ instance->CallAsFunction(context.local(), v8_str("hello"), 0, NULL)
+ .ToLocalChecked();
CHECK(a4->StrictEquals(instance));
- Local<v8::Value> a5 = instance->CallAsFunction(v8::True(isolate), 0, NULL);
+ Local<v8::Value> a5 =
+ instance->CallAsFunction(context.local(), v8::True(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(a5->StrictEquals(instance));
}
@@ -10294,41 +11638,65 @@ THREADED_TEST(CallAsFunction) {
" return this;"
"}");
Local<Function> ReturnThisSloppy = Local<Function>::Cast(
- context->Global()->Get(v8_str("ReturnThisSloppy")));
+ context->Global()
+ ->Get(context.local(), v8_str("ReturnThisSloppy"))
+ .ToLocalChecked());
Local<Function> ReturnThisStrict = Local<Function>::Cast(
- context->Global()->Get(v8_str("ReturnThisStrict")));
+ context->Global()
+ ->Get(context.local(), v8_str("ReturnThisStrict"))
+ .ToLocalChecked());
Local<v8::Value> a1 =
- ReturnThisSloppy->CallAsFunction(v8::Undefined(isolate), 0, NULL);
+ ReturnThisSloppy->CallAsFunction(context.local(),
+ v8::Undefined(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(a1->StrictEquals(context->Global()));
Local<v8::Value> a2 =
- ReturnThisSloppy->CallAsFunction(v8::Null(isolate), 0, NULL);
+ ReturnThisSloppy->CallAsFunction(context.local(), v8::Null(isolate), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a2->StrictEquals(context->Global()));
- Local<v8::Value> a3 = ReturnThisSloppy->CallAsFunction(v8_num(42), 0, NULL);
+ Local<v8::Value> a3 =
+ ReturnThisSloppy->CallAsFunction(context.local(), v8_num(42), 0, NULL)
+ .ToLocalChecked();
CHECK(a3->IsNumberObject());
CHECK_EQ(42.0, a3.As<v8::NumberObject>()->ValueOf());
Local<v8::Value> a4 =
- ReturnThisSloppy->CallAsFunction(v8_str("hello"), 0, NULL);
+ ReturnThisSloppy->CallAsFunction(context.local(), v8_str("hello"), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a4->IsStringObject());
CHECK(a4.As<v8::StringObject>()->ValueOf()->StrictEquals(v8_str("hello")));
Local<v8::Value> a5 =
- ReturnThisSloppy->CallAsFunction(v8::True(isolate), 0, NULL);
+ ReturnThisSloppy->CallAsFunction(context.local(), v8::True(isolate), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a5->IsBooleanObject());
CHECK(a5.As<v8::BooleanObject>()->ValueOf());
Local<v8::Value> a6 =
- ReturnThisStrict->CallAsFunction(v8::Undefined(isolate), 0, NULL);
+ ReturnThisStrict->CallAsFunction(context.local(),
+ v8::Undefined(isolate), 0, NULL)
+ .ToLocalChecked();
CHECK(a6->IsUndefined());
Local<v8::Value> a7 =
- ReturnThisStrict->CallAsFunction(v8::Null(isolate), 0, NULL);
+ ReturnThisStrict->CallAsFunction(context.local(), v8::Null(isolate), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a7->IsNull());
- Local<v8::Value> a8 = ReturnThisStrict->CallAsFunction(v8_num(42), 0, NULL);
+ Local<v8::Value> a8 =
+ ReturnThisStrict->CallAsFunction(context.local(), v8_num(42), 0, NULL)
+ .ToLocalChecked();
CHECK(a8->StrictEquals(v8_num(42)));
Local<v8::Value> a9 =
- ReturnThisStrict->CallAsFunction(v8_str("hello"), 0, NULL);
+ ReturnThisStrict->CallAsFunction(context.local(), v8_str("hello"), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a9->StrictEquals(v8_str("hello")));
Local<v8::Value> a10 =
- ReturnThisStrict->CallAsFunction(v8::True(isolate), 0, NULL);
+ ReturnThisStrict->CallAsFunction(context.local(), v8::True(isolate), 0,
+ NULL)
+ .ToLocalChecked();
CHECK(a10->StrictEquals(v8::True(isolate)));
}
}
@@ -10343,7 +11711,8 @@ THREADED_TEST(CallableObject) {
{
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(call_as_function);
- Local<Object> instance = instance_template->NewInstance();
+ Local<Object> instance =
+ instance_template->NewInstance(context.local()).ToLocalChecked();
v8::TryCatch try_catch(isolate);
CHECK(instance->IsCallable());
@@ -10352,7 +11721,8 @@ THREADED_TEST(CallableObject) {
{
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
- Local<Object> instance = instance_template->NewInstance();
+ Local<Object> instance =
+ instance_template->NewInstance(context.local()).ToLocalChecked();
v8::TryCatch try_catch(isolate);
CHECK(!instance->IsCallable());
@@ -10362,7 +11732,8 @@ THREADED_TEST(CallableObject) {
{
Local<FunctionTemplate> function_template =
FunctionTemplate::New(isolate, call_as_function);
- Local<Function> function = function_template->GetFunction();
+ Local<Function> function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
Local<Object> instance = function;
v8::TryCatch try_catch(isolate);
@@ -10372,7 +11743,8 @@ THREADED_TEST(CallableObject) {
{
Local<FunctionTemplate> function_template = FunctionTemplate::New(isolate);
- Local<Function> function = function_template->GetFunction();
+ Local<Function> function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
Local<Object> instance = function;
v8::TryCatch try_catch(isolate);
@@ -10489,9 +11861,14 @@ static void FastApiCallback_TrivialSignature(
CheckReturnValue(args, FUNCTION_ADDR(FastApiCallback_TrivialSignature));
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, args.GetIsolate());
- CHECK(args.This()->Equals(args.Holder()));
- CHECK(args.Data()->Equals(v8_str("method_data")));
- args.GetReturnValue().Set(args[0]->Int32Value() + 1);
+ CHECK(args.This()
+ ->Equals(isolate->GetCurrentContext(), args.Holder())
+ .FromJust());
+ CHECK(args.Data()
+ ->Equals(isolate->GetCurrentContext(), v8_str("method_data"))
+ .FromJust());
+ args.GetReturnValue().Set(
+ args[0]->Int32Value(isolate->GetCurrentContext()).FromJust() + 1);
}
static void FastApiCallback_SimpleSignature(
@@ -10500,12 +11877,20 @@ static void FastApiCallback_SimpleSignature(
CheckReturnValue(args, FUNCTION_ADDR(FastApiCallback_SimpleSignature));
v8::Isolate* isolate = CcTest::isolate();
CHECK_EQ(isolate, args.GetIsolate());
- CHECK(args.This()->GetPrototype()->Equals(args.Holder()));
- CHECK(args.Data()->Equals(v8_str("method_data")));
+ CHECK(args.This()
+ ->GetPrototype()
+ ->Equals(isolate->GetCurrentContext(), args.Holder())
+ .FromJust());
+ CHECK(args.Data()
+ ->Equals(isolate->GetCurrentContext(), v8_str("method_data"))
+ .FromJust());
// Note, we're using HasRealNamedProperty instead of Has to avoid
// invoking the interceptor again.
- CHECK(args.Holder()->HasRealNamedProperty(v8_str("foo")));
- args.GetReturnValue().Set(args[0]->Int32Value() + 1);
+ CHECK(args.Holder()
+ ->HasRealNamedProperty(isolate->GetCurrentContext(), v8_str("foo"))
+ .FromJust());
+ args.GetReturnValue().Set(
+ args[0]->Int32Value(isolate->GetCurrentContext()).FromJust() + 1);
}
@@ -10534,13 +11919,16 @@ THREADED_TEST(CallICFastApi_DirectCall_GCMoveStub) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> nativeobject_templ =
+ v8::Local<v8::ObjectTemplate> nativeobject_templ =
v8::ObjectTemplate::New(isolate);
nativeobject_templ->Set(isolate, "callback",
v8::FunctionTemplate::New(isolate,
DirectApiCallback));
- v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
- context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+ v8::Local<v8::Object> nativeobject_obj =
+ nativeobject_templ->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("nativeobject"), nativeobject_obj)
+ .FromJust());
// call the api function multiple times to ensure direct call stub creation.
CompileRun(
"function f() {"
@@ -10562,15 +11950,18 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> nativeobject_templ =
+ v8::Local<v8::ObjectTemplate> nativeobject_templ =
v8::ObjectTemplate::New(isolate);
nativeobject_templ->Set(isolate, "callback",
v8::FunctionTemplate::New(isolate,
ThrowingDirectApiCallback));
- v8::Local<v8::Object> nativeobject_obj = nativeobject_templ->NewInstance();
- context->Global()->Set(v8_str("nativeobject"), nativeobject_obj);
+ v8::Local<v8::Object> nativeobject_obj =
+ nativeobject_templ->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("nativeobject"), nativeobject_obj)
+ .FromJust());
// call the api function multiple times to ensure direct call stub creation.
- v8::Handle<Value> result = CompileRun(
+ v8::Local<Value> result = CompileRun(
"var result = '';"
"function f() {"
" for (var i = 1; i <= 5; i++) {"
@@ -10578,14 +11969,14 @@ THREADED_TEST(CallICFastApi_DirectCall_Throw) {
" }"
"}"
"f(); result;");
- CHECK(v8_str("ggggg")->Equals(result));
+ CHECK(v8_str("ggggg")->Equals(context.local(), result).FromJust());
}
static int p_getter_count_3;
-static Handle<Value> DoDirectGetter() {
+static Local<Value> DoDirectGetter() {
if (++p_getter_count_3 % 3 == 0) {
CcTest::heap()->CollectAllGarbage();
GenerateSomeGarbage();
@@ -10607,17 +11998,22 @@ static void LoadICFastApi_DirectCall_GCMoveStub(Accessor accessor) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), accessor);
- context->Global()->Set(v8_str("o1"), obj->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o1"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
p_getter_count_3 = 0;
- v8::Handle<v8::Value> result = CompileRun(
+ v8::Local<v8::Value> result = CompileRun(
"function f() {"
" for (var i = 0; i < 30; i++) o1.p1;"
" return o1.p1"
"}"
"f();");
- CHECK(v8_str("Direct Getter Result")->Equals(result));
+ CHECK(v8_str("Direct Getter Result")
+ ->Equals(context.local(), result)
+ .FromJust());
CHECK_EQ(31, p_getter_count_3);
}
@@ -10638,16 +12034,19 @@ THREADED_TEST(LoadICFastApi_DirectCall_Throw) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj = v8::ObjectTemplate::New(isolate);
obj->SetAccessor(v8_str("p1"), ThrowingDirectGetterCallback);
- context->Global()->Set(v8_str("o1"), obj->NewInstance());
- v8::Handle<Value> result = CompileRun(
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o1"),
+ obj->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
+ v8::Local<Value> result = CompileRun(
"var result = '';"
"for (var i = 0; i < 5; i++) {"
" try { o1.p1; } catch (e) { result += e; }"
"}"
"result;");
- CHECK(v8_str("ggggg")->Equals(result));
+ CHECK(v8_str("ggggg")->Equals(context.local(), result).FromJust());
}
@@ -10655,29 +12054,35 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(isolate,
- FastApiCallback_TrivialSignature,
- v8_str("method_data"),
- v8::Handle<v8::Signature>());
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_TrivialSignature, v8_str("method_data"),
+ v8::Local<v8::Signature>());
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"var result = 0;"
"for (var i = 0; i < 100; i++) {"
" result = o.method(41);"
"}");
- CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK_EQ(100, interceptor_call_count);
}
@@ -10686,22 +12091,26 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"o.foo = 17;"
"var receiver = {};"
@@ -10710,7 +12119,11 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
"for (var i = 0; i < 100; i++) {"
" result = receiver.method(41);"
"}");
- CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK_EQ(100, interceptor_call_count);
}
@@ -10719,22 +12132,26 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"o.foo = 17;"
"var receiver = {};"
@@ -10748,8 +12165,16 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
" receiver = {method: function(x) { return x - 1 }};"
" }"
"}");
- CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_EQ(40, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK_GE(interceptor_call_count, 50);
}
@@ -10758,22 +12183,26 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"o.foo = 17;"
"var receiver = {};"
@@ -10787,8 +12216,16 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
" o.method = function(x) { return x - 1 };"
" }"
"}");
- CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_EQ(40, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK_GE(interceptor_call_count, 50);
}
@@ -10797,22 +12234,26 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
v8::TryCatch try_catch(isolate);
CompileRun(
"o.foo = 17;"
@@ -10829,9 +12270,17 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
"}");
CHECK(try_catch.HasCaught());
// TODO(verwaest): Adjust message.
- CHECK(v8_str("TypeError: receiver.method is not a function")
- ->Equals(try_catch.Exception()->ToString(isolate)));
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK(
+ v8_str("TypeError: receiver.method is not a function")
+ ->Equals(
+ context.local(),
+ try_catch.Exception()->ToString(context.local()).ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK_GE(interceptor_call_count, 50);
}
@@ -10840,22 +12289,26 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
int interceptor_call_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
v8::External::New(isolate, &interceptor_call_count)));
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
v8::TryCatch try_catch(isolate);
CompileRun(
"o.foo = 17;"
@@ -10871,9 +12324,17 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
" }"
"}");
CHECK(try_catch.HasCaught());
- CHECK(v8_str("TypeError: Illegal invocation")
- ->Equals(try_catch.Exception()->ToString(isolate)));
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK(
+ v8_str("TypeError: Illegal invocation")
+ ->Equals(
+ context.local(),
+ try_catch.Exception()->ToString(context.local()).ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
CHECK_GE(interceptor_call_count, 50);
}
@@ -10881,48 +12342,58 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
THREADED_PROFILED_TEST(CallICFastApi_TrivialSignature) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(isolate,
- FastApiCallback_TrivialSignature,
- v8_str("method_data"),
- v8::Handle<v8::Signature>());
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ isolate, FastApiCallback_TrivialSignature, v8_str("method_data"),
+ v8::Local<v8::Signature>());
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+ v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
USE(templ);
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"var result = 0;"
"for (var i = 0; i < 100; i++) {"
" result = o.method(41);"
"}");
- CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+ v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"o.foo = 17;"
"var receiver = {};"
@@ -10932,27 +12403,35 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature) {
" result = receiver.method(41);"
"}");
- CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+ v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"o.foo = 17;"
"var receiver = {};"
@@ -10966,28 +12445,40 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss1) {
" receiver = {method: function(x) { return x - 1 }};"
" }"
"}");
- CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK_EQ(40, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+ v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
v8::TryCatch try_catch(isolate);
CompileRun(
"o.foo = 17;"
@@ -11004,29 +12495,41 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
"}");
CHECK(try_catch.HasCaught());
// TODO(verwaest): Adjust message.
- CHECK(v8_str("TypeError: receiver.method is not a function")
- ->Equals(try_catch.Exception()->ToString(isolate)));
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK(
+ v8_str("TypeError: receiver.method is not a function")
+ ->Equals(
+ context.local(),
+ try_catch.Exception()->ToString(context.local()).ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::FunctionTemplate> fun_templ =
+ v8::Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
+ v8::Local<v8::FunctionTemplate> method_templ = v8::FunctionTemplate::New(
isolate, FastApiCallback_SimpleSignature, v8_str("method_data"),
v8::Signature::New(isolate, fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+ v8::Local<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ v8::Local<v8::Function> fun =
+ fun_templ->GetFunction(context.local()).ToLocalChecked();
GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ fun->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
v8::TryCatch try_catch(isolate);
CompileRun(
"o.foo = 17;"
@@ -11042,16 +12545,24 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
" }"
"}");
CHECK(try_catch.HasCaught());
- CHECK(v8_str("TypeError: Illegal invocation")
- ->Equals(try_catch.Exception()->ToString(isolate)));
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+ CHECK(
+ v8_str("TypeError: Illegal invocation")
+ ->Equals(
+ context.local(),
+ try_catch.Exception()->ToString(context.local()).ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(42, context->Global()
+ ->Get(context.local(), v8_str("saved_result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
static void ThrowingGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- info.GetIsolate()->ThrowException(Handle<Value>());
+ info.GetIsolate()->ThrowException(Local<Value>());
info.GetReturnValue().SetUndefined();
}
@@ -11064,10 +12575,13 @@ THREADED_TEST(VariousGetPropertiesAndThrowingCallbacks) {
Local<ObjectTemplate> instance_templ = templ->InstanceTemplate();
instance_templ->SetAccessor(v8_str("f"), ThrowingGetter);
- Local<Object> instance = templ->GetFunction()->NewInstance();
+ Local<Object> instance = templ->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
Local<Object> another = Object::New(context->GetIsolate());
- another->SetPrototype(instance);
+ CHECK(another->SetPrototype(context.local(), instance).FromJust());
Local<Object> with_js_getter = CompileRun(
"o = {};\n"
@@ -11077,49 +12591,53 @@ THREADED_TEST(VariousGetPropertiesAndThrowingCallbacks) {
TryCatch try_catch(context->GetIsolate());
- Local<Value> result = instance->GetRealNamedProperty(v8_str("f"));
+ v8::MaybeLocal<Value> result =
+ instance->GetRealNamedProperty(context.local(), v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
Maybe<PropertyAttribute> attr =
- instance->GetRealNamedPropertyAttributes(v8_str("f"));
+ instance->GetRealNamedPropertyAttributes(context.local(), v8_str("f"));
CHECK(!try_catch.HasCaught());
CHECK(Just(None) == attr);
- result = another->GetRealNamedProperty(v8_str("f"));
+ result = another->GetRealNamedProperty(context.local(), v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
- attr = another->GetRealNamedPropertyAttributes(v8_str("f"));
+ attr = another->GetRealNamedPropertyAttributes(context.local(), v8_str("f"));
CHECK(!try_catch.HasCaught());
CHECK(Just(None) == attr);
- result = another->GetRealNamedPropertyInPrototypeChain(v8_str("f"));
+ result = another->GetRealNamedPropertyInPrototypeChain(context.local(),
+ v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
- attr = another->GetRealNamedPropertyAttributesInPrototypeChain(v8_str("f"));
+ attr = another->GetRealNamedPropertyAttributesInPrototypeChain(
+ context.local(), v8_str("f"));
CHECK(!try_catch.HasCaught());
CHECK(Just(None) == attr);
- result = another->Get(v8_str("f"));
+ result = another->Get(context.local(), v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
- result = with_js_getter->GetRealNamedProperty(v8_str("f"));
+ result = with_js_getter->GetRealNamedProperty(context.local(), v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
- attr = with_js_getter->GetRealNamedPropertyAttributes(v8_str("f"));
+ attr = with_js_getter->GetRealNamedPropertyAttributes(context.local(),
+ v8_str("f"));
CHECK(!try_catch.HasCaught());
CHECK(Just(None) == attr);
- result = with_js_getter->Get(v8_str("f"));
+ result = with_js_getter->Get(context.local(), v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
@@ -11142,23 +12660,23 @@ static void ThrowingCallbackWithTryCatch(
static int call_depth;
-static void WithTryCatch(Handle<Message> message, Handle<Value> data) {
+static void WithTryCatch(Local<Message> message, Local<Value> data) {
TryCatch try_catch(CcTest::isolate());
}
-static void ThrowFromJS(Handle<Message> message, Handle<Value> data) {
+static void ThrowFromJS(Local<Message> message, Local<Value> data) {
if (--call_depth) CompileRun("throw 'ThrowInJS';");
}
-static void ThrowViaApi(Handle<Message> message, Handle<Value> data) {
+static void ThrowViaApi(Local<Message> message, Local<Value> data) {
if (--call_depth) CcTest::isolate()->ThrowException(v8_str("ThrowViaApi"));
}
-static void WebKitLike(Handle<Message> message, Handle<Value> data) {
- Handle<String> errorMessageString = message->Get();
+static void WebKitLike(Local<Message> message, Local<Value> data) {
+ Local<String> errorMessageString = message->Get();
CHECK(!errorMessageString.IsEmpty());
message->GetStackTrace();
message->GetScriptOrigin().ResourceName();
@@ -11171,16 +12689,18 @@ THREADED_TEST(ExceptionsDoNotPropagatePastTryCatch) {
HandleScope scope(isolate);
Local<Function> func =
- FunctionTemplate::New(isolate,
- ThrowingCallbackWithTryCatch)->GetFunction();
- context->Global()->Set(v8_str("func"), func);
+ FunctionTemplate::New(isolate, ThrowingCallbackWithTryCatch)
+ ->GetFunction(context.local())
+ .ToLocalChecked();
+ CHECK(
+ context->Global()->Set(context.local(), v8_str("func"), func).FromJust());
MessageCallback callbacks[] =
{ NULL, WebKitLike, ThrowViaApi, ThrowFromJS, WithTryCatch };
for (unsigned i = 0; i < sizeof(callbacks)/sizeof(callbacks[0]); i++) {
MessageCallback callback = callbacks[i];
if (callback != NULL) {
- V8::AddMessageListener(callback);
+ isolate->AddMessageListener(callback);
}
// Some small number to control number of times message handler should
// throw an exception.
@@ -11190,7 +12710,7 @@ THREADED_TEST(ExceptionsDoNotPropagatePastTryCatch) {
"try { func(); } catch(e) { thrown = true; }\n"
"thrown\n");
if (callback != NULL) {
- V8::RemoveMessageListeners(callback);
+ isolate->RemoveMessageListeners(callback);
}
}
}
@@ -11237,34 +12757,39 @@ THREADED_TEST(Overriding) {
// so 'h' can be shadowed on the instance object.
Local<ObjectTemplate> child_proto_templ = child_templ->PrototypeTemplate();
child_proto_templ->SetAccessor(v8_str("h"), ParentGetter, 0,
- v8::Handle<Value>(), v8::DEFAULT, v8::ReadOnly);
+ v8::Local<Value>(), v8::DEFAULT, v8::ReadOnly);
// Add 'i' as an accessor to the instance template with ReadOnly attributes
// but the attribute does not have effect because it is duplicated with
// NULL setter.
child_instance_templ->SetAccessor(v8_str("i"), ChildGetter, 0,
- v8::Handle<Value>(), v8::DEFAULT, v8::ReadOnly);
-
+ v8::Local<Value>(), v8::DEFAULT,
+ v8::ReadOnly);
// Instantiate the child template.
- Local<v8::Object> instance = child_templ->GetFunction()->NewInstance();
+ Local<v8::Object> instance = child_templ->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
// Check that the child function overrides the parent one.
- context->Global()->Set(v8_str("o"), instance);
- Local<Value> value = v8_compile("o.f")->Run();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"), instance)
+ .FromJust());
+ Local<Value> value = v8_compile("o.f")->Run(context.local()).ToLocalChecked();
// Check that the 'g' that was added last is hit.
- CHECK_EQ(42, value->Int32Value());
- value = v8_compile("o.g")->Run();
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
+ value = v8_compile("o.g")->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
// Check that 'h' cannot be shadowed.
- value = v8_compile("o.h = 3; o.h")->Run();
- CHECK_EQ(1, value->Int32Value());
+ value = v8_compile("o.h = 3; o.h")->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(1, value->Int32Value(context.local()).FromJust());
// Check that 'i' cannot be shadowed or changed.
- value = v8_compile("o.i = 3; o.i")->Run();
- CHECK_EQ(42, value->Int32Value());
+ value = v8_compile("o.i = 3; o.i")->Run(context.local()).ToLocalChecked();
+ CHECK_EQ(42, value->Int32Value(context.local()).FromJust());
}
@@ -11285,11 +12810,14 @@ THREADED_TEST(IsConstructCall) {
LocalContext context;
- context->Global()->Set(v8_str("f"), templ->GetFunction());
- Local<Value> value = v8_compile("f()")->Run();
- CHECK(!value->BooleanValue());
- value = v8_compile("new f()")->Run();
- CHECK(value->BooleanValue());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("f"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
+ Local<Value> value = v8_compile("f()")->Run(context.local()).ToLocalChecked();
+ CHECK(!value->BooleanValue(context.local()).FromJust());
+ value = v8_compile("new f()")->Run(context.local()).ToLocalChecked();
+ CHECK(value->BooleanValue(context.local()).FromJust());
}
@@ -11304,27 +12832,41 @@ THREADED_TEST(ObjectProtoToString) {
Local<String> customized_tostring = v8_str("customized toString");
// Replace Object.prototype.toString
- v8_compile("Object.prototype.toString = function() {"
- " return 'customized toString';"
- "}")->Run();
+ v8_compile(
+ "Object.prototype.toString = function() {"
+ " return 'customized toString';"
+ "}")
+ ->Run(context.local())
+ .ToLocalChecked();
// Normal ToString call should call replaced Object.prototype.toString
- Local<v8::Object> instance = templ->GetFunction()->NewInstance();
- Local<String> value = instance->ToString(isolate);
- CHECK(value->IsString() && value->Equals(customized_tostring));
+ Local<v8::Object> instance = templ->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<String> value = instance->ToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), customized_tostring).FromJust());
// ObjectProtoToString should not call replace toString function.
- value = instance->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object MyClass]")));
+ value = instance->ObjectProtoToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object MyClass]")).FromJust());
// Check global
- value = context->Global()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object global]")));
+ value =
+ context->Global()->ObjectProtoToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object global]")).FromJust());
// Check ordinary object
- Local<Value> object = v8_compile("new Object()")->Run();
- value = object.As<v8::Object>()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object Object]")));
+ Local<Value> object =
+ v8_compile("new Object()")->Run(context.local()).ToLocalChecked();
+ value = object.As<v8::Object>()
+ ->ObjectProtoToString(context.local())
+ .ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object Object]")).FromJust());
}
@@ -11346,33 +12888,48 @@ TEST(ObjectProtoToStringES6) {
"}");
// Normal ToString call should call replaced Object.prototype.toString
- Local<v8::Object> instance = templ->GetFunction()->NewInstance();
- Local<String> value = instance->ToString(isolate);
- CHECK(value->IsString() && value->Equals(customized_tostring));
+ Local<v8::Object> instance = templ->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+ Local<String> value = instance->ToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), customized_tostring).FromJust());
// ObjectProtoToString should not call replace toString function.
- value = instance->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object MyClass]")));
+ value = instance->ObjectProtoToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object MyClass]")).FromJust());
// Check global
- value = context->Global()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object global]")));
+ value =
+ context->Global()->ObjectProtoToString(context.local()).ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object global]")).FromJust());
// Check ordinary object
Local<Value> object = CompileRun("new Object()");
- value = object.As<v8::Object>()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object Object]")));
+ value = object.As<v8::Object>()
+ ->ObjectProtoToString(context.local())
+ .ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object Object]")).FromJust());
// Check that ES6 semantics using @@toStringTag work
Local<v8::Symbol> toStringTag = v8::Symbol::GetToStringTag(isolate);
-#define TEST_TOSTRINGTAG(type, tag, expected) \
- do { \
- object = CompileRun("new " #type "()"); \
- object.As<v8::Object>()->Set(toStringTag, v8_str(#tag)); \
- value = object.As<v8::Object>()->ObjectProtoToString(); \
- CHECK(value->IsString() && \
- value->Equals(v8_str("[object " #expected "]"))); \
+#define TEST_TOSTRINGTAG(type, tag, expected) \
+ do { \
+ object = CompileRun("new " #type "()"); \
+ CHECK(object.As<v8::Object>() \
+ ->Set(context.local(), toStringTag, v8_str(#tag)) \
+ .FromJust()); \
+ value = object.As<v8::Object>() \
+ ->ObjectProtoToString(context.local()) \
+ .ToLocalChecked(); \
+ CHECK(value->IsString() && \
+ value->Equals(context.local(), v8_str("[object " #expected "]")) \
+ .FromJust()); \
} while (0)
TEST_TOSTRINGTAG(Array, Object, Object);
@@ -11389,8 +12946,9 @@ TEST(ObjectProtoToStringES6) {
#undef TEST_TOSTRINGTAG
- Local<v8::RegExp> valueRegExp = v8::RegExp::New(v8_str("^$"),
- v8::RegExp::kNone);
+ Local<v8::RegExp> valueRegExp =
+ v8::RegExp::New(context.local(), v8_str("^$"), v8::RegExp::kNone)
+ .ToLocalChecked();
Local<Value> valueNumber = v8_num(123);
Local<v8::Symbol> valueSymbol = v8_symbol("TestSymbol");
Local<v8::Function> valueFunction =
@@ -11399,13 +12957,18 @@ TEST(ObjectProtoToStringES6) {
Local<v8::Primitive> valueNull = v8::Null(v8::Isolate::GetCurrent());
Local<v8::Primitive> valueUndef = v8::Undefined(v8::Isolate::GetCurrent());
-#define TEST_TOSTRINGTAG(type, tagValue, expected) \
- do { \
- object = CompileRun("new " #type "()"); \
- object.As<v8::Object>()->Set(toStringTag, tagValue); \
- value = object.As<v8::Object>()->ObjectProtoToString(); \
- CHECK(value->IsString() && \
- value->Equals(v8_str("[object " #expected "]"))); \
+#define TEST_TOSTRINGTAG(type, tagValue, expected) \
+ do { \
+ object = CompileRun("new " #type "()"); \
+ CHECK(object.As<v8::Object>() \
+ ->Set(context.local(), toStringTag, tagValue) \
+ .FromJust()); \
+ value = object.As<v8::Object>() \
+ ->ObjectProtoToString(context.local()) \
+ .ToLocalChecked(); \
+ CHECK(value->IsString() && \
+ value->Equals(context.local(), v8_str("[object " #expected "]")) \
+ .FromJust()); \
} while (0)
#define TEST_TOSTRINGTAG_TYPES(tagValue) \
@@ -11430,22 +12993,28 @@ TEST(ObjectProtoToStringES6) {
// @@toStringTag getter throws
Local<Value> obj = v8::Object::New(isolate);
- obj.As<v8::Object>()->SetAccessor(toStringTag, ThrowingSymbolAccessorGetter);
+ obj.As<v8::Object>()
+ ->SetAccessor(context.local(), toStringTag, ThrowingSymbolAccessorGetter)
+ .FromJust();
{
TryCatch try_catch(isolate);
- value = obj.As<v8::Object>()->ObjectProtoToString();
- CHECK(value.IsEmpty());
+ CHECK(obj.As<v8::Object>()->ObjectProtoToString(context.local()).IsEmpty());
CHECK(try_catch.HasCaught());
}
// @@toStringTag getter does not throw
obj = v8::Object::New(isolate);
- obj.As<v8::Object>()->SetAccessor(
- toStringTag, SymbolAccessorGetterReturnsDefault, 0, v8_str("Test"));
+ obj.As<v8::Object>()
+ ->SetAccessor(context.local(), toStringTag,
+ SymbolAccessorGetterReturnsDefault, 0, v8_str("Test"))
+ .FromJust();
{
TryCatch try_catch(isolate);
- value = obj.As<v8::Object>()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object Test]")));
+ value = obj.As<v8::Object>()
+ ->ObjectProtoToString(context.local())
+ .ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object Test]")).FromJust());
CHECK(!try_catch.HasCaught());
}
@@ -11453,8 +13022,11 @@ TEST(ObjectProtoToStringES6) {
obj = CompileRun("obj = {}; obj[Symbol.toStringTag] = 'Test'; obj");
{
TryCatch try_catch(isolate);
- value = obj.As<v8::Object>()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object Test]")));
+ value = obj.As<v8::Object>()
+ ->ObjectProtoToString(context.local())
+ .ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object Test]")).FromJust());
CHECK(!try_catch.HasCaught());
}
@@ -11465,8 +13037,7 @@ TEST(ObjectProtoToStringES6) {
"}); obj");
{
TryCatch try_catch(isolate);
- value = obj.As<v8::Object>()->ObjectProtoToString();
- CHECK(value.IsEmpty());
+ CHECK(obj.As<v8::Object>()->ObjectProtoToString(context.local()).IsEmpty());
CHECK(try_catch.HasCaught());
}
@@ -11477,8 +13048,11 @@ TEST(ObjectProtoToStringES6) {
"}); obj");
{
TryCatch try_catch(isolate);
- value = obj.As<v8::Object>()->ObjectProtoToString();
- CHECK(value->IsString() && value->Equals(v8_str("[object Test]")));
+ value = obj.As<v8::Object>()
+ ->ObjectProtoToString(context.local())
+ .ToLocalChecked();
+ CHECK(value->IsString() &&
+ value->Equals(context.local(), v8_str("[object Test]")).FromJust());
CHECK(!try_catch.HasCaught());
}
}
@@ -11492,29 +13066,83 @@ THREADED_TEST(ObjectGetConstructorName) {
"function Parent() {};"
"function Child() {};"
"Child.prototype = new Parent();"
+ "Child.prototype.constructor = Child;"
"var outer = { inner: function() { } };"
"var p = new Parent();"
"var c = new Child();"
"var x = new outer.inner();"
- "var proto = Child.prototype;")->Run();
+ "var proto = Child.prototype;")
+ ->Run(context.local())
+ .ToLocalChecked();
- Local<v8::Value> p = context->Global()->Get(v8_str("p"));
+ Local<v8::Value> p =
+ context->Global()->Get(context.local(), v8_str("p")).ToLocalChecked();
CHECK(p->IsObject() &&
- p->ToObject(isolate)->GetConstructorName()->Equals(v8_str("Parent")));
+ p->ToObject(context.local())
+ .ToLocalChecked()
+ ->GetConstructorName()
+ ->Equals(context.local(), v8_str("Parent"))
+ .FromJust());
- Local<v8::Value> c = context->Global()->Get(v8_str("c"));
+ Local<v8::Value> c =
+ context->Global()->Get(context.local(), v8_str("c")).ToLocalChecked();
CHECK(c->IsObject() &&
- c->ToObject(isolate)->GetConstructorName()->Equals(v8_str("Child")));
+ c->ToObject(context.local())
+ .ToLocalChecked()
+ ->GetConstructorName()
+ ->Equals(context.local(), v8_str("Child"))
+ .FromJust());
- Local<v8::Value> x = context->Global()->Get(v8_str("x"));
+ Local<v8::Value> x =
+ context->Global()->Get(context.local(), v8_str("x")).ToLocalChecked();
CHECK(x->IsObject() &&
- x->ToObject(isolate)->GetConstructorName()->Equals(
- v8_str("outer.inner")));
+ x->ToObject(context.local())
+ .ToLocalChecked()
+ ->GetConstructorName()
+ ->Equals(context.local(), v8_str("outer.inner"))
+ .FromJust());
- Local<v8::Value> child_prototype = context->Global()->Get(v8_str("proto"));
+ Local<v8::Value> child_prototype =
+ context->Global()->Get(context.local(), v8_str("proto")).ToLocalChecked();
CHECK(child_prototype->IsObject() &&
- child_prototype->ToObject(isolate)->GetConstructorName()->Equals(
- v8_str("Parent")));
+ child_prototype->ToObject(context.local())
+ .ToLocalChecked()
+ ->GetConstructorName()
+ ->Equals(context.local(), v8_str("Parent"))
+ .FromJust());
+}
+
+
+THREADED_TEST(SubclassGetConstructorName) {
+ v8::Isolate* isolate = CcTest::isolate();
+ LocalContext context;
+ v8::HandleScope scope(isolate);
+ v8_compile(
+ "\"use strict\";"
+ "class Parent {}"
+ "class Child extends Parent {}"
+ "var p = new Parent();"
+ "var c = new Child();")
+ ->Run(context.local())
+ .ToLocalChecked();
+
+ Local<v8::Value> p =
+ context->Global()->Get(context.local(), v8_str("p")).ToLocalChecked();
+ CHECK(p->IsObject() &&
+ p->ToObject(context.local())
+ .ToLocalChecked()
+ ->GetConstructorName()
+ ->Equals(context.local(), v8_str("Parent"))
+ .FromJust());
+
+ Local<v8::Value> c =
+ context->Global()->Get(context.local(), v8_str("c")).ToLocalChecked();
+ CHECK(c->IsObject() &&
+ c->ToObject(context.local())
+ .ToLocalChecked()
+ ->GetConstructorName()
+ ->Equals(context.local(), v8_str("Child"))
+ .FromJust());
}
@@ -11689,10 +13317,10 @@ static void ThrowInJS(const v8::FunctionCallbackInfo<v8::Value>& args) {
{
v8::Locker nested_locker(isolate);
v8::HandleScope scope(isolate);
- v8::Handle<Value> exception;
+ v8::Local<Value> exception;
{
v8::TryCatch try_catch(isolate);
- v8::Handle<Value> value = CompileRun(code);
+ v8::Local<Value> value = CompileRun(code);
CHECK(value.IsEmpty());
CHECK(try_catch.HasCaught());
// Make sure to wrap the exception in a new handle because
@@ -11713,7 +13341,7 @@ static void ThrowInJSNoCatch(const v8::FunctionCallbackInfo<v8::Value>& args) {
{
v8::Locker nested_locker(CcTest::isolate());
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<Value> value = CompileRun(code);
+ v8::Local<Value> value = CompileRun(code);
CHECK(value.IsEmpty());
args.GetReturnValue().Set(v8_str("foo"));
}
@@ -11730,8 +13358,8 @@ TEST(NestedLockers) {
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(isolate, ThrowInJS);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("throw_in_js"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("throw_in_js"), fun).FromJust());
Local<Script> script = v8_compile("(function () {"
" try {"
" throw_in_js();"
@@ -11740,7 +13368,10 @@ TEST(NestedLockers) {
" return e * 13;"
" }"
"})();");
- CHECK_EQ(91, script->Run()->Int32Value());
+ CHECK_EQ(91, script->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
@@ -11752,8 +13383,8 @@ TEST(NestedLockersNoTryCatch) {
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(env->GetIsolate(), ThrowInJSNoCatch);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("throw_in_js"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("throw_in_js"), fun).FromJust());
Local<Script> script = v8_compile("(function () {"
" try {"
" throw_in_js();"
@@ -11762,7 +13393,10 @@ TEST(NestedLockersNoTryCatch) {
" return e * 13;"
" }"
"})();");
- CHECK_EQ(91, script->Run()->Int32Value());
+ CHECK_EQ(91, script->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
@@ -11788,13 +13422,18 @@ THREADED_TEST(LockUnlockLock) {
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(CcTest::isolate(), UnlockForAMoment);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("unlock_for_a_moment"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("unlock_for_a_moment"), fun)
+ .FromJust());
Local<Script> script = v8_compile("(function () {"
" unlock_for_a_moment();"
" return 42;"
"})();");
- CHECK_EQ(42, script->Run()->Int32Value());
+ CHECK_EQ(42, script->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
{
v8::Locker locker(CcTest::isolate());
@@ -11802,13 +13441,18 @@ THREADED_TEST(LockUnlockLock) {
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(CcTest::isolate(), UnlockForAMoment);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("unlock_for_a_moment"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("unlock_for_a_moment"), fun)
+ .FromJust());
Local<Script> script = v8_compile("(function () {"
" unlock_for_a_moment();"
" return 42;"
"})();");
- CHECK_EQ(42, script->Run()->Int32Value());
+ CHECK_EQ(42, script->Run(env.local())
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
}
@@ -11824,8 +13468,7 @@ static int GetGlobalObjectsCount() {
count++;
}
}
- // Subtract one to compensate for the code stub context that is always present
- return count - 1;
+ return count;
}
@@ -11860,14 +13503,14 @@ TEST(DontLeakGlobalObjects) {
{ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
- v8_compile("Date")->Run();
+ v8_compile("Date")->Run(context.local()).ToLocalChecked();
}
CcTest::isolate()->ContextDisposedNotification();
CheckSurvivingGlobalObjectsCount(0);
{ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
- v8_compile("/aaa/")->Run();
+ v8_compile("/aaa/")->Run(context.local()).ToLocalChecked();
}
CcTest::isolate()->ContextDisposedNotification();
CheckSurvivingGlobalObjectsCount(0);
@@ -11876,7 +13519,7 @@ TEST(DontLeakGlobalObjects) {
const char* extension_list[] = { "v8/gc" };
v8::ExtensionConfiguration extensions(1, extension_list);
LocalContext context(&extensions);
- v8_compile("gc();")->Run();
+ v8_compile("gc();")->Run(context.local()).ToLocalChecked();
}
CcTest::isolate()->ContextDisposedNotification();
CheckSurvivingGlobalObjectsCount(0);
@@ -11928,7 +13571,9 @@ TEST(WeakCallbackApi) {
{
v8::HandleScope scope(isolate);
v8::Local<v8::Object> obj = v8::Object::New(isolate);
- obj->Set(v8_str("key"), v8::Integer::New(isolate, 231));
+ CHECK(
+ obj->Set(context.local(), v8_str("key"), v8::Integer::New(isolate, 231))
+ .FromJust());
v8::Persistent<v8::Object>* handle =
new v8::Persistent<v8::Object>(isolate, obj);
handle->SetWeak<v8::Persistent<v8::Object>>(
@@ -12077,7 +13722,7 @@ THREADED_TEST(CheckForCrossContextObjectLiterals) {
}
-static v8::Handle<Value> NestedScope(v8::Local<Context> env) {
+static v8::Local<Value> NestedScope(v8::Local<Context> env) {
v8::EscapableHandleScope inner(env->GetIsolate());
env->Enter();
v8::Local<Value> three = v8_num(3);
@@ -12092,8 +13737,8 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
v8::HandleScope outer(isolate);
v8::Local<Context> env = Context::New(isolate);
env->Enter();
- v8::Handle<Value> value = NestedScope(env);
- v8::Handle<String> str(value->ToString(isolate));
+ v8::Local<Value> value = NestedScope(env);
+ v8::Local<String> str(value->ToString(env).ToLocalChecked());
CHECK(!str.IsEmpty());
env->Exit();
}
@@ -12340,7 +13985,9 @@ void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
Local<ObjectTemplate> t = ObjectTemplate::New(isolate);
t->Set(v8_str("asdf"), v8::FunctionTemplate::New(isolate, RuntimeCallback));
- env->Global()->Set(v8_str("obj"), t->NewInstance());
+ CHECK(env->Global()
+ ->Set(env, v8_str("obj"), t->NewInstance(env).ToLocalChecked())
+ .FromJust());
const char* script =
"function bar() {\n"
@@ -12353,16 +14000,15 @@ void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
"// Invoke on the runtime function.\n"
"obj.asdf()";
CompileRun(script);
- bar_func_ = i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*env->Global()->Get(v8_str("bar"))));
- DCHECK(!bar_func_.is_null());
+ bar_func_ = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(
+ *env->Global()->Get(env, v8_str("bar")).ToLocalChecked()));
+ CHECK(!bar_func_.is_null());
- foo_func_ =
- i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*env->Global()->Get(v8_str("foo"))));
- DCHECK(!foo_func_.is_null());
+ foo_func_ = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(
+ *env->Global()->Get(env, v8_str("foo")).ToLocalChecked()));
+ CHECK(!foo_func_.is_null());
- v8::Handle<v8::Value> value = CompileRun("bar();");
+ v8::Local<v8::Value> value = CompileRun("bar();");
CHECK(value->IsNumber());
CHECK_EQ(9801.0, v8::Number::Cast(*value)->Value());
@@ -12621,10 +14267,18 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
CompileRun(script);
// Keep a strong reference to the code object in the handle scope.
- i::Handle<i::Code> bar_code(i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*env->Global()->Get(v8_str("bar"))))->code());
- i::Handle<i::Code> foo_code(i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*env->Global()->Get(v8_str("foo"))))->code());
+ i::Handle<i::Code> bar_code(
+ i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()
+ ->Get(env.local(), v8_str("bar"))
+ .ToLocalChecked()))
+ ->code());
+ i::Handle<i::Code> foo_code(
+ i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()
+ ->Get(env.local(), v8_str("foo"))
+ .ToLocalChecked()))
+ ->code());
// Clear the compilation cache to get more wastage.
reinterpret_cast<i::Isolate*>(isolate)->compilation_cache()->Clear();
@@ -12727,8 +14381,10 @@ THREADED_TEST(Regress54) {
local->SetInternalFieldCount(1);
templ.Reset(isolate, inner.Escape(local));
}
- v8::Handle<v8::Object> result =
- v8::Local<v8::ObjectTemplate>::New(isolate, templ)->NewInstance();
+ v8::Local<v8::Object> result =
+ v8::Local<v8::ObjectTemplate>::New(isolate, templ)
+ ->NewInstance(context.local())
+ .ToLocalChecked();
CHECK_EQ(1, result->InternalFieldCount());
}
@@ -12739,32 +14395,32 @@ TEST(CatchStackOverflow) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch(context->GetIsolate());
- v8::Handle<v8::Value> result = CompileRun(
- "function f() {"
- " return f();"
- "}"
- ""
- "f();");
+ v8::Local<v8::Value> result = CompileRun(
+ "function f() {"
+ " return f();"
+ "}"
+ ""
+ "f();");
CHECK(result.IsEmpty());
}
-static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script,
+static void CheckTryCatchSourceInfo(v8::Local<v8::Script> script,
const char* resource_name,
int line_offset) {
v8::HandleScope scope(CcTest::isolate());
v8::TryCatch try_catch(CcTest::isolate());
- v8::Handle<v8::Value> result = script->Run();
- CHECK(result.IsEmpty());
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ CHECK(script->Run(context).IsEmpty());
CHECK(try_catch.HasCaught());
- v8::Handle<v8::Message> message = try_catch.Message();
+ v8::Local<v8::Message> message = try_catch.Message();
CHECK(!message.IsEmpty());
- CHECK_EQ(10 + line_offset, message->GetLineNumber());
+ CHECK_EQ(10 + line_offset, message->GetLineNumber(context).FromJust());
CHECK_EQ(91, message->GetStartPosition());
CHECK_EQ(92, message->GetEndPosition());
- CHECK_EQ(2, message->GetStartColumn());
- CHECK_EQ(3, message->GetEndColumn());
- v8::String::Utf8Value line(message->GetSourceLine());
+ CHECK_EQ(2, message->GetStartColumn(context).FromJust());
+ CHECK_EQ(3, message->GetEndColumn(context).FromJust());
+ v8::String::Utf8Value line(message->GetSourceLine(context).ToLocalChecked());
CHECK_EQ(0, strcmp(" throw 'nirk';", *line));
v8::String::Utf8Value name(message->GetScriptOrigin().ResourceName());
CHECK_EQ(0, strcmp(resource_name, *name));
@@ -12790,22 +14446,22 @@ THREADED_TEST(TryCatchSourceInfo) {
"Foo();\n");
const char* resource_name;
- v8::Handle<v8::Script> script;
+ v8::Local<v8::Script> script;
resource_name = "test.js";
script = CompileWithOrigin(source, resource_name);
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test1.js";
- v8::ScriptOrigin origin1(
- v8::String::NewFromUtf8(context->GetIsolate(), resource_name));
- script = v8::Script::Compile(source, &origin1);
+ v8::ScriptOrigin origin1(v8_str(resource_name));
+ script =
+ v8::Script::Compile(context.local(), source, &origin1).ToLocalChecked();
CheckTryCatchSourceInfo(script, resource_name, 0);
resource_name = "test2.js";
- v8::ScriptOrigin origin2(
- v8::String::NewFromUtf8(context->GetIsolate(), resource_name),
- v8::Integer::New(context->GetIsolate(), 7));
- script = v8::Script::Compile(source, &origin2);
+ v8::ScriptOrigin origin2(v8_str(resource_name),
+ v8::Integer::New(context->GetIsolate(), 7));
+ script =
+ v8::Script::Compile(context.local(), source, &origin2).ToLocalChecked();
CheckTryCatchSourceInfo(script, resource_name, 7);
}
@@ -12814,28 +14470,35 @@ THREADED_TEST(TryCatchSourceInfoForEOSError) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch(context->GetIsolate());
- v8::Script::Compile(v8_str("!\n"));
+ CHECK(v8::Script::Compile(context.local(), v8_str("!\n")).IsEmpty());
CHECK(try_catch.HasCaught());
- v8::Handle<v8::Message> message = try_catch.Message();
- CHECK_EQ(1, message->GetLineNumber());
- CHECK_EQ(0, message->GetStartColumn());
+ v8::Local<v8::Message> message = try_catch.Message();
+ CHECK_EQ(1, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(0, message->GetStartColumn(context.local()).FromJust());
}
THREADED_TEST(CompilationCache) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::String> source0 =
- v8::String::NewFromUtf8(context->GetIsolate(), "1234");
- v8::Handle<v8::String> source1 =
- v8::String::NewFromUtf8(context->GetIsolate(), "1234");
- v8::Handle<v8::Script> script0 = CompileWithOrigin(source0, "test.js");
- v8::Handle<v8::Script> script1 = CompileWithOrigin(source1, "test.js");
- v8::Handle<v8::Script> script2 =
- v8::Script::Compile(source0); // different origin
- CHECK_EQ(1234, script0->Run()->Int32Value());
- CHECK_EQ(1234, script1->Run()->Int32Value());
- CHECK_EQ(1234, script2->Run()->Int32Value());
+ v8::Local<v8::String> source0 = v8_str("1234");
+ v8::Local<v8::String> source1 = v8_str("1234");
+ v8::Local<v8::Script> script0 = CompileWithOrigin(source0, "test.js");
+ v8::Local<v8::Script> script1 = CompileWithOrigin(source1, "test.js");
+ v8::Local<v8::Script> script2 = v8::Script::Compile(context.local(), source0)
+ .ToLocalChecked(); // different origin
+ CHECK_EQ(1234, script0->Run(context.local())
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1234, script1->Run(context.local())
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(1234, script2->Run(context.local())
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -12853,8 +14516,11 @@ THREADED_TEST(CallbackFunctionName) {
Local<ObjectTemplate> t = ObjectTemplate::New(isolate);
t->Set(v8_str("asdf"),
v8::FunctionTemplate::New(isolate, FunctionNameCallback));
- context->Global()->Set(v8_str("obj"), t->NewInstance());
- v8::Handle<v8::Value> value = CompileRun("obj.asdf.name");
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ t->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
+ v8::Local<v8::Value> value = CompileRun("obj.asdf.name");
CHECK(value->IsString());
v8::String::Utf8Value name(value);
CHECK_EQ(0, strcmp("asdf", *name));
@@ -12864,32 +14530,37 @@ THREADED_TEST(CallbackFunctionName) {
THREADED_TEST(DateAccess) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::Value> date =
- v8::Date::New(context->GetIsolate(), 1224744689038.0);
+ v8::Local<v8::Value> date =
+ v8::Date::New(context.local(), 1224744689038.0).ToLocalChecked();
CHECK(date->IsDate());
CHECK_EQ(1224744689038.0, date.As<v8::Date>()->ValueOf());
}
-void CheckProperties(v8::Isolate* isolate, v8::Handle<v8::Value> val,
+void CheckProperties(v8::Isolate* isolate, v8::Local<v8::Value> val,
unsigned elmc, const char* elmv[]) {
- v8::Handle<v8::Object> obj = val.As<v8::Object>();
- v8::Handle<v8::Array> props = obj->GetPropertyNames();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Object> obj = val.As<v8::Object>();
+ v8::Local<v8::Array> props = obj->GetPropertyNames(context).ToLocalChecked();
CHECK_EQ(elmc, props->Length());
for (unsigned i = 0; i < elmc; i++) {
- v8::String::Utf8Value elm(props->Get(v8::Integer::New(isolate, i)));
+ v8::String::Utf8Value elm(
+ props->Get(context, v8::Integer::New(isolate, i)).ToLocalChecked());
CHECK_EQ(0, strcmp(elmv[i], *elm));
}
}
-void CheckOwnProperties(v8::Isolate* isolate, v8::Handle<v8::Value> val,
+void CheckOwnProperties(v8::Isolate* isolate, v8::Local<v8::Value> val,
unsigned elmc, const char* elmv[]) {
- v8::Handle<v8::Object> obj = val.As<v8::Object>();
- v8::Handle<v8::Array> props = obj->GetOwnPropertyNames();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Object> obj = val.As<v8::Object>();
+ v8::Local<v8::Array> props =
+ obj->GetOwnPropertyNames(context).ToLocalChecked();
CHECK_EQ(elmc, props->Length());
for (unsigned i = 0; i < elmc; i++) {
- v8::String::Utf8Value elm(props->Get(v8::Integer::New(isolate, i)));
+ v8::String::Utf8Value elm(
+ props->Get(context, v8::Integer::New(isolate, i)).ToLocalChecked());
CHECK_EQ(0, strcmp(elmv[i], *elm));
}
}
@@ -12899,7 +14570,7 @@ THREADED_TEST(PropertyEnumeration) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj = CompileRun(
+ v8::Local<v8::Value> obj = CompileRun(
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -12908,34 +14579,50 @@ THREADED_TEST(PropertyEnumeration) {
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
"result;");
- v8::Handle<v8::Array> elms = obj.As<v8::Array>();
+ v8::Local<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4u, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
CheckProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 0)), elmc0, elmv0);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 0)).ToLocalChecked(),
+ elmc0, elmv0);
CheckOwnProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 0)), elmc0, elmv0);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 0)).ToLocalChecked(),
+ elmc0, elmv0);
int elmc1 = 2;
const char* elmv1[] = {"a", "b"};
CheckProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 1)), elmc1, elmv1);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 1)).ToLocalChecked(),
+ elmc1, elmv1);
CheckOwnProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 1)), elmc1, elmv1);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 1)).ToLocalChecked(),
+ elmc1, elmv1);
int elmc2 = 3;
const char* elmv2[] = {"0", "1", "2"};
CheckProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 2)), elmc2, elmv2);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 2)).ToLocalChecked(),
+ elmc2, elmv2);
CheckOwnProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 2)), elmc2, elmv2);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 2)).ToLocalChecked(),
+ elmc2, elmv2);
int elmc3 = 4;
const char* elmv3[] = {"w", "z", "x", "y"};
CheckProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 3)), elmc3, elmv3);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 3)).ToLocalChecked(),
+ elmc3, elmv3);
int elmc4 = 2;
const char* elmv4[] = {"w", "z"};
CheckOwnProperties(
- isolate, elms->Get(v8::Integer::New(isolate, 3)), elmc4, elmv4);
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 3)).ToLocalChecked(),
+ elmc4, elmv4);
}
@@ -12943,7 +14630,7 @@ THREADED_TEST(PropertyEnumeration2) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj = CompileRun(
+ v8::Local<v8::Value> obj = CompileRun(
"var result = [];"
"result[0] = {};"
"result[1] = {a: 1, b: 2};"
@@ -12952,15 +14639,19 @@ THREADED_TEST(PropertyEnumeration2) {
"var x = { __proto__: proto, w: 0, z: 1 };"
"result[3] = x;"
"result;");
- v8::Handle<v8::Array> elms = obj.As<v8::Array>();
+ v8::Local<v8::Array> elms = obj.As<v8::Array>();
CHECK_EQ(4u, elms->Length());
int elmc0 = 0;
const char** elmv0 = NULL;
- CheckProperties(isolate,
- elms->Get(v8::Integer::New(isolate, 0)), elmc0, elmv0);
-
- v8::Handle<v8::Value> val = elms->Get(v8::Integer::New(isolate, 0));
- v8::Handle<v8::Array> props = val.As<v8::Object>()->GetPropertyNames();
+ CheckProperties(
+ isolate,
+ elms->Get(context.local(), v8::Integer::New(isolate, 0)).ToLocalChecked(),
+ elmc0, elmv0);
+
+ v8::Local<v8::Value> val =
+ elms->Get(context.local(), v8::Integer::New(isolate, 0)).ToLocalChecked();
+ v8::Local<v8::Array> props =
+ val.As<v8::Object>()->GetPropertyNames(context.local()).ToLocalChecked();
CHECK_EQ(0u, props->Length());
for (uint32_t i = 0; i < props->Length(); i++) {
printf("p[%u]\n", i);
@@ -12991,14 +14682,20 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
}
}
- Local<v8::Object> instance_1 = templ->NewInstance();
- context->Global()->Set(v8_str("obj_1"), instance_1);
+ Local<v8::Object> instance_1 =
+ templ->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj_1"), instance_1)
+ .FromJust());
Local<Value> value_1 = CompileRun("obj_1.a");
CHECK(value_1.IsEmpty());
- Local<v8::Object> instance_2 = templ->NewInstance();
- context->Global()->Set(v8_str("obj_2"), instance_2);
+ Local<v8::Object> instance_2 =
+ templ->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj_2"), instance_2)
+ .FromJust());
Local<Value> value_2 = CompileRun("obj_2.a");
CHECK(value_2.IsEmpty());
@@ -13015,8 +14712,9 @@ TEST(PreCompileSerialization) {
i::FLAG_min_preparse_length = 0;
const char* script = "function foo(a) { return a+1; }";
v8::ScriptCompiler::Source source(v8_str(script));
- v8::ScriptCompiler::Compile(isolate, &source,
- v8::ScriptCompiler::kProduceParserCache);
+ v8::ScriptCompiler::Compile(env.local(), &source,
+ v8::ScriptCompiler::kProduceParserCache)
+ .ToLocalChecked();
// Serialize.
const v8::ScriptCompiler::CachedData* cd = source.GetCachedData();
i::byte* serialized_data = i::NewArray<i::byte>(cd->length);
@@ -13044,15 +14742,19 @@ THREADED_TEST(DictionaryICLoadedFunction) {
// Test LoadIC.
for (int i = 0; i < 2; i++) {
LocalContext context;
- context->Global()->Set(v8_str("tmp"), v8::True(CcTest::isolate()));
- context->Global()->Delete(v8_str("tmp"));
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("tmp"), v8::True(CcTest::isolate()))
+ .FromJust());
+ context->Global()->Delete(context.local(), v8_str("tmp")).FromJust();
CompileRun("for (var j = 0; j < 10; j++) new RegExp('');");
}
// Test CallIC.
for (int i = 0; i < 2; i++) {
LocalContext context;
- context->Global()->Set(v8_str("tmp"), v8::True(CcTest::isolate()));
- context->Global()->Delete(v8_str("tmp"));
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("tmp"), v8::True(CcTest::isolate()))
+ .FromJust());
+ context->Global()->Delete(context.local(), v8_str("tmp")).FromJust();
CompileRun("for (var j = 0; j < 10; j++) RegExp('')");
}
}
@@ -13080,10 +14782,12 @@ THREADED_TEST(CrossContextNew) {
// Call the constructor function from context0 and check that the
// result has the 'x' property.
context1->Enter();
- context1->Global()->Set(v8_str("other"), context0->Global());
+ CHECK(context1->Global()
+ ->Set(context1, v8_str("other"), context0->Global())
+ .FromJust());
Local<Value> value = CompileRun("var instance = new other.C(); instance.x");
CHECK(value->IsInt32());
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context1).FromJust());
context1->Exit();
}
@@ -13104,22 +14808,47 @@ TEST(ObjectClone) {
Local<Value> val = CompileRun(sample);
CHECK(val->IsObject());
Local<v8::Object> obj = val.As<v8::Object>();
- obj->Set(v8_str("gamma"), v8_str("cloneme"));
+ obj->Set(env.local(), v8_str("gamma"), v8_str("cloneme")).FromJust();
- CHECK(v8_str("hello")->Equals(obj->Get(v8_str("alpha"))));
- CHECK(v8::Integer::New(isolate, 123)->Equals(obj->Get(v8_str("beta"))));
- CHECK(v8_str("cloneme")->Equals(obj->Get(v8_str("gamma"))));
+ CHECK(v8_str("hello")
+ ->Equals(env.local(),
+ obj->Get(env.local(), v8_str("alpha")).ToLocalChecked())
+ .FromJust());
+ CHECK(v8::Integer::New(isolate, 123)
+ ->Equals(env.local(),
+ obj->Get(env.local(), v8_str("beta")).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("cloneme")
+ ->Equals(env.local(),
+ obj->Get(env.local(), v8_str("gamma")).ToLocalChecked())
+ .FromJust());
// Clone it.
Local<v8::Object> clone = obj->Clone();
- CHECK(v8_str("hello")->Equals(clone->Get(v8_str("alpha"))));
- CHECK(v8::Integer::New(isolate, 123)->Equals(clone->Get(v8_str("beta"))));
- CHECK(v8_str("cloneme")->Equals(clone->Get(v8_str("gamma"))));
+ CHECK(v8_str("hello")
+ ->Equals(env.local(),
+ clone->Get(env.local(), v8_str("alpha")).ToLocalChecked())
+ .FromJust());
+ CHECK(v8::Integer::New(isolate, 123)
+ ->Equals(env.local(),
+ clone->Get(env.local(), v8_str("beta")).ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str("cloneme")
+ ->Equals(env.local(),
+ clone->Get(env.local(), v8_str("gamma")).ToLocalChecked())
+ .FromJust());
// Set a property on the clone, verify each object.
- clone->Set(v8_str("beta"), v8::Integer::New(isolate, 456));
- CHECK(v8::Integer::New(isolate, 123)->Equals(obj->Get(v8_str("beta"))));
- CHECK(v8::Integer::New(isolate, 456)->Equals(clone->Get(v8_str("beta"))));
+ CHECK(clone->Set(env.local(), v8_str("beta"), v8::Integer::New(isolate, 456))
+ .FromJust());
+ CHECK(v8::Integer::New(isolate, 123)
+ ->Equals(env.local(),
+ obj->Get(env.local(), v8_str("beta")).ToLocalChecked())
+ .FromJust());
+ CHECK(v8::Integer::New(isolate, 456)
+ ->Equals(env.local(),
+ clone->Get(env.local(), v8_str("beta")).ToLocalChecked())
+ .FromJust());
}
@@ -13194,8 +14923,8 @@ THREADED_TEST(MorphCompositeStringTest) {
v8::Utils::ToLocal(factory->NewExternalStringFromOneByte(
&one_byte_resource).ToHandleChecked()));
- env->Global()->Set(v8_str("lhs"), lhs);
- env->Global()->Set(v8_str("rhs"), rhs);
+ CHECK(env->Global()->Set(env.local(), v8_str("lhs"), lhs).FromJust());
+ CHECK(env->Global()->Set(env.local(), v8_str("rhs"), rhs).FromJust());
CompileRun(
"var cons = lhs + rhs;"
@@ -13211,7 +14940,8 @@ THREADED_TEST(MorphCompositeStringTest) {
&uc16_resource);
// This should UTF-8 without flattening, since everything is ASCII.
- Handle<String> cons = v8_compile("cons")->Run().As<String>();
+ Local<String> cons =
+ v8_compile("cons")->Run(env.local()).ToLocalChecked().As<String>();
CHECK_EQ(128, cons->Utf8Length());
int nchars = -1;
CHECK_EQ(129, cons->WriteUtf8(utf_buffer, -1, &nchars));
@@ -13234,12 +14964,22 @@ THREADED_TEST(MorphCompositeStringTest) {
const char* expected_slice_on_cons =
"ow is the time for all good men to come to the aid of the party"
"Now is the time for all good men to come to the aid of the part";
- CHECK(String::NewFromUtf8(env->GetIsolate(), expected_cons)
- ->Equals(env->Global()->Get(v8_str("cons"))));
- CHECK(String::NewFromUtf8(env->GetIsolate(), expected_slice)
- ->Equals(env->Global()->Get(v8_str("slice"))));
- CHECK(String::NewFromUtf8(env->GetIsolate(), expected_slice_on_cons)
- ->Equals(env->Global()->Get(v8_str("slice_on_cons"))));
+ CHECK(v8_str(expected_cons)
+ ->Equals(env.local(), env->Global()
+ ->Get(env.local(), v8_str("cons"))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str(expected_slice)
+ ->Equals(env.local(), env->Global()
+ ->Get(env.local(), v8_str("slice"))
+ .ToLocalChecked())
+ .FromJust());
+ CHECK(v8_str(expected_slice_on_cons)
+ ->Equals(env.local(),
+ env->Global()
+ ->Get(env.local(), v8_str("slice_on_cons"))
+ .ToLocalChecked())
+ .FromJust());
}
i::DeleteArray(two_byte_string);
}
@@ -13262,8 +15002,9 @@ TEST(CompileExternalTwoByteSource) {
uint16_t* two_byte_string = AsciiToTwoByteString(one_byte_sources[i]);
TestResource* uc16_resource = new TestResource(two_byte_string);
v8::Local<v8::String> source =
- v8::String::NewExternal(context->GetIsolate(), uc16_resource);
- v8::Script::Compile(source);
+ v8::String::NewExternalTwoByte(context->GetIsolate(), uc16_resource)
+ .ToLocalChecked();
+ v8::Script::Compile(context.local(), source).FromMaybe(Local<Script>());
}
}
@@ -13293,7 +15034,7 @@ class RegExpInterruptionThread : public v8::base::Thread {
}
// Wait a bit before terminating.
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(50));
- v8::V8::TerminateExecution(isolate_);
+ isolate_->TerminateExecution();
}
private:
@@ -13301,11 +15042,12 @@ class RegExpInterruptionThread : public v8::base::Thread {
};
-void RunBeforeGC(v8::GCType type, v8::GCCallbackFlags flags) {
+void RunBeforeGC(v8::Isolate* isolate, v8::GCType type,
+ v8::GCCallbackFlags flags) {
if (v8::base::NoBarrier_Load(&regexp_interruption_data.loop_count) != 2) {
return;
}
- v8::HandleScope scope(CcTest::isolate());
+ v8::HandleScope scope(isolate);
v8::Local<v8::String> string = v8::Local<v8::String>::New(
CcTest::isolate(), regexp_interruption_data.string);
string->MakeExternal(regexp_interruption_data.string_resource);
@@ -13317,22 +15059,22 @@ void RunBeforeGC(v8::GCType type, v8::GCCallbackFlags flags) {
// * turn the subject string from one-byte internal to two-byte external string
// * force termination
TEST(RegExpInterruption) {
- v8::HandleScope scope(CcTest::isolate());
LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
- RegExpInterruptionThread timeout_thread(CcTest::isolate());
+ RegExpInterruptionThread timeout_thread(env->GetIsolate());
- v8::V8::AddGCPrologueCallback(RunBeforeGC);
+ env->GetIsolate()->AddGCPrologueCallback(RunBeforeGC);
static const char* one_byte_content = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
i::uc16* uc16_content = AsciiToTwoByteString(one_byte_content);
v8::Local<v8::String> string = v8_str(one_byte_content);
- CcTest::global()->Set(v8_str("a"), string);
- regexp_interruption_data.string.Reset(CcTest::isolate(), string);
+ env->Global()->Set(env.local(), v8_str("a"), string).FromJust();
+ regexp_interruption_data.string.Reset(env->GetIsolate(), string);
regexp_interruption_data.string_resource = new UC16VectorResource(
i::Vector<const i::uc16>(uc16_content, i::StrLength(one_byte_content)));
- v8::TryCatch try_catch(CcTest::isolate());
+ v8::TryCatch try_catch(env->GetIsolate());
timeout_thread.Start();
CompileRun("/((a*)*)*b/.exec(a)");
@@ -13352,155 +15094,27 @@ TEST(RegExpInterruption) {
TEST(ReadOnlyPropertyInGlobalProto) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
LocalContext context(0, templ);
- v8::Handle<v8::Object> global = context->Global();
- v8::Handle<v8::Object> global_proto =
- v8::Handle<v8::Object>::Cast(global->Get(v8_str("__proto__")));
- global_proto->ForceSet(v8_str("x"), v8::Integer::New(isolate, 0),
- v8::ReadOnly);
- global_proto->ForceSet(v8_str("y"), v8::Integer::New(isolate, 0),
- v8::ReadOnly);
+ v8::Local<v8::Object> global = context->Global();
+ v8::Local<v8::Object> global_proto = v8::Local<v8::Object>::Cast(
+ global->Get(context.local(), v8_str("__proto__")).ToLocalChecked());
+ global_proto->DefineOwnProperty(context.local(), v8_str("x"),
+ v8::Integer::New(isolate, 0), v8::ReadOnly)
+ .FromJust();
+ global_proto->DefineOwnProperty(context.local(), v8_str("y"),
+ v8::Integer::New(isolate, 0), v8::ReadOnly)
+ .FromJust();
// Check without 'eval' or 'with'.
- v8::Handle<v8::Value> res =
+ v8::Local<v8::Value> res =
CompileRun("function f() { x = 42; return x; }; f()");
- CHECK(v8::Integer::New(isolate, 0)->Equals(res));
+ CHECK(v8::Integer::New(isolate, 0)->Equals(context.local(), res).FromJust());
// Check with 'eval'.
res = CompileRun("function f() { eval('1'); y = 43; return y; }; f()");
- CHECK(v8::Integer::New(isolate, 0)->Equals(res));
+ CHECK(v8::Integer::New(isolate, 0)->Equals(context.local(), res).FromJust());
// Check with 'with'.
res = CompileRun("function f() { with (this) { y = 44 }; return y; }; f()");
- CHECK(v8::Integer::New(isolate, 0)->Equals(res));
-}
-
-static int force_set_set_count = 0;
-static int force_set_get_count = 0;
-bool pass_on_get = false;
-
-static void ForceSetGetter(v8::Local<v8::String> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- force_set_get_count++;
- if (pass_on_get) {
- return;
- }
- info.GetReturnValue().Set(3);
-}
-
-static void ForceSetSetter(v8::Local<v8::String> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- force_set_set_count++;
-}
-
-static void ForceSetInterceptGetter(
- v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(name->IsString());
- ForceSetGetter(Local<String>::Cast(name), info);
-}
-
-static void ForceSetInterceptSetter(
- v8::Local<v8::Name> name, v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- force_set_set_count++;
- info.GetReturnValue().SetUndefined();
-}
-
-
-TEST(ForceSet) {
- force_set_get_count = 0;
- force_set_set_count = 0;
- pass_on_get = false;
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- v8::Handle<v8::String> access_property =
- v8::String::NewFromUtf8(isolate, "a");
- templ->SetAccessor(access_property, ForceSetGetter, ForceSetSetter);
- LocalContext context(NULL, templ);
- v8::Handle<v8::Object> global = context->Global();
-
- // Ordinary properties
- v8::Handle<v8::String> simple_property =
- v8::String::NewFromUtf8(isolate, "p");
- global->ForceSet(simple_property, v8::Int32::New(isolate, 4), v8::ReadOnly);
- CHECK_EQ(4, global->Get(simple_property)->Int32Value());
- // This should fail because the property is read-only
- global->Set(simple_property, v8::Int32::New(isolate, 5));
- CHECK_EQ(4, global->Get(simple_property)->Int32Value());
- // This should succeed even though the property is read-only
- global->ForceSet(simple_property, v8::Int32::New(isolate, 6));
- CHECK_EQ(6, global->Get(simple_property)->Int32Value());
-
- // Accessors
- CHECK_EQ(0, force_set_set_count);
- CHECK_EQ(0, force_set_get_count);
- CHECK_EQ(3, global->Get(access_property)->Int32Value());
- // CHECK_EQ the property shouldn't override it, just call the setter
- // which in this case does nothing.
- global->Set(access_property, v8::Int32::New(isolate, 7));
- CHECK_EQ(3, global->Get(access_property)->Int32Value());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(2, force_set_get_count);
- // ForceSet doesn't call the accessors for now.
- // TODO(verwaest): Update once blink doesn't rely on ForceSet to delete api
- // accessors.
- global->ForceSet(access_property, v8::Int32::New(isolate, 8));
- CHECK_EQ(8, global->Get(access_property)->Int32Value());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(2, force_set_get_count);
-}
-
-
-TEST(ForceSetWithInterceptor) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
- ForceSetInterceptGetter, ForceSetInterceptSetter));
- pass_on_get = true;
- LocalContext context(NULL, templ);
- v8::Handle<v8::Object> global = context->Global();
-
- force_set_get_count = 0;
- force_set_set_count = 0;
- pass_on_get = false;
-
- v8::Handle<v8::String> some_property =
- v8::String::NewFromUtf8(isolate, "a");
- CHECK_EQ(0, force_set_set_count);
- CHECK_EQ(0, force_set_get_count);
- CHECK_EQ(3, global->Get(some_property)->Int32Value());
- // Setting the property shouldn't override it, just call the setter
- // which in this case does nothing.
- global->Set(some_property, v8::Int32::New(isolate, 7));
- CHECK_EQ(3, global->Get(some_property)->Int32Value());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(2, force_set_get_count);
- // Getting the property when the interceptor returns an empty handle
- // should yield undefined, since the property isn't present on the
- // object itself yet.
- pass_on_get = true;
- CHECK(global->Get(some_property)->IsUndefined());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(3, force_set_get_count);
- // Forcing the property to be set should cause the value to be
- // set locally without calling the interceptor.
- global->ForceSet(some_property, v8::Int32::New(isolate, 8));
- CHECK_EQ(8, global->Get(some_property)->Int32Value());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(4, force_set_get_count);
- // Reenabling the interceptor should cause it to take precedence over
- // the property
- pass_on_get = false;
- CHECK_EQ(3, global->Get(some_property)->Int32Value());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(5, force_set_get_count);
- // The interceptor should also work for other properties
- CHECK_EQ(3, global->Get(v8::String::NewFromUtf8(isolate, "b"))
- ->Int32Value());
- CHECK_EQ(1, force_set_set_count);
- CHECK_EQ(6, force_set_get_count);
+ CHECK(v8::Integer::New(isolate, 0)->Equals(context.local(), res).FromJust());
}
@@ -13515,10 +15129,10 @@ TEST(CreateDataProperty) {
"Object.defineProperty(a, 'foo', {value: 23});"
"Object.defineProperty(a, 'bar', {value: 23, configurable: true});");
- v8::Local<v8::Object> obj =
- v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
- v8::Local<v8::Array> arr =
- v8::Local<v8::Array>::Cast(env->Global()->Get(v8_str("b")));
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
+ v8::Local<v8::Array> arr = v8::Local<v8::Array>::Cast(
+ env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked());
{
// Can't change a non-configurable properties.
v8::TryCatch try_catch(isolate);
@@ -13618,10 +15232,10 @@ TEST(DefineOwnProperty) {
"Object.defineProperty(a, 'foo', {value: 23});"
"Object.defineProperty(a, 'bar', {value: 23, configurable: true});");
- v8::Local<v8::Object> obj =
- v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
- v8::Local<v8::Array> arr =
- v8::Local<v8::Array>::Cast(env->Global()->Get(v8_str("b")));
+ v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
+ v8::Local<v8::Array> arr = v8::Local<v8::Array>::Cast(
+ env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked());
{
// Can't change a non-configurable properties.
v8::TryCatch try_catch(isolate);
@@ -13745,9 +15359,9 @@ THREADED_TEST(InitGlobalVarInProtoChain) {
v8::HandleScope scope(context->GetIsolate());
// Introduce a variable in the prototype chain.
CompileRun("__proto__.x = 42");
- v8::Handle<v8::Value> result = CompileRun("var x = 43; x");
+ v8::Local<v8::Value> result = CompileRun("var x = 43; x");
CHECK(!result->IsUndefined());
- CHECK_EQ(43, result->Int32Value());
+ CHECK_EQ(43, result->Int32Value(context.local()).FromJust());
}
@@ -13760,16 +15374,16 @@ THREADED_TEST(ReplaceConstantFunction) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Object> obj = v8::Object::New(isolate);
- v8::Handle<v8::FunctionTemplate> func_templ =
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+ v8::Local<v8::FunctionTemplate> func_templ =
v8::FunctionTemplate::New(isolate);
- v8::Handle<v8::String> foo_string =
- v8::String::NewFromUtf8(isolate, "foo");
- obj->Set(foo_string, func_templ->GetFunction());
- v8::Handle<v8::Object> obj_clone = obj->Clone();
- obj_clone->Set(foo_string,
- v8::String::NewFromUtf8(isolate, "Hello"));
- CHECK(!obj->Get(foo_string)->IsUndefined());
+ v8::Local<v8::String> foo_string = v8_str("foo");
+ obj->Set(context.local(), foo_string,
+ func_templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
+ v8::Local<v8::Object> obj_clone = obj->Clone();
+ obj_clone->Set(context.local(), foo_string, v8_str("Hello")).FromJust();
+ CHECK(!obj->Get(context.local(), foo_string).ToLocalChecked()->IsUndefined());
}
@@ -13784,20 +15398,21 @@ static void CheckElementValue(i::Isolate* isolate,
template <class ExternalArrayClass, class ElementType>
-static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
- v8::Handle<Object> obj,
+static void ObjectWithExternalArrayTestHelper(Local<Context> context,
+ v8::Local<Object> obj,
int element_count,
i::ExternalArrayType array_type,
int64_t low, int64_t high) {
- i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+ i::Handle<i::JSReceiver> jsobj = v8::Utils::OpenHandle(*obj);
i::Isolate* isolate = jsobj->GetIsolate();
- obj->Set(v8_str("field"),
- v8::Int32::New(reinterpret_cast<v8::Isolate*>(isolate), 1503));
- context->Global()->Set(v8_str("ext_array"), obj);
- v8::Handle<v8::Value> result = CompileRun("ext_array.field");
- CHECK_EQ(1503, result->Int32Value());
+ obj->Set(context, v8_str("field"),
+ v8::Int32::New(reinterpret_cast<v8::Isolate*>(isolate), 1503))
+ .FromJust();
+ CHECK(context->Global()->Set(context, v8_str("ext_array"), obj).FromJust());
+ v8::Local<v8::Value> result = CompileRun("ext_array.field");
+ CHECK_EQ(1503, result->Int32Value(context).FromJust());
result = CompileRun("ext_array[1]");
- CHECK_EQ(1, result->Int32Value());
+ CHECK_EQ(1, result->Int32Value(context).FromJust());
// Check assigned smis
result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -13809,14 +15424,14 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
"}"
"sum;");
- CHECK_EQ(28, result->Int32Value());
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
// Check pass through of assigned smis
result = CompileRun("var sum = 0;"
"for (var i = 0; i < 8; i++) {"
" sum += ext_array[i] = ext_array[i] = -i;"
"}"
"sum;");
- CHECK_EQ(-28, result->Int32Value());
+ CHECK_EQ(-28, result->Int32Value(context).FromJust());
// Check assigned smis in reverse order
@@ -13828,7 +15443,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" sum += ext_array[i];"
"}"
"sum;");
- CHECK_EQ(28, result->Int32Value());
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
// Check pass through of assigned HeapNumbers
result = CompileRun("var sum = 0;"
@@ -13836,7 +15451,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" sum += ext_array[i] = ext_array[i] = (-i * 0.5);"
"}"
"sum;");
- CHECK_EQ(-28, result->Int32Value());
+ CHECK_EQ(-28, result->Int32Value(context).FromJust());
// Check assigned HeapNumbers
result = CompileRun("for (var i = 0; i < 16; i+=2) {"
@@ -13847,7 +15462,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" sum += ext_array[i];"
"}"
"sum;");
- CHECK_EQ(28, result->Int32Value());
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
// Check assigned HeapNumbers in reverse order
result = CompileRun("for (var i = 14; i >= 0; i-=2) {"
@@ -13858,7 +15473,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" sum += ext_array[i];"
"}"
"sum;");
- CHECK_EQ(28, result->Int32Value());
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
i::ScopedVector<char> test_buf(1024);
@@ -13877,13 +15492,13 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
boundary_program,
low);
result = CompileRun(test_buf.start());
- CHECK_EQ(low, result->IntegerValue());
+ CHECK_EQ(low, result->IntegerValue(context).FromJust());
i::SNPrintF(test_buf,
boundary_program,
high);
result = CompileRun(test_buf.start());
- CHECK_EQ(high, result->IntegerValue());
+ CHECK_EQ(high, result->IntegerValue(context).FromJust());
// Check misprediction of type in IC.
result = CompileRun("var tmp_array = ext_array;"
@@ -13898,7 +15513,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
"sum;");
// Force GC to trigger verification.
CcTest::heap()->CollectAllGarbage();
- CHECK_EQ(28, result->Int32Value());
+ CHECK_EQ(28, result->Int32Value(context).FromJust());
// Make sure out-of-range loads do not throw.
i::SNPrintF(test_buf,
@@ -13911,7 +15526,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
"caught_exception;",
element_count);
result = CompileRun(test_buf.start());
- CHECK_EQ(false, result->BooleanValue());
+ CHECK_EQ(false, result->BooleanValue(context).FromJust());
// Make sure out-of-range stores do not throw.
i::SNPrintF(test_buf,
@@ -13924,14 +15539,14 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
"caught_exception;",
element_count);
result = CompileRun(test_buf.start());
- CHECK_EQ(false, result->BooleanValue());
+ CHECK_EQ(false, result->BooleanValue(context).FromJust());
// Check other boundary conditions, values and operations.
result = CompileRun("for (var i = 0; i < 8; i++) {"
" ext_array[7] = undefined;"
"}"
"ext_array[7];");
- CHECK_EQ(0, result->Int32Value());
+ CHECK_EQ(0, result->Int32Value(context).FromJust());
if (array_type == i::kExternalFloat64Array ||
array_type == i::kExternalFloat32Array) {
CHECK(std::isnan(
@@ -13944,7 +15559,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" ext_array[6] = '2.3';"
"}"
"ext_array[6];");
- CHECK_EQ(2, result->Int32Value());
+ CHECK_EQ(2, result->Int32Value(context).FromJust());
CHECK_EQ(2,
static_cast<int>(
i::Object::GetElement(
@@ -13961,7 +15576,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" ext_array[i] = NaN;"
"}"
"ext_array[5];");
- CHECK_EQ(0, result->Int32Value());
+ CHECK_EQ(0, result->Int32Value(context).FromJust());
CheckElementValue(isolate, 0, jsobj, 5);
result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -13973,7 +15588,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
"ext_array[5];");
int expected_value =
(array_type == i::kExternalUint8ClampedArray) ? 255 : 0;
- CHECK_EQ(expected_value, result->Int32Value());
+ CHECK_EQ(expected_value, result->Int32Value(context).FromJust());
CheckElementValue(isolate, expected_value, jsobj, 5);
result = CompileRun("for (var i = 0; i < 8; i++) {"
@@ -13983,7 +15598,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" ext_array[i] = -Infinity;"
"}"
"ext_array[5];");
- CHECK_EQ(0, result->Int32Value());
+ CHECK_EQ(0, result->Int32Value(context).FromJust());
CheckElementValue(isolate, 0, jsobj, 5);
// Check truncation behavior of integral arrays.
@@ -14016,11 +15631,11 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
unsigned_data :
(is_pixel_data ? pixel_data : signed_data)));
result = CompileRun(test_buf.start());
- CHECK_EQ(true, result->BooleanValue());
+ CHECK_EQ(true, result->BooleanValue(context).FromJust());
}
- i::Handle<ExternalArrayClass> array(
- ExternalArrayClass::cast(jsobj->elements()));
+ i::Handle<ExternalArrayClass> array(ExternalArrayClass::cast(
+ i::Handle<i::JSObject>::cast(jsobj)->elements()));
for (int i = 0; i < element_count; i++) {
array->set(i, static_cast<ElementType>(i));
}
@@ -14038,7 +15653,7 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" sum=ee_op_test_complex_func(sum);"
"}"
"sum;");
- CHECK_EQ(16000000, result->Int32Value());
+ CHECK_EQ(16000000, result->Int32Value(context).FromJust());
// Test count operations
result = CompileRun("function ee_op_test_count_func(sum) {"
@@ -14053,34 +15668,46 @@ static void ObjectWithExternalArrayTestHelper(Handle<Context> context,
" sum=ee_op_test_count_func(sum);"
"}"
"sum;");
- CHECK_EQ(16000000, result->Int32Value());
+ CHECK_EQ(16000000, result->Int32Value(context).FromJust());
result = CompileRun("ext_array[3] = 33;"
"delete ext_array[3];"
"ext_array[3];");
- CHECK_EQ(33, result->Int32Value());
+ CHECK_EQ(33, result->Int32Value(context).FromJust());
result = CompileRun("ext_array[0] = 10; ext_array[1] = 11;"
"ext_array[2] = 12; ext_array[3] = 13;"
"ext_array.__defineGetter__('2',"
"function() { return 120; });"
"ext_array[2];");
- CHECK_EQ(12, result->Int32Value());
+ CHECK_EQ(12, result->Int32Value(context).FromJust());
result = CompileRun("var js_array = new Array(40);"
"js_array[0] = 77;"
"js_array;");
- CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+ CHECK_EQ(77, v8::Object::Cast(*result)
+ ->Get(context, v8_str("0"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
result = CompileRun("ext_array[1] = 23;"
"ext_array.__proto__ = [];"
"js_array.__proto__ = ext_array;"
"js_array.concat(ext_array);");
- CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
- CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+ CHECK_EQ(77, v8::Object::Cast(*result)
+ ->Get(context, v8_str("0"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(23, v8::Object::Cast(*result)
+ ->Get(context, v8_str("1"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
result = CompileRun("ext_array[1] = 23;");
- CHECK_EQ(23, result->Int32Value());
+ CHECK_EQ(23, result->Int32Value(context).FromJust());
}
@@ -14111,7 +15738,7 @@ static void FixedTypedArrayTestHelper(i::ExternalArrayType array_type,
CHECK_EQ(static_cast<int64_t>(static_cast<ElementType>(i)),
static_cast<int64_t>(fixed_array->get_scalar(i)));
}
- v8::Handle<v8::Object> obj = v8::Utils::ToLocal(jsobj);
+ v8::Local<v8::Object> obj = v8::Utils::ToLocal(jsobj);
ObjectWithExternalArrayTestHelper<FixedTypedArrayClass, ElementType>(
context.local(), obj, kElementCount, array_type,
@@ -14196,7 +15823,7 @@ void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
CHECK_EQ(kElementCount, static_cast<int>(ta->Length()));
CHECK_EQ(2 * sizeof(ElementType), ta->ByteOffset());
CHECK_EQ(kElementCount * sizeof(ElementType), ta->ByteLength());
- CHECK(ab->Equals(ta->Buffer()));
+ CHECK(ab->Equals(env.local(), ta->Buffer()).FromJust());
ElementType* data = backing_store.start() + 2;
for (int i = 0; i < kElementCount; i++) {
@@ -14280,7 +15907,7 @@ THREADED_TEST(DataView) {
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
CHECK_EQ(2u, dv->ByteOffset());
CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
- CHECK(ab->Equals(dv->Buffer()));
+ CHECK(ab->Equals(env.local(), dv->Buffer()).FromJust());
}
@@ -14420,22 +16047,22 @@ THREADED_TEST(SharedDataView) {
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
CHECK_EQ(2u, dv->ByteOffset());
CHECK_EQ(kSize, static_cast<int>(dv->ByteLength()));
- CHECK(ab->Equals(dv->Buffer()));
+ CHECK(ab->Equals(env.local(), dv->Buffer()).FromJust());
}
-#define IS_ARRAY_BUFFER_VIEW_TEST(View) \
- THREADED_TEST(Is##View) { \
- LocalContext env; \
- v8::Isolate* isolate = env->GetIsolate(); \
- v8::HandleScope handle_scope(isolate); \
- \
- Handle<Value> result = CompileRun( \
- "var ab = new ArrayBuffer(128);" \
- "new " #View "(ab)"); \
- CHECK(result->IsArrayBufferView()); \
- CHECK(result->Is##View()); \
- CheckInternalFieldsAreZero<v8::ArrayBufferView>(result.As<v8::View>()); \
+#define IS_ARRAY_BUFFER_VIEW_TEST(View) \
+ THREADED_TEST(Is##View) { \
+ LocalContext env; \
+ v8::Isolate* isolate = env->GetIsolate(); \
+ v8::HandleScope handle_scope(isolate); \
+ \
+ Local<Value> result = CompileRun( \
+ "var ab = new ArrayBuffer(128);" \
+ "new " #View "(ab)"); \
+ CHECK(result->IsArrayBufferView()); \
+ CHECK(result->Is##View()); \
+ CheckInternalFieldsAreZero<v8::ArrayBufferView>(result.As<v8::View>()); \
}
IS_ARRAY_BUFFER_VIEW_TEST(Uint8Array)
@@ -14457,20 +16084,45 @@ THREADED_TEST(ScriptContextDependence) {
LocalContext c1;
v8::HandleScope scope(c1->GetIsolate());
const char *source = "foo";
- v8::Handle<v8::Script> dep = v8_compile(source);
- v8::ScriptCompiler::Source script_source(v8::String::NewFromUtf8(
- c1->GetIsolate(), source));
- v8::Handle<v8::UnboundScript> indep =
- v8::ScriptCompiler::CompileUnbound(c1->GetIsolate(), &script_source);
- c1->Global()->Set(v8::String::NewFromUtf8(c1->GetIsolate(), "foo"),
- v8::Integer::New(c1->GetIsolate(), 100));
- CHECK_EQ(dep->Run()->Int32Value(), 100);
- CHECK_EQ(indep->BindToCurrentContext()->Run()->Int32Value(), 100);
+ v8::Local<v8::Script> dep = v8_compile(source);
+ v8::ScriptCompiler::Source script_source(
+ v8::String::NewFromUtf8(c1->GetIsolate(), source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked());
+ v8::Local<v8::UnboundScript> indep =
+ v8::ScriptCompiler::CompileUnboundScript(c1->GetIsolate(), &script_source)
+ .ToLocalChecked();
+ c1->Global()
+ ->Set(c1.local(), v8::String::NewFromUtf8(c1->GetIsolate(), "foo",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Integer::New(c1->GetIsolate(), 100))
+ .FromJust();
+ CHECK_EQ(
+ dep->Run(c1.local()).ToLocalChecked()->Int32Value(c1.local()).FromJust(),
+ 100);
+ CHECK_EQ(indep->BindToCurrentContext()
+ ->Run(c1.local())
+ .ToLocalChecked()
+ ->Int32Value(c1.local())
+ .FromJust(),
+ 100);
LocalContext c2;
- c2->Global()->Set(v8::String::NewFromUtf8(c2->GetIsolate(), "foo"),
- v8::Integer::New(c2->GetIsolate(), 101));
- CHECK_EQ(dep->Run()->Int32Value(), 100);
- CHECK_EQ(indep->BindToCurrentContext()->Run()->Int32Value(), 101);
+ c2->Global()
+ ->Set(c2.local(), v8::String::NewFromUtf8(c2->GetIsolate(), "foo",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Integer::New(c2->GetIsolate(), 101))
+ .FromJust();
+ CHECK_EQ(
+ dep->Run(c2.local()).ToLocalChecked()->Int32Value(c2.local()).FromJust(),
+ 100);
+ CHECK_EQ(indep->BindToCurrentContext()
+ ->Run(c2.local())
+ .ToLocalChecked()
+ ->Int32Value(c2.local())
+ .FromJust(),
+ 101);
}
@@ -14479,25 +16131,27 @@ THREADED_TEST(StackTrace) {
v8::HandleScope scope(context->GetIsolate());
v8::TryCatch try_catch(context->GetIsolate());
const char *source = "function foo() { FAIL.FAIL; }; foo();";
- v8::Handle<v8::String> src =
- v8::String::NewFromUtf8(context->GetIsolate(), source);
- v8::Handle<v8::String> origin =
- v8::String::NewFromUtf8(context->GetIsolate(), "stack-trace-test");
+ v8::Local<v8::String> src = v8_str(source);
+ v8::Local<v8::String> origin = v8_str("stack-trace-test");
v8::ScriptCompiler::Source script_source(src, v8::ScriptOrigin(origin));
- v8::ScriptCompiler::CompileUnbound(context->GetIsolate(), &script_source)
- ->BindToCurrentContext()
- ->Run();
+ CHECK(v8::ScriptCompiler::CompileUnboundScript(context->GetIsolate(),
+ &script_source)
+ .ToLocalChecked()
+ ->BindToCurrentContext()
+ ->Run(context.local())
+ .IsEmpty());
CHECK(try_catch.HasCaught());
- v8::String::Utf8Value stack(try_catch.StackTrace());
+ v8::String::Utf8Value stack(
+ try_catch.StackTrace(context.local()).ToLocalChecked());
CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
}
// Checks that a StackFrame has certain expected values.
void checkStackFrame(const char* expected_script_name,
- const char* expected_func_name, int expected_line_number,
- int expected_column, bool is_eval, bool is_constructor,
- v8::Handle<v8::StackFrame> frame) {
+ const char* expected_func_name, int expected_line_number,
+ int expected_column, bool is_eval, bool is_constructor,
+ v8::Local<v8::StackFrame> frame) {
v8::HandleScope scope(CcTest::isolate());
v8::String::Utf8Value func_name(frame->GetFunctionName());
v8::String::Utf8Value script_name(frame->GetScriptName());
@@ -14520,12 +16174,18 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
const char* origin = "capture-stack-trace-test";
const int kOverviewTest = 1;
const int kDetailedTest = 2;
+ const int kFunctionName = 3;
+ const int kDisplayName = 4;
+ const int kFunctionNameAndDisplayName = 5;
+ const int kDisplayNameIsNotString = 6;
+ const int kFunctionNameIsNotString = 7;
- DCHECK(args.Length() == 1);
+ CHECK(args.Length() == 1);
- int testGroup = args[0]->Int32Value();
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ int testGroup = args[0]->Int32Value(context).FromJust();
if (testGroup == kOverviewTest) {
- v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kOverview);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bar", 2, 10, false, false,
@@ -14539,7 +16199,7 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(stackTrace->AsArray()->IsArray());
} else if (testGroup == kDetailedTest) {
- v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
checkStackFrame(origin, "bat", 4, 22, false, false,
@@ -14553,6 +16213,35 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
checkStackFrame(origin, "", 10, 1, false, false, stackTrace->GetFrame(3));
CHECK(stackTrace->AsArray()->IsArray());
+ } else if (testGroup == kFunctionName) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "function.name", 2, 24, false, false,
+ stackTrace->GetFrame(0));
+ } else if (testGroup == kDisplayName) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "function.displayName", 2, 24, false, false,
+ stackTrace->GetFrame(0));
+ } else if (testGroup == kFunctionNameAndDisplayName) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "function.displayName", 2, 24, false, false,
+ stackTrace->GetFrame(0));
+ } else if (testGroup == kDisplayNameIsNotString) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "function.name", 2, 24, false, false,
+ stackTrace->GetFrame(0));
+ } else if (testGroup == kFunctionNameIsNotString) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "f", 2, 24, false, false, stackTrace->GetFrame(0));
}
}
@@ -14563,8 +16252,7 @@ void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(CaptureStackTrace) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::String> origin =
- v8::String::NewFromUtf8(isolate, "capture-stack-trace-test");
+ v8::Local<v8::String> origin = v8_str("capture-stack-trace-test");
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->Set(v8_str("AnalyzeStackInNativeCode"),
v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
@@ -14581,14 +16269,15 @@ TEST(CaptureStackTrace) {
" bar();\n"
"}\n"
"var x;eval('new foo();');";
- v8::Handle<v8::String> overview_src =
- v8::String::NewFromUtf8(isolate, overview_source);
+ v8::Local<v8::String> overview_src = v8_str(overview_source);
v8::ScriptCompiler::Source script_source(overview_src,
v8::ScriptOrigin(origin));
- v8::Handle<Value> overview_result(
- v8::ScriptCompiler::CompileUnbound(isolate, &script_source)
+ v8::Local<Value> overview_result(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source)
+ .ToLocalChecked()
->BindToCurrentContext()
- ->Run());
+ ->Run(context.local())
+ .ToLocalChecked());
CHECK(!overview_result.IsEmpty());
CHECK(overview_result->IsObject());
@@ -14601,27 +16290,58 @@ TEST(CaptureStackTrace) {
" bat();\n"
"}\n"
"eval('new baz();');";
- v8::Handle<v8::String> detailed_src =
- v8::String::NewFromUtf8(isolate, detailed_source);
+ v8::Local<v8::String> detailed_src = v8_str(detailed_source);
// Make the script using a non-zero line and column offset.
- v8::Handle<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
- v8::Handle<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
+ v8::Local<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
+ v8::Local<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
v8::ScriptCompiler::Source script_source2(detailed_src, detailed_origin);
- v8::Handle<v8::UnboundScript> detailed_script(
- v8::ScriptCompiler::CompileUnbound(isolate, &script_source2));
- v8::Handle<Value> detailed_result(
- detailed_script->BindToCurrentContext()->Run());
+ v8::Local<v8::UnboundScript> detailed_script(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source2)
+ .ToLocalChecked());
+ v8::Local<Value> detailed_result(detailed_script->BindToCurrentContext()
+ ->Run(context.local())
+ .ToLocalChecked());
CHECK(!detailed_result.IsEmpty());
CHECK(detailed_result->IsObject());
+
+ // Test using function.name and function.displayName in stack trace
+ const char* function_name_source =
+ "function bar(function_name, display_name, testGroup) {\n"
+ " var f = function() { AnalyzeStackInNativeCode(testGroup); };\n"
+ " if (function_name) {\n"
+ " Object.defineProperty(f, 'name', { value: function_name });\n"
+ " }\n"
+ " if (display_name) {\n"
+ " f.displayName = display_name;"
+ " }\n"
+ " f()\n"
+ "}\n"
+ "bar('function.name', undefined, 3);\n"
+ "bar(undefined, 'function.displayName', 4);\n"
+ "bar('function.name', 'function.displayName', 5);\n"
+ "bar('function.name', 239, 6);\n"
+ "bar(239, undefined, 7);\n";
+ v8::Local<v8::String> function_name_src =
+ v8::String::NewFromUtf8(isolate, function_name_source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::ScriptCompiler::Source script_source3(function_name_src,
+ v8::ScriptOrigin(origin));
+ v8::Local<Value> function_name_result(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source3)
+ .ToLocalChecked()
+ ->BindToCurrentContext()
+ ->Run(context.local())
+ .ToLocalChecked());
+ CHECK(!function_name_result.IsEmpty());
}
static void StackTraceForUncaughtExceptionListener(
- v8::Handle<v8::Message> message,
- v8::Handle<Value>) {
+ v8::Local<v8::Message> message, v8::Local<Value>) {
report_count++;
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK_EQ(2, stack_trace->GetFrameCount());
checkStackFrame("origin", "foo", 2, 3, false, false,
stack_trace->GetFrame(0));
@@ -14633,9 +16353,10 @@ static void StackTraceForUncaughtExceptionListener(
TEST(CaptureStackTraceForUncaughtException) {
report_count = 0;
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRunWithOrigin(
"function foo() {\n"
@@ -14646,11 +16367,12 @@ TEST(CaptureStackTraceForUncaughtException) {
"};",
"origin");
v8::Local<v8::Object> global = env->Global();
- Local<Value> trouble = global->Get(v8_str("bar"));
+ Local<Value> trouble =
+ global->Get(env.local(), v8_str("bar")).ToLocalChecked();
CHECK(trouble->IsFunction());
- Function::Cast(*trouble)->Call(global, 0, NULL);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
+ CHECK(Function::Cast(*trouble)->Call(env.local(), global, 0, NULL).IsEmpty());
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
CHECK_EQ(1, report_count);
}
@@ -14658,7 +16380,8 @@ TEST(CaptureStackTraceForUncaughtException) {
TEST(GetStackTraceForUncaughtExceptionFromSimpleStackTrace) {
report_count = 0;
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// Create an Error object first.
CompileRunWithOrigin(
@@ -14671,27 +16394,28 @@ TEST(GetStackTraceForUncaughtExceptionFromSimpleStackTrace) {
"var e;",
"origin");
v8::Local<v8::Object> global = env->Global();
- Local<Value> trouble = global->Get(v8_str("bar"));
+ Local<Value> trouble =
+ global->Get(env.local(), v8_str("bar")).ToLocalChecked();
CHECK(trouble->IsFunction());
- Function::Cast(*trouble)->Call(global, 0, NULL);
+ Function::Cast(*trouble)->Call(env.local(), global, 0, NULL).ToLocalChecked();
// Enable capturing detailed stack trace late, and throw the exception.
// The detailed stack trace should be extracted from the simple stack.
- v8::V8::AddMessageListener(StackTraceForUncaughtExceptionListener);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRunWithOrigin("throw e", "origin");
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
CHECK_EQ(1, report_count);
}
TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true,
- 1024,
- v8::StackTrace::kDetailed);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 1024,
+ v8::StackTrace::kDetailed);
CompileRun(
"var setters = ['column', 'lineNumber', 'scriptName',\n"
@@ -14702,13 +16426,13 @@ TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
" Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
"}\n");
CompileRun("throw 'exception';");
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
}
-static void StackTraceFunctionNameListener(v8::Handle<v8::Message> message,
- v8::Handle<Value>) {
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+static void StackTraceFunctionNameListener(v8::Local<v8::Message> message,
+ v8::Local<Value>) {
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK_EQ(5, stack_trace->GetFrameCount());
checkStackFrame("origin", "foo:0", 4, 7, false, false,
stack_trace->GetFrame(0));
@@ -14724,7 +16448,8 @@ static void StackTraceFunctionNameListener(v8::Handle<v8::Message> message,
TEST(GetStackTraceContainsFunctionsWithFunctionName) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
CompileRunWithOrigin(
"function gen(name, counter) {\n"
@@ -14746,18 +16471,18 @@ TEST(GetStackTraceContainsFunctionsWithFunctionName) {
"};",
"origin");
- v8::V8::AddMessageListener(StackTraceFunctionNameListener);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ isolate->AddMessageListener(StackTraceFunctionNameListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRunWithOrigin("gen('foo', 3)();", "origin");
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(StackTraceFunctionNameListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(StackTraceFunctionNameListener);
}
-static void RethrowStackTraceHandler(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void RethrowStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
// Use the frame where JavaScript is called from.
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK(!stack_trace.IsEmpty());
int frame_count = stack_trace->GetFrameCount();
CHECK_EQ(3, frame_count);
@@ -14772,7 +16497,8 @@ static void RethrowStackTraceHandler(v8::Handle<v8::Message> message,
// is first thrown (not where it is rethrown).
TEST(RethrowStackTrace) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// We make sure that
// - the stack trace of the ReferenceError in g() is reported.
// - the stack trace is not overwritten when e1 is rethrown by t().
@@ -14790,17 +16516,17 @@ TEST(RethrowStackTrace) {
" t(e1); \n"
" } \n"
"} \n";
- v8::V8::AddMessageListener(RethrowStackTraceHandler);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ isolate->AddMessageListener(RethrowStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRun(source);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(RethrowStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowStackTraceHandler);
}
-static void RethrowPrimitiveStackTraceHandler(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+static void RethrowPrimitiveStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK(!stack_trace.IsEmpty());
int frame_count = stack_trace->GetFrameCount();
CHECK_EQ(2, frame_count);
@@ -14814,7 +16540,8 @@ static void RethrowPrimitiveStackTraceHandler(v8::Handle<v8::Message> message,
// Test that we do not recognize identity for primitive exceptions.
TEST(RethrowPrimitiveStackTrace) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
// We do not capture stack trace for non Error objects on creation time.
// Instead, we capture the stack trace on last throw.
const char* source =
@@ -14826,18 +16553,18 @@ TEST(RethrowPrimitiveStackTrace) {
"} catch (e1) { \n"
" t(e1) \n"
"} \n";
- v8::V8::AddMessageListener(RethrowPrimitiveStackTraceHandler);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ isolate->AddMessageListener(RethrowPrimitiveStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRun(source);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(RethrowPrimitiveStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowPrimitiveStackTraceHandler);
}
-static void RethrowExistingStackTraceHandler(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void RethrowExistingStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
// Use the frame where JavaScript is called from.
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK(!stack_trace.IsEmpty());
CHECK_EQ(1, stack_trace->GetFrameCount());
CHECK_EQ(1, stack_trace->GetFrame(0)->GetLineNumber());
@@ -14848,22 +16575,23 @@ static void RethrowExistingStackTraceHandler(v8::Handle<v8::Message> message,
// not where it is thrown.
TEST(RethrowExistingStackTrace) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
const char* source =
"var e = new Error(); \n"
"throw e; \n";
- v8::V8::AddMessageListener(RethrowExistingStackTraceHandler);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ isolate->AddMessageListener(RethrowExistingStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRun(source);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(RethrowExistingStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowExistingStackTraceHandler);
}
-static void RethrowBogusErrorStackTraceHandler(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void RethrowBogusErrorStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
// Use the frame where JavaScript is called from.
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
CHECK(!stack_trace.IsEmpty());
CHECK_EQ(1, stack_trace->GetFrameCount());
CHECK_EQ(2, stack_trace->GetFrame(0)->GetLineNumber());
@@ -14873,15 +16601,16 @@ static void RethrowBogusErrorStackTraceHandler(v8::Handle<v8::Message> message,
// Test that the stack trace is captured where the bogus Error object is thrown.
TEST(RethrowBogusErrorStackTrace) {
LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
const char* source =
"var e = {__proto__: new Error()} \n"
"throw e; \n";
- v8::V8::AddMessageListener(RethrowBogusErrorStackTraceHandler);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ isolate->AddMessageListener(RethrowBogusErrorStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
CompileRun(source);
- v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
- v8::V8::RemoveMessageListeners(RethrowBogusErrorStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowBogusErrorStackTraceHandler);
}
@@ -14895,21 +16624,28 @@ int promise_reject_column_number = -1;
int promise_reject_frame_count = -1;
void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
+ v8::Local<v8::Object> global = CcTest::global();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
if (reject_message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
promise_reject_counter++;
- CcTest::global()->Set(v8_str("rejected"), reject_message.GetPromise());
- CcTest::global()->Set(v8_str("value"), reject_message.GetValue());
- v8::Handle<v8::Message> message =
- v8::Exception::CreateMessage(reject_message.GetValue());
- v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ global->Set(context, v8_str("rejected"), reject_message.GetPromise())
+ .FromJust();
+ global->Set(context, v8_str("value"), reject_message.GetValue()).FromJust();
+ v8::Local<v8::Message> message = v8::Exception::CreateMessage(
+ CcTest::isolate(), reject_message.GetValue());
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- promise_reject_msg_line_number = message->GetLineNumber();
- promise_reject_msg_column_number = message->GetStartColumn() + 1;
+ promise_reject_msg_line_number = message->GetLineNumber(context).FromJust();
+ promise_reject_msg_column_number =
+ message->GetStartColumn(context).FromJust() + 1;
if (!stack_trace.IsEmpty()) {
promise_reject_frame_count = stack_trace->GetFrameCount();
if (promise_reject_frame_count > 0) {
- CHECK(stack_trace->GetFrame(0)->GetScriptName()->Equals(v8_str("pro")));
+ CHECK(stack_trace->GetFrame(0)
+ ->GetScriptName()
+ ->Equals(context, v8_str("pro"))
+ .FromJust());
promise_reject_line_number = stack_trace->GetFrame(0)->GetLineNumber();
promise_reject_column_number = stack_trace->GetFrame(0)->GetColumn();
} else {
@@ -14919,19 +16655,25 @@ void PromiseRejectCallback(v8::PromiseRejectMessage reject_message) {
}
} else {
promise_revoke_counter++;
- CcTest::global()->Set(v8_str("revoked"), reject_message.GetPromise());
+ global->Set(context, v8_str("revoked"), reject_message.GetPromise())
+ .FromJust();
CHECK(reject_message.GetValue().IsEmpty());
}
}
-v8::Handle<v8::Promise> GetPromise(const char* name) {
- return v8::Handle<v8::Promise>::Cast(CcTest::global()->Get(v8_str(name)));
+v8::Local<v8::Promise> GetPromise(const char* name) {
+ return v8::Local<v8::Promise>::Cast(
+ CcTest::global()
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str(name))
+ .ToLocalChecked());
}
-v8::Handle<v8::Value> RejectValue() {
- return CcTest::global()->Get(v8_str("value"));
+v8::Local<v8::Value> RejectValue() {
+ return CcTest::global()
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str("value"))
+ .ToLocalChecked();
}
@@ -14943,9 +16685,12 @@ void ResetPromiseStates() {
promise_reject_line_number = -1;
promise_reject_column_number = -1;
promise_reject_frame_count = -1;
- CcTest::global()->Set(v8_str("rejected"), v8_str(""));
- CcTest::global()->Set(v8_str("value"), v8_str(""));
- CcTest::global()->Set(v8_str("revoked"), v8_str(""));
+
+ v8::Local<v8::Object> global = CcTest::global();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ global->Set(context, v8_str("rejected"), v8_str("")).FromJust();
+ global->Set(context, v8_str("value"), v8_str("")).FromJust();
+ global->Set(context, v8_str("revoked"), v8_str("")).FromJust();
}
@@ -14984,8 +16729,9 @@ TEST(PromiseRejectCallback) {
CHECK_EQ(1, promise_reject_counter);
CHECK_EQ(0, promise_revoke_counter);
CHECK_EQ(v8::kPromiseRejectWithNoHandler, reject_event);
- CHECK(GetPromise("rejected")->Equals(GetPromise("p1")));
- CHECK(RejectValue()->Equals(v8_str("ppp")));
+ CHECK(
+ GetPromise("rejected")->Equals(env.local(), GetPromise("p1")).FromJust());
+ CHECK(RejectValue()->Equals(env.local(), v8_str("ppp")).FromJust());
// Reject p0 again. Callback is not triggered again.
CompileRun("reject();");
@@ -15001,9 +16747,11 @@ TEST(PromiseRejectCallback) {
CHECK(!GetPromise("p2")->HasHandler());
CHECK_EQ(2, promise_reject_counter);
CHECK_EQ(1, promise_revoke_counter);
- CHECK(GetPromise("rejected")->Equals(GetPromise("p2")));
- CHECK(RejectValue()->Equals(v8_str("ppp")));
- CHECK(GetPromise("revoked")->Equals(GetPromise("p1")));
+ CHECK(
+ GetPromise("rejected")->Equals(env.local(), GetPromise("p2")).FromJust());
+ CHECK(RejectValue()->Equals(env.local(), v8_str("ppp")).FromJust());
+ CHECK(
+ GetPromise("revoked")->Equals(env.local(), GetPromise("p1")).FromJust());
ResetPromiseStates();
@@ -15049,9 +16797,11 @@ TEST(PromiseRejectCallback) {
CHECK(GetPromise("q_")->HasHandler());
CHECK_EQ(2, promise_reject_counter);
CHECK_EQ(1, promise_revoke_counter);
- CHECK(GetPromise("rejected")->Equals(GetPromise("q2")));
- CHECK(GetPromise("revoked")->Equals(GetPromise("q_")));
- CHECK(RejectValue()->Equals(v8_str("qqq")));
+ CHECK(
+ GetPromise("rejected")->Equals(env.local(), GetPromise("q2")).FromJust());
+ CHECK(
+ GetPromise("revoked")->Equals(env.local(), GetPromise("q_")).FromJust());
+ CHECK(RejectValue()->Equals(env.local(), v8_str("qqq")).FromJust());
// Add a reject handler to the resolved q1, which rejects by throwing.
CompileRun(
@@ -15066,8 +16816,9 @@ TEST(PromiseRejectCallback) {
CHECK(!GetPromise("q3")->HasHandler());
CHECK_EQ(3, promise_reject_counter);
CHECK_EQ(1, promise_revoke_counter);
- CHECK(GetPromise("rejected")->Equals(GetPromise("q3")));
- CHECK(RejectValue()->Equals(v8_str("qqqq")));
+ CHECK(
+ GetPromise("rejected")->Equals(env.local(), GetPromise("q3")).FromJust());
+ CHECK(RejectValue()->Equals(env.local(), v8_str("qqqq")).FromJust());
ResetPromiseStates();
@@ -15097,8 +16848,9 @@ TEST(PromiseRejectCallback) {
CHECK(!GetPromise("r3")->HasHandler());
CHECK_EQ(1, promise_reject_counter);
CHECK_EQ(0, promise_revoke_counter);
- CHECK(GetPromise("rejected")->Equals(GetPromise("r2")));
- CHECK(RejectValue()->Equals(v8_str("rrr")));
+ CHECK(
+ GetPromise("rejected")->Equals(env.local(), GetPromise("r2")).FromJust());
+ CHECK(RejectValue()->Equals(env.local(), v8_str("rrr")).FromJust());
// Add reject handler to r2.
CompileRun("var r4 = r2.catch(function() {});");
@@ -15109,8 +16861,9 @@ TEST(PromiseRejectCallback) {
CHECK(!GetPromise("r4")->HasHandler());
CHECK_EQ(1, promise_reject_counter);
CHECK_EQ(1, promise_revoke_counter);
- CHECK(GetPromise("revoked")->Equals(GetPromise("r2")));
- CHECK(RejectValue()->Equals(v8_str("rrr")));
+ CHECK(
+ GetPromise("revoked")->Equals(env.local(), GetPromise("r2")).FromJust());
+ CHECK(RejectValue()->Equals(env.local(), v8_str("rrr")).FromJust());
// Add reject handlers to r4.
CompileRun("var r5 = r4.then(function() {}, function() {});");
@@ -15150,10 +16903,10 @@ TEST(PromiseRejectCallback) {
CHECK(!GetPromise("s3")->HasHandler());
CHECK_EQ(3, promise_reject_counter);
CHECK_EQ(0, promise_revoke_counter);
- CHECK(RejectValue()->Equals(v8_str("sss")));
+ CHECK(RejectValue()->Equals(env.local(), v8_str("sss")).FromJust());
// Test stack frames.
- V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ env->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(true);
ResetPromiseStates();
@@ -15269,15 +17022,15 @@ TEST(PromiseRejectCallback) {
void AnalyzeStackOfEvalWithSourceURL(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(5, stackTrace->GetFrameCount());
- v8::Handle<v8::String> url = v8_str("eval_url");
+ v8::Local<v8::String> url = v8_str("eval_url");
for (int i = 0; i < 3; i++) {
- v8::Handle<v8::String> name =
+ v8::Local<v8::String> name =
stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
- CHECK(url->Equals(name));
+ CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
}
}
@@ -15307,8 +17060,6 @@ TEST(SourceURLInStackTrace) {
i::ScopedVector<char> code(1024);
i::SNPrintF(code, source, "//# sourceURL=eval_url");
CHECK(CompileRun(code.start())->IsUndefined());
- i::SNPrintF(code, source, "//@ sourceURL=eval_url");
- CHECK(CompileRun(code.start())->IsUndefined());
}
@@ -15317,7 +17068,7 @@ static int scriptIdInStack[2];
void AnalyzeScriptIdInStack(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kScriptId);
CHECK_EQ(2, stackTrace->GetFrameCount());
for (int i = 0; i < 2; i++) {
@@ -15334,14 +17085,13 @@ TEST(ScriptIdInStackTrace) {
v8::FunctionTemplate::New(isolate, AnalyzeScriptIdInStack));
LocalContext context(0, templ);
- v8::Handle<v8::String> scriptSource = v8::String::NewFromUtf8(
- isolate,
- "function foo() {\n"
- " AnalyzeScriptIdInStack();"
- "}\n"
- "foo();\n");
+ v8::Local<v8::String> scriptSource = v8_str(
+ "function foo() {\n"
+ " AnalyzeScriptIdInStack();"
+ "}\n"
+ "foo();\n");
v8::Local<v8::Script> script = CompileWithOrigin(scriptSource, "test");
- script->Run();
+ script->Run(context.local()).ToLocalChecked();
for (int i = 0; i < 2; i++) {
CHECK(scriptIdInStack[i] != v8::Message::kNoScriptIdInfo);
CHECK_EQ(scriptIdInStack[i], script->GetUnboundScript()->GetId());
@@ -15352,15 +17102,15 @@ TEST(ScriptIdInStackTrace) {
void AnalyzeStackOfInlineScriptWithSourceURL(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
- v8::Handle<v8::String> url = v8_str("source_url");
+ v8::Local<v8::String> url = v8_str("source_url");
for (int i = 0; i < 3; i++) {
- v8::Handle<v8::String> name =
+ v8::Local<v8::String> name =
stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
- CHECK(url->Equals(name));
+ CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
}
}
@@ -15390,23 +17140,21 @@ TEST(InlineScriptWithSourceURLInStackTrace) {
i::ScopedVector<char> code(1024);
i::SNPrintF(code, source, "//# sourceURL=source_url");
CHECK(CompileRunWithOrigin(code.start(), "url", 0, 1)->IsUndefined());
- i::SNPrintF(code, source, "//@ sourceURL=source_url");
- CHECK(CompileRunWithOrigin(code.start(), "url", 0, 1)->IsUndefined());
}
void AnalyzeStackOfDynamicScriptWithSourceURL(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
- v8::Handle<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
args.GetIsolate(), 10, v8::StackTrace::kDetailed);
CHECK_EQ(4, stackTrace->GetFrameCount());
- v8::Handle<v8::String> url = v8_str("source_url");
+ v8::Local<v8::String> url = v8_str("source_url");
for (int i = 0; i < 3; i++) {
- v8::Handle<v8::String> name =
+ v8::Local<v8::String> name =
stackTrace->GetFrame(i)->GetScriptNameOrSourceURL();
CHECK(!name.IsEmpty());
- CHECK(url->Equals(name));
+ CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
}
}
@@ -15436,8 +17184,6 @@ TEST(DynamicWithSourceURLInStackTrace) {
i::ScopedVector<char> code(1024);
i::SNPrintF(code, source, "//# sourceURL=source_url");
CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
- i::SNPrintF(code, source, "//@ sourceURL=source_url");
- CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
}
@@ -15459,7 +17205,8 @@ TEST(DynamicWithSourceURLInStackTraceString) {
v8::TryCatch try_catch(context->GetIsolate());
CompileRunWithOrigin(code.start(), "", 0, 0);
CHECK(try_catch.HasCaught());
- v8::String::Utf8Value stack(try_catch.StackTrace());
+ v8::String::Utf8Value stack(
+ try_catch.StackTrace(context.local()).ToLocalChecked());
CHECK(strstr(*stack, "at foo (source_url:3:5)") != NULL);
}
@@ -15482,8 +17229,7 @@ TEST(EvalWithSourceURLInMessageScriptResourceNameOrSourceURL) {
CHECK(try_catch.HasCaught());
Local<v8::Message> message = try_catch.Message();
- Handle<Value> sourceURL =
- message->GetScriptOrigin().ResourceName();
+ Local<Value> sourceURL = message->GetScriptOrigin().ResourceName();
CHECK_EQ(0, strcmp(*v8::String::Utf8Value(sourceURL), "source_url"));
}
@@ -15506,8 +17252,7 @@ TEST(RecursionWithSourceURLInMessageScriptResourceNameOrSourceURL) {
CHECK(try_catch.HasCaught());
Local<v8::Message> message = try_catch.Message();
- Handle<Value> sourceURL =
- message->GetScriptOrigin().ResourceName();
+ Local<Value> sourceURL = message->GetScriptOrigin().ResourceName();
CHECK_EQ(0, strcmp(*v8::String::Utf8Value(sourceURL), "source_url"));
}
@@ -15597,8 +17342,10 @@ TEST(SetStackLimit) {
v8::HandleScope scope(env->GetIsolate());
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(env->GetIsolate(), GetStackLimitCallback);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("get_stack_limit"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("get_stack_limit"), fun)
+ .FromJust());
CompileRun("get_stack_limit();");
CHECK(stack_limit == set_limit);
@@ -15619,8 +17366,10 @@ TEST(SetStackLimitInThread) {
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
v8::FunctionTemplate::New(CcTest::isolate(), GetStackLimitCallback);
- Local<Function> fun = fun_templ->GetFunction();
- env->Global()->Set(v8_str("get_stack_limit"), fun);
+ Local<Function> fun = fun_templ->GetFunction(env.local()).ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("get_stack_limit"), fun)
+ .FromJust());
CompileRun("get_stack_limit();");
CHECK(stack_limit == set_limit);
@@ -15653,7 +17402,7 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
}
}
virtual ~VisitorImpl() {}
- virtual void VisitExternalString(v8::Handle<v8::String> string) {
+ virtual void VisitExternalString(v8::Local<v8::String> string) {
if (!string->IsExternal()) {
CHECK(string->IsExternalOneByte());
return;
@@ -15685,7 +17434,9 @@ TEST(ExternalizeOldSpaceTwoByteCons) {
LocalContext env;
v8::HandleScope scope(isolate);
v8::Local<v8::String> cons =
- CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString(isolate);
+ CompileRun("'Romeo Montague ' + 'Juliet Capulet'")
+ ->ToString(env.local())
+ .ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
CcTest::heap()->CollectAllAvailableGarbage();
CHECK(CcTest::heap()->old_space()->Contains(*v8::Utils::OpenHandle(*cons)));
@@ -15707,7 +17458,9 @@ TEST(ExternalizeOldSpaceOneByteCons) {
LocalContext env;
v8::HandleScope scope(isolate);
v8::Local<v8::String> cons =
- CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString(isolate);
+ CompileRun("'Romeo Montague ' + 'Juliet Capulet'")
+ ->ToString(env.local())
+ .ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
CcTest::heap()->CollectAllAvailableGarbage();
CHECK(CcTest::heap()->old_space()->Contains(*v8::Utils::OpenHandle(*cons)));
@@ -15733,21 +17486,26 @@ TEST(VisitExternalStrings) {
TestResource* resource[4];
resource[0] = new TestResource(two_byte_string);
v8::Local<v8::String> string0 =
- v8::String::NewExternal(env->GetIsolate(), resource[0]);
+ v8::String::NewExternalTwoByte(env->GetIsolate(), resource[0])
+ .ToLocalChecked();
resource[1] = new TestResource(two_byte_string, NULL, false);
v8::Local<v8::String> string1 =
- v8::String::NewExternal(env->GetIsolate(), resource[1]);
+ v8::String::NewExternalTwoByte(env->GetIsolate(), resource[1])
+ .ToLocalChecked();
// Externalized symbol.
resource[2] = new TestResource(two_byte_string, NULL, false);
- v8::Local<v8::String> string2 = v8::String::NewFromUtf8(
- env->GetIsolate(), string, v8::String::kInternalizedString);
+ v8::Local<v8::String> string2 =
+ v8::String::NewFromUtf8(env->GetIsolate(), string,
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
CHECK(string2->MakeExternal(resource[2]));
// Symbolized External.
resource[3] = new TestResource(AsciiToTwoByteString("Some other string"));
v8::Local<v8::String> string3 =
- v8::String::NewExternal(env->GetIsolate(), resource[3]);
+ v8::String::NewExternalTwoByte(env->GetIsolate(), resource[3])
+ .ToLocalChecked();
CcTest::heap()->CollectAllAvailableGarbage(); // Tenure string.
// Turn into a symbol.
i::Handle<i::String> string3_i = v8::Utils::OpenHandle(*string3);
@@ -15762,7 +17520,7 @@ TEST(VisitExternalStrings) {
CHECK(string3->IsExternal());
VisitorImpl visitor(resource);
- v8::V8::VisitExternalResources(&visitor);
+ isolate->VisitExternalResources(&visitor);
visitor.CheckVisitedResources();
}
@@ -15777,7 +17535,8 @@ TEST(ExternalStringCollectedAtTearDown) {
const char* s = "One string to test them all, one string to find them.";
TestOneByteResource* inscription =
new TestOneByteResource(i::StrDup(s), &destroyed);
- v8::Local<v8::String> ring = v8::String::NewExternal(isolate, inscription);
+ v8::Local<v8::String> ring =
+ v8::String::NewExternalOneByte(isolate, inscription).ToLocalChecked();
// Ring is still alive. Orcs are roaming freely across our lands.
CHECK_EQ(0, destroyed);
USE(ring);
@@ -15801,7 +17560,8 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
const char* s = "One string to test them all";
TestOneByteResource* inscription =
new TestOneByteResource(i::StrDup(s), &destroyed);
- v8::Local<v8::String> ring = CompileRun("ring")->ToString(isolate);
+ v8::Local<v8::String> ring =
+ CompileRun("ring")->ToString(env.local()).ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
ring->MakeExternal(inscription);
// Ring is still alive. Orcs are roaming freely across our lands.
@@ -15917,8 +17677,8 @@ THREADED_TEST(QuietSignalingNaNs) {
double test_value = test_values[i];
// Check that Number::New preserves non-NaNs and quiets SNaNs.
- v8::Handle<v8::Value> number = v8::Number::New(isolate, test_value);
- double stored_number = number->NumberValue();
+ v8::Local<v8::Value> number = v8::Number::New(isolate, test_value);
+ double stored_number = number->NumberValue(context.local()).FromJust();
if (!std::isnan(test_value)) {
CHECK_EQ(test_value, stored_number);
} else {
@@ -15937,10 +17697,10 @@ THREADED_TEST(QuietSignalingNaNs) {
// Check that Date::New preserves non-NaNs in the date range and
// quiets SNaNs.
- v8::Handle<v8::Value> date =
- v8::Date::New(isolate, test_value);
+ v8::Local<v8::Value> date =
+ v8::Date::New(context.local(), test_value).ToLocalChecked();
double expected_stored_date = DoubleToDateTime(test_value);
- double stored_date = date->NumberValue();
+ double stored_date = date->NumberValue(context.local()).FromJust();
if (!std::isnan(expected_stored_date)) {
CHECK_EQ(expected_stored_date, stored_date);
} else {
@@ -15964,7 +17724,8 @@ static void SpaghettiIncident(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(args.GetIsolate());
v8::TryCatch tc(args.GetIsolate());
- v8::Handle<v8::String> str(args[0]->ToString(args.GetIsolate()));
+ v8::MaybeLocal<v8::String> str(
+ args[0]->ToString(args.GetIsolate()->GetCurrentContext()));
USE(str);
if (tc.HasCaught())
tc.ReThrow();
@@ -15977,9 +17738,12 @@ THREADED_TEST(SpaghettiStackReThrow) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext context;
- context->Global()->Set(
- v8::String::NewFromUtf8(isolate, "s"),
- v8::FunctionTemplate::New(isolate, SpaghettiIncident)->GetFunction());
+ context->Global()
+ ->Set(context.local(), v8_str("s"),
+ v8::FunctionTemplate::New(isolate, SpaghettiIncident)
+ ->GetFunction(context.local())
+ .ToLocalChecked())
+ .FromJust();
v8::TryCatch try_catch(isolate);
CompileRun(
"var i = 0;"
@@ -16020,7 +17784,7 @@ TEST(Regress528) {
v8::Local<Context> context = Context::New(isolate);
context->Enter();
- Local<v8::String> obj = v8::String::NewFromUtf8(isolate, "");
+ Local<v8::String> obj = v8_str("");
context->SetEmbedderData(0, obj);
CompileRun(source_simple);
context->Exit();
@@ -16069,9 +17833,9 @@ TEST(Regress528) {
v8::TryCatch try_catch(isolate);
CompileRun(source_exception);
CHECK(try_catch.HasCaught());
- v8::Handle<v8::Message> message = try_catch.Message();
+ v8::Local<v8::Message> message = try_catch.Message();
CHECK(!message.IsEmpty());
- CHECK_EQ(1, message->GetLineNumber());
+ CHECK_EQ(1, message->GetLineNumber(context).FromJust());
context->Exit();
}
isolate->ContextDisposedNotification();
@@ -16093,24 +17857,26 @@ THREADED_TEST(ScriptOrigin) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::ScriptOrigin origin = v8::ScriptOrigin(
- v8::String::NewFromUtf8(env->GetIsolate(), "test"),
- v8::Integer::New(env->GetIsolate(), 1),
+ v8_str("test"), v8::Integer::New(env->GetIsolate(), 1),
v8::Integer::New(env->GetIsolate(), 1), v8::True(env->GetIsolate()),
- v8::Handle<v8::Integer>(), v8::True(env->GetIsolate()),
- v8::String::NewFromUtf8(env->GetIsolate(), "http://sourceMapUrl"),
- v8::True(env->GetIsolate()));
- v8::Handle<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(), "function f() {}\n\nfunction g() {}");
- v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::Integer>(), v8::True(env->GetIsolate()),
+ v8_str("http://sourceMapUrl"), v8::True(env->GetIsolate()));
+ v8::Local<v8::String> script = v8_str("function f() {}\n\nfunction g() {}");
+ v8::Script::Compile(env.local(), script, &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()->Get(env.local(), v8_str("g")).ToLocalChecked());
v8::ScriptOrigin script_origin_f = f->GetScriptOrigin();
CHECK_EQ(0, strcmp("test",
*v8::String::Utf8Value(script_origin_f.ResourceName())));
- CHECK_EQ(1, script_origin_f.ResourceLineOffset()->Int32Value());
+ CHECK_EQ(
+ 1,
+ script_origin_f.ResourceLineOffset()->Int32Value(env.local()).FromJust());
CHECK(script_origin_f.Options().IsSharedCrossOrigin());
CHECK(script_origin_f.Options().IsEmbedderDebugScript());
CHECK(script_origin_f.Options().IsOpaque());
@@ -16122,7 +17888,9 @@ THREADED_TEST(ScriptOrigin) {
v8::ScriptOrigin script_origin_g = g->GetScriptOrigin();
CHECK_EQ(0, strcmp("test",
*v8::String::Utf8Value(script_origin_g.ResourceName())));
- CHECK_EQ(1, script_origin_g.ResourceLineOffset()->Int32Value());
+ CHECK_EQ(
+ 1,
+ script_origin_g.ResourceLineOffset()->Int32Value(env.local()).FromJust());
CHECK(script_origin_g.Options().IsSharedCrossOrigin());
CHECK(script_origin_g.Options().IsEmbedderDebugScript());
CHECK(script_origin_g.Options().IsOpaque());
@@ -16134,19 +17902,98 @@ THREADED_TEST(ScriptOrigin) {
THREADED_TEST(FunctionGetInferredName) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Handle<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "var foo = { bar : { baz : function() {}}}; var f = foo.bar.baz;");
- v8::Script::Compile(script, &origin)->Run();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Local<v8::String> script =
+ v8_str("var foo = { bar : { baz : function() {}}}; var f = foo.bar.baz;");
+ v8::Script::Compile(env.local(), script, &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
CHECK_EQ(0,
strcmp("foo.bar.baz", *v8::String::Utf8Value(f->GetInferredName())));
}
+THREADED_TEST(FunctionGetDebugName) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ const char* code =
+ "var error = false;"
+ "function a() { this.x = 1; };"
+ "a.displayName = 'display_a';"
+ "var b = (function() {"
+ " var f = function() { this.x = 2; };"
+ " f.displayName = 'display_b';"
+ " return f;"
+ "})();"
+ "var c = function() {};"
+ "c.__defineGetter__('displayName', function() {"
+ " error = true;"
+ " throw new Error();"
+ "});"
+ "function d() {};"
+ "d.__defineGetter__('displayName', function() {"
+ " error = true;"
+ " return 'wrong_display_name';"
+ "});"
+ "function e() {};"
+ "e.displayName = 'wrong_display_name';"
+ "e.__defineSetter__('displayName', function() {"
+ " error = true;"
+ " throw new Error();"
+ "});"
+ "function f() {};"
+ "f.displayName = { 'foo': 6, toString: function() {"
+ " error = true;"
+ " return 'wrong_display_name';"
+ "}};"
+ "var g = function() {"
+ " arguments.callee.displayName = 'set_in_runtime';"
+ "}; g();"
+ "var h = function() {};"
+ "h.displayName = 'displayName';"
+ "Object.defineProperty(h, 'name', { value: 'function.name' });"
+ "var i = function() {};"
+ "i.displayName = 239;"
+ "Object.defineProperty(i, 'name', { value: 'function.name' });"
+ "var j = function() {};"
+ "Object.defineProperty(j, 'name', { value: 'function.name' });"
+ "var foo = { bar : { baz : function() {}}}; var k = foo.bar.baz;";
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Script::Compile(env.local(), v8_str(code), &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
+ v8::Local<v8::Value> error =
+ env->Global()->Get(env.local(), v8_str("error")).ToLocalChecked();
+ CHECK_EQ(false, error->BooleanValue(env.local()).FromJust());
+ const char* functions[] = {"a", "display_a",
+ "b", "display_b",
+ "c", "c",
+ "d", "d",
+ "e", "e",
+ "f", "f",
+ "g", "set_in_runtime",
+ "h", "displayName",
+ "i", "function.name",
+ "j", "function.name",
+ "k", "foo.bar.baz"};
+ for (size_t i = 0; i < sizeof(functions) / sizeof(functions[0]) / 2; ++i) {
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ env->Global()
+ ->Get(env.local(),
+ v8::String::NewFromUtf8(env->GetIsolate(), functions[i * 2],
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked());
+ CHECK_EQ(0, strcmp(functions[i * 2 + 1],
+ *v8::String::Utf8Value(f->GetDebugName())));
+ }
+}
+
+
THREADED_TEST(FunctionGetDisplayName) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -16182,27 +18029,28 @@ THREADED_TEST(FunctionGetDisplayName) {
"var g = function() {"
" arguments.callee.displayName = 'set_in_runtime';"
"}; g();";
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), code), &origin)
- ->Run();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Script::Compile(env.local(), v8_str(code), &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
v8::Local<v8::Value> error =
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "error"));
+ env->Global()->Get(env.local(), v8_str("error")).ToLocalChecked();
v8::Local<v8::Function> a = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "a")));
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
v8::Local<v8::Function> b = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "b")));
+ env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked());
v8::Local<v8::Function> c = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "c")));
+ env->Global()->Get(env.local(), v8_str("c")).ToLocalChecked());
v8::Local<v8::Function> d = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "d")));
+ env->Global()->Get(env.local(), v8_str("d")).ToLocalChecked());
v8::Local<v8::Function> e = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "e")));
+ env->Global()->Get(env.local(), v8_str("e")).ToLocalChecked());
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
- CHECK_EQ(false, error->BooleanValue());
+ env->Global()->Get(env.local(), v8_str("g")).ToLocalChecked());
+ CHECK_EQ(false, error->BooleanValue(env.local()).FromJust());
CHECK_EQ(0, strcmp("display_a", *v8::String::Utf8Value(a->GetDisplayName())));
CHECK_EQ(0, strcmp("display_b", *v8::String::Utf8Value(b->GetDisplayName())));
CHECK(c->GetDisplayName()->IsUndefined());
@@ -16217,15 +18065,16 @@ THREADED_TEST(FunctionGetDisplayName) {
THREADED_TEST(ScriptLineNumber) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Handle<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(), "function f() {}\n\nfunction g() {}");
- v8::Script::Compile(script, &origin)->Run();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Local<v8::String> script = v8_str("function f() {}\n\nfunction g() {}");
+ v8::Script::Compile(env.local(), script, &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()->Get(env.local(), v8_str("g")).ToLocalChecked());
CHECK_EQ(0, f->GetScriptLineNumber());
CHECK_EQ(2, g->GetScriptLineNumber());
}
@@ -16236,16 +18085,18 @@ THREADED_TEST(ScriptColumnNumber) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"),
- v8::Integer::New(isolate, 3),
+ v8::ScriptOrigin(v8_str("test"), v8::Integer::New(isolate, 3),
v8::Integer::New(isolate, 2));
- v8::Handle<v8::String> script = v8::String::NewFromUtf8(
- isolate, "function foo() {}\n\n function bar() {}");
- v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::String> script =
+ v8_str("function foo() {}\n\n function bar() {}");
+ v8::Script::Compile(env.local(), script, &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "foo")));
+ env->Global()->Get(env.local(), v8_str("foo")).ToLocalChecked());
v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "bar")));
+ env->Global()->Get(env.local(), v8_str("bar")).ToLocalChecked());
CHECK_EQ(14, foo->GetScriptColumnNumber());
CHECK_EQ(17, bar->GetScriptColumnNumber());
}
@@ -16274,17 +18125,17 @@ THREADED_TEST(FunctionGetScriptId) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(isolate, "test"),
- v8::Integer::New(isolate, 3),
+ v8::ScriptOrigin(v8_str("test"), v8::Integer::New(isolate, 3),
v8::Integer::New(isolate, 2));
- v8::Handle<v8::String> scriptSource = v8::String::NewFromUtf8(
- isolate, "function foo() {}\n\n function bar() {}");
- v8::Local<v8::Script> script(v8::Script::Compile(scriptSource, &origin));
- script->Run();
+ v8::Local<v8::String> scriptSource =
+ v8_str("function foo() {}\n\n function bar() {}");
+ v8::Local<v8::Script> script(
+ v8::Script::Compile(env.local(), scriptSource, &origin).ToLocalChecked());
+ script->Run(env.local()).ToLocalChecked();
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "foo")));
+ env->Global()->Get(env.local(), v8_str("foo")).ToLocalChecked());
v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "bar")));
+ env->Global()->Get(env.local(), v8_str("bar")).ToLocalChecked());
CHECK_EQ(script->GetUnboundScript()->GetId(), foo->ScriptId());
CHECK_EQ(script->GetUnboundScript()->GetId(), bar->ScriptId());
}
@@ -16293,24 +18144,27 @@ THREADED_TEST(FunctionGetScriptId) {
THREADED_TEST(FunctionGetBoundFunction) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::NewFromUtf8(
- env->GetIsolate(), "test"));
- v8::Handle<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
+ v8::Local<v8::String> script = v8_str(
"var a = new Object();\n"
"a.x = 1;\n"
"function f () { return this.x };\n"
"var g = f.bind(a);\n"
"var b = g();");
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(env.local(), script, &origin)
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(env.local(), v8_str("f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()->Get(env.local(), v8_str("g")).ToLocalChecked());
CHECK(g->GetBoundFunction()->IsFunction());
Local<v8::Function> original_function = Local<v8::Function>::Cast(
g->GetBoundFunction());
- CHECK(f->GetName()->Equals(original_function->GetName()));
+ CHECK(f->GetName()
+ ->Equals(env.local(), original_function->GetName())
+ .FromJust());
CHECK_EQ(f->GetScriptLineNumber(), original_function->GetScriptLineNumber());
CHECK_EQ(f->GetScriptColumnNumber(),
original_function->GetScriptColumnNumber());
@@ -16332,7 +18186,9 @@ static void SetterWhichSetsYOnThisTo23(
const v8::PropertyCallbackInfo<void>& info) {
CHECK(v8::Utils::OpenHandle(*info.This())->IsJSObject());
CHECK(v8::Utils::OpenHandle(*info.Holder())->IsJSObject());
- Local<Object>::Cast(info.This())->Set(v8_str("y"), v8_num(23));
+ Local<Object>::Cast(info.This())
+ ->Set(info.GetIsolate()->GetCurrentContext(), v8_str("y"), v8_num(23))
+ .FromJust();
}
@@ -16340,7 +18196,10 @@ void FooGetInterceptor(Local<Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
CHECK(v8::Utils::OpenHandle(*info.This())->IsJSObject());
CHECK(v8::Utils::OpenHandle(*info.Holder())->IsJSObject());
- if (!name->Equals(v8_str("foo"))) return;
+ if (!name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust()) {
+ return;
+ }
info.GetReturnValue().Set(v8_num(42));
}
@@ -16349,8 +18208,13 @@ void FooSetInterceptor(Local<Name> name, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
CHECK(v8::Utils::OpenHandle(*info.This())->IsJSObject());
CHECK(v8::Utils::OpenHandle(*info.Holder())->IsJSObject());
- if (!name->Equals(v8_str("foo"))) return;
- Local<Object>::Cast(info.This())->Set(v8_str("y"), v8_num(23));
+ if (!name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust()) {
+ return;
+ }
+ Local<Object>::Cast(info.This())
+ ->Set(info.GetIsolate()->GetCurrentContext(), v8_str("y"), v8_num(23))
+ .FromJust();
info.GetReturnValue().Set(v8_num(23));
}
@@ -16362,7 +18226,10 @@ TEST(SetterOnConstructorPrototype) {
templ->SetAccessor(v8_str("x"), GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
LocalContext context;
- context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("P"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("function C1() {"
" this.x = 23;"
"};"
@@ -16376,16 +18243,30 @@ TEST(SetterOnConstructorPrototype) {
v8::Local<v8::Script> script;
script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
- v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
- CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
+ v8::Local<v8::Object> c1 = v8::Local<v8::Object>::Cast(
+ script->Run(context.local()).ToLocalChecked());
+ CHECK_EQ(42, c1->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(23, c1->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
-script = v8_compile("new C2();");
+ script = v8_compile("new C2();");
for (int i = 0; i < 10; i++) {
- v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
- CHECK_EQ(42, c2->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(23, c2->Get(v8_str("y"))->Int32Value());
+ v8::Local<v8::Object> c2 = v8::Local<v8::Object>::Cast(
+ script->Run(context.local()).ToLocalChecked());
+ CHECK_EQ(42, c2->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(23, c2->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
}
@@ -16399,8 +18280,11 @@ static void NamedPropertyGetterWhichReturns42(
static void NamedPropertySetterWhichSetsYOnThisTo23(
Local<Name> name, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- if (name->Equals(v8_str("x"))) {
- Local<Object>::Cast(info.This())->Set(v8_str("y"), v8_num(23));
+ if (name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("x"))
+ .FromJust()) {
+ Local<Object>::Cast(info.This())
+ ->Set(info.GetIsolate()->GetCurrentContext(), v8_str("y"), v8_num(23))
+ .FromJust();
}
}
@@ -16413,7 +18297,10 @@ THREADED_TEST(InterceptorOnConstructorPrototype) {
NamedPropertyGetterWhichReturns42,
NamedPropertySetterWhichSetsYOnThisTo23));
LocalContext context;
- context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("P"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("function C1() {"
" this.x = 23;"
"};"
@@ -16427,16 +18314,30 @@ THREADED_TEST(InterceptorOnConstructorPrototype) {
v8::Local<v8::Script> script;
script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
- v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
- CHECK_EQ(23, c1->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(42, c1->Get(v8_str("y"))->Int32Value());
+ v8::Local<v8::Object> c1 = v8::Local<v8::Object>::Cast(
+ script->Run(context.local()).ToLocalChecked());
+ CHECK_EQ(23, c1->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(42, c1->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
script = v8_compile("new C2();");
for (int i = 0; i < 10; i++) {
- v8::Handle<v8::Object> c2 = v8::Handle<v8::Object>::Cast(script->Run());
- CHECK_EQ(23, c2->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(42, c2->Get(v8_str("y"))->Int32Value());
+ v8::Local<v8::Object> c2 = v8::Local<v8::Object>::Cast(
+ script->Run(context.local()).ToLocalChecked());
+ CHECK_EQ(23, c2->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(42, c2->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
}
@@ -16454,8 +18355,10 @@ TEST(Regress618) {
// Use a simple object as prototype.
v8::Local<v8::Object> prototype = v8::Object::New(isolate);
- prototype->Set(v8_str("y"), v8_num(42));
- context->Global()->Set(v8_str("P"), prototype);
+ prototype->Set(context.local(), v8_str("y"), v8_num(42)).FromJust();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("P"), prototype)
+ .FromJust());
// This compile will add the code to the compilation cache.
CompileRun(source);
@@ -16464,25 +18367,42 @@ TEST(Regress618) {
// Allow enough iterations for the inobject slack tracking logic
// to finalize instance size and install the fast construct stub.
for (int i = 0; i < 256; i++) {
- v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
- CHECK_EQ(23, c1->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(42, c1->Get(v8_str("y"))->Int32Value());
+ v8::Local<v8::Object> c1 = v8::Local<v8::Object>::Cast(
+ script->Run(context.local()).ToLocalChecked());
+ CHECK_EQ(23, c1->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(42, c1->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
// Use an API object with accessors as prototype.
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
- context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("P"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
// This compile will get the code from the compilation cache.
CompileRun(source);
script = v8_compile("new C1();");
for (int i = 0; i < 10; i++) {
- v8::Handle<v8::Object> c1 = v8::Handle<v8::Object>::Cast(script->Run());
- CHECK_EQ(42, c1->Get(v8_str("x"))->Int32Value());
- CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value());
+ v8::Local<v8::Object> c1 = v8::Local<v8::Object>::Cast(
+ script->Run(context.local()).ToLocalChecked());
+ CHECK_EQ(42, c1->Get(context.local(), v8_str("x"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(23, c1->Get(context.local(), v8_str("y"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
}
@@ -16494,12 +18414,6 @@ int epilogue_call_count_second = 0;
int prologue_call_count_alloc = 0;
int epilogue_call_count_alloc = 0;
-void PrologueCallback(v8::GCType, v8::GCCallbackFlags flags) {
- CHECK_EQ(flags, v8::kNoGCCallbackFlags);
- ++prologue_call_count;
-}
-
-
void PrologueCallback(v8::Isolate* isolate,
v8::GCType,
v8::GCCallbackFlags flags) {
@@ -16508,13 +18422,6 @@ void PrologueCallback(v8::Isolate* isolate,
++prologue_call_count;
}
-
-void EpilogueCallback(v8::GCType, v8::GCCallbackFlags flags) {
- CHECK_EQ(flags, v8::kNoGCCallbackFlags);
- ++epilogue_call_count;
-}
-
-
void EpilogueCallback(v8::Isolate* isolate,
v8::GCType,
v8::GCCallbackFlags flags) {
@@ -16524,12 +18431,6 @@ void EpilogueCallback(v8::Isolate* isolate,
}
-void PrologueCallbackSecond(v8::GCType, v8::GCCallbackFlags flags) {
- CHECK_EQ(flags, v8::kNoGCCallbackFlags);
- ++prologue_call_count_second;
-}
-
-
void PrologueCallbackSecond(v8::Isolate* isolate,
v8::GCType,
v8::GCCallbackFlags flags) {
@@ -16539,12 +18440,6 @@ void PrologueCallbackSecond(v8::Isolate* isolate,
}
-void EpilogueCallbackSecond(v8::GCType, v8::GCCallbackFlags flags) {
- CHECK_EQ(flags, v8::kNoGCCallbackFlags);
- ++epilogue_call_count_second;
-}
-
-
void EpilogueCallbackSecond(v8::Isolate* isolate,
v8::GCType,
v8::GCCallbackFlags flags) {
@@ -16597,29 +18492,31 @@ void EpilogueCallbackAlloc(v8::Isolate* isolate,
TEST(GCCallbacksOld) {
LocalContext context;
- v8::V8::AddGCPrologueCallback(PrologueCallback);
- v8::V8::AddGCEpilogueCallback(EpilogueCallback);
+ gc_callbacks_isolate = context->GetIsolate();
+
+ context->GetIsolate()->AddGCPrologueCallback(PrologueCallback);
+ context->GetIsolate()->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
- v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
- v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
+ context->GetIsolate()->AddGCPrologueCallback(PrologueCallbackSecond);
+ context->GetIsolate()->AddGCEpilogueCallback(EpilogueCallbackSecond);
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
- v8::V8::RemoveGCPrologueCallback(PrologueCallback);
- v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
+ context->GetIsolate()->RemoveGCPrologueCallback(PrologueCallback);
+ context->GetIsolate()->RemoveGCEpilogueCallback(EpilogueCallback);
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
- v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
- v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
+ context->GetIsolate()->RemoveGCPrologueCallback(PrologueCallbackSecond);
+ context->GetIsolate()->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
CcTest::heap()->CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
@@ -16726,17 +18623,17 @@ THREADED_TEST(TwoByteStringInOneByteCons) {
// Atom RegExp.
Local<Value> reresult = CompileRun("str2.match(/abel/g).length;");
- CHECK_EQ(6, reresult->Int32Value());
+ CHECK_EQ(6, reresult->Int32Value(context.local()).FromJust());
// Nonatom RegExp.
reresult = CompileRun("str2.match(/abe./g).length;");
- CHECK_EQ(6, reresult->Int32Value());
+ CHECK_EQ(6, reresult->Int32Value(context.local()).FromJust());
reresult = CompileRun("str2.search(/bel/g);");
- CHECK_EQ(1, reresult->Int32Value());
+ CHECK_EQ(1, reresult->Int32Value(context.local()).FromJust());
reresult = CompileRun("str2.search(/be./g);");
- CHECK_EQ(1, reresult->Int32Value());
+ CHECK_EQ(1, reresult->Int32Value(context.local()).FromJust());
ExpectTrue("/bel/g.test(str2);");
@@ -16759,7 +18656,8 @@ THREADED_TEST(TwoByteStringInOneByteCons) {
ExpectObject("str2.lastIndexOf('dab');", lastindexof);
reresult = CompileRun("str2.charCodeAt(2);");
- CHECK_EQ(static_cast<int32_t>('e'), reresult->Int32Value());
+ CHECK_EQ(static_cast<int32_t>('e'),
+ reresult->Int32Value(context.local()).FromJust());
}
@@ -16781,27 +18679,31 @@ TEST(ContainsOnlyOneByte) {
}
string_contents[length-1] = 0;
// Simple case.
- Handle<String> string =
- String::NewExternal(isolate,
- new TestResource(string_contents, NULL, false));
+ Local<String> string =
+ String::NewExternalTwoByte(isolate,
+ new TestResource(string_contents, NULL, false))
+ .ToLocalChecked();
CHECK(!string->IsOneByte() && string->ContainsOnlyOneByte());
// Counter example.
- string = String::NewFromTwoByte(isolate, string_contents);
+ string = String::NewFromTwoByte(isolate, string_contents,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
// Test left right and balanced cons strings.
- Handle<String> base = String::NewFromUtf8(isolate, "a");
- Handle<String> left = base;
- Handle<String> right = base;
+ Local<String> base = v8_str("a");
+ Local<String> left = base;
+ Local<String> right = base;
for (int i = 0; i < 1000; i++) {
left = String::Concat(base, left);
right = String::Concat(right, base);
}
- Handle<String> balanced = String::Concat(left, base);
+ Local<String> balanced = String::Concat(left, base);
balanced = String::Concat(balanced, right);
- Handle<String> cons_strings[] = {left, balanced, right};
- Handle<String> two_byte =
- String::NewExternal(isolate,
- new TestResource(string_contents, NULL, false));
+ Local<String> cons_strings[] = {left, balanced, right};
+ Local<String> two_byte =
+ String::NewExternalTwoByte(isolate,
+ new TestResource(string_contents, NULL, false))
+ .ToLocalChecked();
USE(two_byte); USE(cons_strings);
for (size_t i = 0; i < arraysize(cons_strings); i++) {
// Base assumptions.
@@ -16822,9 +18724,10 @@ TEST(ContainsOnlyOneByte) {
for (int i = 0; i < size; i++) {
int shift = 8 + (i % 7);
string_contents[alignment + i] = 1 << shift;
- string = String::NewExternal(
- isolate,
- new TestResource(string_contents + alignment, NULL, false));
+ string = String::NewExternalTwoByte(
+ isolate,
+ new TestResource(string_contents + alignment, NULL, false))
+ .ToLocalChecked();
CHECK_EQ(size, string->Length());
CHECK(!string->ContainsOnlyOneByte());
string_contents[alignment + i] = 0x41;
@@ -16850,26 +18753,31 @@ TEST(GCInFailedAccessCheckCallback) {
// invocation. Then force the callback to be called from va
v8::V8::Initialize();
- v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckCallbackGC);
-
v8::Isolate* isolate = CcTest::isolate();
+
+ isolate->SetFailedAccessCheckCallbackFunction(&FailedAccessCheckCallbackGC);
+
v8::HandleScope scope(isolate);
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
- context0->Global()->Set(v8_str("x"), v8_num(42));
- v8::Handle<v8::Object> global0 = context0->Global();
+ CHECK(context0->Global()
+ ->Set(context0.local(), v8_str("x"), v8_num(42))
+ .FromJust());
+ v8::Local<v8::Object> global0 = context0->Global();
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
LocalContext context1(NULL, global_template);
- context1->Global()->Set(v8_str("other"), global0);
+ CHECK(context1->Global()
+ ->Set(context1.local(), v8_str("other"), global0)
+ .FromJust());
v8::TryCatch try_catch(isolate);
@@ -16909,11 +18817,16 @@ TEST(GCInFailedAccessCheckCallback) {
try_catch.Reset();
// Delete element.
- CHECK_EQ(false, global0->Delete(0));
+ CHECK(global0->Delete(context1.local(), 0).IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
// DefineAccessor.
- CHECK_EQ(false,
- global0->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("x")));
+ CHECK(global0->SetAccessor(context1.local(), v8_str("x"), GetXValue, NULL,
+ v8_str("x"))
+ .IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
// Define JavaScript accessor.
CHECK(CompileRun(
@@ -16936,13 +18849,23 @@ TEST(GCInFailedAccessCheckCallback) {
CHECK(try_catch.HasCaught());
try_catch.Reset();
- CHECK_EQ(false, global0->HasRealIndexedProperty(0));
- CHECK_EQ(false, global0->HasRealNamedProperty(v8_str("x")));
- CHECK_EQ(false, global0->HasRealNamedCallbackProperty(v8_str("x")));
+ CHECK(global0->HasRealIndexedProperty(context1.local(), 0).IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+
+ CHECK(
+ global0->HasRealNamedProperty(context1.local(), v8_str("x")).IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+
+ CHECK(global0->HasRealNamedCallbackProperty(context1.local(), v8_str("x"))
+ .IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
// Reset the failed access check callback so it does not influence
// the other tests.
- v8::V8::SetFailedAccessCheckCallbackFunction(NULL);
+ isolate->SetFailedAccessCheckCallbackFunction(NULL);
}
@@ -16955,7 +18878,7 @@ TEST(IsolateNewDispose) {
CHECK(current_isolate != isolate);
CHECK(current_isolate == CcTest::isolate());
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ isolate->SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = NULL;
isolate->Dispose();
CHECK(!last_location);
@@ -16973,7 +18896,7 @@ UNINITIALIZED_TEST(DisposeIsolateWhenInUse) {
LocalContext context(isolate);
// Run something in this isolate.
ExpectTrue("true");
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ isolate->SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = NULL;
// Still entered, should fail.
isolate->Dispose();
@@ -17137,7 +19060,7 @@ TEST(RunTwoIsolatesOnSingleThread) {
context1.Reset();
isolate1->Exit();
- v8::V8::SetFatalErrorHandler(StoringErrorCallback);
+ isolate2->SetFatalErrorHandler(StoringErrorCallback);
last_location = last_message = NULL;
isolate1->Dispose();
@@ -17171,7 +19094,7 @@ static int CalcFibonacci(v8::Isolate* isolate, int limit) {
"fib(%d)", limit);
Local<Value> value = CompileRun(code.start());
CHECK(value->IsNumber());
- return static_cast<int>(value->NumberValue());
+ return static_cast<int>(value->NumberValue(context.local()).FromJust());
}
class IsolateThread : public v8::base::Thread {
@@ -17230,7 +19153,7 @@ TEST(IsolateDifferentContexts) {
v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("2");
CHECK(v->IsNumber());
- CHECK_EQ(2, static_cast<int>(v->NumberValue()));
+ CHECK_EQ(2, static_cast<int>(v->NumberValue(context).FromJust()));
}
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -17239,7 +19162,7 @@ TEST(IsolateDifferentContexts) {
v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("22");
CHECK(v->IsNumber());
- CHECK_EQ(22, static_cast<int>(v->NumberValue()));
+ CHECK_EQ(22, static_cast<int>(v->NumberValue(context).FromJust()));
}
isolate->Dispose();
}
@@ -17281,7 +19204,7 @@ class InitDefaultIsolateThread : public v8::base::Thread {
break;
case SetFatalHandler:
- v8::V8::SetFatalErrorHandler(NULL);
+ isolate->SetFatalErrorHandler(NULL);
break;
case SetCounterFunction:
@@ -17462,11 +19385,12 @@ class Visitor42 : public v8::PersistentHandleVisitor {
CHECK_EQ(42, value->WrapperClassId());
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::Value> handle = v8::Local<v8::Value>::New(isolate, *value);
- v8::Handle<v8::Value> object =
- v8::Local<v8::Object>::New(isolate, *object_);
+ v8::Local<v8::Value> handle = v8::Local<v8::Value>::New(isolate, *value);
+ v8::Local<v8::Value> object = v8::Local<v8::Object>::New(isolate, *object_);
CHECK(handle->IsObject());
- CHECK(Handle<Object>::Cast(handle)->Equals(object));
+ CHECK(Local<Object>::Cast(handle)
+ ->Equals(isolate->GetCurrentContext(), object)
+ .FromJust());
++counter_;
}
@@ -17485,7 +19409,7 @@ TEST(PersistentHandleVisitor) {
CHECK_EQ(42, object.WrapperClassId());
Visitor42 visitor(&object);
- v8::V8::VisitHandlesWithClassIds(isolate, &visitor);
+ isolate->VisitHandlesWithClassIds(&visitor);
CHECK_EQ(1, visitor.counter_);
object.Reset();
@@ -17522,7 +19446,7 @@ TEST(PersistentHandleInNewSpaceVisitor) {
CHECK_EQ(42, object2.WrapperClassId());
Visitor42 visitor(&object2);
- v8::V8::VisitHandlesForPartialDependence(isolate, &visitor);
+ isolate->VisitHandlesForPartialDependence(&visitor);
CHECK_EQ(1, visitor.counter_);
object1.Reset();
@@ -17531,38 +19455,53 @@ TEST(PersistentHandleInNewSpaceVisitor) {
TEST(RegExp) {
+ i::FLAG_harmony_regexps = true;
+ i::FLAG_harmony_unicode_regexps = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::RegExp> re = v8::RegExp::New(v8_str("foo"), v8::RegExp::kNone);
+ v8::Local<v8::RegExp> re =
+ v8::RegExp::New(context.local(), v8_str("foo"), v8::RegExp::kNone)
+ .ToLocalChecked();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("foo")));
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("foo")).FromJust());
CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
- re = v8::RegExp::New(v8_str("bar"),
+ re = v8::RegExp::New(context.local(), v8_str("bar"),
static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
- v8::RegExp::kGlobal));
+ v8::RegExp::kGlobal))
+ .ToLocalChecked();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("bar")));
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("bar")).FromJust());
CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kGlobal,
static_cast<int>(re->GetFlags()));
- re = v8::RegExp::New(v8_str("baz"),
+ re = v8::RegExp::New(context.local(), v8_str("baz"),
static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
- v8::RegExp::kMultiline));
+ v8::RegExp::kMultiline))
+ .ToLocalChecked();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("baz")));
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("baz")).FromJust());
CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline,
static_cast<int>(re->GetFlags()));
+ re = v8::RegExp::New(context.local(), v8_str("baz"),
+ static_cast<v8::RegExp::Flags>(v8::RegExp::kUnicode |
+ v8::RegExp::kSticky))
+ .ToLocalChecked();
+ CHECK(re->IsRegExp());
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("baz")).FromJust());
+ CHECK_EQ(v8::RegExp::kUnicode | v8::RegExp::kSticky,
+ static_cast<int>(re->GetFlags()));
+
re = CompileRun("/quux/").As<v8::RegExp>();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("quux")));
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("quux")).FromJust());
CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
re = CompileRun("/quux/gm").As<v8::RegExp>();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("quux")));
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("quux")).FromJust());
CHECK_EQ(v8::RegExp::kGlobal | v8::RegExp::kMultiline,
static_cast<int>(re->GetFlags()));
@@ -17570,32 +19509,39 @@ TEST(RegExp) {
// still works.
CompileRun("RegExp = function() {}");
- re = v8::RegExp::New(v8_str("foobar"), v8::RegExp::kNone);
+ re = v8::RegExp::New(context.local(), v8_str("foobar"), v8::RegExp::kNone)
+ .ToLocalChecked();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("foobar")));
+ CHECK(re->GetSource()->Equals(context.local(), v8_str("foobar")).FromJust());
CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
- re = v8::RegExp::New(v8_str("foobarbaz"),
+ re = v8::RegExp::New(context.local(), v8_str("foobarbaz"),
static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
- v8::RegExp::kMultiline));
+ v8::RegExp::kMultiline))
+ .ToLocalChecked();
CHECK(re->IsRegExp());
- CHECK(re->GetSource()->Equals(v8_str("foobarbaz")));
+ CHECK(
+ re->GetSource()->Equals(context.local(), v8_str("foobarbaz")).FromJust());
CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline,
static_cast<int>(re->GetFlags()));
- context->Global()->Set(v8_str("re"), re);
+ CHECK(context->Global()->Set(context.local(), v8_str("re"), re).FromJust());
ExpectTrue("re.test('FoobarbaZ')");
// RegExps are objects on which you can set properties.
- re->Set(v8_str("property"), v8::Integer::New(context->GetIsolate(), 32));
- v8::Handle<v8::Value> value(CompileRun("re.property"));
- CHECK_EQ(32, value->Int32Value());
+ re->Set(context.local(), v8_str("property"),
+ v8::Integer::New(context->GetIsolate(), 32))
+ .FromJust();
+ v8::Local<v8::Value> value(CompileRun("re.property"));
+ CHECK_EQ(32, value->Int32Value(context.local()).FromJust());
v8::TryCatch try_catch(context->GetIsolate());
- re = v8::RegExp::New(v8_str("foo["), v8::RegExp::kNone);
- CHECK(re.IsEmpty());
+ CHECK(v8::RegExp::New(context.local(), v8_str("foo["), v8::RegExp::kNone)
+ .IsEmpty());
CHECK(try_catch.HasCaught());
- context->Global()->Set(v8_str("ex"), try_catch.Exception());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("ex"), try_catch.Exception())
+ .FromJust());
ExpectTrue("ex instanceof SyntaxError");
}
@@ -17604,18 +19550,18 @@ THREADED_TEST(Equals) {
LocalContext localContext;
v8::HandleScope handleScope(localContext->GetIsolate());
- v8::Handle<v8::Object> globalProxy = localContext->Global();
- v8::Handle<Value> global = globalProxy->GetPrototype();
+ v8::Local<v8::Object> globalProxy = localContext->Global();
+ v8::Local<Value> global = globalProxy->GetPrototype();
CHECK(global->StrictEquals(global));
CHECK(!global->StrictEquals(globalProxy));
CHECK(!globalProxy->StrictEquals(global));
CHECK(globalProxy->StrictEquals(globalProxy));
- CHECK(global->Equals(global));
- CHECK(!global->Equals(globalProxy));
- CHECK(!globalProxy->Equals(global));
- CHECK(globalProxy->Equals(globalProxy));
+ CHECK(global->Equals(localContext.local(), global).FromJust());
+ CHECK(!global->Equals(localContext.local(), globalProxy).FromJust());
+ CHECK(!globalProxy->Equals(localContext.local(), global).FromJust());
+ CHECK(globalProxy->Equals(localContext.local(), globalProxy).FromJust());
}
@@ -17626,8 +19572,10 @@ static void Getter(v8::Local<v8::Name> property,
static void Enumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate());
- result->Set(0, v8_str("universalAnswer"));
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate());
+ result->Set(info.GetIsolate()->GetCurrentContext(), 0,
+ v8_str("universalAnswer"))
+ .FromJust();
info.GetReturnValue().Set(result);
}
@@ -17638,53 +19586,68 @@ TEST(NamedEnumeratorAndForIn) {
v8::HandleScope handle_scope(isolate);
v8::Context::Scope context_scope(context.local());
- v8::Handle<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
tmpl->SetHandler(v8::NamedPropertyHandlerConfiguration(Getter, NULL, NULL,
NULL, Enumerator));
- context->Global()->Set(v8_str("o"), tmpl->NewInstance());
- v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
- "var result = []; for (var k in o) result.push(k); result"));
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("o"),
+ tmpl->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
+ v8::Local<v8::Array> result = v8::Local<v8::Array>::Cast(
+ CompileRun("var result = []; for (var k in o) result.push(k); result"));
CHECK_EQ(1u, result->Length());
- CHECK(v8_str("universalAnswer")->Equals(result->Get(0)));
+ CHECK(v8_str("universalAnswer")
+ ->Equals(context.local(),
+ result->Get(context.local(), 0).ToLocalChecked())
+ .FromJust());
}
TEST(DefinePropertyPostDetach) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
- v8::Handle<v8::Object> proxy = context->Global();
- v8::Handle<v8::Function> define_property =
- CompileRun("(function() {"
- " Object.defineProperty("
- " this,"
- " 1,"
- " { configurable: true, enumerable: true, value: 3 });"
- "})").As<Function>();
+ v8::Local<v8::Object> proxy = context->Global();
+ v8::Local<v8::Function> define_property =
+ CompileRun(
+ "(function() {"
+ " Object.defineProperty("
+ " this,"
+ " 1,"
+ " { configurable: true, enumerable: true, value: 3 });"
+ "})")
+ .As<Function>();
context->DetachGlobal();
- define_property->Call(proxy, 0, NULL);
+ CHECK(define_property->Call(context.local(), proxy, 0, NULL).IsEmpty());
}
-static void InstallContextId(v8::Handle<Context> context, int id) {
+static void InstallContextId(v8::Local<Context> context, int id) {
Context::Scope scope(context);
- CompileRun("Object.prototype").As<Object>()->
- Set(v8_str("context_id"), v8::Integer::New(context->GetIsolate(), id));
+ CHECK(CompileRun("Object.prototype")
+ .As<Object>()
+ ->Set(context, v8_str("context_id"),
+ v8::Integer::New(context->GetIsolate(), id))
+ .FromJust());
}
-static void CheckContextId(v8::Handle<Object> object, int expected) {
- CHECK_EQ(expected, object->Get(v8_str("context_id"))->Int32Value());
+static void CheckContextId(v8::Local<Object> object, int expected) {
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ CHECK_EQ(expected, object->Get(context, v8_str("context_id"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
}
THREADED_TEST(CreationContext) {
v8::Isolate* isolate = CcTest::isolate();
HandleScope handle_scope(isolate);
- Handle<Context> context1 = Context::New(isolate);
+ Local<Context> context1 = Context::New(isolate);
InstallContextId(context1, 1);
- Handle<Context> context2 = Context::New(isolate);
+ Local<Context> context2 = Context::New(isolate);
InstallContextId(context2, 2);
- Handle<Context> context3 = Context::New(isolate);
+ Local<Context> context3 = Context::New(isolate);
InstallContextId(context3, 3);
Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(isolate);
@@ -17694,7 +19657,7 @@ THREADED_TEST(CreationContext) {
{
Context::Scope scope(context1);
object1 = Object::New(isolate);
- func1 = tmpl->GetFunction();
+ func1 = tmpl->GetFunction(context1).ToLocalChecked();
}
Local<Object> object2;
@@ -17702,7 +19665,7 @@ THREADED_TEST(CreationContext) {
{
Context::Scope scope(context2);
object2 = Object::New(isolate);
- func2 = tmpl->GetFunction();
+ func2 = tmpl->GetFunction(context2).ToLocalChecked();
}
Local<Object> instance1;
@@ -17710,12 +19673,12 @@ THREADED_TEST(CreationContext) {
{
Context::Scope scope(context3);
- instance1 = func1->NewInstance();
- instance2 = func2->NewInstance();
+ instance1 = func1->NewInstance(context3).ToLocalChecked();
+ instance2 = func2->NewInstance(context3).ToLocalChecked();
}
{
- Handle<Context> other_context = Context::New(isolate);
+ Local<Context> other_context = Context::New(isolate);
Context::Scope scope(other_context);
CHECK(object1->CreationContext() == context1);
CheckContextId(object1, 1);
@@ -17767,7 +19730,7 @@ THREADED_TEST(CreationContext) {
THREADED_TEST(CreationContextOfJsFunction) {
HandleScope handle_scope(CcTest::isolate());
- Handle<Context> context = Context::New(CcTest::isolate());
+ Local<Context> context = Context::New(CcTest::isolate());
InstallContextId(context, 1);
Local<Object> function;
@@ -17776,13 +19739,46 @@ THREADED_TEST(CreationContextOfJsFunction) {
function = CompileRun("function foo() {}; foo").As<Object>();
}
- Handle<Context> other_context = Context::New(CcTest::isolate());
+ Local<Context> other_context = Context::New(CcTest::isolate());
Context::Scope scope(other_context);
CHECK(function->CreationContext() == context);
CheckContextId(function, 1);
}
+THREADED_TEST(CreationContextOfJsBoundFunction) {
+ HandleScope handle_scope(CcTest::isolate());
+ Local<Context> context1 = Context::New(CcTest::isolate());
+ InstallContextId(context1, 1);
+ Local<Context> context2 = Context::New(CcTest::isolate());
+ InstallContextId(context2, 2);
+
+ Local<Function> target_function;
+ {
+ Context::Scope scope(context1);
+ target_function = CompileRun("function foo() {}; foo").As<Function>();
+ }
+
+ Local<Function> bound_function1, bound_function2;
+ {
+ Context::Scope scope(context2);
+ CHECK(context2->Global()
+ ->Set(context2, v8_str("foo"), target_function)
+ .FromJust());
+ bound_function1 = CompileRun("foo.bind(1)").As<Function>();
+ bound_function2 =
+ CompileRun("Function.prototype.bind.call(foo, 2)").As<Function>();
+ }
+
+ Local<Context> other_context = Context::New(CcTest::isolate());
+ Context::Scope scope(other_context);
+ CHECK(bound_function1->CreationContext() == context1);
+ CheckContextId(bound_function1, 1);
+ CHECK(bound_function2->CreationContext() == context2);
+ CheckContextId(bound_function2, 1);
+}
+
+
void HasOwnPropertyIndexedPropertyGetter(
uint32_t index,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -17792,7 +19788,10 @@ void HasOwnPropertyIndexedPropertyGetter(
void HasOwnPropertyNamedPropertyGetter(
Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
- if (property->Equals(v8_str("foo"))) info.GetReturnValue().Set(v8_str("yes"));
+ if (property->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust()) {
+ info.GetReturnValue().Set(v8_str("yes"));
+ }
}
@@ -17804,13 +19803,19 @@ void HasOwnPropertyIndexedPropertyQuery(
void HasOwnPropertyNamedPropertyQuery(
Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
- if (property->Equals(v8_str("foo"))) info.GetReturnValue().Set(1);
+ if (property->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust()) {
+ info.GetReturnValue().Set(1);
+ }
}
void HasOwnPropertyNamedPropertyQuery2(
Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
- if (property->Equals(v8_str("bar"))) info.GetReturnValue().Set(1);
+ if (property->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("bar"))
+ .FromJust()) {
+ info.GetReturnValue().Set(1);
+ }
}
@@ -17826,7 +19831,7 @@ TEST(HasOwnProperty) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
{ // Check normal properties and defined getters.
- Handle<Value> value = CompileRun(
+ Local<Value> value = CompileRun(
"function Foo() {"
" this.foo = 11;"
" this.__defineGetter__('baz', function() { return 1; });"
@@ -17838,63 +19843,63 @@ TEST(HasOwnProperty) {
"Bar.prototype = new Foo();"
"new Bar();");
CHECK(value->IsObject());
- Handle<Object> object = value->ToObject(isolate);
- CHECK(object->Has(v8_str("foo")));
- CHECK(!object->HasOwnProperty(v8_str("foo")));
- CHECK(object->HasOwnProperty(v8_str("bar")));
- CHECK(object->Has(v8_str("baz")));
- CHECK(!object->HasOwnProperty(v8_str("baz")));
- CHECK(object->HasOwnProperty(v8_str("bla")));
+ Local<Object> object = value->ToObject(env.local()).ToLocalChecked();
+ CHECK(object->Has(env.local(), v8_str("foo")).FromJust());
+ CHECK(!object->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
+ CHECK(object->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
+ CHECK(object->Has(env.local(), v8_str("baz")).FromJust());
+ CHECK(!object->HasOwnProperty(env.local(), v8_str("baz")).FromJust());
+ CHECK(object->HasOwnProperty(env.local(), v8_str("bla")).FromJust());
}
{ // Check named getter interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
HasOwnPropertyNamedPropertyGetter));
- Handle<Object> instance = templ->NewInstance();
- CHECK(!instance->HasOwnProperty(v8_str("42")));
- CHECK(instance->HasOwnProperty(v8_str("foo")));
- CHECK(!instance->HasOwnProperty(v8_str("bar")));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
+ CHECK(instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
}
{ // Check indexed getter interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
HasOwnPropertyIndexedPropertyGetter));
- Handle<Object> instance = templ->NewInstance();
- CHECK(instance->HasOwnProperty(v8_str("42")));
- CHECK(!instance->HasOwnProperty(v8_str("43")));
- CHECK(!instance->HasOwnProperty(v8_str("foo")));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("43")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
}
{ // Check named query interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
0, 0, HasOwnPropertyNamedPropertyQuery));
- Handle<Object> instance = templ->NewInstance();
- CHECK(instance->HasOwnProperty(v8_str("foo")));
- CHECK(!instance->HasOwnProperty(v8_str("bar")));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
}
{ // Check indexed query interceptors.
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
0, 0, HasOwnPropertyIndexedPropertyQuery));
- Handle<Object> instance = templ->NewInstance();
- CHECK(instance->HasOwnProperty(v8_str("42")));
- CHECK(!instance->HasOwnProperty(v8_str("41")));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("41")).FromJust());
}
{ // Check callbacks.
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("foo"), HasOwnPropertyAccessorGetter);
- Handle<Object> instance = templ->NewInstance();
- CHECK(instance->HasOwnProperty(v8_str("foo")));
- CHECK(!instance->HasOwnProperty(v8_str("bar")));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
}
{ // Check that query wins on disagreement.
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
HasOwnPropertyNamedPropertyGetter, 0,
HasOwnPropertyNamedPropertyQuery2));
- Handle<Object> instance = templ->NewInstance();
- CHECK(!instance->HasOwnProperty(v8_str("foo")));
- CHECK(instance->HasOwnProperty(v8_str("bar")));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ CHECK(!instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
+ CHECK(instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
}
}
@@ -17902,38 +19907,42 @@ TEST(HasOwnProperty) {
TEST(IndexedInterceptorWithStringProto) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
NULL, NULL, HasOwnPropertyIndexedPropertyQuery));
LocalContext context;
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var s = new String('foobar'); obj.__proto__ = s;");
// These should be intercepted.
- CHECK(CompileRun("42 in obj")->BooleanValue());
- CHECK(CompileRun("'42' in obj")->BooleanValue());
+ CHECK(CompileRun("42 in obj")->BooleanValue(context.local()).FromJust());
+ CHECK(CompileRun("'42' in obj")->BooleanValue(context.local()).FromJust());
// These should fall through to the String prototype.
- CHECK(CompileRun("0 in obj")->BooleanValue());
- CHECK(CompileRun("'0' in obj")->BooleanValue());
+ CHECK(CompileRun("0 in obj")->BooleanValue(context.local()).FromJust());
+ CHECK(CompileRun("'0' in obj")->BooleanValue(context.local()).FromJust());
// And these should both fail.
- CHECK(!CompileRun("32 in obj")->BooleanValue());
- CHECK(!CompileRun("'32' in obj")->BooleanValue());
+ CHECK(!CompileRun("32 in obj")->BooleanValue(context.local()).FromJust());
+ CHECK(!CompileRun("'32' in obj")->BooleanValue(context.local()).FromJust());
}
void CheckCodeGenerationAllowed() {
- Handle<Value> result = CompileRun("eval('42')");
- CHECK_EQ(42, result->Int32Value());
+ Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ Local<Value> result = CompileRun("eval('42')");
+ CHECK_EQ(42, result->Int32Value(context).FromJust());
result = CompileRun("(function(e) { return e('42'); })(eval)");
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context).FromJust());
result = CompileRun("var f = new Function('return 42'); f()");
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context).FromJust());
}
void CheckCodeGenerationDisallowed() {
TryCatch try_catch(CcTest::isolate());
- Handle<Value> result = CompileRun("eval('42')");
+ Local<Value> result = CompileRun("eval('42')");
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -17980,12 +19989,14 @@ THREADED_TEST(AllowCodeGenFromStrings) {
// Disallow but setting a global callback that will allow the calls.
context->AllowCodeGenerationFromStrings(false);
- V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationAllowed);
+ context->GetIsolate()->SetAllowCodeGenerationFromStringsCallback(
+ &CodeGenerationAllowed);
CHECK(!context->IsCodeGenerationFromStringsAllowed());
CheckCodeGenerationAllowed();
// Set a callback that disallows the code generation.
- V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationDisallowed);
+ context->GetIsolate()->SetAllowCodeGenerationFromStringsCallback(
+ &CodeGenerationDisallowed);
CHECK(!context->IsCodeGenerationFromStringsAllowed());
CheckCodeGenerationDisallowed();
}
@@ -17996,16 +20007,17 @@ TEST(SetErrorMessageForCodeGenFromStrings) {
v8::HandleScope scope(context->GetIsolate());
TryCatch try_catch(context->GetIsolate());
- Handle<String> message = v8_str("Message");
- Handle<String> expected_message = v8_str("Uncaught EvalError: Message");
- V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationDisallowed);
+ Local<String> message = v8_str("Message");
+ Local<String> expected_message = v8_str("Uncaught EvalError: Message");
+ context->GetIsolate()->SetAllowCodeGenerationFromStringsCallback(
+ &CodeGenerationDisallowed);
context->AllowCodeGenerationFromStrings(false);
context->SetErrorMessageForCodeGenerationFromStrings(message);
- Handle<Value> result = CompileRun("eval('42')");
+ Local<Value> result = CompileRun("eval('42')");
CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
- Handle<String> actual_message = try_catch.Message()->Get();
- CHECK(expected_message->Equals(actual_message));
+ Local<String> actual_message = try_catch.Message()->Get();
+ CHECK(expected_message->Equals(context.local(), actual_message).FromJust());
}
@@ -18017,10 +20029,13 @@ THREADED_TEST(CallAPIFunctionOnNonObject) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Handle<FunctionTemplate> templ =
+ Local<FunctionTemplate> templ =
v8::FunctionTemplate::New(isolate, NonObjectThis);
- Handle<Function> function = templ->GetFunction();
- context->Global()->Set(v8_str("f"), function);
+ Local<Function> function =
+ templ->GetFunction(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("f"), function)
+ .FromJust());
TryCatch try_catch(isolate);
CompileRun("f.call(2)");
}
@@ -18033,19 +20048,35 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
LocalContext context;
- Local<v8::Object> obj = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), obj);
- obj->ForceSet(v8_str("1"), v8_str("DONT_CHANGE"), v8::ReadOnly);
- obj->Set(v8_str("1"), v8_str("foobar"));
- CHECK(v8_str("DONT_CHANGE")->Equals(obj->Get(v8_str("1"))));
- obj->ForceSet(v8_num(2), v8_str("DONT_CHANGE"), v8::ReadOnly);
- obj->Set(v8_num(2), v8_str("foobar"));
- CHECK(v8_str("DONT_CHANGE")->Equals(obj->Get(v8_num(2))));
+ Local<v8::Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
+ CHECK(context->Global()->Set(context.local(), v8_str("obj"), obj).FromJust());
+ obj->DefineOwnProperty(context.local(), v8_str("1"), v8_str("DONT_CHANGE"),
+ v8::ReadOnly)
+ .FromJust();
+ obj->Set(context.local(), v8_str("1"), v8_str("foobar")).FromJust();
+ CHECK(v8_str("DONT_CHANGE")
+ ->Equals(context.local(),
+ obj->Get(context.local(), v8_str("1")).ToLocalChecked())
+ .FromJust());
+ obj->DefineOwnProperty(context.local(), v8_str("2"), v8_str("DONT_CHANGE"),
+ v8::ReadOnly)
+ .FromJust();
+ obj->Set(context.local(), v8_num(2), v8_str("foobar")).FromJust();
+ CHECK(v8_str("DONT_CHANGE")
+ ->Equals(context.local(),
+ obj->Get(context.local(), v8_num(2)).ToLocalChecked())
+ .FromJust());
// Test non-smi case.
- obj->ForceSet(v8_str("2000000000"), v8_str("DONT_CHANGE"), v8::ReadOnly);
- obj->Set(v8_str("2000000000"), v8_str("foobar"));
- CHECK(v8_str("DONT_CHANGE")->Equals(obj->Get(v8_str("2000000000"))));
+ obj->DefineOwnProperty(context.local(), v8_str("2000000000"),
+ v8_str("DONT_CHANGE"), v8::ReadOnly)
+ .FromJust();
+ obj->Set(context.local(), v8_str("2000000000"), v8_str("foobar")).FromJust();
+ CHECK(v8_str("DONT_CHANGE")
+ ->Equals(context.local(),
+ obj->Get(context.local(), v8_str("2000000000"))
+ .ToLocalChecked())
+ .FromJust());
}
@@ -18113,20 +20144,24 @@ THREADED_TEST(Regress93759) {
Local<Object> simple_object = Object::New(isolate);
// Object with explicit security check.
- Local<Object> protected_object = no_proto_template->NewInstance();
+ Local<Object> protected_object =
+ no_proto_template->NewInstance(context).ToLocalChecked();
// JSGlobalProxy object, always have security check.
Local<Object> proxy_object = context->Global();
// Global object, the prototype of proxy_object. No security checks.
- Local<Object> global_object = proxy_object->GetPrototype()->ToObject(isolate);
+ Local<Object> global_object =
+ proxy_object->GetPrototype()->ToObject(context).ToLocalChecked();
// Hidden prototype without security check.
- Local<Object> hidden_prototype =
- hidden_proto_template->GetFunction()->NewInstance();
+ Local<Object> hidden_prototype = hidden_proto_template->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
Local<Object> object_with_hidden =
Object::New(isolate);
- object_with_hidden->SetPrototype(hidden_prototype);
+ object_with_hidden->SetPrototype(context, hidden_prototype).FromJust();
context->Exit();
@@ -18142,20 +20177,25 @@ THREADED_TEST(Regress93759) {
LocalContext context2(NULL, global_template);
Local<Value> result1 = CompileRun("Object.getPrototypeOf(simple)");
- CHECK(result1->Equals(simple_object->GetPrototype()));
+ CHECK(result1->Equals(context2.local(), simple_object->GetPrototype())
+ .FromJust());
Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
CHECK(result2->IsNull());
Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
- CHECK(result3->Equals(global_object->GetPrototype()));
+ CHECK(result3->Equals(context2.local(), global_object->GetPrototype())
+ .FromJust());
Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
CHECK(result4->IsNull());
Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
- CHECK(result5->Equals(
- object_with_hidden->GetPrototype()->ToObject(isolate)->GetPrototype()));
+ CHECK(result5->Equals(context2.local(), object_with_hidden->GetPrototype()
+ ->ToObject(context2.local())
+ .ToLocalChecked()
+ ->GetPrototype())
+ .FromJust());
}
@@ -18163,9 +20203,16 @@ static void TestReceiver(Local<Value> expected_result,
Local<Value> expected_receiver,
const char* code) {
Local<Value> result = CompileRun(code);
+ Local<Context> context = CcTest::isolate()->GetCurrentContext();
CHECK(result->IsObject());
- CHECK(expected_receiver->Equals(result.As<v8::Object>()->Get(1)));
- CHECK(expected_result->Equals(result.As<v8::Object>()->Get(0)));
+ CHECK(expected_receiver
+ ->Equals(context,
+ result.As<v8::Object>()->Get(context, 1).ToLocalChecked())
+ .FromJust());
+ CHECK(expected_result
+ ->Equals(context,
+ result.As<v8::Object>()->Get(context, 0).ToLocalChecked())
+ .FromJust());
}
@@ -18214,11 +20261,19 @@ THREADED_TEST(ForeignFunctionReceiver) {
"}"
"var id = 'o';"
"ownfunc");
- context->Global()->Set(v8_str("func"), foreign_function);
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("func"), foreign_function)
+ .FromJust());
// Sanity check the contexts.
- CHECK(i->Equals(foreign_context->Global()->Get(id)));
- CHECK(o->Equals(context->Global()->Get(id)));
+ CHECK(
+ i->Equals(
+ context.local(),
+ foreign_context->Global()->Get(context.local(), id).ToLocalChecked())
+ .FromJust());
+ CHECK(o->Equals(context.local(),
+ context->Global()->Get(context.local(), id).ToLocalChecked())
+ .FromJust());
// Checking local function's receiver.
// Calling function using its call/apply methods.
@@ -18226,9 +20281,15 @@ THREADED_TEST(ForeignFunctionReceiver) {
TestReceiver(o, context->Global(), "ownfunc.apply()");
// Making calls through built-in functions.
TestReceiver(o, context->Global(), "[1].map(ownfunc)[0]");
- CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/,ownfunc)[1]")));
- CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/g,ownfunc)[1]")));
- CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/g,ownfunc)[3]")));
+ CHECK(
+ o->Equals(context.local(), CompileRun("'abcbd'.replace(/b/,ownfunc)[1]"))
+ .FromJust());
+ CHECK(
+ o->Equals(context.local(), CompileRun("'abcbd'.replace(/b/g,ownfunc)[1]"))
+ .FromJust());
+ CHECK(
+ o->Equals(context.local(), CompileRun("'abcbd'.replace(/b/g,ownfunc)[3]"))
+ .FromJust());
// Calling with environment record as base.
TestReceiver(o, context->Global(), "ownfunc()");
// Calling with no base.
@@ -18250,9 +20311,12 @@ THREADED_TEST(ForeignFunctionReceiver) {
// Making calls through built-in functions.
TestReceiver(i, foreign_context->Global(), "[1].map(func)[0]");
// ToString(func()) is func()[0], i.e., the returned this.id.
- CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/,func)[1]")));
- CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[1]")));
- CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[3]")));
+ CHECK(i->Equals(context.local(), CompileRun("'abcbd'.replace(/b/,func)[1]"))
+ .FromJust());
+ CHECK(i->Equals(context.local(), CompileRun("'abcbd'.replace(/b/g,func)[1]"))
+ .FromJust());
+ CHECK(i->Equals(context.local(), CompileRun("'abcbd'.replace(/b/g,func)[3]"))
+ .FromJust());
// Calling with environment record as base.
TestReceiver(i, foreign_context->Global(), "func()");
@@ -18277,7 +20341,8 @@ void CallCompletedCallback2() {
void RecursiveCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
- int32_t level = args[0]->Int32Value();
+ int32_t level =
+ args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust();
if (level < 3) {
level++;
v8::base::OS::Print("Entering recursion level %d.\n", level);
@@ -18297,32 +20362,35 @@ void RecursiveCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(CallCompletedCallback) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Handle<v8::FunctionTemplate> recursive_runtime =
+ v8::Local<v8::FunctionTemplate> recursive_runtime =
v8::FunctionTemplate::New(env->GetIsolate(), RecursiveCall);
- env->Global()->Set(v8_str("recursion"),
- recursive_runtime->GetFunction());
+ env->Global()
+ ->Set(env.local(), v8_str("recursion"),
+ recursive_runtime->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
// Adding the same callback a second time has no effect.
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback2);
v8::base::OS::Print("--- Script (1) ---\n");
- Local<Script> script = v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "recursion(0)"));
- script->Run();
+ Local<Script> script =
+ v8::Script::Compile(env.local(), v8_str("recursion(0)")).ToLocalChecked();
+ script->Run(env.local()).ToLocalChecked();
CHECK_EQ(3, callback_fired);
v8::base::OS::Print("\n--- Script (2) ---\n");
callback_fired = 0;
env->GetIsolate()->RemoveCallCompletedCallback(CallCompletedCallback1);
- script->Run();
+ script->Run(env.local()).ToLocalChecked();
CHECK_EQ(2, callback_fired);
v8::base::OS::Print("\n--- Function ---\n");
callback_fired = 0;
- Local<Function> recursive_function =
- Local<Function>::Cast(env->Global()->Get(v8_str("recursion")));
- v8::Handle<Value> args[] = { v8_num(0) };
- recursive_function->Call(env->Global(), 1, args);
+ Local<Function> recursive_function = Local<Function>::Cast(
+ env->Global()->Get(env.local(), v8_str("recursion")).ToLocalChecked());
+ v8::Local<Value> args[] = {v8_num(0)};
+ recursive_function->Call(env.local(), env->Global(), 1, args)
+ .ToLocalChecked();
CHECK_EQ(2, callback_fired);
}
@@ -18382,50 +20450,50 @@ TEST(EnqueueMicrotask) {
"var ext1Calls = 0;"
"var ext2Calls = 0;");
CompileRun("1+1;");
- CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskOne));
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskOne));
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
g_passed_to_three = NULL;
env->GetIsolate()->EnqueueMicrotask(MicrotaskThree);
CompileRun("1+1;");
CHECK(!g_passed_to_three);
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
int dummy;
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskOne));
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
env->GetIsolate()->EnqueueMicrotask(MicrotaskThree, &dummy);
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
CHECK_EQ(&dummy, g_passed_to_three);
- CHECK_EQ(3, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(3, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
g_passed_to_three = NULL;
}
@@ -18456,14 +20524,16 @@ TEST(RunMicrotasksIgnoresThrownExceptions) {
"var exception1Calls = 0;"
"var exception2Calls = 0;");
isolate->EnqueueMicrotask(
- Function::New(isolate, MicrotaskExceptionOne));
+ Function::New(env.local(), MicrotaskExceptionOne).ToLocalChecked());
isolate->EnqueueMicrotask(
- Function::New(isolate, MicrotaskExceptionTwo));
+ Function::New(env.local(), MicrotaskExceptionTwo).ToLocalChecked());
TryCatch try_catch(isolate);
CompileRun("1+1;");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(1, CompileRun("exception1Calls")->Int32Value());
- CHECK_EQ(1, CompileRun("exception2Calls")->Int32Value());
+ CHECK_EQ(1,
+ CompileRun("exception1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1,
+ CompileRun("exception2Calls")->Int32Value(env.local()).FromJust());
}
@@ -18474,57 +20544,57 @@ TEST(SetAutorunMicrotasks) {
"var ext1Calls = 0;"
"var ext2Calls = 0;");
CompileRun("1+1;");
- CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(0, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskOne));
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->SetAutorunMicrotasks(false);
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskOne));
+ Function::New(env.local(), MicrotaskOne).ToLocalChecked());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(0, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->RunMicrotasks();
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(1, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->RunMicrotasks();
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(2, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->SetAutorunMicrotasks(true);
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
env->GetIsolate()->EnqueueMicrotask(
- Function::New(env->GetIsolate(), MicrotaskTwo));
+ Function::New(env.local(), MicrotaskTwo).ToLocalChecked());
{
v8::Isolate::SuppressMicrotaskExecutionScope scope(env->GetIsolate());
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(3, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
}
CompileRun("1+1;");
- CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value());
- CHECK_EQ(4, CompileRun("ext2Calls")->Int32Value());
+ CHECK_EQ(2, CompileRun("ext1Calls")->Int32Value(env.local()).FromJust());
+ CHECK_EQ(4, CompileRun("ext2Calls")->Int32Value(env.local()).FromJust());
}
@@ -18532,16 +20602,17 @@ TEST(RunMicrotasksWithoutEnteringContext) {
v8::Isolate* isolate = CcTest::isolate();
HandleScope handle_scope(isolate);
isolate->SetAutorunMicrotasks(false);
- Handle<Context> context = Context::New(isolate);
+ Local<Context> context = Context::New(isolate);
{
Context::Scope context_scope(context);
CompileRun("var ext1Calls = 0;");
- isolate->EnqueueMicrotask(Function::New(isolate, MicrotaskOne));
+ isolate->EnqueueMicrotask(
+ Function::New(context, MicrotaskOne).ToLocalChecked());
}
isolate->RunMicrotasks();
{
Context::Scope context_scope(context);
- CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value());
+ CHECK_EQ(1, CompileRun("ext1Calls")->Int32Value(context).FromJust());
}
isolate->SetAutorunMicrotasks(true);
}
@@ -18550,22 +20621,25 @@ TEST(RunMicrotasksWithoutEnteringContext) {
static void DebugEventInObserver(const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
if (event != v8::Break) return;
- Handle<Object> exec_state = event_details.GetExecutionState();
- Handle<Value> break_id = exec_state->Get(v8_str("break_id"));
+ Local<Object> exec_state = event_details.GetExecutionState();
+ Local<Context> context = CcTest::isolate()->GetCurrentContext();
+ Local<Value> break_id =
+ exec_state->Get(context, v8_str("break_id")).ToLocalChecked();
CompileRun("function f(id) { new FrameDetails(id, 0); }");
- Handle<Function> fun =
- Handle<Function>::Cast(CcTest::global()->Get(v8_str("f")));
- fun->Call(CcTest::global(), 1, &break_id);
+ Local<Function> fun = Local<Function>::Cast(
+ CcTest::global()->Get(context, v8_str("f")).ToLocalChecked());
+ fun->Call(context, CcTest::global(), 1, &break_id).ToLocalChecked();
}
TEST(Regress385349) {
+ i::FLAG_harmony_object_observe = true;
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope handle_scope(isolate);
isolate->SetAutorunMicrotasks(false);
- Handle<Context> context = Context::New(isolate);
- v8::Debug::SetDebugEventListener(DebugEventInObserver);
+ Local<Context> context = Context::New(isolate);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventInObserver);
{
Context::Scope context_scope(context);
CompileRun("var obj = {};"
@@ -18574,7 +20648,7 @@ TEST(Regress385349) {
}
isolate->RunMicrotasks();
isolate->SetAutorunMicrotasks(true);
- v8::Debug::SetDebugEventListener(NULL);
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
}
@@ -18745,7 +20819,8 @@ static int instance_checked_getter_count = 0;
static void InstanceCheckedGetter(
Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- CHECK(name->Equals(v8_str("foo")));
+ CHECK(name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust());
instance_checked_getter_count++;
info.GetReturnValue().Set(v8_num(11));
}
@@ -18755,8 +20830,10 @@ static int instance_checked_setter_count = 0;
static void InstanceCheckedSetter(Local<String> name,
Local<Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- CHECK(name->Equals(v8_str("foo")));
- CHECK(value->Equals(v8_num(23)));
+ CHECK(name->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("foo"))
+ .FromJust());
+ CHECK(value->Equals(info.GetIsolate()->GetCurrentContext(), v8_num(23))
+ .FromJust());
instance_checked_setter_count++;
}
@@ -18827,23 +20904,25 @@ THREADED_TEST(InstanceCheckOnInstanceAccessor) {
Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> inst = templ->InstanceTemplate();
- inst->SetAccessor(v8_str("foo"),
- InstanceCheckedGetter, InstanceCheckedSetter,
- Handle<Value>(),
- v8::DEFAULT,
- v8::None,
+ inst->SetAccessor(v8_str("foo"), InstanceCheckedGetter, InstanceCheckedSetter,
+ Local<Value>(), v8::DEFAULT, v8::None,
v8::AccessorSignature::New(context->GetIsolate(), templ));
- context->Global()->Set(v8_str("f"), templ->GetFunction());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("f"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
printf("Testing positive ...\n");
CompileRun("var obj = new f();");
- CHECK(templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(true);
printf("Testing negative ...\n");
CompileRun("var obj = {};"
"obj.__proto__ = new f();");
- CHECK(!templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(!templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(false);
}
@@ -18866,23 +20945,25 @@ THREADED_TEST(InstanceCheckOnInstanceAccessorWithInterceptor) {
Local<ObjectTemplate> inst = templ->InstanceTemplate();
templ->InstanceTemplate()->SetNamedPropertyHandler(EmptyInterceptorGetter,
EmptyInterceptorSetter);
- inst->SetAccessor(v8_str("foo"),
- InstanceCheckedGetter, InstanceCheckedSetter,
- Handle<Value>(),
- v8::DEFAULT,
- v8::None,
+ inst->SetAccessor(v8_str("foo"), InstanceCheckedGetter, InstanceCheckedSetter,
+ Local<Value>(), v8::DEFAULT, v8::None,
v8::AccessorSignature::New(context->GetIsolate(), templ));
- context->Global()->Set(v8_str("f"), templ->GetFunction());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("f"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
printf("Testing positive ...\n");
CompileRun("var obj = new f();");
- CHECK(templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(true);
printf("Testing negative ...\n");
CompileRun("var obj = {};"
"obj.__proto__ = new f();");
- CHECK(!templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(!templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(false);
}
@@ -18895,20 +20976,25 @@ THREADED_TEST(InstanceCheckOnPrototypeAccessor) {
Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
Local<ObjectTemplate> proto = templ->PrototypeTemplate();
proto->SetAccessor(v8_str("foo"), InstanceCheckedGetter,
- InstanceCheckedSetter, Handle<Value>(), v8::DEFAULT,
+ InstanceCheckedSetter, Local<Value>(), v8::DEFAULT,
v8::None,
v8::AccessorSignature::New(context->GetIsolate(), templ));
- context->Global()->Set(v8_str("f"), templ->GetFunction());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("f"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
printf("Testing positive ...\n");
CompileRun("var obj = new f();");
- CHECK(templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(true);
printf("Testing negative ...\n");
CompileRun("var obj = {};"
"obj.__proto__ = new f();");
- CHECK(!templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(!templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(false);
printf("Testing positive with modified prototype chain ...\n");
@@ -18916,7 +21002,8 @@ THREADED_TEST(InstanceCheckOnPrototypeAccessor) {
"var pro = {};"
"pro.__proto__ = obj.__proto__;"
"obj.__proto__ = pro;");
- CHECK(templ->HasInstance(context->Global()->Get(v8_str("obj"))));
+ CHECK(templ->HasInstance(
+ context->Global()->Get(context.local(), v8_str("obj")).ToLocalChecked()));
CheckInstanceCheckedAccessors(true);
}
@@ -18940,7 +21027,7 @@ TEST(TryFinallyMessage) {
CHECK(try_catch.HasCaught());
Local<Message> message = try_catch.Message();
CHECK(!message.IsEmpty());
- CHECK_EQ(2, message->GetLineNumber());
+ CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
}
{
@@ -18959,7 +21046,7 @@ TEST(TryFinallyMessage) {
CHECK(try_catch.HasCaught());
Local<Message> message = try_catch.Message();
CHECK(!message.IsEmpty());
- CHECK_EQ(6, message->GetLineNumber());
+ CHECK_EQ(6, message->GetLineNumber(context.local()).FromJust());
}
}
@@ -18978,7 +21065,10 @@ static void Helper137002(bool do_store,
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
}
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
// Turn monomorphic on slow object with native accessor, then turn
// polymorphic, finally optimize to create negative lookup and fail.
@@ -19004,10 +21094,16 @@ static void Helper137002(bool do_store,
CompileRun("result = obj.y;");
}
if (remove_accessor && !interceptor) {
- CHECK(context->Global()->Get(v8_str("result"))->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->IsUndefined());
} else {
- CHECK_EQ(do_store ? 23 : 42,
- context->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(do_store ? 23 : 42, context->Global()
+ ->Get(context.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
}
@@ -19031,7 +21127,10 @@ THREADED_TEST(Regress137002b) {
templ->SetAccessor(v8_str("foo"),
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
// Turn monomorphic on slow object with native accessor, then just
// delete the property and fail.
@@ -19082,12 +21181,30 @@ THREADED_TEST(Regress137002b) {
"store2(subobj);"
"var y_from_obj = obj.y;"
"var y_from_subobj = subobj.y;");
- CHECK(context->Global()->Get(v8_str("load_result"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("load_result2"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("keyed_load_result"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("keyed_load_result2"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("y_from_obj"))->IsUndefined());
- CHECK(context->Global()->Get(v8_str("y_from_subobj"))->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("load_result"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("load_result2"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("keyed_load_result"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("keyed_load_result2"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("y_from_obj"))
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(context->Global()
+ ->Get(context.local(), v8_str("y_from_subobj"))
+ .ToLocalChecked()
+ ->IsUndefined());
}
@@ -19100,7 +21217,10 @@ THREADED_TEST(Regress142088) {
templ->SetAccessor(v8_str("foo"),
GetterWhichReturns42,
SetterWhichSetsYOnThisTo23);
- context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("obj"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun("function load(x) { return x.foo; }"
"var o = Object.create(obj);"
@@ -19128,7 +21248,7 @@ THREADED_TEST(Regress157124) {
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- Local<Object> obj = templ->NewInstance();
+ Local<Object> obj = templ->NewInstance(context.local()).ToLocalChecked();
obj->GetIdentityHash();
obj->DeletePrivate(context.local(),
v8::Private::ForApi(isolate, v8_str("Bug")))
@@ -19169,7 +21289,8 @@ THREADED_TEST(Regress260106) {
Local<FunctionTemplate> templ = FunctionTemplate::New(isolate,
DummyCallHandler);
CompileRun("for (var i = 0; i < 128; i++) Object.prototype[i] = 0;");
- Local<Function> function = templ->GetFunction();
+ Local<Function> function =
+ templ->GetFunction(context.local()).ToLocalChecked();
CHECK(!function.IsEmpty());
CHECK(function->IsFunction());
}
@@ -19178,9 +21299,11 @@ THREADED_TEST(Regress260106) {
THREADED_TEST(JSONParseObject) {
LocalContext context;
HandleScope scope(context->GetIsolate());
- Local<Value> obj = v8::JSON::Parse(v8_str("{\"x\":42}"));
- Handle<Object> global = context->Global();
- global->Set(v8_str("obj"), obj);
+ Local<Value> obj =
+ v8::JSON::Parse(context->GetIsolate(), v8_str("{\"x\":42}"))
+ .ToLocalChecked();
+ Local<Object> global = context->Global();
+ global->Set(context.local(), v8_str("obj"), obj).FromJust();
ExpectString("JSON.stringify(obj)", "{\"x\":42}");
}
@@ -19188,9 +21311,10 @@ THREADED_TEST(JSONParseObject) {
THREADED_TEST(JSONParseNumber) {
LocalContext context;
HandleScope scope(context->GetIsolate());
- Local<Value> obj = v8::JSON::Parse(v8_str("42"));
- Handle<Object> global = context->Global();
- global->Set(v8_str("obj"), obj);
+ Local<Value> obj =
+ v8::JSON::Parse(context->GetIsolate(), v8_str("42")).ToLocalChecked();
+ Local<Object> global = context->Global();
+ global->Set(context.local(), v8_str("obj"), obj).FromJust();
ExpectString("JSON.stringify(obj)", "42");
}
@@ -19271,28 +21395,32 @@ TEST(JSONStringifyAccessCheck) {
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
- v8::Handle<v8::Object> global0 = context0->Global();
- global0->Set(v8_str("x"), v8_num(42));
+ v8::Local<v8::Object> global0 = context0->Global();
+ global0->Set(context0.local(), v8_str("x"), v8_num(42)).FromJust();
ExpectString("JSON.stringify(this)", "{\"x\":42}");
for (int i = 0; i < 2; i++) {
if (i == 1) {
// Install a toJSON function on the second run.
- v8::Handle<v8::FunctionTemplate> toJSON =
+ v8::Local<v8::FunctionTemplate> toJSON =
v8::FunctionTemplate::New(isolate, UnreachableCallback);
- global0->Set(v8_str("toJSON"), toJSON->GetFunction());
+ global0->Set(context0.local(), v8_str("toJSON"),
+ toJSON->GetFunction(context0.local()).ToLocalChecked())
+ .FromJust();
}
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
LocalContext context1(NULL, global_template);
- context1->Global()->Set(v8_str("other"), global0);
+ CHECK(context1->Global()
+ ->Set(context1.local(), v8_str("other"), global0)
+ .FromJust());
CHECK(CompileRun("JSON.stringify(other)").IsEmpty());
CHECK(CompileRun("JSON.stringify({ 'a' : other, 'b' : ['c'] })").IsEmpty());
@@ -19325,8 +21453,13 @@ void CatcherCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
void HasOwnPropertyCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args[0]->ToObject(args.GetIsolate())->HasOwnProperty(
- args[1]->ToString(args.GetIsolate()));
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ CHECK(
+ args[0]
+ ->ToObject(context)
+ .ToLocalChecked()
+ ->HasOwnProperty(context, args[1]->ToString(context).ToLocalChecked())
+ .IsNothing());
}
@@ -19353,33 +21486,41 @@ void CheckCorrectThrow(const char* script) {
TEST(AccessCheckThrows) {
i::FLAG_allow_natives_syntax = true;
v8::V8::Initialize();
- v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
v8::Isolate* isolate = CcTest::isolate();
+ isolate->SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
v8::HandleScope scope(isolate);
// Create an ObjectTemplate for global objects and install access
// check callbacks that will block access.
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->SetAccessCheckCallback(AccessAlwaysBlocked);
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
- v8::Handle<v8::Object> global0 = context0->Global();
+ v8::Local<v8::Object> global0 = context0->Global();
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
LocalContext context1(NULL, global_template);
- context1->Global()->Set(v8_str("other"), global0);
+ CHECK(context1->Global()
+ ->Set(context1.local(), v8_str("other"), global0)
+ .FromJust());
- v8::Handle<v8::FunctionTemplate> catcher_fun =
+ v8::Local<v8::FunctionTemplate> catcher_fun =
v8::FunctionTemplate::New(isolate, CatcherCallback);
- context1->Global()->Set(v8_str("catcher"), catcher_fun->GetFunction());
+ CHECK(context1->Global()
+ ->Set(context1.local(), v8_str("catcher"),
+ catcher_fun->GetFunction(context1.local()).ToLocalChecked())
+ .FromJust());
- v8::Handle<v8::FunctionTemplate> has_own_property_fun =
+ v8::Local<v8::FunctionTemplate> has_own_property_fun =
v8::FunctionTemplate::New(isolate, HasOwnPropertyCallback);
- context1->Global()->Set(v8_str("has_own_property"),
- has_own_property_fun->GetFunction());
+ CHECK(context1->Global()
+ ->Set(context1.local(), v8_str("has_own_property"),
+ has_own_property_fun->GetFunction(context1.local())
+ .ToLocalChecked())
+ .FromJust());
{
v8::TryCatch try_catch(isolate);
@@ -19402,14 +21543,14 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%DeleteProperty_Strict(other, '1')");
CheckCorrectThrow("%HasOwnProperty(other, 'x')");
CheckCorrectThrow("%HasProperty('x', other)");
- CheckCorrectThrow("%IsPropertyEnumerable(other, 'x')");
+ CheckCorrectThrow("%PropertyIsEnumerable(other, 'x')");
// PROPERTY_ATTRIBUTES_NONE = 0
CheckCorrectThrow("%DefineAccessorPropertyUnchecked("
"other, 'x', null, null, 1)");
// Reset the failed access check callback so it does not influence
// the other tests.
- v8::V8::SetFailedAccessCheckCallbackFunction(NULL);
+ isolate->SetFailedAccessCheckCallbackFunction(NULL);
}
@@ -19510,9 +21651,12 @@ class RequestInterruptTestWithFunctionCall
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
virtual void TestBody() {
- Local<Function> func = Function::New(
- isolate_, ShouldContinueCallback, v8::External::New(isolate_, this));
- env_->Global()->Set(v8_str("ShouldContinue"), func);
+ Local<Function> func = Function::New(env_.local(), ShouldContinueCallback,
+ v8::External::New(isolate_, this))
+ .ToLocalChecked();
+ CHECK(env_->Global()
+ ->Set(env_.local(), v8_str("ShouldContinue"), func)
+ .FromJust());
CompileRun("while (ShouldContinue()) { }");
}
@@ -19525,9 +21669,14 @@ class RequestInterruptTestWithMethodCall
virtual void TestBody() {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
- proto->Set(v8_str("shouldContinue"), Function::New(
- isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
- env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ proto->Set(v8_str("shouldContinue"),
+ Function::New(env_.local(), ShouldContinueCallback,
+ v8::External::New(isolate_, this))
+ .ToLocalChecked());
+ CHECK(env_->Global()
+ ->Set(env_.local(), v8_str("Klass"),
+ t->GetFunction(env_.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj = new Klass; while (obj.shouldContinue()) { }");
}
@@ -19542,7 +21691,10 @@ class RequestInterruptTestWithAccessor
v8::Local<v8::Template> proto = t->PrototypeTemplate();
proto->SetAccessorProperty(v8_str("shouldContinue"), FunctionTemplate::New(
isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
- env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ CHECK(env_->Global()
+ ->Set(env_.local(), v8_str("Klass"),
+ t->GetFunction(env_.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj = new Klass; while (obj.shouldContinue) { }");
}
@@ -19559,7 +21711,10 @@ class RequestInterruptTestWithNativeAccessor
&ShouldContinueNativeGetter,
NULL,
v8::External::New(isolate_, this));
- env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ CHECK(env_->Global()
+ ->Set(env_.local(), v8_str("Klass"),
+ t->GetFunction(env_.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj = new Klass; while (obj.shouldContinue) { }");
}
@@ -19582,13 +21737,18 @@ class RequestInterruptTestWithMethodCallAndInterceptor
virtual void TestBody() {
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
- proto->Set(v8_str("shouldContinue"), Function::New(
- isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
+ proto->Set(v8_str("shouldContinue"),
+ Function::New(env_.local(), ShouldContinueCallback,
+ v8::External::New(isolate_, this))
+ .ToLocalChecked());
v8::Local<v8::ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetHandler(
v8::NamedPropertyHandlerConfiguration(EmptyInterceptor));
- env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ CHECK(env_->Global()
+ ->Set(env_.local(), v8_str("Klass"),
+ t->GetFunction(env_.local()).ToLocalChecked())
+ .FromJust());
CompileRun("var obj = new Klass; while (obj.shouldContinue()) { }");
}
@@ -19603,15 +21763,19 @@ class RequestInterruptTestWithMathAbs
: public RequestInterruptTestBaseWithSimpleInterrupt {
public:
virtual void TestBody() {
- env_->Global()->Set(v8_str("WakeUpInterruptor"), Function::New(
- isolate_,
- WakeUpInterruptorCallback,
- v8::External::New(isolate_, this)));
-
- env_->Global()->Set(v8_str("ShouldContinue"), Function::New(
- isolate_,
- ShouldContinueCallback,
- v8::External::New(isolate_, this)));
+ env_->Global()
+ ->Set(env_.local(), v8_str("WakeUpInterruptor"),
+ Function::New(env_.local(), WakeUpInterruptorCallback,
+ v8::External::New(isolate_, this))
+ .ToLocalChecked())
+ .FromJust();
+
+ env_->Global()
+ ->Set(env_.local(), v8_str("ShouldContinue"),
+ Function::New(env_.local(), ShouldContinueCallback,
+ v8::External::New(isolate_, this))
+ .ToLocalChecked())
+ .FromJust();
i::FLAG_allow_natives_syntax = true;
CompileRun("function loopish(o) {"
@@ -19636,7 +21800,11 @@ class RequestInterruptTestWithMathAbs
private:
static void WakeUpInterruptorCallback(
const v8::FunctionCallbackInfo<Value>& info) {
- if (!info[0]->BooleanValue()) return;
+ if (!info[0]
+ ->BooleanValue(info.GetIsolate()->GetCurrentContext())
+ .FromJust()) {
+ return;
+ }
RequestInterruptTestBase* test =
reinterpret_cast<RequestInterruptTestBase*>(
@@ -19693,9 +21861,12 @@ class RequestMultipleInterrupts : public RequestInterruptTestBase {
}
virtual void TestBody() {
- Local<Function> func = Function::New(
- isolate_, ShouldContinueCallback, v8::External::New(isolate_, this));
- env_->Global()->Set(v8_str("ShouldContinue"), func);
+ Local<Function> func = Function::New(env_.local(), ShouldContinueCallback,
+ v8::External::New(isolate_, this))
+ .ToLocalChecked();
+ CHECK(env_->Global()
+ ->Set(env_.local(), v8_str("ShouldContinue"), func)
+ .FromJust());
CompileRun("while (ShouldContinue()) { }");
}
@@ -19754,7 +21925,10 @@ TEST(RequestInterruptSmallScripts) {
static Local<Value> function_new_expected_env;
static void FunctionNewCallback(const v8::FunctionCallbackInfo<Value>& info) {
- CHECK(function_new_expected_env->Equals(info.Data()));
+ CHECK(
+ function_new_expected_env->Equals(info.GetIsolate()->GetCurrentContext(),
+ info.Data())
+ .FromJust());
info.GetReturnValue().Set(17);
}
@@ -19765,10 +21939,11 @@ THREADED_TEST(FunctionNew) {
v8::HandleScope scope(isolate);
Local<Object> data = v8::Object::New(isolate);
function_new_expected_env = data;
- Local<Function> func = Function::New(isolate, FunctionNewCallback, data);
- env->Global()->Set(v8_str("func"), func);
+ Local<Function> func =
+ Function::New(env.local(), FunctionNewCallback, data).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
Local<Value> result = CompileRun("func();");
- CHECK(v8::Integer::New(isolate, 17)->Equals(result));
+ CHECK(v8::Integer::New(isolate, 17)->Equals(env.local(), result).FromJust());
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Verify function not cached
auto serial_number = handle(
@@ -19782,12 +21957,13 @@ THREADED_TEST(FunctionNew) {
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
function_new_expected_env = data2;
- Local<Function> func2 = Function::New(isolate, FunctionNewCallback, data2);
+ Local<Function> func2 =
+ Function::New(env.local(), FunctionNewCallback, data2).ToLocalChecked();
CHECK(!func2->IsNull());
- CHECK(!func->Equals(func2));
- env->Global()->Set(v8_str("func2"), func2);
+ CHECK(!func->Equals(env.local(), func2).FromJust());
+ CHECK(env->Global()->Set(env.local(), v8_str("func2"), func2).FromJust());
Local<Value> result2 = CompileRun("func2();");
- CHECK(v8::Integer::New(isolate, 17)->Equals(result2));
+ CHECK(v8::Integer::New(isolate, 17)->Equals(env.local(), result2).FromJust());
}
@@ -19805,7 +21981,9 @@ TEST(EscapeableHandleScope) {
for (int i = 0; i < runs; i++) {
Local<String> expected;
if (i != 0) {
- CHECK(v8_str("escape value")->Equals(values[i]));
+ CHECK(v8_str("escape value")
+ ->Equals(context.local(), values[i])
+ .FromJust());
} else {
CHECK(values[i].IsEmpty());
}
@@ -19825,7 +22003,10 @@ TEST(Regress239669) {
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
templ->SetAccessor(v8_str("x"), 0, SetterWhichExpectsThisAndHolderToDiffer);
- context->Global()->Set(v8_str("P"), templ->NewInstance());
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("P"),
+ templ->NewInstance(context.local()).ToLocalChecked())
+ .FromJust());
CompileRun(
"function C1() {"
" this.x = 23;"
@@ -19851,7 +22032,9 @@ class ApiCallOptimizationChecker {
CHECK(data == info.Data());
CHECK(receiver == info.This());
if (info.Length() == 1) {
- CHECK(v8_num(1)->Equals(info[0]));
+ CHECK(v8_num(1)
+ ->Equals(info.GetIsolate()->GetCurrentContext(), info[0])
+ .FromJust());
}
CHECK(holder == info.Holder());
count++;
@@ -19909,8 +22092,11 @@ class ApiCallOptimizationChecker {
v8::Context::New(isolate, NULL, signature_template);
v8::Context::Scope context_scope(context);
// Install regular object that can pass signature checks.
- Local<Object> function_receiver = signature_template->NewInstance();
- context->Global()->Set(v8_str("function_receiver"), function_receiver);
+ Local<Object> function_receiver =
+ signature_template->NewInstance(context).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context, v8_str("function_receiver"), function_receiver)
+ .FromJust());
// Get the holder objects.
Local<Object> inner_global =
Local<Object>::Cast(context->Global()->GetPrototype());
@@ -19918,16 +22104,17 @@ class ApiCallOptimizationChecker {
data = Object::New(isolate);
Local<FunctionTemplate> function_template = FunctionTemplate::New(
isolate, OptimizationCallback, data, signature);
- Local<Function> function = function_template->GetFunction();
+ Local<Function> function =
+ function_template->GetFunction(context).ToLocalChecked();
Local<Object> global_holder = inner_global;
Local<Object> function_holder = function_receiver;
if (signature_type == kSignatureOnPrototype) {
function_holder = Local<Object>::Cast(function_holder->GetPrototype());
global_holder = Local<Object>::Cast(global_holder->GetPrototype());
}
- global_holder->Set(v8_str("g_f"), function);
+ global_holder->Set(context, v8_str("g_f"), function).FromJust();
global_holder->SetAccessorProperty(v8_str("g_acc"), function, function);
- function_holder->Set(v8_str("f"), function);
+ function_holder->Set(context, v8_str("f"), function).FromJust();
function_holder->SetAccessorProperty(v8_str("acc"), function, function);
// Initialize expected values.
callee = function;
@@ -20001,7 +22188,7 @@ class ApiCallOptimizationChecker {
wrap_function.start(), key, key, key, key, key, key);
v8::TryCatch try_catch(isolate);
CompileRun(source.start());
- DCHECK(!try_catch.HasCaught());
+ CHECK(!try_catch.HasCaught());
CHECK_EQ(9, count);
}
};
@@ -20026,9 +22213,10 @@ TEST(FunctionCallOptimizationMultipleArgs) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Handle<Object> global = context->Global();
- Local<v8::Function> function = Function::New(isolate, Returns42);
- global->Set(v8_str("x"), function);
+ Local<Object> global = context->Global();
+ Local<v8::Function> function =
+ Function::New(context.local(), Returns42).ToLocalChecked();
+ global->Set(context.local(), v8_str("x"), function).FromJust();
CompileRun(
"function x_wrap() {\n"
" for (var i = 0; i < 5; i++) {\n"
@@ -20052,9 +22240,10 @@ TEST(ApiCallbackCanReturnSymbols) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Handle<Object> global = context->Global();
- Local<v8::Function> function = Function::New(isolate, ReturnsSymbolCallback);
- global->Set(v8_str("x"), function);
+ Local<Object> global = context->Global();
+ Local<v8::Function> function =
+ Function::New(context.local(), ReturnsSymbolCallback).ToLocalChecked();
+ global->Set(context.local(), v8_str("x"), function).FromJust();
CompileRun(
"function x_wrap() {\n"
" for (var i = 0; i < 5; i++) {\n"
@@ -20072,8 +22261,10 @@ TEST(EmptyApiCallback) {
auto isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
auto global = context->Global();
- auto function = FunctionTemplate::New(isolate)->GetFunction();
- global->Set(v8_str("x"), function);
+ auto function = FunctionTemplate::New(isolate)
+ ->GetFunction(context.local())
+ .ToLocalChecked();
+ global->Set(context.local(), v8_str("x"), function).FromJust();
auto result = CompileRun("x()");
CHECK(v8::Utils::OpenHandle(*result)->IsJSGlobalProxy());
@@ -20081,13 +22272,19 @@ TEST(EmptyApiCallback) {
result = CompileRun("x(1,2,3)");
CHECK(v8::Utils::OpenHandle(*result)->IsJSGlobalProxy());
+ result = CompileRun("x.call(undefined)");
+ CHECK(v8::Utils::OpenHandle(*result)->IsJSGlobalProxy());
+
+ result = CompileRun("x.call(null)");
+ CHECK(v8::Utils::OpenHandle(*result)->IsJSGlobalProxy());
+
result = CompileRun("7 + x.call(3) + 11");
CHECK(result->IsInt32());
- CHECK_EQ(21, result->Int32Value());
+ CHECK_EQ(21, result->Int32Value(context.local()).FromJust());
result = CompileRun("7 + x.call(3, 101, 102, 103, 104) + 11");
CHECK(result->IsInt32());
- CHECK_EQ(21, result->Int32Value());
+ CHECK_EQ(21, result->Int32Value(context.local()).FromJust());
result = CompileRun("var y = []; x.call(y)");
CHECK(result->IsArray());
@@ -20104,9 +22301,13 @@ TEST(SimpleSignatureCheck) {
auto global = context->Global();
auto sig_obj = FunctionTemplate::New(isolate);
auto sig = v8::Signature::New(isolate, sig_obj);
- auto x = FunctionTemplate::New(isolate, Returns42, Handle<Value>(), sig);
- global->Set(v8_str("sig_obj"), sig_obj->GetFunction());
- global->Set(v8_str("x"), x->GetFunction());
+ auto x = FunctionTemplate::New(isolate, Returns42, Local<Value>(), sig);
+ global->Set(context.local(), v8_str("sig_obj"),
+ sig_obj->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
+ global->Set(context.local(), v8_str("x"),
+ x->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun("var s = new sig_obj();");
{
TryCatch try_catch(isolate);
@@ -20122,13 +22323,13 @@ TEST(SimpleSignatureCheck) {
TryCatch try_catch(isolate);
auto result = CompileRun("s.x = x; s.x()");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
}
{
TryCatch try_catch(isolate);
auto result = CompileRun("x.call(s)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
}
}
@@ -20145,9 +22346,13 @@ TEST(ChainSignatureCheck) {
temp->Inherit(sig_obj);
sig_obj = temp;
}
- auto x = FunctionTemplate::New(isolate, Returns42, Handle<Value>(), sig);
- global->Set(v8_str("sig_obj"), sig_obj->GetFunction());
- global->Set(v8_str("x"), x->GetFunction());
+ auto x = FunctionTemplate::New(isolate, Returns42, Local<Value>(), sig);
+ global->Set(context.local(), v8_str("sig_obj"),
+ sig_obj->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
+ global->Set(context.local(), v8_str("x"),
+ x->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun("var s = new sig_obj();");
{
TryCatch try_catch(isolate);
@@ -20163,13 +22368,13 @@ TEST(ChainSignatureCheck) {
TryCatch try_catch(isolate);
auto result = CompileRun("s.x = x; s.x()");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
}
{
TryCatch try_catch(isolate);
auto result = CompileRun("x.call(s)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
}
}
@@ -20182,9 +22387,13 @@ TEST(PrototypeSignatureCheck) {
auto sig_obj = FunctionTemplate::New(isolate);
sig_obj->SetHiddenPrototype(true);
auto sig = v8::Signature::New(isolate, sig_obj);
- auto x = FunctionTemplate::New(isolate, Returns42, Handle<Value>(), sig);
- global->Set(v8_str("sig_obj"), sig_obj->GetFunction());
- global->Set(v8_str("x"), x->GetFunction());
+ auto x = FunctionTemplate::New(isolate, Returns42, Local<Value>(), sig);
+ global->Set(context.local(), v8_str("sig_obj"),
+ sig_obj->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
+ global->Set(context.local(), v8_str("x"),
+ x->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
CompileRun("s = {}; s.__proto__ = new sig_obj();");
{
TryCatch try_catch(isolate);
@@ -20200,13 +22409,13 @@ TEST(PrototypeSignatureCheck) {
TryCatch try_catch(isolate);
auto result = CompileRun("s.x = x; s.x()");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
}
{
TryCatch try_catch(isolate);
auto result = CompileRun("x.call(s)");
CHECK(!try_catch.HasCaught());
- CHECK_EQ(42, result->Int32Value());
+ CHECK_EQ(42, result->Int32Value(context.local()).FromJust());
}
}
@@ -20238,94 +22447,26 @@ TEST(Promises) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Handle<Object> global = context->Global();
// Creation.
- Handle<v8::Promise::Resolver> pr = v8::Promise::Resolver::New(isolate);
- Handle<v8::Promise::Resolver> rr = v8::Promise::Resolver::New(isolate);
- Handle<v8::Promise> p = pr->GetPromise();
- Handle<v8::Promise> r = rr->GetPromise();
- CHECK_EQ(isolate, p->GetIsolate());
+ Local<v8::Promise::Resolver> pr =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ Local<v8::Promise::Resolver> rr =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ Local<v8::Promise> p = pr->GetPromise();
+ Local<v8::Promise> r = rr->GetPromise();
// IsPromise predicate.
CHECK(p->IsPromise());
CHECK(r->IsPromise());
- Handle<Value> o = v8::Object::New(isolate);
+ Local<Value> o = v8::Object::New(isolate);
CHECK(!o->IsPromise());
// Resolution and rejection.
- pr->Resolve(v8::Integer::New(isolate, 1));
+ pr->Resolve(context.local(), v8::Integer::New(isolate, 1)).FromJust();
CHECK(p->IsPromise());
- rr->Reject(v8::Integer::New(isolate, 2));
+ rr->Reject(context.local(), v8::Integer::New(isolate, 2)).FromJust();
CHECK(r->IsPromise());
-
- // Chaining non-pending promises.
- CompileRun(
- "var x1 = 0;\n"
- "var x2 = 0;\n"
- "function f1(x) { x1 = x; return x+1 };\n"
- "function f2(x) { x2 = x; return x+1 };\n");
- Handle<Function> f1 = Handle<Function>::Cast(global->Get(v8_str("f1")));
- Handle<Function> f2 = Handle<Function>::Cast(global->Get(v8_str("f2")));
-
- p->Chain(f1);
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- isolate->RunMicrotasks();
- CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
-
- p->Catch(f2);
- isolate->RunMicrotasks();
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
-
- r->Catch(f2);
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
- isolate->RunMicrotasks();
- CHECK_EQ(2, global->Get(v8_str("x2"))->Int32Value());
-
- r->Chain(f1);
- isolate->RunMicrotasks();
- CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
-
- // Chaining pending promises.
- CompileRun("x1 = x2 = 0;");
- pr = v8::Promise::Resolver::New(isolate);
- rr = v8::Promise::Resolver::New(isolate);
-
- pr->GetPromise()->Chain(f1);
- rr->GetPromise()->Catch(f2);
- isolate->RunMicrotasks();
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
-
- pr->Resolve(v8::Integer::New(isolate, 1));
- rr->Reject(v8::Integer::New(isolate, 2));
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
-
- isolate->RunMicrotasks();
- CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(2, global->Get(v8_str("x2"))->Int32Value());
-
- // Multi-chaining.
- CompileRun("x1 = x2 = 0;");
- pr = v8::Promise::Resolver::New(isolate);
- pr->GetPromise()->Chain(f1)->Chain(f2);
- pr->Resolve(v8::Integer::New(isolate, 3));
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
- isolate->RunMicrotasks();
- CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
-
- CompileRun("x1 = x2 = 0;");
- rr = v8::Promise::Resolver::New(isolate);
- rr->GetPromise()->Catch(f1)->Chain(f2);
- rr->Reject(v8::Integer::New(isolate, 3));
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
- isolate->RunMicrotasks();
- CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
}
@@ -20333,19 +22474,21 @@ TEST(PromiseThen) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
- Handle<Object> global = context->Global();
+ Local<Object> global = context->Global();
// Creation.
- Handle<v8::Promise::Resolver> pr = v8::Promise::Resolver::New(isolate);
- Handle<v8::Promise::Resolver> qr = v8::Promise::Resolver::New(isolate);
- Handle<v8::Promise> p = pr->GetPromise();
- Handle<v8::Promise> q = qr->GetPromise();
+ Local<v8::Promise::Resolver> pr =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ Local<v8::Promise::Resolver> qr =
+ v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ Local<v8::Promise> p = pr->GetPromise();
+ Local<v8::Promise> q = qr->GetPromise();
CHECK(p->IsPromise());
CHECK(q->IsPromise());
- pr->Resolve(v8::Integer::New(isolate, 1));
- qr->Resolve(p);
+ pr->Resolve(context.local(), v8::Integer::New(isolate, 1)).FromJust();
+ qr->Resolve(context.local(), p).FromJust();
// Chaining non-pending promises.
CompileRun(
@@ -20353,45 +22496,73 @@ TEST(PromiseThen) {
"var x2 = 0;\n"
"function f1(x) { x1 = x; return x+1 };\n"
"function f2(x) { x2 = x; return x+1 };\n");
- Handle<Function> f1 = Handle<Function>::Cast(global->Get(v8_str("f1")));
- Handle<Function> f2 = Handle<Function>::Cast(global->Get(v8_str("f2")));
-
- // Chain
- q->Chain(f1);
- CHECK(global->Get(v8_str("x1"))->IsNumber());
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- isolate->RunMicrotasks();
- CHECK(!global->Get(v8_str("x1"))->IsNumber());
- CHECK(p->Equals(global->Get(v8_str("x1"))));
+ Local<Function> f1 = Local<Function>::Cast(
+ global->Get(context.local(), v8_str("f1")).ToLocalChecked());
+ Local<Function> f2 = Local<Function>::Cast(
+ global->Get(context.local(), v8_str("f2")).ToLocalChecked());
// Then
CompileRun("x1 = x2 = 0;");
- q->Then(f1);
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
+ q->Then(context.local(), f1).ToLocalChecked();
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
isolate->RunMicrotasks();
- CHECK_EQ(1, global->Get(v8_str("x1"))->Int32Value());
+ CHECK_EQ(1, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
// Then
CompileRun("x1 = x2 = 0;");
- pr = v8::Promise::Resolver::New(isolate);
- qr = v8::Promise::Resolver::New(isolate);
+ pr = v8::Promise::Resolver::New(context.local()).ToLocalChecked();
+ qr = v8::Promise::Resolver::New(context.local()).ToLocalChecked();
- qr->Resolve(pr);
- qr->GetPromise()->Then(f1)->Then(f2);
+ qr->Resolve(context.local(), pr).FromJust();
+ qr->GetPromise()
+ ->Then(context.local(), f1)
+ .ToLocalChecked()
+ ->Then(context.local(), f2)
+ .ToLocalChecked();
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
isolate->RunMicrotasks();
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
- pr->Resolve(v8::Integer::New(isolate, 3));
+ pr->Resolve(context.local(), v8::Integer::New(isolate, 3)).FromJust();
- CHECK_EQ(0, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(0, global->Get(v8_str("x2"))->Int32Value());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(0, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
isolate->RunMicrotasks();
- CHECK_EQ(3, global->Get(v8_str("x1"))->Int32Value());
- CHECK_EQ(4, global->Get(v8_str("x2"))->Int32Value());
+ CHECK_EQ(3, global->Get(context.local(), v8_str("x1"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
+ CHECK_EQ(4, global->Get(context.local(), v8_str("x2"))
+ .ToLocalChecked()
+ ->Int32Value(context.local())
+ .FromJust());
}
@@ -20436,9 +22607,12 @@ TEST(Regress354123) {
v8::Isolate* isolate = current->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
templ->SetAccessCheckCallback(AccessCounter);
- current->Global()->Set(v8_str("friend"), templ->NewInstance());
+ CHECK(current->Global()
+ ->Set(current.local(), v8_str("friend"),
+ templ->NewInstance(current.local()).ToLocalChecked())
+ .FromJust());
// Test access using __proto__ from the prototype chain.
access_count = 0;
@@ -20481,8 +22655,8 @@ TEST(CaptureStackTraceForStackOverflow) {
LocalContext current;
v8::Isolate* isolate = current->GetIsolate();
v8::HandleScope scope(isolate);
- V8::SetCaptureStackTraceForUncaughtExceptions(
- true, 10, v8::StackTrace::kDetailed);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kDetailed);
v8::TryCatch try_catch(isolate);
CompileRun("(function f(x) { f(x+1); })(0)");
CHECK(try_catch.HasCaught());
@@ -20496,8 +22670,8 @@ TEST(ScriptNameAndLineNumber) {
const char* url = "http://www.foo.com/foo.js";
v8::ScriptOrigin origin(v8_str(url), v8::Integer::New(isolate, 13));
v8::ScriptCompiler::Source script_source(v8_str("var foo;"), origin);
- Local<Script> script = v8::ScriptCompiler::Compile(
- isolate, &script_source);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(env.local(), &script_source).ToLocalChecked();
Local<Value> script_name = script->GetUnboundScript()->GetScriptName();
CHECK(!script_name.IsEmpty());
CHECK(script_name->IsString());
@@ -20507,7 +22681,7 @@ TEST(ScriptNameAndLineNumber) {
CHECK_EQ(13, line_number);
}
-void CheckMagicComments(Handle<Script> script, const char* expected_source_url,
+void CheckMagicComments(Local<Script> script, const char* expected_source_url,
const char* expected_source_mapping_url) {
if (expected_source_url != NULL) {
v8::String::Utf8Value url(script->GetUnboundScript()->GetSourceURL());
@@ -20605,34 +22779,56 @@ TEST(GetOwnPropertyDescriptor) {
" set : function(value) { this.value = value; },"
" get : function() { return this.value; },"
"});");
- Local<Object> x = Local<Object>::Cast(env->Global()->Get(v8_str("x")));
- Local<Value> desc = x->GetOwnPropertyDescriptor(v8_str("no_prop"));
+ Local<Object> x = Local<Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("x")).ToLocalChecked());
+ Local<Value> desc =
+ x->GetOwnPropertyDescriptor(env.local(), v8_str("no_prop"))
+ .ToLocalChecked();
CHECK(desc->IsUndefined());
- desc = x->GetOwnPropertyDescriptor(v8_str("p0"));
- CHECK(v8_num(12)->Equals(Local<Object>::Cast(desc)->Get(v8_str("value"))));
- desc = x->GetOwnPropertyDescriptor(v8_str("p1"));
+ desc =
+ x->GetOwnPropertyDescriptor(env.local(), v8_str("p0")).ToLocalChecked();
+ CHECK(v8_num(12)
+ ->Equals(env.local(), Local<Object>::Cast(desc)
+ ->Get(env.local(), v8_str("value"))
+ .ToLocalChecked())
+ .FromJust());
+ desc =
+ x->GetOwnPropertyDescriptor(env.local(), v8_str("p1")).ToLocalChecked();
Local<Function> set =
- Local<Function>::Cast(Local<Object>::Cast(desc)->Get(v8_str("set")));
+ Local<Function>::Cast(Local<Object>::Cast(desc)
+ ->Get(env.local(), v8_str("set"))
+ .ToLocalChecked());
Local<Function> get =
- Local<Function>::Cast(Local<Object>::Cast(desc)->Get(v8_str("get")));
- CHECK(v8_num(13)->Equals(get->Call(x, 0, NULL)));
- Handle<Value> args[] = { v8_num(14) };
- set->Call(x, 1, args);
- CHECK(v8_num(14)->Equals(get->Call(x, 0, NULL)));
+ Local<Function>::Cast(Local<Object>::Cast(desc)
+ ->Get(env.local(), v8_str("get"))
+ .ToLocalChecked());
+ CHECK(v8_num(13)
+ ->Equals(env.local(),
+ get->Call(env.local(), x, 0, NULL).ToLocalChecked())
+ .FromJust());
+ Local<Value> args[] = {v8_num(14)};
+ set->Call(env.local(), x, 1, args).ToLocalChecked();
+ CHECK(v8_num(14)
+ ->Equals(env.local(),
+ get->Call(env.local(), x, 0, NULL).ToLocalChecked())
+ .FromJust());
}
TEST(Regress411877) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallback(AccessCounter);
- v8::Handle<Context> context = Context::New(isolate);
+ v8::Local<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
- context->Global()->Set(v8_str("o"), object_template->NewInstance());
+ CHECK(context->Global()
+ ->Set(context, v8_str("o"),
+ object_template->NewInstance(context).ToLocalChecked())
+ .FromJust());
CompileRun("Object.getOwnPropertyNames(o)");
}
@@ -20640,16 +22836,17 @@ TEST(Regress411877) {
TEST(GetHiddenPropertyTableAfterAccessCheck) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallback(AccessCounter);
- v8::Handle<Context> context = Context::New(isolate);
+ v8::Local<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Handle<v8::Object> obj = object_template->NewInstance();
- obj->Set(v8_str("key"), v8_str("value"));
- obj->Delete(v8_str("key"));
+ v8::Local<v8::Object> obj =
+ object_template->NewInstance(context).ToLocalChecked();
+ obj->Set(context, v8_str("key"), v8_str("value")).FromJust();
+ obj->Delete(context, v8_str("key")).FromJust();
obj->SetPrivate(context, v8::Private::New(isolate, v8_str("hidden key 2")),
v8_str("hidden value 2"))
@@ -20660,14 +22857,17 @@ TEST(GetHiddenPropertyTableAfterAccessCheck) {
TEST(Regress411793) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> object_template =
+ v8::Local<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallback(AccessCounter);
- v8::Handle<Context> context = Context::New(isolate);
+ v8::Local<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
- context->Global()->Set(v8_str("o"), object_template->NewInstance());
+ CHECK(context->Global()
+ ->Set(context, v8_str("o"),
+ object_template->NewInstance(context).ToLocalChecked())
+ .FromJust());
CompileRun(
"Object.defineProperty(o, 'key', "
" { get: function() {}, set: function() {} });");
@@ -20743,14 +22943,15 @@ void RunStreamingTest(const char** chunks,
v8::ScriptOrigin origin(v8_str("http://foo.com"));
char* full_source = TestSourceStream::FullSourceString(chunks);
- v8::Handle<Script> script = v8::ScriptCompiler::Compile(
- isolate, &source, v8_str(full_source), origin);
+ v8::MaybeLocal<Script> script = v8::ScriptCompiler::Compile(
+ env.local(), &source, v8_str(full_source), origin);
if (expected_success) {
CHECK(!script.IsEmpty());
- v8::Handle<Value> result(script->Run());
+ v8::Local<Value> result(
+ script.ToLocalChecked()->Run(env.local()).ToLocalChecked());
// All scripts are supposed to return the fixed value 13 when ran.
- CHECK_EQ(13, result->Int32Value());
- CheckMagicComments(script, expected_source_url,
+ CHECK_EQ(13, result->Int32Value(env.local()).FromJust());
+ CheckMagicComments(script.ToLocalChecked(), expected_source_url,
expected_source_mapping_url);
} else {
CHECK(script.IsEmpty());
@@ -21016,10 +23217,12 @@ TEST(StreamingWithDebuggingEnabledLate) {
v8::ScriptOrigin origin(v8_str("http://foo.com"));
char* full_source = TestSourceStream::FullSourceString(chunks);
- EnableDebugger();
+ EnableDebugger(isolate);
- v8::Handle<Script> script = v8::ScriptCompiler::Compile(
- isolate, &source, v8_str(full_source), origin);
+ v8::Local<Script> script =
+ v8::ScriptCompiler::Compile(env.local(), &source, v8_str(full_source),
+ origin)
+ .ToLocalChecked();
Maybe<uint32_t> result =
script->Run(env.local()).ToLocalChecked()->Uint32Value(env.local());
@@ -21027,7 +23230,7 @@ TEST(StreamingWithDebuggingEnabledLate) {
delete[] full_source;
- DisableDebugger();
+ DisableDebugger(isolate);
}
@@ -21122,14 +23325,15 @@ TEST(StreamingWithHarmonyScopes) {
v8::ScriptOrigin origin(v8_str("http://foo.com"));
char* full_source = TestSourceStream::FullSourceString(chunks);
- v8::Handle<Script> script = v8::ScriptCompiler::Compile(
- isolate, &source, v8_str(full_source), origin);
+ v8::Local<Script> script =
+ v8::ScriptCompiler::Compile(env.local(), &source, v8_str(full_source),
+ origin)
+ .ToLocalChecked();
CHECK(!script.IsEmpty());
CHECK_EQ(false, try_catch.HasCaught());
// Running the script exposes the error.
- v8::Handle<Value> result(script->Run());
- CHECK(result.IsEmpty());
+ CHECK(script->Run(env.local()).IsEmpty());
CHECK(try_catch.HasCaught());
delete[] full_source;
}
@@ -21181,7 +23385,12 @@ TEST(CodeCache) {
script = v8::ScriptCompiler::Compile(context, &source, option)
.ToLocalChecked();
}
- CHECK_EQ(2, script->Run()->ToInt32(isolate2)->Int32Value());
+ CHECK_EQ(2, script->Run(context)
+ .ToLocalChecked()
+ ->ToInt32(context)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
}
isolate2->Dispose();
}
@@ -21193,13 +23402,16 @@ void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
int length = 16;
v8::ScriptCompiler::CachedData* cached_data =
new v8::ScriptCompiler::CachedData(data, length);
- DCHECK(!cached_data->rejected);
+ CHECK(!cached_data->rejected);
v8::ScriptOrigin origin(v8_str("origin"));
v8::ScriptCompiler::Source source(v8_str("42"), origin, cached_data);
- v8::Handle<v8::Script> script =
- v8::ScriptCompiler::Compile(CcTest::isolate(), &source, option);
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(context, &source, option).ToLocalChecked();
CHECK(cached_data->rejected);
- CHECK_EQ(42, script->Run()->Int32Value());
+ CHECK_EQ(
+ 42,
+ script->Run(context).ToLocalChecked()->Int32Value(context).FromJust());
}
@@ -21221,9 +23433,11 @@ TEST(ParserCacheRejectedGracefully) {
v8::ScriptOrigin origin(v8_str("origin"));
v8::Local<v8::String> source_str = v8_str("function foo() {}");
v8::ScriptCompiler::Source source(source_str, origin);
- v8::Handle<v8::Script> script = v8::ScriptCompiler::Compile(
- CcTest::isolate(), &source, v8::ScriptCompiler::kProduceParserCache);
- CHECK(!script.IsEmpty());
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(context.local(), &source,
+ v8::ScriptCompiler::kProduceParserCache)
+ .ToLocalChecked();
+ USE(script);
const v8::ScriptCompiler::CachedData* original_cached_data =
source.GetCachedData();
CHECK(original_cached_data != NULL);
@@ -21236,10 +23450,11 @@ TEST(ParserCacheRejectedGracefully) {
source_str, origin,
new v8::ScriptCompiler::CachedData(original_cached_data->data,
original_cached_data->length));
- v8::Handle<v8::Script> script =
- v8::ScriptCompiler::Compile(CcTest::isolate(), &source_with_cached_data,
- v8::ScriptCompiler::kConsumeParserCache);
- CHECK(!script.IsEmpty());
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(context.local(), &source_with_cached_data,
+ v8::ScriptCompiler::kConsumeParserCache)
+ .ToLocalChecked();
+ USE(script);
const v8::ScriptCompiler::CachedData* new_cached_data =
source_with_cached_data.GetCachedData();
CHECK(new_cached_data != NULL);
@@ -21255,10 +23470,11 @@ TEST(ParserCacheRejectedGracefully) {
incompatible_source_str, origin,
new v8::ScriptCompiler::CachedData(original_cached_data->data,
original_cached_data->length));
- v8::Handle<v8::Script> script =
- v8::ScriptCompiler::Compile(CcTest::isolate(), &source_with_cached_data,
- v8::ScriptCompiler::kConsumeParserCache);
- CHECK(!script.IsEmpty());
+ v8::Local<v8::Script> script =
+ v8::ScriptCompiler::Compile(context.local(), &source_with_cached_data,
+ v8::ScriptCompiler::kConsumeParserCache)
+ .ToLocalChecked();
+ USE(script);
const v8::ScriptCompiler::CachedData* new_cached_data =
source_with_cached_data.GetCachedData();
CHECK(new_cached_data != NULL);
@@ -21272,7 +23488,8 @@ TEST(StringConcatOverflow) {
v8::HandleScope scope(CcTest::isolate());
RandomLengthOneByteResource* r =
new RandomLengthOneByteResource(i::String::kMaxLength);
- v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), r);
+ v8::Local<v8::String> str =
+ v8::String::NewExternalOneByte(CcTest::isolate(), r).ToLocalChecked();
CHECK(!str.IsEmpty());
v8::TryCatch try_catch(CcTest::isolate());
v8::Local<v8::String> result = v8::String::Concat(str, str);
@@ -21324,11 +23541,13 @@ TEST(GetPrototypeAccessControl) {
v8::HandleScope handle_scope(isolate);
LocalContext env;
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->SetAccessCheckCallback(AccessAlwaysBlocked);
- env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("prohibited"),
+ obj_template->NewInstance(env.local()).ToLocalChecked())
+ .FromJust());
CHECK(CompileRun(
"function f() { return %_GetPrototype(prohibited); }"
@@ -21343,26 +23562,29 @@ TEST(GetPrototypeHidden) {
v8::HandleScope handle_scope(isolate);
LocalContext env;
- Handle<FunctionTemplate> t = FunctionTemplate::New(isolate);
+ Local<FunctionTemplate> t = FunctionTemplate::New(isolate);
t->SetHiddenPrototype(true);
- Handle<Object> proto = t->GetFunction()->NewInstance();
- Handle<Object> object = Object::New(isolate);
- Handle<Object> proto2 = Object::New(isolate);
- object->SetPrototype(proto);
- proto->SetPrototype(proto2);
-
- env->Global()->Set(v8_str("object"), object);
- env->Global()->Set(v8_str("proto"), proto);
- env->Global()->Set(v8_str("proto2"), proto2);
-
- v8::Handle<v8::Value> result = CompileRun("%_GetPrototype(object)");
- CHECK(result->Equals(proto2));
+ Local<Object> proto = t->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ Local<Object> object = Object::New(isolate);
+ Local<Object> proto2 = Object::New(isolate);
+ object->SetPrototype(env.local(), proto).FromJust();
+ proto->SetPrototype(env.local(), proto2).FromJust();
+
+ CHECK(env->Global()->Set(env.local(), v8_str("object"), object).FromJust());
+ CHECK(env->Global()->Set(env.local(), v8_str("proto"), proto).FromJust());
+ CHECK(env->Global()->Set(env.local(), v8_str("proto2"), proto2).FromJust());
+
+ v8::Local<v8::Value> result = CompileRun("%_GetPrototype(object)");
+ CHECK(result->Equals(env.local(), proto2).FromJust());
result = CompileRun(
"function f() { return %_GetPrototype(object); }"
"%OptimizeFunctionOnNextCall(f);"
"f()");
- CHECK(result->Equals(proto2));
+ CHECK(result->Equals(env.local(), proto2).FromJust());
}
@@ -21371,7 +23593,7 @@ TEST(ClassPrototypeCreationContext) {
v8::HandleScope handle_scope(isolate);
LocalContext env;
- Handle<Object> result = Handle<Object>::Cast(
+ Local<Object> result = Local<Object>::Cast(
CompileRun("'use strict'; class Example { }; Example.prototype"));
CHECK(env.local() == result->CreationContext());
}
@@ -21412,22 +23634,25 @@ TEST(NewStringRangeError) {
{
v8::TryCatch try_catch(isolate);
char* data = reinterpret_cast<char*>(buffer);
- CHECK(v8::String::NewFromUtf8(isolate, data, v8::String::kNormalString,
- length).IsEmpty());
+ CHECK(v8::String::NewFromUtf8(isolate, data, v8::NewStringType::kNormal,
+ length)
+ .IsEmpty());
CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
uint8_t* data = reinterpret_cast<uint8_t*>(buffer);
- CHECK(v8::String::NewFromOneByte(isolate, data, v8::String::kNormalString,
- length).IsEmpty());
+ CHECK(v8::String::NewFromOneByte(isolate, data, v8::NewStringType::kNormal,
+ length)
+ .IsEmpty());
CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
uint16_t* data = reinterpret_cast<uint16_t*>(buffer);
- CHECK(v8::String::NewFromTwoByte(isolate, data, v8::String::kNormalString,
- length).IsEmpty());
+ CHECK(v8::String::NewFromTwoByte(isolate, data, v8::NewStringType::kNormal,
+ length)
+ .IsEmpty());
CHECK(!try_catch.HasCaught());
}
free(buffer);
@@ -21487,11 +23712,10 @@ TEST(StrongModeAccessCheckAllowed) {
i::FLAG_strong_mode = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<Value> value;
+ v8::Local<Value> value;
access_was_called = false;
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
obj_template->SetAccessCheckCallback(AccessAlwaysAllowedWithFlag);
@@ -21499,14 +23723,16 @@ TEST(StrongModeAccessCheckAllowed) {
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
context0->Enter();
- v8::Handle<v8::Object> global0 = context0->Global();
- global0->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global0 = context0->Global();
+ global0->Set(context0, v8_str("object"),
+ obj_template->NewInstance(context0).ToLocalChecked())
+ .FromJust();
{
v8::TryCatch try_catch(isolate);
value = CompileRun("'use strong'; object.x");
CHECK(!try_catch.HasCaught());
CHECK(!access_was_called);
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context0).FromJust());
}
{
v8::TryCatch try_catch(isolate);
@@ -21524,14 +23750,16 @@ TEST(StrongModeAccessCheckAllowed) {
// Create an environment
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global1 = context1->Global();
+ global1->Set(context1, v8_str("object"),
+ obj_template->NewInstance(context1).ToLocalChecked())
+ .FromJust();
{
v8::TryCatch try_catch(isolate);
value = CompileRun("'use strong'; object.x");
CHECK(!try_catch.HasCaught());
CHECK(access_was_called);
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context1).FromJust());
}
access_was_called = false;
{
@@ -21557,11 +23785,10 @@ TEST(StrongModeAccessCheckBlocked) {
i::FLAG_strong_mode = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<Value> value;
+ v8::Local<Value> value;
access_was_called = false;
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
obj_template->SetAccessCheckCallback(AccessAlwaysBlockedWithFlag);
@@ -21569,14 +23796,16 @@ TEST(StrongModeAccessCheckBlocked) {
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
context0->Enter();
- v8::Handle<v8::Object> global0 = context0->Global();
- global0->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global0 = context0->Global();
+ global0->Set(context0, v8_str("object"),
+ obj_template->NewInstance(context0).ToLocalChecked())
+ .FromJust();
{
v8::TryCatch try_catch(isolate);
value = CompileRun("'use strong'; object.x");
CHECK(!try_catch.HasCaught());
CHECK(!access_was_called);
- CHECK_EQ(42, value->Int32Value());
+ CHECK_EQ(42, value->Int32Value(context0).FromJust());
}
{
v8::TryCatch try_catch(isolate);
@@ -21594,8 +23823,10 @@ TEST(StrongModeAccessCheckBlocked) {
// Create an environment
v8::Local<Context> context1 = Context::New(isolate);
context1->Enter();
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("object"), obj_template->NewInstance());
+ v8::Local<v8::Object> global1 = context1->Global();
+ global1->Set(context1, v8_str("object"),
+ obj_template->NewInstance(context1).ToLocalChecked())
+ .FromJust();
{
v8::TryCatch try_catch(isolate);
value = CompileRun("'use strong'; object.x");
@@ -21639,21 +23870,23 @@ TEST(StrongModeArityCallFromApi) {
{
v8::TryCatch try_catch(isolate);
- fun->Call(v8::Undefined(isolate), 0, nullptr);
+ CHECK(fun->Call(env.local(), v8::Undefined(isolate), 0, nullptr).IsEmpty());
CHECK(try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
- v8::Handle<Value> args[] = {v8_num(42)};
- fun->Call(v8::Undefined(isolate), arraysize(args), args);
+ v8::Local<Value> args[] = {v8_num(42)};
+ fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
- v8::Handle<Value> args[] = {v8_num(42), v8_num(555)};
- fun->Call(v8::Undefined(isolate), arraysize(args), args);
+ v8::Local<Value> args[] = {v8_num(42), v8_num(555)};
+ fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
}
}
@@ -21677,21 +23910,23 @@ TEST(StrongModeArityCallFromApi2) {
{
v8::TryCatch try_catch(isolate);
- fun->Call(v8::Undefined(isolate), 0, nullptr);
+ CHECK(fun->Call(env.local(), v8::Undefined(isolate), 0, nullptr).IsEmpty());
CHECK(try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
- v8::Handle<Value> args[] = {v8_num(42)};
- fun->Call(v8::Undefined(isolate), arraysize(args), args);
+ v8::Local<Value> args[] = {v8_num(42)};
+ fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch(isolate);
- v8::Handle<Value> args[] = {v8_num(42), v8_num(555)};
- fun->Call(v8::Undefined(isolate), arraysize(args), args);
+ v8::Local<Value> args[] = {v8_num(42), v8_num(555)};
+ fun->Call(env.local(), v8::Undefined(isolate), arraysize(args), args)
+ .ToLocalChecked();
CHECK(!try_catch.HasCaught());
}
}
@@ -21704,24 +23939,28 @@ TEST(StrongObjectDelete) {
v8::HandleScope scope(isolate);
Local<Object> obj;
{
- v8::TryCatch try_catch;
+ v8::TryCatch try_catch(isolate);
obj = Local<Object>::Cast(CompileRun(
"'use strong';"
"({});"));
CHECK(!try_catch.HasCaught());
}
- obj->ForceSet(v8_str("foo"), v8_num(1), v8::None);
- obj->ForceSet(v8_str("2"), v8_num(1), v8::None);
- CHECK(obj->HasOwnProperty(v8_str("foo")));
- CHECK(obj->HasOwnProperty(v8_str("2")));
- CHECK(!obj->Delete(v8_str("foo")));
- CHECK(!obj->Delete(2));
+ obj->DefineOwnProperty(env.local(), v8_str("foo"), v8_num(1), v8::None)
+ .FromJust();
+ obj->DefineOwnProperty(env.local(), v8_str("2"), v8_num(1), v8::None)
+ .FromJust();
+ CHECK(obj->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
+ CHECK(obj->HasOwnProperty(env.local(), v8_str("2")).FromJust());
+ CHECK(!obj->Delete(env.local(), v8_str("foo")).FromJust());
+ CHECK(!obj->Delete(env.local(), 2).FromJust());
}
static void ExtrasBindingTestRuntimeFunction(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK_EQ(3, args[0]->Int32Value());
+ CHECK_EQ(
+ 3,
+ args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust());
args.GetReturnValue().Set(v8_num(7));
}
@@ -21735,19 +23974,27 @@ TEST(ExtrasBindingObject) {
// export the tested functions.
v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
- auto func =
- binding->Get(v8_str("testExtraShouldReturnFive")).As<v8::Function>();
+ auto func = binding->Get(env.local(), v8_str("testExtraShouldReturnFive"))
+ .ToLocalChecked()
+ .As<v8::Function>();
auto undefined = v8::Undefined(isolate);
- auto result = func->Call(undefined, 0, {}).As<v8::Number>();
- CHECK_EQ(5, result->Int32Value());
+ auto result = func->Call(env.local(), undefined, 0, {})
+ .ToLocalChecked()
+ .As<v8::Number>();
+ CHECK_EQ(5, result->Int32Value(env.local()).FromJust());
- v8::Handle<v8::FunctionTemplate> runtimeFunction =
+ v8::Local<v8::FunctionTemplate> runtimeFunction =
v8::FunctionTemplate::New(isolate, ExtrasBindingTestRuntimeFunction);
- binding->Set(v8_str("runtime"), runtimeFunction->GetFunction());
- func =
- binding->Get(v8_str("testExtraShouldCallToRuntime")).As<v8::Function>();
- result = func->Call(undefined, 0, {}).As<v8::Number>();
- CHECK_EQ(7, result->Int32Value());
+ binding->Set(env.local(), v8_str("runtime"),
+ runtimeFunction->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
+ func = binding->Get(env.local(), v8_str("testExtraShouldCallToRuntime"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ result = func->Call(env.local(), undefined, 0, {})
+ .ToLocalChecked()
+ .As<v8::Number>();
+ CHECK_EQ(7, result->Int32Value(env.local()).FromJust());
}
@@ -21762,19 +24009,29 @@ TEST(ExperimentalExtras) {
// which should export the tested functions.
v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
- auto func = binding->Get(v8_str("testExperimentalExtraShouldReturnTen"))
- .As<v8::Function>();
+ auto func =
+ binding->Get(env.local(), v8_str("testExperimentalExtraShouldReturnTen"))
+ .ToLocalChecked()
+ .As<v8::Function>();
auto undefined = v8::Undefined(isolate);
- auto result = func->Call(undefined, 0, {}).As<v8::Number>();
- CHECK_EQ(10, result->Int32Value());
+ auto result = func->Call(env.local(), undefined, 0, {})
+ .ToLocalChecked()
+ .As<v8::Number>();
+ CHECK_EQ(10, result->Int32Value(env.local()).FromJust());
- v8::Handle<v8::FunctionTemplate> runtimeFunction =
+ v8::Local<v8::FunctionTemplate> runtimeFunction =
v8::FunctionTemplate::New(isolate, ExtrasBindingTestRuntimeFunction);
- binding->Set(v8_str("runtime"), runtimeFunction->GetFunction());
- func = binding->Get(v8_str("testExperimentalExtraShouldCallToRuntime"))
+ binding->Set(env.local(), v8_str("runtime"),
+ runtimeFunction->GetFunction(env.local()).ToLocalChecked())
+ .FromJust();
+ func = binding->Get(env.local(),
+ v8_str("testExperimentalExtraShouldCallToRuntime"))
+ .ToLocalChecked()
.As<v8::Function>();
- result = func->Call(undefined, 0, {}).As<v8::Number>();
- CHECK_EQ(7, result->Int32Value());
+ result = func->Call(env.local(), undefined, 0, {})
+ .ToLocalChecked()
+ .As<v8::Number>();
+ CHECK_EQ(7, result->Int32Value(env.local()).FromJust());
}
@@ -21786,34 +24043,44 @@ TEST(ExtrasUtilsObject) {
LocalContext env;
v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
- auto func = binding->Get(v8_str("testExtraCanUseUtils")).As<v8::Function>();
+ auto func = binding->Get(env.local(), v8_str("testExtraCanUseUtils"))
+ .ToLocalChecked()
+ .As<v8::Function>();
auto undefined = v8::Undefined(isolate);
- auto result = func->Call(undefined, 0, {}).As<v8::Object>();
+ auto result = func->Call(env.local(), undefined, 0, {})
+ .ToLocalChecked()
+ .As<v8::Object>();
- auto private_symbol = result->Get(v8_str("privateSymbol")).As<v8::Symbol>();
+ auto private_symbol = result->Get(env.local(), v8_str("privateSymbol"))
+ .ToLocalChecked()
+ .As<v8::Symbol>();
i::Handle<i::Symbol> ips = v8::Utils::OpenHandle(*private_symbol);
CHECK_EQ(true, ips->IsPrivate());
CompileRun("var result = 0; function store(x) { result = x; }");
auto store = CompileRun("store").As<v8::Function>();
- auto fulfilled_promise =
- result->Get(v8_str("fulfilledPromise")).As<v8::Promise>();
- fulfilled_promise->Then(store);
+ auto fulfilled_promise = result->Get(env.local(), v8_str("fulfilledPromise"))
+ .ToLocalChecked()
+ .As<v8::Promise>();
+ fulfilled_promise->Then(env.local(), store).ToLocalChecked();
isolate->RunMicrotasks();
- CHECK_EQ(1, CompileRun("result")->Int32Value());
+ CHECK_EQ(1, CompileRun("result")->Int32Value(env.local()).FromJust());
auto fulfilled_promise_2 =
- result->Get(v8_str("fulfilledPromise2")).As<v8::Promise>();
- fulfilled_promise_2->Then(store);
+ result->Get(env.local(), v8_str("fulfilledPromise2"))
+ .ToLocalChecked()
+ .As<v8::Promise>();
+ fulfilled_promise_2->Then(env.local(), store).ToLocalChecked();
isolate->RunMicrotasks();
- CHECK_EQ(2, CompileRun("result")->Int32Value());
+ CHECK_EQ(2, CompileRun("result")->Int32Value(env.local()).FromJust());
- auto rejected_promise =
- result->Get(v8_str("rejectedPromise")).As<v8::Promise>();
- rejected_promise->Catch(store);
+ auto rejected_promise = result->Get(env.local(), v8_str("rejectedPromise"))
+ .ToLocalChecked()
+ .As<v8::Promise>();
+ rejected_promise->Catch(env.local(), store).ToLocalChecked();
isolate->RunMicrotasks();
- CHECK_EQ(3, CompileRun("result")->Int32Value());
+ CHECK_EQ(3, CompileRun("result")->Int32Value(env.local()).FromJust());
}
@@ -21835,10 +24102,18 @@ TEST(Map) {
v8::Local<v8::Array> contents = map->AsArray();
CHECK_EQ(4U, contents->Length());
- CHECK_EQ(1, contents->Get(0).As<v8::Int32>()->Value());
- CHECK_EQ(2, contents->Get(1).As<v8::Int32>()->Value());
- CHECK_EQ(3, contents->Get(2).As<v8::Int32>()->Value());
- CHECK_EQ(4, contents->Get(3).As<v8::Int32>()->Value());
+ CHECK_EQ(
+ 1,
+ contents->Get(env.local(), 0).ToLocalChecked().As<v8::Int32>()->Value());
+ CHECK_EQ(
+ 2,
+ contents->Get(env.local(), 1).ToLocalChecked().As<v8::Int32>()->Value());
+ CHECK_EQ(
+ 3,
+ contents->Get(env.local(), 2).ToLocalChecked().As<v8::Int32>()->Value());
+ CHECK_EQ(
+ 4,
+ contents->Get(env.local(), 3).ToLocalChecked().As<v8::Int32>()->Value());
CHECK_EQ(2U, map->Size());
@@ -21850,10 +24125,12 @@ TEST(Map) {
CHECK_EQ(2, map->Get(env.local(), v8::Integer::New(isolate, 1))
.ToLocalChecked()
- ->Int32Value());
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(4, map->Get(env.local(), v8::Integer::New(isolate, 3))
.ToLocalChecked()
- ->Int32Value());
+ ->Int32Value(env.local())
+ .FromJust());
CHECK(map->Get(env.local(), v8::Integer::New(isolate, 42))
.ToLocalChecked()
@@ -21891,8 +24168,10 @@ TEST(Set) {
v8::Local<v8::Array> keys = set->AsArray();
CHECK_EQ(2U, keys->Length());
- CHECK_EQ(1, keys->Get(0).As<v8::Int32>()->Value());
- CHECK_EQ(2, keys->Get(1).As<v8::Int32>()->Value());
+ CHECK_EQ(1,
+ keys->Get(env.local(), 0).ToLocalChecked().As<v8::Int32>()->Value());
+ CHECK_EQ(2,
+ keys->Get(env.local(), 1).ToLocalChecked().As<v8::Int32>()->Value());
CHECK_EQ(2U, set->Size());
@@ -21927,7 +24206,10 @@ TEST(CompatibleReceiverCheckOnCachedICHandler) {
v8::Local<v8::FunctionTemplate> child = v8::FunctionTemplate::New(isolate);
child->Inherit(parent);
LocalContext env;
- env->Global()->Set(v8_str("Child"), child->GetFunction());
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("Child"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
// Make sure there's a compiled stub for "Child.prototype.age" in the cache.
CompileRun(
@@ -21962,7 +24244,7 @@ class FutexInterruptionThread : public v8::base::Thread {
virtual void Run() {
// Wait a bit before terminating.
v8::base::OS::Sleep(v8::base::TimeDelta::FromMilliseconds(100));
- v8::V8::TerminateExecution(isolate_);
+ isolate_->TerminateExecution();
}
private:
@@ -22010,7 +24292,7 @@ bool NoAbortOnUncaughtException(v8::Isolate* isolate) {
TEST(AbortOnUncaughtExceptionNoAbort) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
LocalContext env(NULL, global_template);
@@ -22020,10 +24302,10 @@ TEST(AbortOnUncaughtExceptionNoAbort) {
CompileRun("function boom() { throw new Error(\"boom\") }");
v8::Local<v8::Object> global_object = env->Global();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(global_object->Get(v8_str("boom")));
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ global_object->Get(env.local(), v8_str("boom")).ToLocalChecked());
- foo->Call(global_object, 0, NULL);
+ CHECK(foo->Call(env.local(), global_object, 0, NULL).IsEmpty());
CHECK_EQ(1, nb_uncaught_exception_callback_calls);
}
@@ -22040,13 +24322,14 @@ TEST(AccessCheckedIsConcatSpreadable) {
spreadable_template->SetAccessCheckCallback(AccessBlocker);
spreadable_template->Set(v8::Symbol::GetIsConcatSpreadable(isolate),
v8::Boolean::New(isolate, true));
- Local<Object> object = spreadable_template->NewInstance();
+ Local<Object> object =
+ spreadable_template->NewInstance(env.local()).ToLocalChecked();
allowed_access = true;
- env->Global()->Set(v8_str("object"), object);
- object->Set(v8_str("length"), v8_num(2));
- object->Set(0U, v8_str("a"));
- object->Set(1U, v8_str("b"));
+ CHECK(env->Global()->Set(env.local(), v8_str("object"), object).FromJust());
+ object->Set(env.local(), v8_str("length"), v8_num(2)).FromJust();
+ object->Set(env.local(), 0U, v8_str("a")).FromJust();
+ object->Set(env.local(), 1U, v8_str("b")).FromJust();
// Access check is allowed, and the object is spread
CompileRun("var result = [].concat(object)");
@@ -22075,11 +24358,13 @@ TEST(AccessCheckedToStringTag) {
// Object with access check
Local<ObjectTemplate> object_template = v8::ObjectTemplate::New(isolate);
object_template->SetAccessCheckCallback(AccessBlocker);
- Local<Object> object = object_template->NewInstance();
+ Local<Object> object =
+ object_template->NewInstance(env.local()).ToLocalChecked();
allowed_access = true;
- env->Global()->Set(v8_str("object"), object);
- object->Set(v8::Symbol::GetToStringTag(isolate), v8_str("hello"));
+ env->Global()->Set(env.local(), v8_str("object"), object).FromJust();
+ object->Set(env.local(), v8::Symbol::GetToStringTag(isolate), v8_str("hello"))
+ .FromJust();
// Access check is allowed, and the toStringTag is read
CompileRun("var result = Object.prototype.toString.call(object)");
@@ -22112,26 +24397,57 @@ TEST(ObjectTemplateIntrinsics) {
Local<ObjectTemplate> object_template = v8::ObjectTemplate::New(isolate);
object_template->SetIntrinsicDataProperty(v8_str("values"),
v8::kArrayProto_values);
- Local<Object> object = object_template->NewInstance();
+ Local<Object> object =
+ object_template->NewInstance(env.local()).ToLocalChecked();
- env->Global()->Set(v8_str("obj1"), object);
+ CHECK(env->Global()->Set(env.local(), v8_str("obj1"), object).FromJust());
ExpectString("typeof obj1.values", "function");
- auto values = Local<Function>::Cast(object->Get(v8_str("values")));
+ auto values = Local<Function>::Cast(
+ object->Get(env.local(), v8_str("values")).ToLocalChecked());
auto fn = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*values));
auto ctx = v8::Utils::OpenHandle(*env.local());
CHECK_EQ(fn->GetCreationContext(), *ctx);
{
LocalContext env2;
- Local<Object> object2 = object_template->NewInstance();
- env2->Global()->Set(v8_str("obj2"), object2);
+ Local<Object> object2 =
+ object_template->NewInstance(env2.local()).ToLocalChecked();
+ CHECK(
+ env2->Global()->Set(env2.local(), v8_str("obj2"), object2).FromJust());
ExpectString("typeof obj2.values", "function");
- CHECK_NE(*object->Get(v8_str("values")), *object2->Get(v8_str("values")));
+ CHECK_NE(*object->Get(env2.local(), v8_str("values")).ToLocalChecked(),
+ *object2->Get(env2.local(), v8_str("values")).ToLocalChecked());
- auto values2 = Local<Function>::Cast(object2->Get(v8_str("values")));
+ auto values2 = Local<Function>::Cast(
+ object2->Get(env2.local(), v8_str("values")).ToLocalChecked());
auto fn2 = i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*values2));
auto ctx2 = v8::Utils::OpenHandle(*env2.local());
CHECK_EQ(fn2->GetCreationContext(), *ctx2);
}
}
+
+
+TEST(Proxy) {
+ i::FLAG_harmony_proxies = true;
+ LocalContext context;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> target = CompileRun("({})").As<v8::Object>();
+ v8::Local<v8::Object> handler = CompileRun("({})").As<v8::Object>();
+
+ v8::Local<v8::Proxy> proxy =
+ v8::Proxy::New(context.local(), target, handler).ToLocalChecked();
+ CHECK(proxy->IsProxy());
+ CHECK(!target->IsProxy());
+ CHECK(!proxy->IsRevoked());
+ CHECK(proxy->GetTarget()->SameValue(target));
+ CHECK(proxy->GetHandler()->SameValue(handler));
+
+ proxy->Revoke();
+ CHECK(proxy->IsProxy());
+ CHECK(!target->IsProxy());
+ CHECK(proxy->IsRevoked());
+ CHECK(proxy->GetTarget()->SameValue(target));
+ CHECK(proxy->GetHandler()->IsNull());
+}
diff --git a/deps/v8/test/cctest/test-asm-validator.cc b/deps/v8/test/cctest/test-asm-validator.cc
index 8923e21e9f..fae75008c7 100644
--- a/deps/v8/test/cctest/test-asm-validator.cc
+++ b/deps/v8/test/cctest/test-asm-validator.cc
@@ -4,11 +4,11 @@
#include "src/v8.h"
-#include "src/ast.h"
-#include "src/ast-expression-visitor.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "src/type-cache.h"
#include "src/typing-asm.h"
#include "test/cctest/cctest.h"
@@ -16,28 +16,34 @@
#include "test/cctest/expression-type-collector-macros.h"
// Macros for function types.
-#define FUNC_V_TYPE Bounds(Type::Function(Type::Undefined(), zone))
-#define FUNC_I_TYPE Bounds(Type::Function(cache.kInt32, zone))
-#define FUNC_F_TYPE Bounds(Type::Function(cache.kFloat32, zone))
-#define FUNC_D_TYPE Bounds(Type::Function(cache.kFloat64, zone))
+#define FUNC_V_TYPE Bounds(Type::Function(Type::Undefined(zone), zone))
+#define FUNC_I_TYPE Bounds(Type::Function(cache.kAsmSigned, zone))
+#define FUNC_F_TYPE Bounds(Type::Function(cache.kAsmFloat, zone))
+#define FUNC_D_TYPE Bounds(Type::Function(cache.kAsmDouble, zone))
#define FUNC_D2D_TYPE \
- Bounds(Type::Function(cache.kFloat64, cache.kFloat64, zone))
+ Bounds(Type::Function(cache.kAsmDouble, cache.kAsmDouble, zone))
#define FUNC_N2F_TYPE \
- Bounds(Type::Function(cache.kFloat32, Type::Number(), zone))
-#define FUNC_I2I_TYPE Bounds(Type::Function(cache.kInt32, cache.kInt32, zone))
+ Bounds(Type::Function(cache.kAsmFloat, Type::Number(zone), zone))
+#define FUNC_I2I_TYPE \
+ Bounds(Type::Function(cache.kAsmSigned, cache.kAsmInt, zone))
#define FUNC_II2D_TYPE \
- Bounds(Type::Function(cache.kFloat64, cache.kInt32, cache.kInt32, zone))
+ Bounds(Type::Function(cache.kAsmDouble, cache.kAsmInt, cache.kAsmInt, zone))
#define FUNC_II2I_TYPE \
- Bounds(Type::Function(cache.kInt32, cache.kInt32, cache.kInt32, zone))
-#define FUNC_DD2D_TYPE \
- Bounds(Type::Function(cache.kFloat64, cache.kFloat64, cache.kFloat64, zone))
+ Bounds(Type::Function(cache.kAsmSigned, cache.kAsmInt, cache.kAsmInt, zone))
+#define FUNC_DD2D_TYPE \
+ Bounds(Type::Function(cache.kAsmDouble, cache.kAsmDouble, cache.kAsmDouble, \
+ zone))
+#define FUNC_NN2N_TYPE \
+ Bounds(Type::Function(Type::Number(zone), Type::Number(zone), \
+ Type::Number(zone), zone))
#define FUNC_N2N_TYPE \
- Bounds(Type::Function(Type::Number(), Type::Number(), zone))
+ Bounds(Type::Function(Type::Number(zone), Type::Number(zone), zone))
// Macros for array types.
-#define FLOAT64_ARRAY_TYPE Bounds(Type::Array(cache.kFloat64, zone))
-#define FUNC_I2I_ARRAY_TYPE \
- Bounds(Type::Array(Type::Function(cache.kInt32, cache.kInt32, zone), zone))
+#define FLOAT64_ARRAY_TYPE Bounds(Type::Array(cache.kAsmDouble, zone))
+#define FUNC_I2I_ARRAY_TYPE \
+ Bounds(Type::Array(Type::Function(cache.kAsmSigned, cache.kAsmInt, zone), \
+ zone))
using namespace v8::internal;
@@ -123,143 +129,143 @@ TEST(ValidateMinimum) {
CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
// function logSum
CHECK_EXPR(FunctionLiteral, FUNC_II2D_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(start, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(start, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(end, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(end, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
- CHECK_VAR(sum, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(sum, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(p, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(p, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(q, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(q, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
// for (p = start << 3, q = end << 3;
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(p, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(start, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(p, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(q, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(end, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(q, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
}
// (p|0) < (q|0);
- CHECK_EXPR(CompareOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(p, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(p, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(q, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(q, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
// p = (p + 8)|0) {\n"
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(p, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(p, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(p, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(p, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
// sum = sum + +log(values[p>>3]);
- CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
- CHECK_VAR(sum, Bounds(cache.kFloat64));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_VAR(sum, Bounds(cache.kFloat64));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_EXPR(Call, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(sum, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(sum, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
CHECK_VAR(log, FUNC_D2D_TYPE);
- CHECK_EXPR(Property, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(Property, Bounds(cache.kAsmDouble)) {
CHECK_VAR(values, FLOAT64_ARRAY_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(p, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(p, Bounds(cache.kAsmSigned));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
}
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
}
// return +sum;
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_VAR(sum, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(sum, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
// function geometricMean
CHECK_EXPR(FunctionLiteral, FUNC_II2D_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(start, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(start, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(end, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(end, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
// return +exp(+logSum(start, end) / +((end - start)|0));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_EXPR(Call, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
CHECK_VAR(exp, FUNC_D2D_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_EXPR(Call, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
CHECK_VAR(logSum, FUNC_II2D_TYPE);
- CHECK_VAR(start, Bounds(cache.kInt32));
- CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
}
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(end, Bounds(cache.kInt32));
- CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(end, Bounds(cache.kAsmInt));
+ CHECK_VAR(start, Bounds(cache.kAsmInt));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
}
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
// "use asm";
- CHECK_EXPR(Literal, Bounds(Type::String()));
+ CHECK_EXPR(Literal, Bounds(Type::String(zone)));
// var exp = stdlib.Math.exp;
CHECK_EXPR(Assignment, FUNC_D2D_TYPE) {
CHECK_VAR(exp, FUNC_D2D_TYPE);
@@ -303,6 +309,52 @@ TEST(ValidateMinimum) {
}
+TEST(MissingUseAsm) {
+ const char test_function[] =
+ "function foo() {\n"
+ " function bar() {}\n"
+ " return { bar: bar };\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 1: missing \"use asm\"\n",
+ Validate(zone, test_function, &types));
+}
+
+
+TEST(WrongUseAsm) {
+ const char test_function[] =
+ "function foo() {\n"
+ " \"use wasm\"\n"
+ " function bar() {}\n"
+ " return { bar: bar };\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 1: missing \"use asm\"\n",
+ Validate(zone, test_function, &types));
+}
+
+
+TEST(MissingReturnExports) {
+ const char test_function[] =
+ "function foo() {\n"
+ " \"use asm\"\n"
+ " function bar() {}\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 2: last statement in module is not a return\n",
+ Validate(zone, test_function, &types));
+}
+
+
#define HARNESS_STDLIB() \
"var Infinity = stdlib.Infinity;\n" \
"var NaN = stdlib.NaN;\n" \
@@ -394,12 +446,12 @@ TEST(ValidateMinimum) {
namespace {
-void CheckStdlibShortcuts(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
- size_t& index, int& depth, TypeCache& cache) {
- // var exp = stdlib.*; (D * 12)
- CHECK_VAR_SHORTCUT(Infinity, Bounds(cache.kFloat64));
- CHECK_VAR_SHORTCUT(NaN, Bounds(cache.kFloat64));
- // var x = stdlib.Math.x; D2D
+void CheckStdlibShortcuts1(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
+ size_t& index, int& depth, TypeCache& cache) {
+ // var exp = stdlib.*;
+ CHECK_VAR_SHORTCUT(Infinity, Bounds(cache.kAsmDouble));
+ CHECK_VAR_SHORTCUT(NaN, Bounds(cache.kAsmDouble));
+ // var x = stdlib.Math.x;
CHECK_VAR_MATH_SHORTCUT(acos, FUNC_D2D_TYPE);
CHECK_VAR_MATH_SHORTCUT(asin, FUNC_D2D_TYPE);
CHECK_VAR_MATH_SHORTCUT(atan, FUNC_D2D_TYPE);
@@ -408,27 +460,34 @@ void CheckStdlibShortcuts(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
CHECK_VAR_MATH_SHORTCUT(tan, FUNC_D2D_TYPE);
CHECK_VAR_MATH_SHORTCUT(exp, FUNC_D2D_TYPE);
CHECK_VAR_MATH_SHORTCUT(log, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(ceil, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(floor, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(sqrt, FUNC_D2D_TYPE);
- // var exp = stdlib.Math.*; (DD2D * 12)
- CHECK_VAR_MATH_SHORTCUT(min, FUNC_DD2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(max, FUNC_DD2D_TYPE);
+
+ CHECK_VAR_MATH_SHORTCUT(ceil, FUNC_N2N_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(floor, FUNC_N2N_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(sqrt, FUNC_N2N_TYPE);
+
+ CHECK_VAR_MATH_SHORTCUT(min, FUNC_NN2N_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(max, FUNC_NN2N_TYPE);
+
CHECK_VAR_MATH_SHORTCUT(atan2, FUNC_DD2D_TYPE);
CHECK_VAR_MATH_SHORTCUT(pow, FUNC_DD2D_TYPE);
- // Special ones.
+
CHECK_VAR_MATH_SHORTCUT(abs, FUNC_N2N_TYPE);
CHECK_VAR_MATH_SHORTCUT(imul, FUNC_II2I_TYPE);
CHECK_VAR_MATH_SHORTCUT(fround, FUNC_N2F_TYPE);
+}
+
+
+void CheckStdlibShortcuts2(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
+ size_t& index, int& depth, TypeCache& cache) {
// var exp = stdlib.Math.*; (D * 12)
- CHECK_VAR_MATH_SHORTCUT(E, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(LN10, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(LN2, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(LOG2E, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(LOG10E, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(PI, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(SQRT1_2, Bounds(cache.kFloat64));
- CHECK_VAR_MATH_SHORTCUT(SQRT2, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(E, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(LN10, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(LN2, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(LOG2E, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(LOG10E, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(PI, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(SQRT1_2, Bounds(cache.kAsmDouble));
+ CHECK_VAR_MATH_SHORTCUT(SQRT2, Bounds(cache.kAsmDouble));
// var values = new stdlib.*Array(buffer);
CHECK_VAR_NEW_SHORTCUT(u8, Bounds(cache.kUint8Array));
CHECK_VAR_NEW_SHORTCUT(i8, Bounds(cache.kInt8Array));
@@ -457,11 +516,12 @@ void CheckStdlibShortcuts(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
CHECK_TYPES_BEGIN { \
/* Module. */ \
CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
-#define CHECK_FUNC_TYPES_END_1() \
- /* "use asm"; */ \
- CHECK_EXPR(Literal, Bounds(Type::String())); \
- /* stdlib shortcuts. */ \
- CheckStdlibShortcuts(zone, types, index, depth, cache);
+#define CHECK_FUNC_TYPES_END_1() \
+ /* "use asm"; */ \
+ CHECK_EXPR(Literal, Bounds(Type::String(zone))); \
+ /* stdlib shortcuts. */ \
+ CheckStdlibShortcuts1(zone, types, index, depth, cache); \
+ CheckStdlibShortcuts2(zone, types, index, depth, cache);
#define CHECK_FUNC_TYPES_END_2() \
@@ -504,10 +564,10 @@ TEST(ReturnVoid) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
// return undefined;
- CHECK_EXPR(Literal, Bounds(Type::Undefined()));
+ CHECK_EXPR(Literal, Bounds(Type::Undefined(zone)));
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined())) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined(zone))) {
CHECK_VAR(bar, FUNC_V_TYPE);
}
}
@@ -522,7 +582,7 @@ TEST(EmptyBody) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE);
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined())) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined(zone))) {
CHECK_VAR(bar, FUNC_V_TYPE);
}
}
@@ -536,13 +596,13 @@ TEST(DoesNothing) {
"function bar() { var x = 1.0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
- CHECK_VAR(x, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined())) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined(zone))) {
CHECK_VAR(bar, FUNC_V_TYPE);
}
}
@@ -557,10 +617,12 @@ TEST(ReturnInt32Literal) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
// return 1;
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kInt32)) { CHECK_VAR(bar, FUNC_I_TYPE); }
+ CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(bar, FUNC_I_TYPE);
+ }
}
}
CHECK_FUNC_TYPES_END
@@ -573,10 +635,12 @@ TEST(ReturnFloat64Literal) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
// return 1.0;
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kFloat64)) { CHECK_VAR(bar, FUNC_D_TYPE); }
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(bar, FUNC_D_TYPE);
+ }
}
}
CHECK_FUNC_TYPES_END
@@ -589,13 +653,13 @@ TEST(ReturnFloat32Literal) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
// return fround(1.0);
- CHECK_EXPR(Call, Bounds(cache.kFloat32)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kFloat32)) { CHECK_VAR(bar, FUNC_F_TYPE); }
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) { CHECK_VAR(bar, FUNC_F_TYPE); }
}
}
CHECK_FUNC_TYPES_END
@@ -608,18 +672,20 @@ TEST(ReturnFloat64Var) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
// return 1.0;
- CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
- CHECK_VAR(x, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
// return 1.0;
- CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
- CHECK_VAR(x, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kFloat64)) { CHECK_VAR(bar, FUNC_D_TYPE); }
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(bar, FUNC_D_TYPE);
+ }
}
}
CHECK_FUNC_TYPES_END
@@ -631,20 +697,259 @@ TEST(Addition2) {
"function bar() { var x = 1; var y = 2; return (x+y)|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+#define TEST_COMPARE_OP(name, op) \
+ TEST(name) { \
+ CHECK_FUNC_TYPES_BEGIN("function bar() { return (0 " op \
+ " 0)|0; }\n" \
+ "function foo() { bar(); }") { \
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) { \
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) { \
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) { \
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
+ } \
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
+ } \
+ } \
+ CHECK_SKIP(); \
+ } \
+ CHECK_FUNC_TYPES_END \
+ }
+
+
+TEST_COMPARE_OP(EqOperator, "==")
+TEST_COMPARE_OP(LtOperator, "<")
+TEST_COMPARE_OP(LteOperator, "<=")
+TEST_COMPARE_OP(GtOperator, ">")
+TEST_COMPARE_OP(GteOperator, ">=")
+
+
+TEST(NeqOperator) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { return (0 != 0)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(UnaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(NotOperator) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0; return (!x)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(UnaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(InvertOperator) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0; return (~x)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(InvertConversion) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0.0; return (~~x)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(Ternary) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 1; return (x?y:5)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Conditional, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+#define TEST_INT_BIN_OP(name, op) \
+ TEST(name) { \
+ CHECK_FUNC_TYPES_BEGIN("function bar() { var x = 0; return (x " op \
+ " 123)|0; }\n" \
+ "function foo() { bar(); }") { \
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) { \
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) { \
+ CHECK_VAR(x, Bounds(cache.kAsmInt)); \
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
+ } \
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) { \
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) { \
+ CHECK_VAR(x, Bounds(cache.kAsmInt)); \
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
+ } \
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
+ } \
+ } \
+ CHECK_SKIP(); \
+ } \
+ CHECK_FUNC_TYPES_END \
+ }
+
+
+TEST_INT_BIN_OP(AndOperator, "&")
+TEST_INT_BIN_OP(OrOperator, "|")
+TEST_INT_BIN_OP(XorOperator, "^")
+
+
+TEST(SignedCompare) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 1; return ((x|0) < (y|0))|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(SignedCompareConst) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 1; return ((x|0) < (1<<31))|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_SKIP();
@@ -658,26 +963,87 @@ TEST(UnsignedCompare) {
"function bar() { var x = 1; var y = 1; return ((x>>>0) < (y>>>0))|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kUint32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(UnsignedCompareConst0) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 1; return ((x>>>0) < (0>>>0))|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kUint32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(UnsignedCompareConst1) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 1; return ((x>>>0) < "
+ "(0xffffffff>>>0))|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
+ CHECK_EXPR(Literal, Bounds(cache.kAsmUnsigned));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_SKIP();
@@ -691,26 +1057,26 @@ TEST(UnsignedDivide) {
"function bar() { var x = 1; var y = 1; return ((x>>>0) / (y>>>0))|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(Type::None(), Type::Any())) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kUint32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(Type::None(zone), Type::Any(zone))) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kUint32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_SKIP();
@@ -723,7 +1089,15 @@ TEST(UnsignedFromFloat64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1.0; return (x>>>0)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill typed bitwise operation\n");
+ "asm: line 39: left bitwise operand expected to be an integer\n");
+}
+
+
+TEST(AndFloat64) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1.0; return (x&0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: left bitwise operand expected to be an integer\n");
}
@@ -771,7 +1145,31 @@ TEST(TernaryMismatchInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 0.0; return (1 ? x : y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed conditional\n");
+ "asm: line 39: then and else expressions in ? must have the same type\n");
+}
+
+
+TEST(TernaryMismatchIntish) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; var y = 0; return (1 ? x + x : y)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: invalid type in ? then expression\n");
+}
+
+
+TEST(TernaryMismatchInt32Float32) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; var y = 2; return (x?fround(y):x)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: then and else expressions in ? must have the same type\n");
+}
+
+
+TEST(TernaryBadCondition) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; var y = 2.0; return (y?x:1)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: condition must be of type int\n");
}
@@ -780,13 +1178,13 @@ TEST(FroundFloat32) {
"function bar() { var x = 1; return fround(x); }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Call, Bounds(cache.kFloat32)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
}
}
CHECK_SKIP();
@@ -800,26 +1198,26 @@ TEST(Addition4) {
"function bar() { var x = 1; var y = 2; return (x+y+x+y)|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
}
- CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
}
- CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_SKIP();
@@ -844,19 +1242,35 @@ TEST(Division4) {
}
+TEST(CompareToStringLeft) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; return ('hi' > x)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: bad type on left side of comparison\n");
+}
+
+
+TEST(CompareToStringRight) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; return (x < 'hi')|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: bad type on right side of comparison\n");
+}
+
+
TEST(CompareMismatchInt32Float64) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2.0; return (x < y)|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed comparison operation\n");
+ "asm: line 39: left and right side of comparison must match\n");
}
TEST(CompareMismatchInt32Uint32) {
CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2; return (x < (y>>>0))|0; }\n"
+ "function bar() { var x = 1; var y = 2; return ((x|0) < (y>>>0))|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed comparison operation\n");
+ "asm: line 39: left and right side of comparison must match\n");
}
@@ -864,7 +1278,7 @@ TEST(CompareMismatchInt32Float32) {
CHECK_FUNC_ERROR(
"function bar() { var x = 1; var y = 2; return (x < fround(y))|0; }\n"
"function foo() { bar(); }",
- "asm: line 39: ill-typed comparison operation\n");
+ "asm: line 39: left and right side of comparison must match\n");
}
@@ -873,22 +1287,22 @@ TEST(Float64ToInt32) {
"function bar() { var x = 1; var y = 0.0; x = ~~y; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
- CHECK_VAR(y, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kFloat64));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kFloat64));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(y, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(y, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
}
}
}
@@ -903,21 +1317,116 @@ TEST(Load1) {
"function bar() { var x = 1; var y = i8[x>>0]|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(Property, Bounds(cache.kInt8)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Property, Bounds(cache.kAsmInt)) {
CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(LoadDouble) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 0.0; y = +f64[x>>3]; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(y, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(y, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Property, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(f64, Bounds(cache.kFloat64Array));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(Store1) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; i8[x>>0] = 0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(i8, Bounds(cache.kInt8Array));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(StoreFloat) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = fround(1.0); "
+ "f32[0] = fround(x + fround(1.0)); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(f32, Bounds(cache.kFloat32Array));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
}
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
}
}
}
@@ -932,18 +1441,18 @@ TEST(Load1Constant) {
"function bar() { var x = 1; var y = i8[5]|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(Property, Bounds(cache.kInt8)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Property, Bounds(cache.kAsmInt)) {
CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
}
@@ -962,64 +1471,64 @@ TEST(FunctionTables) {
" return table1[x & 1](y)|0; }\n"
"function foo() { bar(1, 2); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_EXPR(FunctionLiteral, FUNC_II2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(y, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_EXPR(Call, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
CHECK_EXPR(Property, FUNC_I2I_TYPE) {
CHECK_VAR(table1, FUNC_I2I_ARRAY_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
- CHECK_VAR(x, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_SKIP();
@@ -1075,7 +1584,7 @@ TEST(InvalidArgumentCount) {
CHECK_FUNC_ERROR(
"function bar(x) { return fround(4, 5); }\n"
"function foo() { bar(); }",
- "asm: line 39: invalid argument count calling fround\n");
+ "asm: line 39: invalid argument count calling function\n");
}
@@ -1127,29 +1636,112 @@ TEST(UnboundVariable) {
}
+TEST(EqStrict) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return (0 === 0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal comparison operator\n");
+}
+
+
+TEST(NeStrict) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return (0 !== 0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal comparison operator\n");
+}
+
+
+TEST(InstanceOf) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return (0 instanceof 0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal comparison operator\n");
+}
+
+
+TEST(InOperator) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return (0 in 0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal comparison operator\n");
+}
+
+
+TEST(LogicalAndOperator) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return (0 && 0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal logical operator\n");
+}
+
+
+TEST(LogicalOrOperator) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return (0 || 0)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal logical operator\n");
+}
+
+
+TEST(BadLiteral) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return true | 0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal literal\n");
+}
+
+
+TEST(MismatchedReturnTypeLiteral) {
+ CHECK_FUNC_ERROR(
+ "function bar() { if(1) { return 1; } return 1.0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: return type does not match function signature\n");
+}
+
+
+TEST(MismatchedReturnTypeExpression) {
+ CHECK_FUNC_ERROR(
+ "function bar() {\n"
+ " var x = 1; var y = 1.0; if(1) { return x; } return +y; }\n"
+ "function foo() { bar(); }",
+ "asm: line 40: return type does not match function signature\n");
+}
+
+
+TEST(AssignToFloatishToF64) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var v = fround(1.0); f32[0] = v + fround(1.0); }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: intish or floatish assignment\n");
+}
+
+
TEST(ForeignFunction) {
CHECK_FUNC_TYPES_BEGIN(
"var baz = foreign.baz;\n"
"function bar() { return baz(1, 2)|0; }\n"
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
CHECK_EXPR(Call, Bounds(Type::Number(zone))) {
- CHECK_VAR(baz, Bounds(Type::Any()));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_VAR(baz, Bounds(Type::Any(zone)));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
- CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kInt32)) { CHECK_VAR(bar, FUNC_I_TYPE); }
+ CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(bar, FUNC_I_TYPE);
+ }
}
}
CHECK_FUNC_TYPES_END_1()
- CHECK_EXPR(Assignment, Bounds(Type::Any())) {
- CHECK_VAR(baz, Bounds(Type::Any()));
- CHECK_EXPR(Property, Bounds(Type::Any())) {
+ CHECK_EXPR(Assignment, Bounds(Type::Any(zone))) {
+ CHECK_VAR(baz, Bounds(Type::Any(zone)));
+ CHECK_EXPR(Property, Bounds(Type::Any(zone))) {
CHECK_VAR(foreign, Bounds::Unbounded());
CHECK_EXPR(Literal, Bounds::Unbounded());
}
@@ -1171,3 +1763,288 @@ TEST(BadExports) {
CHECK_EQ("asm: line 40: non-function in function table\n",
Validate(zone, test_function, &types));
}
+
+
+TEST(NestedHeapAssignment) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 0; i8[x = 1] = 2; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: expected >> in heap access\n");
+}
+
+
+TEST(BadArrayAssignment) {
+ CHECK_FUNC_ERROR(
+ "function bar() { i8[0] = 0.0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: illegal type in assignment\n");
+}
+
+
+TEST(BadStandardFunctionCallOutside) {
+ CHECK_FUNC_ERROR(
+ "var s0 = sin(0);\n"
+ "function bar() { }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: calls forbidden outside function bodies\n");
+}
+
+
+TEST(BadFunctionCallOutside) {
+ CHECK_FUNC_ERROR(
+ "function bar() { return 0.0; }\n"
+ "var s0 = bar(0);\n"
+ "function foo() { bar(); }",
+ "asm: line 40: calls forbidden outside function bodies\n");
+}
+
+
+TEST(NestedVariableAssignment) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0; x = x = 4; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(NestedAssignmentInHeap) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0; i8[(x = 1) >> 0] = 2; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(i8, Bounds(cache.kInt8Array));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(NegativeDouble) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = -123.2; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(NegativeInteger) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = -123; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(AbsFunction) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = -123.0; x = abs(x); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(abs, FUNC_N2N_TYPE);
+ CHECK_VAR(x, Bounds(cache.kAsmDouble));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(CeilFloat) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = fround(3.1); x = ceil(x); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(ceil, FUNC_N2N_TYPE);
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(TypeConsistency) {
+ v8::V8::Initialize();
+ TypeCache cache;
+ // Check the consistency of each of the main Asm.js types.
+ CHECK(cache.kAsmFixnum->Is(cache.kAsmFixnum));
+ CHECK(cache.kAsmFixnum->Is(cache.kAsmSigned));
+ CHECK(cache.kAsmFixnum->Is(cache.kAsmUnsigned));
+ CHECK(cache.kAsmFixnum->Is(cache.kAsmInt));
+ CHECK(!cache.kAsmFixnum->Is(cache.kAsmFloat));
+ CHECK(!cache.kAsmFixnum->Is(cache.kAsmDouble));
+
+ CHECK(cache.kAsmSigned->Is(cache.kAsmSigned));
+ CHECK(cache.kAsmSigned->Is(cache.kAsmInt));
+ CHECK(!cache.kAsmSigned->Is(cache.kAsmFixnum));
+ CHECK(!cache.kAsmSigned->Is(cache.kAsmUnsigned));
+ CHECK(!cache.kAsmSigned->Is(cache.kAsmFloat));
+ CHECK(!cache.kAsmSigned->Is(cache.kAsmDouble));
+
+ CHECK(cache.kAsmUnsigned->Is(cache.kAsmUnsigned));
+ CHECK(cache.kAsmUnsigned->Is(cache.kAsmInt));
+ CHECK(!cache.kAsmUnsigned->Is(cache.kAsmSigned));
+ CHECK(!cache.kAsmUnsigned->Is(cache.kAsmFixnum));
+ CHECK(!cache.kAsmUnsigned->Is(cache.kAsmFloat));
+ CHECK(!cache.kAsmUnsigned->Is(cache.kAsmDouble));
+
+ CHECK(cache.kAsmInt->Is(cache.kAsmInt));
+ CHECK(!cache.kAsmInt->Is(cache.kAsmUnsigned));
+ CHECK(!cache.kAsmInt->Is(cache.kAsmSigned));
+ CHECK(!cache.kAsmInt->Is(cache.kAsmFixnum));
+ CHECK(!cache.kAsmInt->Is(cache.kAsmFloat));
+ CHECK(!cache.kAsmInt->Is(cache.kAsmDouble));
+
+ CHECK(cache.kAsmFloat->Is(cache.kAsmFloat));
+ CHECK(!cache.kAsmFloat->Is(cache.kAsmInt));
+ CHECK(!cache.kAsmFloat->Is(cache.kAsmUnsigned));
+ CHECK(!cache.kAsmFloat->Is(cache.kAsmSigned));
+ CHECK(!cache.kAsmFloat->Is(cache.kAsmFixnum));
+ CHECK(!cache.kAsmFloat->Is(cache.kAsmDouble));
+
+ CHECK(cache.kAsmDouble->Is(cache.kAsmDouble));
+ CHECK(!cache.kAsmDouble->Is(cache.kAsmInt));
+ CHECK(!cache.kAsmDouble->Is(cache.kAsmUnsigned));
+ CHECK(!cache.kAsmDouble->Is(cache.kAsmSigned));
+ CHECK(!cache.kAsmDouble->Is(cache.kAsmFixnum));
+ CHECK(!cache.kAsmDouble->Is(cache.kAsmFloat));
+}
+
+
+TEST(SwitchTest) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function switcher(x) {\n"
+ " x = x|0;\n"
+ " switch (x|0) {\n"
+ " case 1: return 23;\n"
+ " case 2: return 43;\n"
+ " default: return 66;\n"
+ " }\n"
+ " return 0;\n"
+ "}\n"
+ "function foo() { switcher(1); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(.switch_tag, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(Type::Undefined(zone)));
+ CHECK_VAR(.switch_tag, Bounds(cache.kAsmSigned));
+ // case 1: return 23;
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ // case 2: return 43;
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ // default: return 66;
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ // return 0;
+ CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(BadSwitchRange) {
+ CHECK_FUNC_ERROR(
+ "function bar() { switch (1) { case -1: case 0x7fffffff: } }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: case range too large\n");
+}
+
+
+TEST(DuplicateSwitchCase) {
+ CHECK_FUNC_ERROR(
+ "function bar() { switch (1) { case 0: case 0: } }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: duplicate case value\n");
+}
+
+
+TEST(BadSwitchOrder) {
+ CHECK_FUNC_ERROR(
+ "function bar() { switch (1) { default: case 0: } }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: default case out of order\n");
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 3c91d3a7fa..563b050c48 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -69,7 +69,8 @@ TEST(0) {
code->Print(os);
#endif
F2 f = FUNCTION_CAST<F2>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(7, res);
}
@@ -105,7 +106,8 @@ TEST(1) {
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(5050, res);
}
@@ -150,7 +152,8 @@ TEST(2) {
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(3628800, res);
}
@@ -200,7 +203,8 @@ TEST(3) {
t.i = 100000;
t.c = 10;
t.s = 1000;
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(101010, res);
CHECK_EQ(100000/2, t.i);
@@ -335,7 +339,7 @@ TEST(4) {
t.n = 123.456;
t.x = 4.5;
t.y = 9.0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(-123.456, t.n);
CHECK_EQ(2718.2818, t.m);
@@ -383,7 +387,7 @@ TEST(5) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, 0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
}
@@ -417,7 +421,7 @@ TEST(6) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
}
@@ -490,8 +494,8 @@ static void TestRoundingMode(VCVTTypes types,
code->Print(os);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
::printf("res = %d\n", res);
CHECK_EQ(expected, res);
}
@@ -692,7 +696,7 @@ TEST(8) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
@@ -802,7 +806,7 @@ TEST(9) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
@@ -908,7 +912,7 @@ TEST(10) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
@@ -985,7 +989,7 @@ TEST(11) {
code->Print(os);
#endif
F3 f = FUNCTION_CAST<F3>(code->entry());
- Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &i, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0xabcd0001), i.a);
@@ -1119,7 +1123,7 @@ TEST(13) {
t.x = 1.5;
t.y = 2.75;
t.z = 17.17;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(14.7610017472335499, t.a);
CHECK_EQ(3.84200491244266251, t.b);
@@ -1192,14 +1196,14 @@ TEST(14) {
t.sub_result = 0;
t.mul_result = 0;
t.div_result = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
const uint32_t kArmNanUpper32 = 0x7ff80000;
const uint32_t kArmNanLower32 = 0x00000000;
#ifdef DEBUG
const uint64_t kArmNanInt64 =
(static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
- DCHECK(kArmNanInt64 != kHoleNanInt64);
+ CHECK(kArmNanInt64 != kHoleNanInt64);
#endif
// With VFP2 the sign of the canonicalized Nan is undefined. So
// we remove the sign bit for the upper tests.
@@ -1320,7 +1324,7 @@ TEST(15) {
t.dstA5 = 0;
t.dstA6 = 0;
t.dstA7 = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x01020304u, t.dst0);
CHECK_EQ(0x11121314u, t.dst1);
@@ -1405,7 +1409,7 @@ TEST(16) {
t.dst2 = 0;
t.dst3 = 0;
t.dst4 = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0x12130304u, t.dst0);
CHECK_EQ(0x01021213u, t.dst1);
@@ -1435,12 +1439,12 @@ TEST(17) {
}
-#define TEST_SDIV(expected_, dividend_, divisor_) \
- t.dividend = dividend_; \
- t.divisor = divisor_; \
- t.result = 0; \
- dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); \
- CHECK_EQ(expected_, t.result);
+#define TEST_SDIV(expected_, dividend_, divisor_) \
+ t.dividend = dividend_; \
+ t.divisor = divisor_; \
+ t.result = 0; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+ CHECK_EQ(expected_, t.result);
TEST(sdiv) {
@@ -1499,11 +1503,11 @@ TEST(sdiv) {
#undef TEST_SDIV
-#define TEST_UDIV(expected_, dividend_, divisor_) \
- t.dividend = dividend_; \
- t.divisor = divisor_; \
- t.result = 0; \
- dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); \
+#define TEST_UDIV(expected_, dividend_, divisor_) \
+ t.dividend = dividend_; \
+ t.divisor = divisor_; \
+ t.result = 0; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
CHECK_EQ(expected_, t.result);
@@ -1574,7 +1578,7 @@ TEST(smmla) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, z, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, z, 0);
CHECK_EQ(bits::SignedMulHighAndAdd32(x, y, z), r);
USE(dummy);
}
@@ -1600,7 +1604,7 @@ TEST(smmul) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
CHECK_EQ(bits::SignedMulHigh32(x, y), r);
USE(dummy);
}
@@ -1626,7 +1630,7 @@ TEST(sxtb) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)), r);
USE(dummy);
}
@@ -1652,7 +1656,7 @@ TEST(sxtab) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)) + y, r);
USE(dummy);
}
@@ -1678,7 +1682,7 @@ TEST(sxth) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)), r);
USE(dummy);
}
@@ -1704,7 +1708,7 @@ TEST(sxtah) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)) + y, r);
USE(dummy);
}
@@ -1730,7 +1734,7 @@ TEST(uxtb) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)), r);
USE(dummy);
}
@@ -1756,7 +1760,7 @@ TEST(uxtab) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)) + y, r);
USE(dummy);
}
@@ -1782,7 +1786,7 @@ TEST(uxth) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, 0, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)), r);
USE(dummy);
}
@@ -1808,7 +1812,7 @@ TEST(uxtah) {
F3 f = FUNCTION_CAST<F3>(code->entry());
for (size_t i = 0; i < 128; ++i) {
int32_t r, x = rng->NextInt(), y = rng->NextInt();
- Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &r, x, y, 0, 0);
CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)) + y, r);
USE(dummy);
}
@@ -1880,12 +1884,118 @@ TEST(code_relative_offset) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), code_object);
F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 21, 0, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, 21, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(42, res);
}
+TEST(ARMv8_float32_vrintX) {
+ // Test the vrintX floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ float input;
+ float ar;
+ float nr;
+ float mr;
+ float pr;
+ float zr;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the floats.
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
+
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+
+ __ mov(r4, Operand(r0));
+
+ // Test vrinta
+ __ vldr(s6, r4, offsetof(T, input));
+ __ vrinta(s5, s6);
+ __ vstr(s5, r4, offsetof(T, ar));
+
+ // Test vrintn
+ __ vldr(s6, r4, offsetof(T, input));
+ __ vrintn(s5, s6);
+ __ vstr(s5, r4, offsetof(T, nr));
+
+ // Test vrintp
+ __ vldr(s6, r4, offsetof(T, input));
+ __ vrintp(s5, s6);
+ __ vstr(s5, r4, offsetof(T, pr));
+
+ // Test vrintm
+ __ vldr(s6, r4, offsetof(T, input));
+ __ vrintm(s5, s6);
+ __ vstr(s5, r4, offsetof(T, mr));
+
+ // Test vrintz
+ __ vldr(s6, r4, offsetof(T, input));
+ __ vrintz(s5, s6);
+ __ vstr(s5, r4, offsetof(T, zr));
+
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
+ t.input = input_val; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+ CHECK_EQ(ares, t.ar); \
+ CHECK_EQ(nres, t.nr); \
+ CHECK_EQ(mres, t.mr); \
+ CHECK_EQ(pres, t.pr); \
+ CHECK_EQ(zres, t.zr);
+
+ CHECK_VRINT(-0.5, -1.0, -0.0, -1.0, -0.0, -0.0)
+ CHECK_VRINT(-0.6, -1.0, -1.0, -1.0, -0.0, -0.0)
+ CHECK_VRINT(-1.1, -1.0, -1.0, -2.0, -1.0, -1.0)
+ CHECK_VRINT(0.5, 1.0, 0.0, 0.0, 1.0, 0.0)
+ CHECK_VRINT(0.6, 1.0, 1.0, 0.0, 1.0, 0.0)
+ CHECK_VRINT(1.1, 1.0, 1.0, 1.0, 2.0, 1.0)
+ float inf = std::numeric_limits<float>::infinity();
+ CHECK_VRINT(inf, inf, inf, inf, inf, inf)
+ CHECK_VRINT(-inf, -inf, -inf, -inf, -inf, -inf)
+ CHECK_VRINT(-0.0, -0.0, -0.0, -0.0, -0.0, -0.0)
+
+ // Check NaN propagation.
+ float nan = std::numeric_limits<float>::quiet_NaN();
+ t.input = nan;
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.ar));
+ CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.nr));
+ CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.mr));
+ CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.pr));
+ CHECK_EQ(bit_cast<int32_t>(nan), bit_cast<int32_t>(t.zr));
+
+#undef CHECK_VRINT
+ }
+}
+
+
TEST(ARMv8_vrintX) {
// Test the vrintX floating point instructions.
CcTest::InitializeVM();
@@ -1958,7 +2068,7 @@ TEST(ARMv8_vrintX) {
#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
t.input = input_val; \
- dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); \
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
CHECK_EQ(ares, t.ar); \
CHECK_EQ(nres, t.nr); \
CHECK_EQ(mres, t.mr); \
@@ -1979,7 +2089,7 @@ TEST(ARMv8_vrintX) {
// Check NaN propagation.
double nan = std::numeric_limits<double>::quiet_NaN();
t.input = nan;
- dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.ar));
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.nr));
CHECK_EQ(bit_cast<int64_t>(nan), bit_cast<int64_t>(t.mr));
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 7450a4375f..d930173937 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -114,16 +114,17 @@ static void InitializeVM() {
#ifdef USE_SIMULATOR
// Run tests with the simulator.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = Isolate::Current(); \
- HandleScope scope(isolate); \
- DCHECK(isolate != NULL); \
- byte* buf = new byte[buf_size]; \
- MacroAssembler masm(isolate, buf, buf_size); \
- Decoder<DispatchingDecoderVisitor>* decoder = \
- new Decoder<DispatchingDecoderVisitor>(); \
- Simulator simulator(decoder); \
- PrintDisassembler* pdis = NULL; \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size, \
+ v8::internal::CodeObjectRequired::kYes); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ Simulator simulator(decoder); \
+ PrintDisassembler* pdis = NULL; \
RegisterDump core;
/* if (Cctest::trace_sim()) { \
@@ -168,12 +169,13 @@ static void InitializeVM() {
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = Isolate::Current(); \
- HandleScope scope(isolate); \
- DCHECK(isolate != NULL); \
- byte* buf = new byte[buf_size]; \
- MacroAssembler masm(isolate, buf, buf_size); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK(isolate != NULL); \
+ byte* buf = new byte[buf_size]; \
+ MacroAssembler masm(isolate, buf, buf_size, \
+ v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
#define RESET() \
@@ -229,11 +231,10 @@ static void InitializeVM() {
CHECK(EqualFP64(expected, &core, result))
#ifdef DEBUG
-#define DCHECK_LITERAL_POOL_SIZE(expected) \
+#define CHECK_LITERAL_POOL_SIZE(expected) \
CHECK((expected) == (__ LiteralPoolSize()))
#else
-#define DCHECK_LITERAL_POOL_SIZE(expected) \
- ((void) 0)
+#define CHECK_LITERAL_POOL_SIZE(expected) ((void)0)
#endif
@@ -3299,7 +3300,7 @@ TEST(ldr_literal) {
static void LdrLiteralRangeHelper(ptrdiff_t range_,
LiteralPoolEmitOption option,
bool expect_dump) {
- DCHECK(range_ > 0);
+ CHECK(range_ > 0);
SETUP_SIZE(range_ + 1024);
Label label_1, label_2;
@@ -3318,19 +3319,19 @@ static void LdrLiteralRangeHelper(ptrdiff_t range_,
START();
// Force a pool dump so the pool starts off empty.
__ EmitLiteralPool(JumpRequired);
- DCHECK_LITERAL_POOL_SIZE(0);
+ CHECK_LITERAL_POOL_SIZE(0);
__ Ldr(x0, 0x1234567890abcdefUL);
__ Ldr(w1, 0xfedcba09);
__ Ldr(d0, 1.234);
__ Ldr(s1, 2.5);
- DCHECK_LITERAL_POOL_SIZE(4);
+ CHECK_LITERAL_POOL_SIZE(4);
code_size += 4 * sizeof(Instr);
// Check that the requested range (allowing space for a branch over the pool)
// can be handled by this test.
- DCHECK((code_size + pool_guard_size) <= range);
+ CHECK((code_size + pool_guard_size) <= range);
// Emit NOPs up to 'range', leaving space for the pool guard.
while ((code_size + pool_guard_size) < range) {
@@ -3344,28 +3345,28 @@ static void LdrLiteralRangeHelper(ptrdiff_t range_,
code_size += sizeof(Instr);
}
- DCHECK(code_size == range);
- DCHECK_LITERAL_POOL_SIZE(4);
+ CHECK(code_size == range);
+ CHECK_LITERAL_POOL_SIZE(4);
// Possibly generate a literal pool.
__ CheckLiteralPool(option);
__ Bind(&label_1);
if (expect_dump) {
- DCHECK_LITERAL_POOL_SIZE(0);
+ CHECK_LITERAL_POOL_SIZE(0);
} else {
- DCHECK_LITERAL_POOL_SIZE(4);
+ CHECK_LITERAL_POOL_SIZE(4);
}
// Force a pool flush to check that a second pool functions correctly.
__ EmitLiteralPool(JumpRequired);
- DCHECK_LITERAL_POOL_SIZE(0);
+ CHECK_LITERAL_POOL_SIZE(0);
// These loads should be after the pool (and will require a new one).
__ Ldr(x4, 0x34567890abcdef12UL);
__ Ldr(w5, 0xdcba09fe);
__ Ldr(d4, 123.4);
__ Ldr(s5, 250.0);
- DCHECK_LITERAL_POOL_SIZE(4);
+ CHECK_LITERAL_POOL_SIZE(4);
END();
RUN();
@@ -5443,12 +5444,12 @@ TEST(fmadd_fmsub_double_nans) {
double q1 = rawbits_to_double(0x7ffaaaaa11111111);
double q2 = rawbits_to_double(0x7ffaaaaa22222222);
double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
- DCHECK(IsSignallingNaN(s1));
- DCHECK(IsSignallingNaN(s2));
- DCHECK(IsSignallingNaN(sa));
- DCHECK(IsQuietNaN(q1));
- DCHECK(IsQuietNaN(q2));
- DCHECK(IsQuietNaN(qa));
+ CHECK(IsSignallingNaN(s1));
+ CHECK(IsSignallingNaN(s2));
+ CHECK(IsSignallingNaN(sa));
+ CHECK(IsQuietNaN(q1));
+ CHECK(IsQuietNaN(q2));
+ CHECK(IsQuietNaN(qa));
// The input NaNs after passing through ProcessNaN.
double s1_proc = rawbits_to_double(0x7ffd555511111111);
@@ -5457,22 +5458,22 @@ TEST(fmadd_fmsub_double_nans) {
double q1_proc = q1;
double q2_proc = q2;
double qa_proc = qa;
- DCHECK(IsQuietNaN(s1_proc));
- DCHECK(IsQuietNaN(s2_proc));
- DCHECK(IsQuietNaN(sa_proc));
- DCHECK(IsQuietNaN(q1_proc));
- DCHECK(IsQuietNaN(q2_proc));
- DCHECK(IsQuietNaN(qa_proc));
+ CHECK(IsQuietNaN(s1_proc));
+ CHECK(IsQuietNaN(s2_proc));
+ CHECK(IsQuietNaN(sa_proc));
+ CHECK(IsQuietNaN(q1_proc));
+ CHECK(IsQuietNaN(q2_proc));
+ CHECK(IsQuietNaN(qa_proc));
// Negated NaNs as it would be done on ARMv8 hardware.
double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
- DCHECK(IsQuietNaN(s1_proc_neg));
- DCHECK(IsQuietNaN(sa_proc_neg));
- DCHECK(IsQuietNaN(q1_proc_neg));
- DCHECK(IsQuietNaN(qa_proc_neg));
+ CHECK(IsQuietNaN(s1_proc_neg));
+ CHECK(IsQuietNaN(sa_proc_neg));
+ CHECK(IsQuietNaN(q1_proc_neg));
+ CHECK(IsQuietNaN(qa_proc_neg));
// Quiet NaNs are propagated.
FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
@@ -5526,12 +5527,12 @@ TEST(fmadd_fmsub_float_nans) {
float q1 = rawbits_to_float(0x7fea1111);
float q2 = rawbits_to_float(0x7fea2222);
float qa = rawbits_to_float(0x7feaaaaa);
- DCHECK(IsSignallingNaN(s1));
- DCHECK(IsSignallingNaN(s2));
- DCHECK(IsSignallingNaN(sa));
- DCHECK(IsQuietNaN(q1));
- DCHECK(IsQuietNaN(q2));
- DCHECK(IsQuietNaN(qa));
+ CHECK(IsSignallingNaN(s1));
+ CHECK(IsSignallingNaN(s2));
+ CHECK(IsSignallingNaN(sa));
+ CHECK(IsQuietNaN(q1));
+ CHECK(IsQuietNaN(q2));
+ CHECK(IsQuietNaN(qa));
// The input NaNs after passing through ProcessNaN.
float s1_proc = rawbits_to_float(0x7fd51111);
@@ -5540,22 +5541,22 @@ TEST(fmadd_fmsub_float_nans) {
float q1_proc = q1;
float q2_proc = q2;
float qa_proc = qa;
- DCHECK(IsQuietNaN(s1_proc));
- DCHECK(IsQuietNaN(s2_proc));
- DCHECK(IsQuietNaN(sa_proc));
- DCHECK(IsQuietNaN(q1_proc));
- DCHECK(IsQuietNaN(q2_proc));
- DCHECK(IsQuietNaN(qa_proc));
+ CHECK(IsQuietNaN(s1_proc));
+ CHECK(IsQuietNaN(s2_proc));
+ CHECK(IsQuietNaN(sa_proc));
+ CHECK(IsQuietNaN(q1_proc));
+ CHECK(IsQuietNaN(q2_proc));
+ CHECK(IsQuietNaN(qa_proc));
// Negated NaNs as it would be done on ARMv8 hardware.
float s1_proc_neg = rawbits_to_float(0xffd51111);
float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
float q1_proc_neg = rawbits_to_float(0xffea1111);
float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
- DCHECK(IsQuietNaN(s1_proc_neg));
- DCHECK(IsQuietNaN(sa_proc_neg));
- DCHECK(IsQuietNaN(q1_proc_neg));
- DCHECK(IsQuietNaN(qa_proc_neg));
+ CHECK(IsQuietNaN(s1_proc_neg));
+ CHECK(IsQuietNaN(sa_proc_neg));
+ CHECK(IsQuietNaN(q1_proc_neg));
+ CHECK(IsQuietNaN(qa_proc_neg));
// Quiet NaNs are propagated.
FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
@@ -5773,10 +5774,10 @@ TEST(fmax_fmin_d) {
double snan_processed = rawbits_to_double(0x7ffd555512345678);
double qnan_processed = qnan;
- DCHECK(IsSignallingNaN(snan));
- DCHECK(IsQuietNaN(qnan));
- DCHECK(IsQuietNaN(snan_processed));
- DCHECK(IsQuietNaN(qnan_processed));
+ CHECK(IsSignallingNaN(snan));
+ CHECK(IsQuietNaN(qnan));
+ CHECK(IsQuietNaN(snan_processed));
+ CHECK(IsQuietNaN(qnan_processed));
// Bootstrap tests.
FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
@@ -5858,10 +5859,10 @@ TEST(fmax_fmin_s) {
float snan_processed = rawbits_to_float(0x7fd51234);
float qnan_processed = qnan;
- DCHECK(IsSignallingNaN(snan));
- DCHECK(IsQuietNaN(qnan));
- DCHECK(IsQuietNaN(snan_processed));
- DCHECK(IsQuietNaN(qnan_processed));
+ CHECK(IsSignallingNaN(snan));
+ CHECK(IsQuietNaN(qnan));
+ CHECK(IsQuietNaN(snan_processed));
+ CHECK(IsQuietNaN(qnan_processed));
// Bootstrap tests.
FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
@@ -6833,8 +6834,8 @@ TEST(fcvt_sd) {
float expected = test[i].expected;
// We only expect positive input.
- DCHECK(std::signbit(in) == 0);
- DCHECK(std::signbit(expected) == 0);
+ CHECK(std::signbit(in) == 0);
+ CHECK(std::signbit(expected) == 0);
SETUP();
START();
@@ -8545,7 +8546,7 @@ TEST(peek_poke_mixed) {
__ Poke(x1, 8);
__ Poke(x0, 0);
{
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(x4, __ StackPointer());
__ SetStackPointer(x4);
@@ -8642,7 +8643,7 @@ static void PushPopJsspSimpleHelper(int reg_count,
uint64_t literal_base = 0x0100001000100101UL;
{
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(jssp, __ StackPointer());
__ SetStackPointer(jssp);
@@ -8671,7 +8672,9 @@ static void PushPopJsspSimpleHelper(int reg_count,
case 3: __ Push(r[2], r[1], r[0]); break;
case 2: __ Push(r[1], r[0]); break;
case 1: __ Push(r[0]); break;
- default: DCHECK(i == 0); break;
+ default:
+ CHECK(i == 0);
+ break;
}
break;
case PushPopRegList:
@@ -8693,7 +8696,9 @@ static void PushPopJsspSimpleHelper(int reg_count,
case 3: __ Pop(r[i], r[i+1], r[i+2]); break;
case 2: __ Pop(r[i], r[i+1]); break;
case 1: __ Pop(r[i]); break;
- default: DCHECK(i == reg_count); break;
+ default:
+ CHECK(i == reg_count);
+ break;
}
break;
case PushPopRegList:
@@ -8824,7 +8829,7 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
uint64_t literal_base = 0x0100001000100101UL;
{
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(jssp, __ StackPointer());
__ SetStackPointer(jssp);
@@ -8855,7 +8860,9 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
case 3: __ Push(v[2], v[1], v[0]); break;
case 2: __ Push(v[1], v[0]); break;
case 1: __ Push(v[0]); break;
- default: DCHECK(i == 0); break;
+ default:
+ CHECK(i == 0);
+ break;
}
break;
case PushPopRegList:
@@ -8877,7 +8884,9 @@ static void PushPopFPJsspSimpleHelper(int reg_count,
case 3: __ Pop(v[i], v[i+1], v[i+2]); break;
case 2: __ Pop(v[i], v[i+1]); break;
case 1: __ Pop(v[i]); break;
- default: DCHECK(i == reg_count); break;
+ default:
+ CHECK(i == reg_count);
+ break;
}
break;
case PushPopRegList:
@@ -9001,7 +9010,7 @@ static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
START();
{
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(jssp, __ StackPointer());
__ SetStackPointer(jssp);
@@ -9106,7 +9115,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
START();
{
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(jssp, __ StackPointer());
__ SetStackPointer(jssp);
@@ -9154,7 +9163,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
int active_w_slots = 0;
for (int i = 0; active_w_slots < requested_w_slots; i++) {
- DCHECK(i < reg_count);
+ CHECK(i < reg_count);
// In order to test various arguments to PushMultipleTimes, and to try to
// exercise different alignment and overlap effects, we push each
// register a different number of times.
@@ -9227,7 +9236,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
}
next_is_64 = !next_is_64;
}
- DCHECK(active_w_slots == 0);
+ CHECK(active_w_slots == 0);
// Drop memory to restore jssp.
__ Drop(claim, kByteSizeInBytes);
@@ -9263,7 +9272,7 @@ static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
CHECK_EQUAL_64(expected, x[i]);
}
}
- DCHECK(slot == requested_w_slots);
+ CHECK(slot == requested_w_slots);
TEARDOWN();
}
@@ -9293,7 +9302,7 @@ TEST(push_pop_csp) {
START();
- DCHECK(csp.Is(__ StackPointer()));
+ CHECK(csp.Is(__ StackPointer()));
__ Mov(x3, 0x3333333333333333UL);
__ Mov(x2, 0x2222222222222222UL);
@@ -9382,7 +9391,7 @@ TEST(push_queued) {
START();
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(jssp, __ StackPointer());
__ SetStackPointer(jssp);
@@ -9457,7 +9466,7 @@ TEST(pop_queued) {
START();
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
__ Mov(jssp, __ StackPointer());
__ SetStackPointer(jssp);
@@ -10078,7 +10087,7 @@ TEST(printf) {
__ Printf("%%%%%s%%%c%%\n", x2, w13);
// Print the stack pointer (csp).
- DCHECK(csp.Is(__ StackPointer()));
+ CHECK(csp.Is(__ StackPointer()));
__ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
__ StackPointer(), __ StackPointer().W());
@@ -10236,75 +10245,6 @@ TEST(printf_no_preserve) {
}
-// This is a V8-specific test.
-static void CopyFieldsHelper(CPURegList temps) {
- static const uint64_t kLiteralBase = 0x0100001000100101UL;
- static const uint64_t src[] = {kLiteralBase * 1,
- kLiteralBase * 2,
- kLiteralBase * 3,
- kLiteralBase * 4,
- kLiteralBase * 5,
- kLiteralBase * 6,
- kLiteralBase * 7,
- kLiteralBase * 8,
- kLiteralBase * 9,
- kLiteralBase * 10,
- kLiteralBase * 11};
- static const uint64_t src_tagged =
- reinterpret_cast<uint64_t>(src) + kHeapObjectTag;
-
- static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
- uint64_t* dst[kTestCount];
- uint64_t dst_tagged[kTestCount];
-
- // The first test will be to copy 0 fields. The destination (and source)
- // should not be accessed in any way.
- dst[0] = NULL;
- dst_tagged[0] = kHeapObjectTag;
-
- // Allocate memory for each other test. Each test <n> will have <n> fields.
- // This is intended to exercise as many paths in CopyFields as possible.
- for (unsigned i = 1; i < kTestCount; i++) {
- dst[i] = new uint64_t[i];
- memset(dst[i], 0, i * sizeof(kLiteralBase));
- dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
- }
-
- SETUP();
- START();
-
- __ Mov(x0, dst_tagged[0]);
- __ Mov(x1, 0);
- __ CopyFields(x0, x1, temps, 0);
- for (unsigned i = 1; i < kTestCount; i++) {
- __ Mov(x0, dst_tagged[i]);
- __ Mov(x1, src_tagged);
- __ CopyFields(x0, x1, temps, i);
- }
-
- END();
- RUN();
- TEARDOWN();
-
- for (unsigned i = 1; i < kTestCount; i++) {
- for (unsigned j = 0; j < i; j++) {
- CHECK(src[j] == dst[i][j]);
- }
- delete [] dst[i];
- }
-}
-
-
-// This is a V8-specific test.
-TEST(copyfields) {
- INIT_V8();
- CopyFieldsHelper(CPURegList(x10));
- CopyFieldsHelper(CPURegList(x10, x11));
- CopyFieldsHelper(CPURegList(x10, x11, x12));
- CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
-}
-
-
TEST(blr_lr) {
// A simple test to check that the simulator correcty handle "blr lr".
INIT_V8();
@@ -10400,14 +10340,14 @@ TEST(process_nan_double) {
// Make sure that NaN propagation works correctly.
double sn = rawbits_to_double(0x7ff5555511111111);
double qn = rawbits_to_double(0x7ffaaaaa11111111);
- DCHECK(IsSignallingNaN(sn));
- DCHECK(IsQuietNaN(qn));
+ CHECK(IsSignallingNaN(sn));
+ CHECK(IsQuietNaN(qn));
// The input NaNs after passing through ProcessNaN.
double sn_proc = rawbits_to_double(0x7ffd555511111111);
double qn_proc = qn;
- DCHECK(IsQuietNaN(sn_proc));
- DCHECK(IsQuietNaN(qn_proc));
+ CHECK(IsQuietNaN(sn_proc));
+ CHECK(IsQuietNaN(qn_proc));
SETUP();
START();
@@ -10476,14 +10416,14 @@ TEST(process_nan_float) {
// Make sure that NaN propagation works correctly.
float sn = rawbits_to_float(0x7f951111);
float qn = rawbits_to_float(0x7fea1111);
- DCHECK(IsSignallingNaN(sn));
- DCHECK(IsQuietNaN(qn));
+ CHECK(IsSignallingNaN(sn));
+ CHECK(IsQuietNaN(qn));
// The input NaNs after passing through ProcessNaN.
float sn_proc = rawbits_to_float(0x7fd51111);
float qn_proc = qn;
- DCHECK(IsQuietNaN(sn_proc));
- DCHECK(IsQuietNaN(qn_proc));
+ CHECK(IsQuietNaN(sn_proc));
+ CHECK(IsQuietNaN(qn_proc));
SETUP();
START();
@@ -10548,8 +10488,8 @@ TEST(process_nan_float) {
static void ProcessNaNsHelper(double n, double m, double expected) {
- DCHECK(std::isnan(n) || std::isnan(m));
- DCHECK(std::isnan(expected));
+ CHECK(std::isnan(n) || std::isnan(m));
+ CHECK(std::isnan(expected));
SETUP();
START();
@@ -10587,20 +10527,20 @@ TEST(process_nans_double) {
double sm = rawbits_to_double(0x7ff5555522222222);
double qn = rawbits_to_double(0x7ffaaaaa11111111);
double qm = rawbits_to_double(0x7ffaaaaa22222222);
- DCHECK(IsSignallingNaN(sn));
- DCHECK(IsSignallingNaN(sm));
- DCHECK(IsQuietNaN(qn));
- DCHECK(IsQuietNaN(qm));
+ CHECK(IsSignallingNaN(sn));
+ CHECK(IsSignallingNaN(sm));
+ CHECK(IsQuietNaN(qn));
+ CHECK(IsQuietNaN(qm));
// The input NaNs after passing through ProcessNaN.
double sn_proc = rawbits_to_double(0x7ffd555511111111);
double sm_proc = rawbits_to_double(0x7ffd555522222222);
double qn_proc = qn;
double qm_proc = qm;
- DCHECK(IsQuietNaN(sn_proc));
- DCHECK(IsQuietNaN(sm_proc));
- DCHECK(IsQuietNaN(qn_proc));
- DCHECK(IsQuietNaN(qm_proc));
+ CHECK(IsQuietNaN(sn_proc));
+ CHECK(IsQuietNaN(sm_proc));
+ CHECK(IsQuietNaN(qn_proc));
+ CHECK(IsQuietNaN(qm_proc));
// Quiet NaNs are propagated.
ProcessNaNsHelper(qn, 0, qn_proc);
@@ -10620,8 +10560,8 @@ TEST(process_nans_double) {
static void ProcessNaNsHelper(float n, float m, float expected) {
- DCHECK(std::isnan(n) || std::isnan(m));
- DCHECK(std::isnan(expected));
+ CHECK(std::isnan(n) || std::isnan(m));
+ CHECK(std::isnan(expected));
SETUP();
START();
@@ -10659,20 +10599,20 @@ TEST(process_nans_float) {
float sm = rawbits_to_float(0x7f952222);
float qn = rawbits_to_float(0x7fea1111);
float qm = rawbits_to_float(0x7fea2222);
- DCHECK(IsSignallingNaN(sn));
- DCHECK(IsSignallingNaN(sm));
- DCHECK(IsQuietNaN(qn));
- DCHECK(IsQuietNaN(qm));
+ CHECK(IsSignallingNaN(sn));
+ CHECK(IsSignallingNaN(sm));
+ CHECK(IsQuietNaN(qn));
+ CHECK(IsQuietNaN(qm));
// The input NaNs after passing through ProcessNaN.
float sn_proc = rawbits_to_float(0x7fd51111);
float sm_proc = rawbits_to_float(0x7fd52222);
float qn_proc = qn;
float qm_proc = qm;
- DCHECK(IsQuietNaN(sn_proc));
- DCHECK(IsQuietNaN(sm_proc));
- DCHECK(IsQuietNaN(qn_proc));
- DCHECK(IsQuietNaN(qm_proc));
+ CHECK(IsQuietNaN(sn_proc));
+ CHECK(IsQuietNaN(sm_proc));
+ CHECK(IsQuietNaN(qn_proc));
+ CHECK(IsQuietNaN(qm_proc));
// Quiet NaNs are propagated.
ProcessNaNsHelper(qn, 0, qn_proc);
@@ -10692,7 +10632,7 @@ TEST(process_nans_float) {
static void DefaultNaNHelper(float n, float m, float a) {
- DCHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
+ CHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
bool test_1op = std::isnan(n);
bool test_2op = std::isnan(n) || std::isnan(m);
@@ -10785,12 +10725,12 @@ TEST(default_nan_float) {
float qn = rawbits_to_float(0x7fea1111);
float qm = rawbits_to_float(0x7fea2222);
float qa = rawbits_to_float(0x7feaaaaa);
- DCHECK(IsSignallingNaN(sn));
- DCHECK(IsSignallingNaN(sm));
- DCHECK(IsSignallingNaN(sa));
- DCHECK(IsQuietNaN(qn));
- DCHECK(IsQuietNaN(qm));
- DCHECK(IsQuietNaN(qa));
+ CHECK(IsSignallingNaN(sn));
+ CHECK(IsSignallingNaN(sm));
+ CHECK(IsSignallingNaN(sa));
+ CHECK(IsQuietNaN(qn));
+ CHECK(IsQuietNaN(qm));
+ CHECK(IsQuietNaN(qa));
// - Signalling NaNs
DefaultNaNHelper(sn, 0.0f, 0.0f);
@@ -10820,7 +10760,7 @@ TEST(default_nan_float) {
static void DefaultNaNHelper(double n, double m, double a) {
- DCHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
+ CHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
bool test_1op = std::isnan(n);
bool test_2op = std::isnan(n) || std::isnan(m);
@@ -10913,12 +10853,12 @@ TEST(default_nan_double) {
double qn = rawbits_to_double(0x7ffaaaaa11111111);
double qm = rawbits_to_double(0x7ffaaaaa22222222);
double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
- DCHECK(IsSignallingNaN(sn));
- DCHECK(IsSignallingNaN(sm));
- DCHECK(IsSignallingNaN(sa));
- DCHECK(IsQuietNaN(qn));
- DCHECK(IsQuietNaN(qm));
- DCHECK(IsQuietNaN(qa));
+ CHECK(IsSignallingNaN(sn));
+ CHECK(IsSignallingNaN(sm));
+ CHECK(IsSignallingNaN(sa));
+ CHECK(IsQuietNaN(qn));
+ CHECK(IsQuietNaN(qm));
+ CHECK(IsQuietNaN(qa));
// - Signalling NaNs
DefaultNaNHelper(sn, 0.0, 0.0);
@@ -11156,16 +11096,16 @@ TEST(pool_size) {
for (RelocIterator it(*code, pool_mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (RelocInfo::IsConstPool(info->rmode())) {
- DCHECK(info->data() == constant_pool_size);
+ CHECK(info->data() == constant_pool_size);
++pool_count;
}
if (RelocInfo::IsVeneerPool(info->rmode())) {
- DCHECK(info->data() == veneer_pool_size);
+ CHECK(info->data() == veneer_pool_size);
++pool_count;
}
}
- DCHECK(pool_count == 2);
+ CHECK(pool_count == 2);
TEARDOWN();
}
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index e62cc210eb..12733c2cdd 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -310,7 +310,8 @@ TEST(AssemblerIa329) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
Label equal_l, less_l, greater_l, nan_l;
__ fld_d(Operand(esp, 3 * kPointerSize));
@@ -443,6 +444,7 @@ TEST(AssemblerMultiByteNop) {
void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
CHECK(args[0]->IsArray());
v8::Local<v8::Array> vec = v8::Local<v8::Array>::Cast(args[0]);
@@ -456,7 +458,8 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Store input vector on the stack.
for (unsigned i = 0; i < ELEMENT_COUNT; ++i) {
- __ push(Immediate(vec->Get(i)->Int32Value()));
+ __ push(Immediate(
+ vec->Get(context, i).ToLocalChecked()->Int32Value(context).FromJust()));
}
// Read vector into a xmm register.
@@ -490,7 +493,7 @@ TEST(StackAlignmentForSSE2) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->Set(v8_str("do_sse2"),
v8::FunctionTemplate::New(isolate, DoSSE2));
@@ -502,20 +505,21 @@ TEST(StackAlignmentForSSE2) {
"}");
v8::Local<v8::Object> global_object = env->Global();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ global_object->Get(env.local(), v8_str("foo")).ToLocalChecked());
int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
v8::Local<v8::Array> v8_vec = v8::Array::New(isolate, ELEMENT_COUNT);
for (unsigned i = 0; i < ELEMENT_COUNT; i++) {
- v8_vec->Set(i, v8_num(vec[i]));
+ v8_vec->Set(env.local(), i, v8_num(vec[i])).FromJust();
}
v8::Local<v8::Value> args[] = { v8_vec };
- v8::Local<v8::Value> result = foo->Call(global_object, 1, args);
+ v8::Local<v8::Value> result =
+ foo->Call(env.local(), global_object, 1, args).ToLocalChecked();
// The mask should be 0b1000.
- CHECK_EQ(8, result->Int32Value());
+ CHECK_EQ(8, result->Int32Value(env.local()).FromJust());
}
#undef ELEMENT_COUNT
@@ -529,7 +533,8 @@ TEST(AssemblerIa32Extractps) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{ CpuFeatureScope fscope41(&assm, SSE4_1);
__ movsd(xmm1, Operand(esp, 4));
__ extractps(eax, xmm1, 0x1);
@@ -560,7 +565,8 @@ TEST(AssemblerIa32SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
__ movss(xmm0, Operand(esp, kPointerSize));
__ movss(xmm1, Operand(esp, 2 * kPointerSize));
@@ -597,7 +603,8 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
@@ -825,7 +832,8 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
@@ -1052,7 +1060,8 @@ TEST(AssemblerIa32BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, BMI1);
Label exit;
@@ -1159,7 +1168,8 @@ TEST(AssemblerIa32LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, LZCNT);
Label exit;
@@ -1206,7 +1216,8 @@ TEST(AssemblerIa32POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, POPCNT);
Label exit;
@@ -1253,7 +1264,8 @@ TEST(AssemblerIa32BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, BMI2);
Label exit;
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 4b4e658afb..4f986cea9b 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -47,8 +47,6 @@ typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
-// clang-format off
-
#define __ assm.
@@ -57,7 +55,7 @@ TEST(MIPS0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// Addition.
__ addu(v0, a0, a1);
@@ -69,7 +67,8 @@ TEST(MIPS0) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0xabc), res);
}
@@ -79,7 +78,7 @@ TEST(MIPS1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ mov(a1, a0);
@@ -104,7 +103,8 @@ TEST(MIPS1) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(isolate, f, 50, 0, 0, 0, 0));
CHECK_EQ(1275, res);
}
@@ -114,7 +114,7 @@ TEST(MIPS2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label exit, error;
@@ -241,7 +241,8 @@ TEST(MIPS2) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0x31415926), res);
}
@@ -274,7 +275,7 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Double precision floating point instructions.
@@ -358,7 +359,7 @@ TEST(MIPS3) {
t.fd = 0.0;
t.fe = 0.0;
t.ff = 0.0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
// Expected double results.
CHECK_EQ(1.5e14, t.a);
@@ -413,7 +414,7 @@ TEST(MIPS4) {
__ mtc1(t2, f4);
__ mtc1(t3, f5);
} else {
- DCHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
+ CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
__ mfc1(t0, f4);
__ mfhc1(t1, f4);
__ mfc1(t2, f6);
@@ -439,7 +440,7 @@ TEST(MIPS4) {
t.a = 1.5e22;
t.b = 2.75e11;
t.c = 17.17;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(2.75e11, t.a);
@@ -503,7 +504,7 @@ TEST(MIPS5) {
t.b = 2.75e8;
t.i = 12345678;
t.j = -100000;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(12345678.0, t.a);
@@ -571,7 +572,7 @@ TEST(MIPS6) {
F3 f = FUNCTION_CAST<F3>(code->entry());
t.ui = 0x11223344;
t.si = 0x99aabbcc;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
@@ -612,7 +613,7 @@ TEST(MIPS7) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
__ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
@@ -669,7 +670,7 @@ TEST(MIPS7) {
t.e = 0.0;
t.f = 0.0;
t.result = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(1.5e14, t.a);
CHECK_EQ(2.75e11, t.b);
@@ -703,7 +704,8 @@ TEST(MIPS8) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Basic word load.
__ lw(t0, MemOperand(a0, offsetof(T, input)) );
@@ -760,7 +762,7 @@ TEST(MIPS8) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.input = 0x12345678;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0x0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
@@ -787,7 +789,7 @@ TEST(MIPS9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(zero_reg));
@@ -868,7 +870,7 @@ TEST(MIPS10) {
F3 f = FUNCTION_CAST<F3>(code->entry());
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0ff00ff0; // 0x0FF00FF0 -> 0x as double.
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
CHECK_EQ(static_cast<int32_t>(0xFF800000), t.dbl_mant);
@@ -997,7 +999,7 @@ TEST(MIPS11) {
t.reg_init = 0xaabbccdd;
t.mem_init = 0x11223344;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -1061,7 +1063,7 @@ TEST(MIPS12) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ mov(t6, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
@@ -1127,7 +1129,7 @@ TEST(MIPS12) {
t.y3 = 0XBABA;
t.y4 = 0xDEDA;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(3, t.y1);
@@ -1150,7 +1152,7 @@ TEST(MIPS13) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in)));
__ Cvt_d_uw(f10, t0, f4);
@@ -1178,7 +1180,7 @@ TEST(MIPS13) {
t.cvt_big_in = 0xFFFFFFFF;
t.cvt_small_in = 333;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
@@ -1197,6 +1199,7 @@ TEST(MIPS14) {
HandleScope scope(isolate);
#define ROUND_STRUCT_ELEMENT(x) \
+ uint32_t x##_isNaN2008; \
int32_t x##_up_out; \
int32_t x##_down_out; \
int32_t neg_##x##_up_out; \
@@ -1227,13 +1230,15 @@ TEST(MIPS14) {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// Save FCSR.
__ cfc1(a1, FCSR);
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);
#define RUN_ROUND_TEST(x) \
+ __ cfc1(t0, FCSR);\
+ __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
__ ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
__ x##_w_d(f0, f0); \
__ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
@@ -1302,16 +1307,22 @@ TEST(MIPS14) {
t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
t.err4_in = NAN;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
+#define CHECK_NAN2008(x) (x & kFCSRNaN2008FlagMask)
#define CHECK_ROUND_RESULT(type) \
CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
- CHECK_EQ(kFPUInvalidResult, static_cast<uint>(t.type##_invalid_result));
+ if (CHECK_NAN2008(t.type##_isNaN2008) && kArchVariant == kMips32r6) {\
+ CHECK_EQ(static_cast<int32_t>(0), t.type##_invalid_result);\
+ } else {\
+ CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);\
+ }
+
CHECK_ROUND_RESULT(round);
CHECK_ROUND_RESULT(floor);
@@ -1343,7 +1354,8 @@ TEST(seleqz_selnez) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test {
int a;
@@ -1392,7 +1404,7 @@ TEST(seleqz_selnez) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, 1);
CHECK_EQ(test.b, 0);
@@ -1420,7 +1432,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j];
test.i = inputs_S[i];
test.j = tests_S[j];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.g, outputs_D[i]);
CHECK_EQ(test.h, 0);
CHECK_EQ(test.k, outputs_S[i]);
@@ -1428,7 +1440,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j+1];
test.j = tests_S[j+1];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.g, 0);
CHECK_EQ(test.h, outputs_D[i]);
CHECK_EQ(test.k, 0);
@@ -1444,7 +1456,8 @@ TEST(min_max) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -1497,7 +1510,7 @@ TEST(min_max) {
test.e = inputse[i];
test.f = inputsf[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
if (i < kTableLength - 1) {
CHECK_EQ(test.c, outputsdmin[i]);
@@ -1505,10 +1518,10 @@ TEST(min_max) {
CHECK_EQ(test.g, outputsfmin[i]);
CHECK_EQ(test.h, outputsfmax[i]);
} else {
- DCHECK(std::isnan(test.c));
- DCHECK(std::isnan(test.d));
- DCHECK(std::isnan(test.g));
- DCHECK(std::isnan(test.h));
+ CHECK(std::isnan(test.c));
+ CHECK(std::isnan(test.d));
+ CHECK(std::isnan(test.g));
+ CHECK(std::isnan(test.h));
}
}
}
@@ -1521,7 +1534,8 @@ TEST(rint_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -1613,7 +1627,7 @@ TEST(rint_d) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1626,7 +1640,8 @@ TEST(sel) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dd;
@@ -1679,13 +1694,13 @@ TEST(sel) {
test.ft = inputs_ft[i];
test.fd = tests_S[j];
test.fs = inputs_fs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dd, inputs_ds[i]);
CHECK_EQ(test.fd, inputs_fs[i]);
test.dd = tests_D[j+1];
test.fd = tests_S[j+1];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dd, inputs_dt[i]);
CHECK_EQ(test.fd, inputs_ft[i]);
}
@@ -1700,7 +1715,8 @@ TEST(rint_s) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -1792,7 +1808,7 @@ TEST(rint_s) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1800,13 +1816,59 @@ TEST(rint_s) {
}
+TEST(Cvt_d_uw) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
+
+ typedef struct test_struct {
+ unsigned input;
+ uint64_t output;
+ } TestStruct;
+
+ unsigned inputs[] = {
+ 0x0, 0xffffffff, 0x80000000, 0x7fffffff
+ };
+
+ uint64_t outputs[] = {
+ 0x0, 0x41efffffffe00000,
+ 0x41e0000000000000, 0x41dfffffffc00000
+ };
+
+ int kTableLength = sizeof(inputs)/sizeof(inputs[0]);
+
+ TestStruct test;
+
+ __ lw(t1, MemOperand(a0, offsetof(TestStruct, input)));
+ __ Cvt_d_uw(f4, t1, f6);
+ __ sdc1(f4, MemOperand(a0, offsetof(TestStruct, output)));
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ for (int i = 0; i < kTableLength; i++) {
+ test.input = inputs[i];
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ // Check outputs
+ CHECK_EQ(test.output, outputs[i]);
+ }
+}
+
+
TEST(mina_maxa) {
if (IsMipsArchVariant(kMips32r6)) {
const int kTableLength = 15;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double double_nan = std::numeric_limits<double>::quiet_NaN();
const float float_nan = std::numeric_limits<float>::quiet_NaN();
@@ -1880,17 +1942,17 @@ TEST(mina_maxa) {
test.b = inputsb[i];
test.c = inputsc[i];
test.d = inputsd[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
if (i < kTableLength - 1) {
CHECK_EQ(test.resd, resd[i]);
CHECK_EQ(test.resf, resf[i]);
CHECK_EQ(test.resd1, resd1[i]);
CHECK_EQ(test.resf1, resf1[i]);
} else {
- DCHECK(std::isnan(test.resd));
- DCHECK(std::isnan(test.resf));
- DCHECK(std::isnan(test.resd1));
- DCHECK(std::isnan(test.resf1));
+ CHECK(std::isnan(test.resd));
+ CHECK(std::isnan(test.resf));
+ CHECK(std::isnan(test.resd1));
+ CHECK(std::isnan(test.resf1));
}
}
}
@@ -1903,9 +1965,11 @@ TEST(trunc_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c; // a trunc result
@@ -1931,7 +1995,15 @@ TEST(trunc_l) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ trunc_l_d(f8, f4);
@@ -1949,8 +2021,13 @@ TEST(trunc_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -1963,7 +2040,8 @@ TEST(movz_movn) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t rt;
@@ -2026,14 +2104,14 @@ TEST(movz_movn) {
test.c = inputs_S[i];
test.rt = 1;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, test.bold);
CHECK_EQ(test.d, test.dold);
CHECK_EQ(test.b1, outputs_D[i]);
CHECK_EQ(test.d1, outputs_S[i]);
test.rt = 0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
CHECK_EQ(test.b1, test.bold1);
@@ -2092,7 +2170,8 @@ TEST(movt_movd) {
test.fcsr = 1 << (24+condition_flags[j]);
}
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)) );
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
__ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
@@ -2123,13 +2202,13 @@ TEST(movt_movd) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dstf, outputs_S[i]);
CHECK_EQ(test.dstd, outputs_D[i]);
CHECK_EQ(test.dstf1, test.dstfold1);
CHECK_EQ(test.dstd1, test.dstdold1);
test.fcsr = 0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dstf, test.dstfold);
CHECK_EQ(test.dstd, test.dstdold);
CHECK_EQ(test.dstf1, outputs_S[i]);
@@ -2145,7 +2224,7 @@ TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2211,7 +2290,7 @@ TEST(cvt_w_d) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -2222,9 +2301,10 @@ TEST(trunc_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
@@ -2250,7 +2330,15 @@ TEST(trunc_w) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult,
+ 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ trunc_w_d(f8, f4);
@@ -2268,8 +2356,12 @@ TEST(trunc_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2279,9 +2371,10 @@ TEST(round_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
@@ -2307,7 +2400,14 @@ TEST(round_w) {
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ round_w_d(f8, f4);
@@ -2325,8 +2425,12 @@ TEST(round_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2337,9 +2441,11 @@ TEST(round_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c;
@@ -2365,7 +2471,15 @@ TEST(round_l) {
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ round_l_d(f8, f4);
@@ -2383,8 +2497,13 @@ TEST(round_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2396,7 +2515,7 @@ TEST(sub) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2453,7 +2572,7 @@ TEST(sub) {
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2469,7 +2588,7 @@ TEST(sqrt_rsqrt_recip) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2534,7 +2653,7 @@ TEST(sqrt_rsqrt_recip) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
@@ -2569,7 +2688,7 @@ TEST(neg) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2610,7 +2729,7 @@ TEST(neg) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2622,7 +2741,7 @@ TEST(mul) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2669,7 +2788,7 @@ TEST(mul) {
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
}
@@ -2681,7 +2800,7 @@ TEST(mov) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2724,7 +2843,7 @@ TEST(mov) {
test.a = inputs_D[i];
test.c = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
}
@@ -2735,9 +2854,10 @@ TEST(floor_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
@@ -2763,7 +2883,15 @@ TEST(floor_w) {
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ kFPUInvalidResult,
+ 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ floor_w_d(f8, f4);
@@ -2781,8 +2909,12 @@ TEST(floor_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2793,9 +2925,11 @@ TEST(floor_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c;
@@ -2821,7 +2955,15 @@ TEST(floor_l) {
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ floor_l_d(f8, f4);
@@ -2839,8 +2981,13 @@ TEST(floor_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2851,9 +2998,10 @@ TEST(ceil_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
@@ -2879,7 +3027,15 @@ TEST(ceil_w) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult,
+ 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ ceil_w_d(f8, f4);
@@ -2897,8 +3053,12 @@ TEST(ceil_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2909,9 +3069,11 @@ TEST(ceil_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c;
@@ -2937,7 +3099,15 @@ TEST(ceil_l) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ ceil_l_d(f8, f4);
@@ -2955,8 +3125,13 @@ TEST(ceil_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips32r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -3021,7 +3196,8 @@ TEST(jump_tables1) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -3090,7 +3266,8 @@ TEST(jump_tables2) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -3166,7 +3343,8 @@ TEST(jump_tables3) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
- Handle<Object> result(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0), isolate);
+ Handle<Object> result(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0), isolate);
#ifdef OBJECT_PRINT
::printf("f(%d) = ", i);
result->Print(std::cout);
@@ -3214,7 +3392,7 @@ TEST(BITSWAP) {
F3 f = FUNCTION_CAST<F3>(code->entry());
t.r1 = 0x781A15C3;
t.r2 = 0x8B71FCDE;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x1E58A8C3), t.r1);
@@ -3255,7 +3433,8 @@ TEST(class_fmt) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ class_d(f6, f4);
@@ -3370,7 +3549,7 @@ TEST(class_fmt) {
t.fPosSubnorm = FLT_MIN / 20.0;
t.fPosZero = +0.0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
// Expected double results.
CHECK_EQ(bit_cast<int64_t>(t.dSignalingNan), 0x001);
@@ -3403,7 +3582,7 @@ TEST(ABS) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t fir;
@@ -3440,34 +3619,34 @@ TEST(ABS) {
F3 f = FUNCTION_CAST<F3>(code->entry());
test.a = -2.0;
test.b = -2.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
test.a = 2.0;
test.b = 2.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
// Testing biggest positive number
test.a = std::numeric_limits<double>::max();
test.b = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest negative number
test.a = -std::numeric_limits<double>::max(); // lowest()
test.b = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest positive number
test.a = -std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::min());
@@ -3476,7 +3655,7 @@ TEST(ABS) {
/ std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::max()
/ std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max()
/ std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::max()
@@ -3484,13 +3663,13 @@ TEST(ABS) {
test.a = std::numeric_limits<double>::quiet_NaN();
test.b = std::numeric_limits<float>::quiet_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isnan(test.a), true);
CHECK_EQ(std::isnan(test.b), true);
test.a = std::numeric_limits<double>::signaling_NaN();
test.b = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isnan(test.a), true);
CHECK_EQ(std::isnan(test.b), true);
}
@@ -3500,7 +3679,7 @@ TEST(ADD_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -3535,7 +3714,7 @@ TEST(ADD_FMT) {
test.b = 3.0;
test.fa = 2.0;
test.fb = 3.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.c, 5.0);
CHECK_EQ(test.fc, 5.0);
@@ -3543,7 +3722,7 @@ TEST(ADD_FMT) {
test.b = -std::numeric_limits<double>::max(); // lowest()
test.fa = std::numeric_limits<float>::max();
test.fb = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.c, 0.0);
CHECK_EQ(test.fc, 0.0);
@@ -3551,7 +3730,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::max();
test.fa = std::numeric_limits<float>::max();
test.fb = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isfinite(test.c), false);
CHECK_EQ(std::isfinite(test.fc), false);
@@ -3559,7 +3738,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::signaling_NaN();
test.fa = 5.0;
test.fb = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isnan(test.c), true);
CHECK_EQ(std::isnan(test.fc), true);
}
@@ -3570,7 +3749,8 @@ TEST(C_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -3689,7 +3869,7 @@ TEST(C_COND_FMT) {
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -3711,7 +3891,7 @@ TEST(C_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -3733,7 +3913,7 @@ TEST(C_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 1U);
@@ -3755,7 +3935,7 @@ TEST(C_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 1U);
CHECK_EQ(test.dEq, 0U);
@@ -3781,7 +3961,8 @@ TEST(CMP_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -3894,7 +4075,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -3919,7 +4100,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -3944,7 +4125,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dTrue);
@@ -3969,7 +4150,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dTrue);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -3997,7 +4178,7 @@ TEST(CVT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float cvt_d_s_in;
@@ -4040,7 +4221,8 @@ TEST(CVT) {
GENERATE_CVT_TEST(cvt_d_s, lw, sd)
GENERATE_CVT_TEST(cvt_d_w, lw, sd)
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
GENERATE_CVT_TEST(cvt_d_l, ld, sd)
}
@@ -4051,7 +4233,8 @@ TEST(CVT) {
GENERATE_CVT_TEST(cvt_s_d, ld, sw)
GENERATE_CVT_TEST(cvt_s_w, lw, sw)
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
GENERATE_CVT_TEST(cvt_s_l, ld, sw)
}
@@ -4081,10 +4264,11 @@ TEST(CVT) {
test.cvt_w_s_in = -0.51;
test.cvt_w_d_in = -0.51;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
}
if (IsFp64Mode()) {
@@ -4093,7 +4277,8 @@ TEST(CVT) {
}
CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
}
CHECK_EQ(test.cvt_w_s_out, -1);
@@ -4111,10 +4296,11 @@ TEST(CVT) {
test.cvt_w_s_in = 0.49;
test.cvt_w_d_in = 0.49;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
}
if (IsFp64Mode()) {
@@ -4123,7 +4309,8 @@ TEST(CVT) {
}
CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
}
CHECK_EQ(test.cvt_w_s_out, 0);
@@ -4140,10 +4327,11 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::max();
test.cvt_w_d_in = std::numeric_limits<double>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
}
if (IsFp64Mode()) {
@@ -4152,7 +4340,8 @@ TEST(CVT) {
}
CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
}
CHECK_EQ(test.cvt_w_s_out, std::numeric_limits<int32_t>::max());
@@ -4170,10 +4359,11 @@ TEST(CVT) {
test.cvt_w_s_in = -std::numeric_limits<float>::max(); // lowest()
test.cvt_w_d_in = -std::numeric_limits<double>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
}
// The returned value when converting from fixed-point to float-point
@@ -4187,7 +4377,8 @@ TEST(CVT) {
}
CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
}
CHECK(test.cvt_w_s_out == std::numeric_limits<int32_t>::min() ||
@@ -4207,10 +4398,11 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::min();
test.cvt_w_d_in = std::numeric_limits<double>::min();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
}
if (IsFp64Mode()) {
@@ -4219,7 +4411,8 @@ TEST(CVT) {
}
CHECK_EQ(test.cvt_s_d_out, static_cast<float>(test.cvt_s_d_in));
CHECK_EQ(test.cvt_s_w_out, static_cast<float>(test.cvt_s_w_in));
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
CHECK_EQ(test.cvt_s_l_out, static_cast<float>(test.cvt_s_l_in));
}
CHECK_EQ(test.cvt_w_s_out, 0);
@@ -4231,7 +4424,7 @@ TEST(DIV_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dOp1;
@@ -4274,7 +4467,7 @@ TEST(DIV_FMT) {
F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
const int test_size = 3;
@@ -4315,7 +4508,7 @@ TEST(DIV_FMT) {
test.fOp1 = fOp1[i];
test.fOp2 = fOp2[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dRes, dRes[i]);
CHECK_EQ(test.fRes, fRes[i]);
}
@@ -4325,7 +4518,7 @@ TEST(DIV_FMT) {
test.fOp1 = FLT_MAX;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(false, std::isfinite(test.dRes));
CHECK_EQ(false, std::isfinite(test.fRes));
@@ -4334,7 +4527,7 @@ TEST(DIV_FMT) {
test.fOp1 = 0.0;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(true, std::isnan(test.dRes));
CHECK_EQ(true, std::isnan(test.fRes));
@@ -4343,7 +4536,7 @@ TEST(DIV_FMT) {
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = -5.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(true, std::isnan(test.dRes));
CHECK_EQ(true, std::isnan(test.fRes));
}
@@ -4353,7 +4546,7 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ align(v0, a0, a1, bp);
__ jr(ra);
@@ -4366,10 +4559,8 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, rs_value,
- rt_value,
- 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(
+ isolate, f, rs_value, rt_value, 0, 0, 0));
return res;
}
@@ -4408,7 +4599,7 @@ uint32_t run_aluipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ aluipc(v0, offset);
__ jr(ra);
@@ -4422,8 +4613,8 @@ uint32_t run_aluipc(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint32_t) f; // Set the program counter.
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4462,7 +4653,7 @@ uint32_t run_auipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ auipc(v0, offset);
__ jr(ra);
@@ -4476,8 +4667,8 @@ uint32_t run_auipc(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint32_t) f; // Set the program counter.
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4516,7 +4707,7 @@ uint32_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t7, t0, 0xffff; (0x250fffff)
@@ -4551,8 +4742,8 @@ uint32_t run_lwpc(int offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4592,7 +4783,7 @@ uint32_t run_jic(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label get_program_counter, stop_execution;
__ push(ra);
@@ -4635,8 +4826,8 @@ uint32_t run_jic(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4673,7 +4864,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
__ li(v0, 0);
@@ -4707,8 +4898,8 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, value, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, value, 0, 0, 0, 0));
return res;
}
@@ -4746,7 +4937,7 @@ uint32_t run_jialc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label main_block, get_program_counter;
__ push(ra);
@@ -4801,8 +4992,8 @@ uint32_t run_jialc(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4838,7 +5029,7 @@ uint64_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ addiupc(v0, imm19);
__ jr(ra);
@@ -4852,8 +5043,8 @@ uint64_t run_addiupc(int32_t imm19) {
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint32_t) f; // Set the program counter.
- uint32_t rs =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, imm19, 0, 0, 0, 0));
+ uint32_t rs = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, imm19, 0, 0, 0, 0));
return rs;
}
@@ -4892,7 +5083,7 @@ int32_t run_bc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -4900,9 +5091,8 @@ int32_t run_bc(int32_t offset) {
__ li(t8, 0);
__ li(t9, 2); // A condition for stopping execution.
- uint32_t instruction_addiu = 0x24420001; // addiu v0, v0, 1
for (int32_t i = -100; i <= -11; ++i) {
- __ dd(instruction_addiu);
+ __ addiu(v0, v0, 1);
}
__ addiu(t8, t8, 1); // -10
@@ -4921,7 +5111,7 @@ int32_t run_bc(int32_t offset) {
__ bc(offset); // -1
for (int32_t i = 0; i <= 99; ++i) {
- __ dd(instruction_addiu);
+ __ addiu(v0, v0, 1);
}
__ pop(ra);
@@ -4935,8 +5125,8 @@ int32_t run_bc(int32_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- int32_t res =
- reinterpret_cast<int32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4973,7 +5163,7 @@ int32_t run_balc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5017,13 +5207,71 @@ int32_t run_balc(int32_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- int32_t res =
- reinterpret_cast<int32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
+uint32_t run_aui(uint32_t rs, uint16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(t0, rs);
+ __ aui(v0, t0, offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint32_t res =
+ reinterpret_cast<uint32_t>
+ (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(r6_aui) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ CcTest::InitializeVM();
+
+ struct TestCaseAui {
+ uint32_t rs;
+ uint16_t offset;
+ uint32_t ref_res;
+ };
+
+ struct TestCaseAui tc[] = {
+ // input, offset, result
+ {0xfffeffff, 1, 0xffffffff},
+ {0xffffffff, 0, 0xffffffff},
+ {0, 0xffff, 0xffff0000},
+ {0x0008ffff, 0xfff7, 0xffffffff},
+ {32767, 32767, 0x7fff7fff},
+ // overflow cases
+ {0xffffffff, 0x1, 0x0000ffff},
+ {0xffffffff, 0xffff, 0xfffeffff},
+ };
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAui);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ PC = 0;
+ uint32_t res = run_aui(tc[i].rs, tc[i].offset);
+ CHECK_EQ(tc[i].ref_res, res);
+ }
+ }
+}
+
+
TEST(r6_balc) {
if (IsMipsArchVariant(kMips32r6)) {
CcTest::InitializeVM();
@@ -5054,7 +5302,7 @@ uint32_t run_bal(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ mov(t0, ra);
__ bal(offset); // Equivalent for "BGEZAL zero_reg, offset".
@@ -5075,8 +5323,8 @@ uint32_t run_bal(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint32_t res =
- reinterpret_cast<uint32_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5102,6 +5350,78 @@ TEST(bal) {
}
+static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+
+ __ lsa(v0, a0, a1, sa);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(lsa) {
+ if (!IsMipsArchVariant(kMips32r6)) return;
+
+ CcTest::InitializeVM();
+ struct TestCaseLsa {
+ int32_t rt;
+ int32_t rs;
+ uint8_t sa;
+ uint32_t expected_res;
+ };
+
+ struct TestCaseLsa tc[] = {
+ // rt, rs, sa, expected_res
+ {0x4, 0x1, 1, 0x6},
+ {0x4, 0x1, 2, 0x8},
+ {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 4, 0x14},
+ {0x0, 0x1, 1, 0x2},
+ {0x0, 0x1, 2, 0x4},
+ {0x0, 0x1, 3, 0x8},
+ {0x0, 0x1, 4, 0x10},
+ {0x4, 0x0, 1, 0x4},
+ {0x4, 0x0, 2, 0x4},
+ {0x4, 0x0, 3, 0x4},
+ {0x4, 0x0, 4, 0x4},
+ {0x4, INT32_MAX, 1, 0x2}, // Shift overflow.
+ {0x4, INT32_MAX >> 1, 2, 0x0}, // Shift overflow.
+ {0x4, INT32_MAX >> 2, 3, 0xfffffffc}, // Shift overflow.
+ {0x4, INT32_MAX >> 3, 4, 0xfffffff4}, // Shift overflow.
+ {INT32_MAX - 1, 0x1, 1, 0x80000000}, // Signed adition overflow.
+ {INT32_MAX - 3, 0x1, 2, 0x80000000}, // Signed addition overflow.
+ {INT32_MAX - 7, 0x1, 3, 0x80000000}, // Signed addition overflow.
+ {INT32_MAX - 15, 0x1, 4, 0x80000000}, // Signed addition overflow.
+ {-2, 0x1, 1, 0x0}, // Addition overflow.
+ {-4, 0x1, 2, 0x0}, // Addition overflow.
+ {-8, 0x1, 3, 0x0}, // Addition overflow.
+ {-16, 0x1, 4, 0x0}}; // Addition overflow.
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint32_t res = run_lsa(tc[i].rt, tc[i].rs, tc[i].sa);
+ PrintF("0x%x =? 0x%x == lsa(v0, %x, %x, %hhu)\n", tc[i].expected_res, res,
+ tc[i].rt, tc[i].rs, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+
TEST(Trampoline) {
// Private member of Assembler class.
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
@@ -5110,7 +5430,8 @@ TEST(Trampoline) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label done;
size_t nr_calls = kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
@@ -5127,10 +5448,9 @@ TEST(Trampoline) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
- int32_t res =
- reinterpret_cast<int32_t>(CALL_GENERATED_CODE(f, 42, 42, 0, 0, 0));
+ int32_t res = reinterpret_cast<int32_t>(
+ CALL_GENERATED_CODE(isolate, f, 42, 42, 0, 0, 0));
CHECK_EQ(res, 0);
}
-
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index d13a8b46a3..988083cadc 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -47,8 +47,6 @@ typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
-// clang-format off
-
#define __ assm.
@@ -57,7 +55,7 @@ TEST(MIPS0) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// Addition.
__ addu(v0, a0, a1);
@@ -69,8 +67,8 @@ TEST(MIPS0) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
CHECK_EQ(0xabcL, res);
}
@@ -80,7 +78,7 @@ TEST(MIPS1) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
__ mov(a1, a0);
@@ -105,8 +103,8 @@ TEST(MIPS1) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F1 f = FUNCTION_CAST<F1>(code->entry());
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, 50, 0, 0, 0, 0));
CHECK_EQ(1275L, res);
}
@@ -116,7 +114,7 @@ TEST(MIPS2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label exit, error;
@@ -251,8 +249,8 @@ TEST(MIPS2) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0xab0, 0xc, 0, 0, 0));
CHECK_EQ(0x31415926L, res);
}
@@ -286,7 +284,7 @@ TEST(MIPS3) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Double precision floating point instructions.
@@ -370,7 +368,7 @@ TEST(MIPS3) {
t.fd = 0.0;
t.fe = 0.0;
t.ff = 0.0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
// Expected double results.
CHECK_EQ(1.5e14, t.a);
@@ -452,7 +450,7 @@ TEST(MIPS4) {
t.b = 2.75e11;
t.c = 17.17;
t.d = -2.75e11;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(2.75e11, t.a);
@@ -518,7 +516,7 @@ TEST(MIPS5) {
t.b = 2.75e8;
t.i = 12345678;
t.j = -100000;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(12345678.0, t.a);
@@ -586,7 +584,7 @@ TEST(MIPS6) {
F3 f = FUNCTION_CAST<F3>(code->entry());
t.ui = 0x11223344;
t.si = 0x99aabbcc;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
@@ -625,7 +623,7 @@ TEST(MIPS7) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
__ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
@@ -679,7 +677,7 @@ TEST(MIPS7) {
t.e = 0.0;
t.f = 0.0;
t.result = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(1.5e14, t.a);
CHECK_EQ(2.75e11, t.b);
@@ -713,7 +711,8 @@ TEST(MIPS8) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Basic word load.
__ lw(a4, MemOperand(a0, offsetof(T, input)) );
@@ -770,7 +769,7 @@ TEST(MIPS8) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
t.input = 0x12345678;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0x0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
@@ -797,7 +796,7 @@ TEST(MIPS9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label exit, exit2, exit3;
__ Branch(&exit, ge, a0, Operand(zero_reg));
@@ -896,7 +895,7 @@ TEST(MIPS10) {
t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
t.b_long_hi = 0x000000ff; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
t.b_long_lo = 0x00ff00ff;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x41DFFFFF), t.dbl_exp);
@@ -1031,7 +1030,7 @@ TEST(MIPS11) {
t.reg_init = 0xaabbccdd;
t.mem_init = 0x11223344;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
if (kArchEndian == kLittle) {
@@ -1094,7 +1093,7 @@ TEST(MIPS12) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ mov(t2, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
@@ -1160,7 +1159,7 @@ TEST(MIPS12) {
t.y3 = 0XBABA;
t.y4 = 0xDEDA;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(3, t.y1);
@@ -1183,17 +1182,17 @@ TEST(MIPS13) {
} T;
T t;
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ sw(a4, MemOperand(a0, offsetof(T, cvt_small_in)));
- __ Cvt_d_uw(f10, a4, f4);
+ __ Cvt_d_uw(f10, a4);
__ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
__ Trunc_uw_d(f10, f10, f4);
__ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
__ sw(a4, MemOperand(a0, offsetof(T, cvt_big_in)));
- __ Cvt_d_uw(f8, a4, f4);
+ __ Cvt_d_uw(f8, a4);
__ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
__ Trunc_uw_d(f8, f8, f4);
@@ -1211,7 +1210,7 @@ TEST(MIPS13) {
t.cvt_big_in = 0xFFFFFFFF;
t.cvt_small_in = 333;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
@@ -1230,6 +1229,7 @@ TEST(MIPS14) {
HandleScope scope(isolate);
#define ROUND_STRUCT_ELEMENT(x) \
+ uint32_t x##_isNaN2008; \
int32_t x##_up_out; \
int32_t x##_down_out; \
int32_t neg_##x##_up_out; \
@@ -1260,13 +1260,15 @@ TEST(MIPS14) {
#undef ROUND_STRUCT_ELEMENT
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// Save FCSR.
__ cfc1(a1, FCSR);
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);
#define RUN_ROUND_TEST(x) \
+ __ cfc1(t0, FCSR);\
+ __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
__ ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
__ x##_w_d(f0, f0); \
__ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
@@ -1335,16 +1337,21 @@ TEST(MIPS14) {
t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
t.err4_in = NAN;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
+#define CHECK_NAN2008(x) (x & kFCSRNaN2008FlagMask)
#define CHECK_ROUND_RESULT(type) \
CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
- CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
+ if (CHECK_NAN2008(t.type##_isNaN2008) && kArchVariant == kMips64r6) { \
+ CHECK_EQ(static_cast<int32_t>(0), t.type##_invalid_result);\
+ } else { \
+ CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);\
+ }
CHECK_ROUND_RESULT(round);
CHECK_ROUND_RESULT(floor);
@@ -1453,7 +1460,7 @@ TEST(MIPS16) {
t.r4 = 0x4444444444444444;
t.r5 = 0x5555555555555555;
t.r6 = 0x6666666666666666;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
// Unsigned data, 32 & 64.
@@ -1476,7 +1483,8 @@ TEST(seleqz_selnez) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test {
int a;
@@ -1525,7 +1533,7 @@ TEST(seleqz_selnez) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, 1);
CHECK_EQ(test.b, 0);
@@ -1553,7 +1561,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j];
test.i = inputs_S[i];
test.j = tests_S[j];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.g, outputs_D[i]);
CHECK_EQ(test.h, 0);
CHECK_EQ(test.k, outputs_S[i]);
@@ -1561,7 +1569,7 @@ TEST(seleqz_selnez) {
test.f = tests_D[j+1];
test.j = tests_S[j+1];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.g, 0);
CHECK_EQ(test.h, outputs_D[i]);
CHECK_EQ(test.k, 0);
@@ -1578,7 +1586,8 @@ TEST(min_max) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -1631,7 +1640,7 @@ TEST(min_max) {
test.e = inputse[i];
test.f = inputsf[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
if (i < kTableLength - 1) {
CHECK_EQ(test.c, outputsdmin[i]);
@@ -1639,10 +1648,10 @@ TEST(min_max) {
CHECK_EQ(test.g, outputsfmin[i]);
CHECK_EQ(test.h, outputsfmax[i]);
} else {
- DCHECK(std::isnan(test.c));
- DCHECK(std::isnan(test.d));
- DCHECK(std::isnan(test.g));
- DCHECK(std::isnan(test.h));
+ CHECK(std::isnan(test.c));
+ CHECK(std::isnan(test.d));
+ CHECK(std::isnan(test.g));
+ CHECK(std::isnan(test.h));
}
}
}
@@ -1655,7 +1664,8 @@ TEST(rint_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -1745,7 +1755,7 @@ TEST(rint_d) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1758,7 +1768,8 @@ TEST(sel) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dd;
@@ -1811,13 +1822,13 @@ TEST(sel) {
test.ft = inputs_ft[i];
test.fd = tests_S[j];
test.fs = inputs_fs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dd, inputs_ds[i]);
CHECK_EQ(test.fd, inputs_fs[i]);
test.dd = tests_D[j+1];
test.fd = tests_S[j+1];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dd, inputs_dt[i]);
CHECK_EQ(test.fd, inputs_ft[i]);
}
@@ -1832,7 +1843,8 @@ TEST(rint_s) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -1924,7 +1936,7 @@ TEST(rint_s) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -1938,7 +1950,8 @@ TEST(mina_maxa) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double double_nan = std::numeric_limits<double>::quiet_NaN();
const float float_nan = std::numeric_limits<float>::quiet_NaN();
@@ -2012,7 +2025,7 @@ TEST(mina_maxa) {
test.b = inputsb[i];
test.c = inputsc[i];
test.d = inputsd[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
if (i < kTableLength - 1) {
CHECK_EQ(test.resd, resd[i]);
@@ -2020,10 +2033,10 @@ TEST(mina_maxa) {
CHECK_EQ(test.resd1, resd1[i]);
CHECK_EQ(test.resf1, resf1[i]);
} else {
- DCHECK(std::isnan(test.resd));
- DCHECK(std::isnan(test.resf));
- DCHECK(std::isnan(test.resd1));
- DCHECK(std::isnan(test.resf1));
+ CHECK(std::isnan(test.resd));
+ CHECK(std::isnan(test.resf));
+ CHECK(std::isnan(test.resd1));
+ CHECK(std::isnan(test.resf1));
}
}
}
@@ -2037,9 +2050,11 @@ TEST(trunc_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c; // a trunc result
@@ -2065,7 +2080,14 @@ TEST(trunc_l) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0, dFPU64InvalidResult,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ trunc_l_d(f8, f4);
@@ -2083,8 +2105,13 @@ TEST(trunc_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2097,7 +2124,8 @@ TEST(movz_movn) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t rt;
@@ -2160,14 +2188,14 @@ TEST(movz_movn) {
test.c = inputs_S[i];
test.rt = 1;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, test.bold);
CHECK_EQ(test.d, test.dold);
CHECK_EQ(test.b1, outputs_D[i]);
CHECK_EQ(test.d1, outputs_S[i]);
test.rt = 0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
CHECK_EQ(test.b1, test.bold1);
@@ -2225,7 +2253,8 @@ TEST(movt_movd) {
test.fcsr = 1 << (24+condition_flags[j]);
}
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)) );
__ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
__ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
@@ -2256,13 +2285,13 @@ TEST(movt_movd) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dstf, outputs_S[i]);
CHECK_EQ(test.dstd, outputs_D[i]);
CHECK_EQ(test.dstf1, test.dstfold1);
CHECK_EQ(test.dstd1, test.dstdold1);
test.fcsr = 0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dstf, test.dstfold);
CHECK_EQ(test.dstd, test.dstdold);
CHECK_EQ(test.dstf1, outputs_S[i]);
@@ -2279,7 +2308,7 @@ TEST(cvt_w_d) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2345,7 +2374,7 @@ TEST(cvt_w_d) {
test.fcsr = fcsr_inputs[j];
for (int i = 0; i < kTableLength; i++) {
test.a = inputs[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs[j][i]);
}
}
@@ -2356,9 +2385,10 @@ TEST(trunc_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
@@ -2384,7 +2414,15 @@ TEST(trunc_w) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult,
+ 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ trunc_w_d(f8, f4);
@@ -2402,8 +2440,12 @@ TEST(trunc_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2413,9 +2455,10 @@ TEST(round_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
@@ -2441,7 +2484,14 @@ TEST(round_w) {
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ round_w_d(f8, f4);
@@ -2459,8 +2509,12 @@ TEST(round_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2470,9 +2524,11 @@ TEST(round_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c;
@@ -2498,7 +2554,15 @@ TEST(round_l) {
-2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ round_l_d(f8, f4);
@@ -2516,9 +2580,13 @@ TEST(round_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- std::cout<< i<< "\n";
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2529,7 +2597,7 @@ TEST(sub) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2586,7 +2654,7 @@ TEST(sub) {
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2602,7 +2670,7 @@ TEST(sqrt_rsqrt_recip) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2661,7 +2729,7 @@ TEST(sqrt_rsqrt_recip) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
@@ -2694,7 +2762,7 @@ TEST(neg) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2735,7 +2803,7 @@ TEST(neg) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
test.c = inputs_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, outputs_S[i]);
CHECK_EQ(test.resultD, outputs_D[i]);
}
@@ -2748,7 +2816,7 @@ TEST(mul) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float a;
@@ -2795,7 +2863,7 @@ TEST(mul) {
test.b = inputft_S[i];
test.c = inputfs_D[i];
test.d = inputft_D[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
}
@@ -2807,7 +2875,7 @@ TEST(mov) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -2849,7 +2917,7 @@ TEST(mov) {
test.a = inputs_D[i];
test.c = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.b, outputs_D[i]);
CHECK_EQ(test.d, outputs_S[i]);
}
@@ -2860,9 +2928,10 @@ TEST(floor_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
@@ -2888,7 +2957,15 @@ TEST(floor_w) {
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ kFPUInvalidResult,
+ 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ floor_w_d(f8, f4);
@@ -2906,8 +2983,12 @@ TEST(floor_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2917,9 +2998,11 @@ TEST(floor_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c;
@@ -2945,7 +3028,15 @@ TEST(floor_l) {
-3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ floor_l_d(f8, f4);
@@ -2963,8 +3054,13 @@ TEST(floor_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -2974,9 +3070,10 @@ TEST(ceil_w) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
@@ -3002,7 +3099,15 @@ TEST(ceil_w) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
kFPUInvalidResult, kFPUInvalidResult,
kFPUInvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult,
+ 0,
+ kFPUInvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ ceil_w_d(f8, f4);
@@ -3020,8 +3125,12 @@ TEST(ceil_w) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -3031,9 +3140,11 @@ TEST(ceil_l) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
typedef struct test_float {
+ uint32_t isNaN2008;
double a;
float b;
int64_t c;
@@ -3059,7 +3170,15 @@ TEST(ceil_l) {
-2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
2147483648.0, dFPU64InvalidResult,
dFPU64InvalidResult};
+ double outputsNaN2008[kTableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 2147483648.0,
+ 0,
+ dFPU64InvalidResult};
+ __ cfc1(t1, FCSR);
+ __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
__ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
__ ceil_l_d(f8, f4);
@@ -3077,8 +3196,13 @@ TEST(ceil_l) {
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
test.b = inputs_S[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
- CHECK_EQ(test.c, outputs[i]);
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
+ if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
+ kArchVariant == kMips64r6) {
+ CHECK_EQ(test.c, outputsNaN2008[i]);
+ } else {
+ CHECK_EQ(test.c, outputs[i]);
+ }
CHECK_EQ(test.d, test.c);
}
}
@@ -3098,23 +3222,20 @@ TEST(jump_tables1) {
__ daddiu(sp, sp, -8);
__ sd(ra, MemOperand(sp));
- if ((assm.pc_offset() & 7) == 0) {
- __ nop();
- }
+ __ Align(8);
Label done;
{
- __ BlockTrampolinePoolFor(kNumCases * 2 + 7);
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
- &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
+ &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
Label here;
__ bal(&here);
- __ nop();
+ __ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
- __ dsll(at, a0, 3);
__ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3146,7 +3267,7 @@ TEST(jump_tables1) {
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], static_cast<int>(res));
}
@@ -3180,22 +3301,19 @@ TEST(jump_tables2) {
__ nop();
}
- if ((assm.pc_offset() & 7) == 0) {
- __ nop();
- }
+ __ Align(8);
__ bind(&dispatch);
{
- __ BlockTrampolinePoolFor(kNumCases * 2 + 7);
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
- &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
+ &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
Label here;
__ bal(&here);
- __ nop();
+ __ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
- __ dsll(at, a0, 3);
__ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3219,7 +3337,7 @@ TEST(jump_tables2) {
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(
- CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
@@ -3248,6 +3366,7 @@ TEST(jump_tables3) {
Label done, dispatch;
__ b(&dispatch);
+ __ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3262,23 +3381,19 @@ TEST(jump_tables3) {
__ nop();
}
- __ stop("chk");
- if ((assm.pc_offset() & 7) == 0) {
- __ nop();
- }
+ __ Align(8);
__ bind(&dispatch);
{
- __ BlockTrampolinePoolFor(kNumCases * 2 + 7);
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
PredictableCodeSizeScope predictable(
- &assm, (kNumCases * 2 + 7) * Assembler::kInstrSize);
+ &assm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
Label here;
__ bal(&here);
- __ nop();
+ __ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
- __ dsll(at, a0, 3);
__ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 5 * Assembler::kInstrSize));
+ __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3301,7 +3416,8 @@ TEST(jump_tables3) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
- Handle<Object> result(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0), isolate);
+ Handle<Object> result(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0), isolate);
#ifdef OBJECT_PRINT
::printf("f(%d) = ", i);
result->Print(std::cout);
@@ -3375,7 +3491,7 @@ TEST(BITSWAP) {
t.r4 = 0xFF8017FF8B71FCDE;
t.r5 = 0x10C021098B71FCDE;
t.r6 = 0xFB8017FF781A15C3;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(static_cast<int64_t>(0x000000001E58A8C3L), t.r1);
@@ -3420,7 +3536,8 @@ TEST(class_fmt) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles t.a ... t.f.
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
__ ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ class_d(f6, f4);
@@ -3536,7 +3653,7 @@ TEST(class_fmt) {
t.fPosSubnorm = FLT_MIN / 20.0;
t.fPosZero = +0.0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
// Expected double results.
CHECK_EQ(bit_cast<int64_t>(t.dSignalingNan), 0x001);
@@ -3569,7 +3686,7 @@ TEST(ABS) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
int64_t fir;
@@ -3607,34 +3724,34 @@ TEST(ABS) {
F3 f = FUNCTION_CAST<F3>(code->entry());
test.a = -2.0;
test.b = -2.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
test.a = 2.0;
test.b = 2.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, 2.0);
CHECK_EQ(test.b, 2.0);
// Testing biggest positive number
test.a = std::numeric_limits<double>::max();
test.b = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest negative number
test.a = -std::numeric_limits<double>::max(); // lowest()
test.b = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max());
CHECK_EQ(test.b, std::numeric_limits<float>::max());
// Testing smallest positive number
test.a = -std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::min());
@@ -3643,7 +3760,7 @@ TEST(ABS) {
/ std::numeric_limits<double>::min();
test.b = -std::numeric_limits<float>::max()
/ std::numeric_limits<float>::min();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.a, std::numeric_limits<double>::max()
/ std::numeric_limits<double>::min());
CHECK_EQ(test.b, std::numeric_limits<float>::max()
@@ -3651,13 +3768,13 @@ TEST(ABS) {
test.a = std::numeric_limits<double>::quiet_NaN();
test.b = std::numeric_limits<float>::quiet_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isnan(test.a), true);
CHECK_EQ(std::isnan(test.b), true);
test.a = std::numeric_limits<double>::signaling_NaN();
test.b = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isnan(test.a), true);
CHECK_EQ(std::isnan(test.b), true);
}
@@ -3667,7 +3784,7 @@ TEST(ADD_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double a;
@@ -3702,7 +3819,7 @@ TEST(ADD_FMT) {
test.b = 3.0;
test.fa = 2.0;
test.fb = 3.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.c, 5.0);
CHECK_EQ(test.fc, 5.0);
@@ -3710,7 +3827,7 @@ TEST(ADD_FMT) {
test.b = -std::numeric_limits<double>::max(); // lowest()
test.fa = std::numeric_limits<float>::max();
test.fb = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.c, 0.0);
CHECK_EQ(test.fc, 0.0);
@@ -3718,7 +3835,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::max();
test.fa = std::numeric_limits<float>::max();
test.fb = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isfinite(test.c), false);
CHECK_EQ(std::isfinite(test.fc), false);
@@ -3726,7 +3843,7 @@ TEST(ADD_FMT) {
test.b = std::numeric_limits<double>::signaling_NaN();
test.fa = 5.0;
test.fb = std::numeric_limits<float>::signaling_NaN();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(std::isnan(test.c), true);
CHECK_EQ(std::isnan(test.fc), true);
}
@@ -3737,7 +3854,8 @@ TEST(C_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -3856,7 +3974,7 @@ TEST(C_COND_FMT) {
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -3878,7 +3996,7 @@ TEST(C_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 0U);
@@ -3900,7 +4018,7 @@ TEST(C_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 0U);
CHECK_EQ(test.dEq, 1U);
@@ -3922,7 +4040,7 @@ TEST(C_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dF, 0U);
CHECK_EQ(test.dUn, 1U);
CHECK_EQ(test.dEq, 0U);
@@ -3948,7 +4066,8 @@ TEST(CMP_COND_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
double dOp1;
@@ -4061,7 +4180,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 3.0;
test.fOp1 = 2.0;
test.fOp2 = 3.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4086,7 +4205,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = std::numeric_limits<double>::min();
test.fOp1 = std::numeric_limits<float>::min();
test.fOp2 = -std::numeric_limits<float>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4111,7 +4230,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = -std::numeric_limits<double>::max(); // lowest()
test.fOp1 = std::numeric_limits<float>::max();
test.fOp2 = std::numeric_limits<float>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dTrue);
@@ -4136,7 +4255,7 @@ TEST(CMP_COND_FMT) {
test.dOp2 = 0.0;
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = 0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(bit_cast<uint64_t>(test.dF), dFalse);
CHECK_EQ(bit_cast<uint64_t>(test.dUn), dTrue);
CHECK_EQ(bit_cast<uint64_t>(test.dEq), dFalse);
@@ -4164,7 +4283,7 @@ TEST(CVT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
float cvt_d_s_in;
@@ -4242,7 +4361,7 @@ TEST(CVT) {
test.cvt_w_s_in = -0.51;
test.cvt_w_d_in = -0.51;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4266,7 +4385,7 @@ TEST(CVT) {
test.cvt_w_s_in = 0.49;
test.cvt_w_d_in = 0.49;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4289,7 +4408,7 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::max();
test.cvt_w_d_in = std::numeric_limits<double>::max();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4313,7 +4432,7 @@ TEST(CVT) {
test.cvt_w_s_in = -std::numeric_limits<float>::max(); // lowest()
test.cvt_w_d_in = -std::numeric_limits<double>::max(); // lowest()
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4344,7 +4463,7 @@ TEST(CVT) {
test.cvt_w_s_in = std::numeric_limits<float>::min();
test.cvt_w_d_in = std::numeric_limits<double>::min();
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.cvt_d_s_out, static_cast<double>(test.cvt_d_s_in));
CHECK_EQ(test.cvt_d_w_out, static_cast<double>(test.cvt_d_w_in));
CHECK_EQ(test.cvt_d_l_out, static_cast<double>(test.cvt_d_l_in));
@@ -4362,7 +4481,7 @@ TEST(DIV_FMT) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
typedef struct test {
double dOp1;
@@ -4403,7 +4522,7 @@ TEST(DIV_FMT) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F3 f = FUNCTION_CAST<F3>(code->entry());
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
const int test_size = 3;
@@ -4444,7 +4563,7 @@ TEST(DIV_FMT) {
test.fOp1 = fOp1[i];
test.fOp2 = fOp2[i];
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(test.dRes, dRes[i]);
CHECK_EQ(test.fRes, fRes[i]);
}
@@ -4454,7 +4573,7 @@ TEST(DIV_FMT) {
test.fOp1 = FLT_MAX;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(false, std::isfinite(test.dRes));
CHECK_EQ(false, std::isfinite(test.fRes));
@@ -4463,7 +4582,7 @@ TEST(DIV_FMT) {
test.fOp1 = 0.0;
test.fOp2 = -0.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(true, std::isnan(test.dRes));
CHECK_EQ(true, std::isnan(test.fRes));
@@ -4472,7 +4591,7 @@ TEST(DIV_FMT) {
test.fOp1 = std::numeric_limits<float>::quiet_NaN();
test.fOp2 = -5.0;
- (CALL_GENERATED_CODE(f, &test, 0, 0, 0, 0));
+ (CALL_GENERATED_CODE(isolate, f, &test, 0, 0, 0, 0));
CHECK_EQ(true, std::isnan(test.dRes));
CHECK_EQ(true, std::isnan(test.fRes));
}
@@ -4482,7 +4601,7 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ align(v0, a0, a1, bp);
__ jr(ra);
@@ -4493,12 +4612,10 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ F4 f = FUNCTION_CAST<F4>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, rs_value,
- rt_value,
- 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, rs_value, rt_value, 0, 0, 0));
return res;
}
@@ -4537,7 +4654,7 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ dalign(v0, a0, a1, bp);
__ jr(ra);
@@ -4549,10 +4666,8 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F4 f = FUNCTION_CAST<F4>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, rs_value,
- rt_value,
- 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, rs_value, rt_value, 0, 0, 0));
return res;
}
@@ -4597,7 +4712,7 @@ uint64_t run_aluipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ aluipc(v0, offset);
__ jr(ra);
@@ -4611,8 +4726,8 @@ uint64_t run_aluipc(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint64_t) f; // Set the program counter.
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4651,7 +4766,7 @@ uint64_t run_auipc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ auipc(v0, offset);
__ jr(ra);
@@ -4665,8 +4780,8 @@ uint64_t run_auipc(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint64_t) f; // Set the program counter.
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4701,11 +4816,192 @@ TEST(r6_auipc) {
}
+uint64_t run_aui(uint64_t rs, uint16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(t0, rs);
+ __ aui(v0, t0, offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res =
+ reinterpret_cast<uint64_t>
+ (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+uint64_t run_daui(uint64_t rs, uint16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(t0, rs);
+ __ daui(v0, t0, offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res =
+ reinterpret_cast<uint64_t>
+ (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+uint64_t run_dahi(uint64_t rs, uint16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(v0, rs);
+ __ dahi(v0, offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res =
+ reinterpret_cast<uint64_t>
+ (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+uint64_t run_dati(uint64_t rs, uint16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(v0, rs);
+ __ dati(v0, offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res =
+ reinterpret_cast<uint64_t>
+ (CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(r6_aui_family) {
+ if (kArchVariant == kMips64r6) {
+ CcTest::InitializeVM();
+
+ struct TestCaseAui {
+ uint64_t rs;
+ uint16_t offset;
+ uint64_t ref_res;
+ };
+
+ // AUI test cases.
+ struct TestCaseAui aui_tc[] = {
+ {0xfffeffff, 0x1, 0xffffffffffffffff},
+ {0xffffffff, 0x0, 0xffffffffffffffff},
+ {0, 0xffff, 0xffffffffffff0000},
+ {0x0008ffff, 0xfff7, 0xffffffffffffffff},
+ {32767, 32767, 0x000000007fff7fff},
+ {0x00000000ffffffff, 0x1, 0x000000000000ffff},
+ {0xffffffff, 0xffff, 0xfffffffffffeffff},
+ };
+
+ size_t nr_test_cases = sizeof(aui_tc) / sizeof(TestCaseAui);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_aui(aui_tc[i].rs, aui_tc[i].offset);
+ CHECK_EQ(aui_tc[i].ref_res, res);
+ }
+
+ // DAUI test cases.
+ struct TestCaseAui daui_tc[] = {
+ {0xfffffffffffeffff, 0x1, 0xffffffffffffffff},
+ {0xffffffffffffffff, 0x0, 0xffffffffffffffff},
+ {0, 0xffff, 0xffffffffffff0000},
+ {0x0008ffff, 0xfff7, 0xffffffffffffffff},
+ {32767, 32767, 0x000000007fff7fff},
+ {0x00000000ffffffff, 0x1, 0x000000010000ffff},
+ {0xffffffff, 0xffff, 0x00000000fffeffff},
+ };
+
+ nr_test_cases = sizeof(daui_tc) / sizeof(TestCaseAui);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_daui(daui_tc[i].rs, daui_tc[i].offset);
+ CHECK_EQ(daui_tc[i].ref_res, res);
+ }
+
+ // DATI test cases.
+ struct TestCaseAui dati_tc[] = {
+ {0xfffffffffffeffff, 0x1, 0x0000fffffffeffff},
+ {0xffffffffffffffff, 0x0, 0xffffffffffffffff},
+ {0, 0xffff, 0xffff000000000000},
+ {0x0008ffff, 0xfff7, 0xfff700000008ffff},
+ {32767, 32767, 0x7fff000000007fff},
+ {0x00000000ffffffff, 0x1, 0x00010000ffffffff},
+ {0xffffffffffff, 0xffff, 0xffffffffffffffff},
+ };
+
+ nr_test_cases = sizeof(dati_tc) / sizeof(TestCaseAui);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_dati(dati_tc[i].rs, dati_tc[i].offset);
+ CHECK_EQ(dati_tc[i].ref_res, res);
+ }
+
+ // DAHI test cases.
+ struct TestCaseAui dahi_tc[] = {
+ {0xfffffffeffffffff, 0x1, 0xffffffffffffffff},
+ {0xffffffffffffffff, 0x0, 0xffffffffffffffff},
+ {0, 0xffff, 0xffffffff00000000},
+ };
+
+ nr_test_cases = sizeof(dahi_tc) / sizeof(TestCaseAui);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_dahi(dahi_tc[i].rs, dahi_tc[i].offset);
+ CHECK_EQ(dahi_tc[i].ref_res, res);
+ }
+ }
+}
+
+
uint64_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t3, a4, 0xffff; (0x250fffff)
@@ -4740,8 +5036,8 @@ uint64_t run_lwpc(int offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4781,7 +5077,7 @@ uint64_t run_lwupc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2^8k
// addiu t3, a4, 0xffff; (0x250fffff)
@@ -4816,8 +5112,8 @@ uint64_t run_lwupc(int offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4857,7 +5153,7 @@ uint64_t run_jic(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label get_program_counter, stop_execution;
__ push(ra);
@@ -4900,8 +5196,8 @@ uint64_t run_jic(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -4938,7 +5234,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label stop_execution;
__ li(v0, 0);
@@ -4972,8 +5268,8 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, value, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, value, 0, 0, 0, 0));
return res;
}
@@ -5011,7 +5307,7 @@ uint64_t run_jialc(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label main_block, get_program_counter;
__ push(ra);
@@ -5066,8 +5362,8 @@ uint64_t run_jialc(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5105,7 +5401,7 @@ uint64_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ addiupc(v0, imm19);
__ jr(ra);
@@ -5119,8 +5415,8 @@ uint64_t run_addiupc(int32_t imm19) {
F2 f = FUNCTION_CAST<F2>(code->entry());
PC = (uint64_t) f; // Set the program counter.
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5159,7 +5455,7 @@ uint64_t run_ldpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
// 256k instructions; 2 * 2^7k = 2^8k
// addiu t3, a4, 0xffff; (0x250fffff)
@@ -5194,8 +5490,8 @@ uint64_t run_ldpc(int offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5234,7 +5530,7 @@ int64_t run_bc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5242,9 +5538,8 @@ int64_t run_bc(int32_t offset) {
__ li(t8, 0);
__ li(t9, 2); // Condition for the stopping execution.
- uint32_t instruction_addiu = 0x24420001; // addiu v0, v0, 1
for (int32_t i = -100; i <= -11; ++i) {
- __ dd(instruction_addiu);
+ __ addiu(v0, v0, 1);
}
__ addiu(t8, t8, 1); // -10
@@ -5263,7 +5558,7 @@ int64_t run_bc(int32_t offset) {
__ bc(offset); // -1
for (int32_t i = 0; i <= 99; ++i) {
- __ dd(instruction_addiu);
+ __ addiu(v0, v0, 1);
}
__ pop(ra);
@@ -5277,8 +5572,8 @@ int64_t run_bc(int32_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5315,7 +5610,7 @@ int64_t run_balc(int32_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label continue_1, stop_execution;
__ push(ra);
@@ -5359,8 +5654,8 @@ int64_t run_balc(int32_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5396,7 +5691,7 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ dsll(v0, a0, sa_value);
__ jr(ra);
@@ -5407,10 +5702,10 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
- F2 f = FUNCTION_CAST<F2>(code->entry());
+ F4 f = FUNCTION_CAST<F4>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, rt_value, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, rt_value, 0, 0, 0, 0));
return res;
}
@@ -5444,7 +5739,7 @@ uint64_t run_bal(int16_t offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
__ mov(t0, ra);
__ bal(offset); // Equivalent for "BGEZAL zero_reg, offset".
@@ -5465,8 +5760,8 @@ uint64_t run_bal(int16_t offset) {
F2 f = FUNCTION_CAST<F2>(code->entry());
- uint64_t res =
- reinterpret_cast<uint64_t>(CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
return res;
}
@@ -5500,7 +5795,8 @@ TEST(Trampoline) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
Label done;
size_t nr_calls = kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
@@ -5517,8 +5813,8 @@ TEST(Trampoline) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
F2 f = FUNCTION_CAST<F2>(code->entry());
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 42, 42, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, 42, 42, 0, 0, 0));
CHECK_EQ(res, 0);
}
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index b2dca6a9fc..d29a59d91d 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -48,7 +48,7 @@ typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
// Simple add parameter 1 to parameter 2 and return
TEST(0) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -66,8 +66,8 @@ TEST(0) {
code->Print();
#endif
F2 f = FUNCTION_CAST<F2>(code->entry());
- intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 3, 4, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(7, static_cast<int>(res));
}
@@ -76,7 +76,7 @@ TEST(0) {
// Loop 100 times, adding loop counter to result
TEST(1) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -105,8 +105,8 @@ TEST(1) {
code->Print();
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
- intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0));
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 100, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(5050, static_cast<int>(res));
}
@@ -114,7 +114,7 @@ TEST(1) {
TEST(2) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -156,8 +156,8 @@ TEST(2) {
code->Print();
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
- intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0));
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, 10, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(3628800, static_cast<int>(res));
}
@@ -165,7 +165,7 @@ TEST(2) {
TEST(3) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -175,7 +175,7 @@ TEST(3) {
} T;
T t;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Assembler assm(CcTest::i_isolate(), NULL, 0);
Label L, C;
__ function_descriptor();
@@ -232,8 +232,8 @@ TEST(3) {
t.i = 100000;
t.c = 10;
t.s = 1000;
- intptr_t res =
- reinterpret_cast<intptr_t>(CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0));
+ intptr_t res = reinterpret_cast<intptr_t>(
+ CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0));
::printf("f() = %" V8PRIdPTR "\n", res);
CHECK_EQ(101010, static_cast<int>(res));
CHECK_EQ(100000 / 2, t.i);
@@ -245,7 +245,7 @@ TEST(3) {
TEST(4) {
// Test the VFP floating point instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -267,7 +267,7 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(Isolate::Current(), NULL, 0);
+ Assembler assm(CcTest::i_isolate(), NULL, 0);
Label L, C;
if (CpuFeatures::IsSupported(VFP3)) {
@@ -358,7 +358,7 @@ TEST(4) {
t.n = 123.456;
t.x = 4.5;
t.y = 9.0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(4.5, t.y);
CHECK_EQ(9.0, t.x);
@@ -380,7 +380,7 @@ TEST(4) {
TEST(5) {
// Test the ARMv7 bitfield instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -407,7 +407,7 @@ TEST(5) {
#endif
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0xAAAAAAAA, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, 0xAAAAAAAA, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(-7, res);
}
@@ -417,7 +417,7 @@ TEST(5) {
TEST(6) {
// Test saturating instructions.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -443,7 +443,7 @@ TEST(6) {
#endif
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
::printf("f() = %d\n", res);
CHECK_EQ(382, res);
}
@@ -460,7 +460,7 @@ static void TestRoundingMode(VCVTTypes types,
int expected,
bool expected_exception = false) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -519,7 +519,7 @@ static void TestRoundingMode(VCVTTypes types,
#endif
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
::printf("res = %d\n", res);
CHECK_EQ(expected, res);
}
@@ -639,7 +639,7 @@ TEST(7) {
TEST(8) {
// Test VFP multi load/store with ia_w.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -724,7 +724,7 @@ TEST(8) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
@@ -751,7 +751,7 @@ TEST(8) {
TEST(9) {
// Test VFP multi load/store with ia.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -840,7 +840,7 @@ TEST(9) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
@@ -867,7 +867,7 @@ TEST(9) {
TEST(10) {
// Test VFP multi load/store with db_w.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -952,7 +952,7 @@ TEST(10) {
f.g = 7.0;
f.h = 8.0;
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, fn, &d, &f, 0, 0, 0);
USE(dummy);
CHECK_EQ(7.7, d.a);
@@ -979,7 +979,7 @@ TEST(10) {
TEST(11) {
// Test instructions using the carry flag.
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
typedef struct {
@@ -1032,7 +1032,7 @@ TEST(11) {
Code::cast(code)->Print();
#endif
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &i, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(0xabcd0001, i.a);
@@ -1045,7 +1045,7 @@ TEST(11) {
TEST(12) {
// Test chaining of label usages within instructions (issue 1644).
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index cc87c4ebf6..66199fb540 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -591,6 +591,7 @@ TEST(AssemblerMultiByteNop) {
void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
byte buffer[1024];
CHECK(args[0]->IsArray());
@@ -605,9 +606,15 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Store input vector on the stack.
for (unsigned i = 0; i < ELEMENT_COUNT; i++) {
- __ movl(rax, Immediate(vec->Get(i)->Int32Value()));
+ __ movl(rax, Immediate(vec->Get(context, i)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust()));
__ shlq(rax, Immediate(0x20));
- __ orq(rax, Immediate(vec->Get(++i)->Int32Value()));
+ __ orq(rax, Immediate(vec->Get(context, ++i)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust()));
__ pushq(rax);
}
@@ -641,7 +648,7 @@ TEST(StackAlignmentForSSE2) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
global_template->Set(v8_str("do_sse2"),
v8::FunctionTemplate::New(isolate, DoSSE2));
@@ -653,20 +660,21 @@ TEST(StackAlignmentForSSE2) {
"}");
v8::Local<v8::Object> global_object = env->Global();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+ v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
+ global_object->Get(env.local(), v8_str("foo")).ToLocalChecked());
int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
v8::Local<v8::Array> v8_vec = v8::Array::New(isolate, ELEMENT_COUNT);
for (unsigned i = 0; i < ELEMENT_COUNT; i++) {
- v8_vec->Set(i, v8_num(vec[i]));
+ v8_vec->Set(env.local(), i, v8_num(vec[i])).FromJust();
}
v8::Local<v8::Value> args[] = { v8_vec };
- v8::Local<v8::Value> result = foo->Call(global_object, 1, args);
+ v8::Local<v8::Value> result =
+ foo->Call(env.local(), global_object, 1, args).ToLocalChecked();
// The mask should be 0b1000.
- CHECK_EQ(8, result->Int32Value());
+ CHECK_EQ(8, result->Int32Value(env.local()).FromJust());
}
#undef ELEMENT_COUNT
@@ -710,7 +718,8 @@ TEST(AssemblerX64SSE) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
__ shufps(xmm1, xmm1, 0x0); // brocast second argument
@@ -747,7 +756,8 @@ TEST(AssemblerX64FMA_sd) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
@@ -972,7 +982,8 @@ TEST(AssemblerX64FMA_ss) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, FMA3);
Label exit;
@@ -1597,7 +1608,8 @@ TEST(AssemblerX64BMI1) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, BMI1);
Label exit;
@@ -1786,7 +1798,8 @@ TEST(AssemblerX64LZCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, LZCNT);
Label exit;
@@ -1845,7 +1858,8 @@ TEST(AssemblerX64POPCNT) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, POPCNT);
Label exit;
@@ -1904,7 +1918,8 @@ TEST(AssemblerX64BMI2) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
{
CpuFeatureScope fscope(&assm, BMI2);
Label exit;
@@ -2164,7 +2179,8 @@ TEST(AssemblerX64JumpTables1) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -2211,7 +2227,8 @@ TEST(AssemblerX64JumpTables2) {
CcTest::InitializeVM();
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
- MacroAssembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
diff --git a/deps/v8/test/cctest/test-assembler-x87.cc b/deps/v8/test/cctest/test-assembler-x87.cc
index d6a0d2d461..a831a0b2f1 100644
--- a/deps/v8/test/cctest/test-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-assembler-x87.cc
@@ -185,7 +185,8 @@ TEST(AssemblerIa329) {
Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
- MacroAssembler assm(isolate, buffer, sizeof buffer);
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
Label equal_l, less_l, greater_l, nan_l;
__ fld_d(Operand(esp, 3 * kPointerSize));
diff --git a/deps/v8/test/cctest/test-ast-expression-visitor.cc b/deps/v8/test/cctest/test-ast-expression-visitor.cc
index b95905e32e..b6cca6ac38 100644
--- a/deps/v8/test/cctest/test-ast-expression-visitor.cc
+++ b/deps/v8/test/cctest/test-ast-expression-visitor.cc
@@ -6,11 +6,11 @@
#include "src/v8.h"
-#include "src/ast.h"
-#include "src/ast-expression-visitor.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
#include "test/cctest/expression-type-collector.h"
#include "test/cctest/expression-type-collector-macros.h"
@@ -270,6 +270,33 @@ TEST(VisitExpressions) {
}
+TEST(VisitConditional) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+ // Check that traversing the ternary operator works.
+ const char test_function[] =
+ "function foo() {\n"
+ " var a, b, c;\n"
+ " var x = a ? b : c;\n"
+ "}\n";
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(x, Bounds::Unbounded());
+ CHECK_EXPR(Conditional, Bounds::Unbounded()) {
+ CHECK_VAR(a, Bounds::Unbounded());
+ CHECK_VAR(b, Bounds::Unbounded());
+ CHECK_VAR(c, Bounds::Unbounded());
+ }
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
+
+
TEST(VisitEmptyForStatment) {
v8::V8::Initialize();
HandleAndZoneScope handles;
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 7d8b4059f5..49e44eb7b9 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index 385718f486..f59d85483d 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -51,7 +51,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
inline_fastpath);
@@ -146,7 +147,7 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
- return CALL_GENERATED_FP_INT(func, from, 0);
+ return CALL_GENERATED_FP_INT(CcTest::i_isolate(), func, from, 0);
#else
return (*func)(from);
#endif
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index c7a6cdb4be..f5a40789be 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -51,7 +51,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
v8::base::OS::Allocate(actual_size, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
inline_fastpath);
@@ -146,7 +147,7 @@ int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
Simulator::CallArgument(from),
Simulator::CallArgument::End()
};
- return static_cast<int32_t>(Simulator::current(Isolate::Current())
+ return static_cast<int32_t>(Simulator::current(CcTest::i_isolate())
->CallInt64(FUNCTION_ADDR(func), args));
#else
return (*func)(from);
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index e0f9a453ed..efc39e9a2e 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -51,7 +51,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
int offset =
source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index de6203c0ff..ce577da46b 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -53,7 +53,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
inline_fastpath);
@@ -140,7 +141,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(&desc);
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
@@ -156,8 +157,9 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
- Simulator::current(Isolate::Current())->CallFP(FUNCTION_ADDR(func), from, 0.);
- return Simulator::current(Isolate::Current())->get_register(v0.code());
+ Simulator::current(CcTest::i_isolate())
+ ->CallFP(FUNCTION_ADDR(func), from, 0.);
+ return Simulator::current(CcTest::i_isolate())->get_register(v0.code());
#else
return (*func)(from);
#endif
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index 929c4c326c..97f57b9168 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -53,7 +53,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
inline_fastpath);
@@ -138,7 +139,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(&desc);
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
@@ -154,9 +155,10 @@ static Isolate* GetIsolateFrom(LocalContext* context) {
int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
double from) {
#ifdef USE_SIMULATOR
- Simulator::current(Isolate::Current())->CallFP(FUNCTION_ADDR(func), from, 0.);
+ Simulator::current(CcTest::i_isolate())
+ ->CallFP(FUNCTION_ADDR(func), from, 0.);
return static_cast<int32_t>(
- Simulator::current(Isolate::Current())->get_register(v0.code()));
+ Simulator::current(CcTest::i_isolate())->get_register(v0.code()));
#else
return (*func)(from);
#endif
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index 31c8a134ea..786da547e4 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -51,7 +51,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
int offset =
source_reg.is(rsp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
diff --git a/deps/v8/test/cctest/test-code-stubs-x87.cc b/deps/v8/test/cctest/test-code-stubs-x87.cc
index e0f9a453ed..efc39e9a2e 100644
--- a/deps/v8/test/cctest/test-code-stubs-x87.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x87.cc
@@ -51,7 +51,8 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
Assembler::kMinimalBufferSize, &actual_size, true));
CHECK(buffer);
HandleScope handles(isolate);
- MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
int offset =
source_reg.is(esp) ? 0 : (HeapNumber::kValueOffset - kSmiTagSize);
DoubleToIStub stub(isolate, source_reg, destination_reg, offset, true);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index e15aef74a0..db2ccb29af 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -32,7 +32,7 @@
#include "src/compiler.h"
#include "src/disasm.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -262,8 +262,7 @@ TEST(Regression236) {
TEST(GetScriptLineNumber) {
LocalContext context;
v8::HandleScope scope(CcTest::isolate());
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(CcTest::isolate(), "test"));
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("test"));
const char function_f[] = "function f() {}";
const int max_rows = 1000;
const int buffer_size = max_rows + sizeof(function_f);
@@ -275,12 +274,13 @@ TEST(GetScriptLineNumber) {
if (i > 0)
buffer[i - 1] = '\n';
MemCopy(&buffer[i], function_f, sizeof(function_f) - 1);
- v8::Handle<v8::String> script_body =
- v8::String::NewFromUtf8(CcTest::isolate(), buffer.start());
- v8::Script::Compile(script_body, &origin)->Run();
- v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(context->Global()->Get(
- v8::String::NewFromUtf8(CcTest::isolate(), "f")));
+ v8::Local<v8::String> script_body = v8_str(buffer.start());
+ v8::Script::Compile(context.local(), script_body, &origin)
+ .ToLocalChecked()
+ ->Run(context.local())
+ .ToLocalChecked();
+ v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
+ context->Global()->Get(context.local(), v8_str("f")).ToLocalChecked());
CHECK_EQ(i, f->GetScriptLineNumber());
}
}
@@ -292,14 +292,16 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft()) return;
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
// Make sure function f has a call that uses a type feedback slot.
CompileRun("function fun() {};"
"fun1 = fun;"
"function f(a) { a(); } f(fun1);");
- Handle<JSFunction> f = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f")))));
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CcTest::global()->Get(context, v8_str("f")).ToLocalChecked())));
// We shouldn't have deoptimization support. We want to recompile and
// verify that our feedback vector preserves information.
@@ -329,6 +331,7 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
if (i::FLAG_always_opt || !i::FLAG_lazy) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
CompileRun("function builder() {"
" call_target = function() { return 3; };"
@@ -342,9 +345,10 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
"}"
"morphing_call = builder();");
- Handle<JSFunction> f = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Handle<v8::Function>::Cast(
- CcTest::global()->Get(v8_str("morphing_call")))));
+ Handle<JSFunction> f = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CcTest::global()
+ ->Get(context, v8_str("morphing_call"))
+ .ToLocalChecked())));
// Not compiled, and so no feedback vector allocated yet.
CHECK(!f->shared()->is_compiled());
@@ -367,8 +371,9 @@ TEST(OptimizedCodeSharing1) {
v8::HandleScope scope(CcTest::isolate());
for (int i = 0; i < 3; i++) {
LocalContext env;
- env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
- v8::Integer::New(CcTest::isolate(), i));
+ env->Global()
+ ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
+ .FromJust();
CompileRun(
"function MakeClosure() {"
" return function() { return x; };"
@@ -381,10 +386,14 @@ TEST(OptimizedCodeSharing1) {
"var closure2 = MakeClosure();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure1")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure1"))
+ .ToLocalChecked())));
Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure2")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure2"))
+ .ToLocalChecked())));
CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK_EQ(fun1->code(), fun2->code());
@@ -410,9 +419,13 @@ TEST(OptimizedCodeSharing2) {
Handle<Code> reference_code;
{
LocalContext env;
- env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
- v8::Integer::New(CcTest::isolate(), 23));
- script->GetUnboundScript()->BindToCurrentContext()->Run();
+ env->Global()
+ ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), 23))
+ .FromJust();
+ script->GetUnboundScript()
+ ->BindToCurrentContext()
+ ->Run(env.local())
+ .ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
@@ -420,15 +433,21 @@ TEST(OptimizedCodeSharing2) {
"%DebugPrint(closure0());");
Handle<JSFunction> fun0 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure0")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure0"))
+ .ToLocalChecked())));
CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
reference_code = handle(fun0->code());
}
for (int i = 0; i < 3; i++) {
LocalContext env;
- env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
- v8::Integer::New(CcTest::isolate(), i));
- script->GetUnboundScript()->BindToCurrentContext()->Run();
+ env->Global()
+ ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
+ .FromJust();
+ script->GetUnboundScript()
+ ->BindToCurrentContext()
+ ->Run(env.local())
+ .ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
@@ -438,10 +457,14 @@ TEST(OptimizedCodeSharing2) {
"var closure2 = MakeClosure();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure1")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure1"))
+ .ToLocalChecked())));
Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure2")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure2"))
+ .ToLocalChecked())));
CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK_EQ(*reference_code, fun1->code());
@@ -468,9 +491,13 @@ TEST(OptimizedCodeSharing3) {
Handle<Code> reference_code;
{
LocalContext env;
- env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
- v8::Integer::New(CcTest::isolate(), 23));
- script->GetUnboundScript()->BindToCurrentContext()->Run();
+ env->Global()
+ ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), 23))
+ .FromJust();
+ script->GetUnboundScript()
+ ->BindToCurrentContext()
+ ->Run(env.local())
+ .ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
@@ -478,7 +505,9 @@ TEST(OptimizedCodeSharing3) {
"%DebugPrint(closure0());");
Handle<JSFunction> fun0 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure0")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure0"))
+ .ToLocalChecked())));
CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
reference_code = handle(fun0->code());
// Evict only the context-dependent entry from the optimized code map. This
@@ -487,9 +516,13 @@ TEST(OptimizedCodeSharing3) {
}
for (int i = 0; i < 3; i++) {
LocalContext env;
- env->Global()->Set(v8::String::NewFromUtf8(CcTest::isolate(), "x"),
- v8::Integer::New(CcTest::isolate(), i));
- script->GetUnboundScript()->BindToCurrentContext()->Run();
+ env->Global()
+ ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
+ .FromJust();
+ script->GetUnboundScript()
+ ->BindToCurrentContext()
+ ->Run(env.local())
+ .ToLocalChecked();
CompileRun(
"var closure0 = MakeClosure();"
"%DebugPrint(closure0());"
@@ -499,10 +532,14 @@ TEST(OptimizedCodeSharing3) {
"var closure2 = MakeClosure();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure1")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure1"))
+ .ToLocalChecked())));
Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8_str("closure2")))));
+ env->Global()
+ ->Get(env.local(), v8_str("closure2"))
+ .ToLocalChecked())));
CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
CHECK_EQ(*reference_code, fun1->code());
@@ -516,28 +553,33 @@ TEST(CompileFunctionInContext) {
v8::HandleScope scope(CcTest::isolate());
LocalContext env;
CompileRun("var r = 10;");
- v8::Local<v8::Object> math =
- v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("Math")));
+ v8::Local<v8::Object> math = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("Math")).ToLocalChecked());
v8::ScriptCompiler::Source script_source(v8_str(
"a = PI * r * r;"
"x = r * cos(PI);"
"y = r * sin(PI / 2);"));
- v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
- CcTest::isolate(), &script_source, env.local(), 0, NULL, 1, &math);
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
+ 0, NULL, 1, &math)
+ .ToLocalChecked();
CHECK(!fun.IsEmpty());
- fun->Call(env->Global(), 0, NULL);
- CHECK(env->Global()->Has(v8_str("a")));
- v8::Local<v8::Value> a = env->Global()->Get(v8_str("a"));
+ fun->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ CHECK(env->Global()->Has(env.local(), v8_str("a")).FromJust());
+ v8::Local<v8::Value> a =
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked();
CHECK(a->IsNumber());
- CHECK(env->Global()->Has(v8_str("x")));
- v8::Local<v8::Value> x = env->Global()->Get(v8_str("x"));
+ CHECK(env->Global()->Has(env.local(), v8_str("x")).FromJust());
+ v8::Local<v8::Value> x =
+ env->Global()->Get(env.local(), v8_str("x")).ToLocalChecked();
CHECK(x->IsNumber());
- CHECK(env->Global()->Has(v8_str("y")));
- v8::Local<v8::Value> y = env->Global()->Get(v8_str("y"));
+ CHECK(env->Global()->Has(env.local(), v8_str("y")).FromJust());
+ v8::Local<v8::Value> y =
+ env->Global()->Get(env.local(), v8_str("y")).ToLocalChecked();
CHECK(y->IsNumber());
- CHECK_EQ(314.1592653589793, a->NumberValue());
- CHECK_EQ(-10.0, x->NumberValue());
- CHECK_EQ(10.0, y->NumberValue());
+ CHECK_EQ(314.1592653589793, a->NumberValue(env.local()).FromJust());
+ CHECK_EQ(-10.0, x->NumberValue(env.local()).FromJust());
+ CHECK_EQ(10.0, y->NumberValue(env.local()).FromJust());
}
@@ -552,17 +594,22 @@ TEST(CompileFunctionInContextComplex) {
"var a = {x: 8, y: 16};"
"var b = {x: 32};");
v8::Local<v8::Object> ext[2];
- ext[0] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
- ext[1] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("b")));
+ ext[0] = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
+ ext[1] = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("b")).ToLocalChecked());
v8::ScriptCompiler::Source script_source(v8_str("result = x + y + z"));
- v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
- CcTest::isolate(), &script_source, env.local(), 0, NULL, 2, ext);
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
+ 0, NULL, 2, ext)
+ .ToLocalChecked();
CHECK(!fun.IsEmpty());
- fun->Call(env->Global(), 0, NULL);
- CHECK(env->Global()->Has(v8_str("result")));
- v8::Local<v8::Value> result = env->Global()->Get(v8_str("result"));
+ fun->Call(env.local(), env->Global(), 0, NULL).ToLocalChecked();
+ CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
+ v8::Local<v8::Value> result =
+ env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
CHECK(result->IsNumber());
- CHECK_EQ(52.0, result->NumberValue());
+ CHECK_EQ(52.0, result->NumberValue(env.local()).FromJust());
}
@@ -572,18 +619,22 @@ TEST(CompileFunctionInContextArgs) {
LocalContext env;
CompileRun("var a = {x: 23};");
v8::Local<v8::Object> ext[1];
- ext[0] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ ext[0] = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
v8::ScriptCompiler::Source script_source(v8_str("result = x + b"));
v8::Local<v8::String> arg = v8_str("b");
- v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
- CcTest::isolate(), &script_source, env.local(), 1, &arg, 1, ext);
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
+ 1, &arg, 1, ext)
+ .ToLocalChecked();
CHECK(!fun.IsEmpty());
v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
- fun->Call(env->Global(), 1, &b_value);
- CHECK(env->Global()->Has(v8_str("result")));
- v8::Local<v8::Value> result = env->Global()->Get(v8_str("result"));
+ fun->Call(env.local(), env->Global(), 1, &b_value).ToLocalChecked();
+ CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
+ v8::Local<v8::Value> result =
+ env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
CHECK(result->IsNumber());
- CHECK_EQ(65.0, result->NumberValue());
+ CHECK_EQ(65.0, result->NumberValue(env.local()).FromJust());
}
@@ -593,19 +644,23 @@ TEST(CompileFunctionInContextComments) {
LocalContext env;
CompileRun("var a = {x: 23, y: 1, z: 2};");
v8::Local<v8::Object> ext[1];
- ext[0] = v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ ext[0] = v8::Local<v8::Object>::Cast(
+ env->Global()->Get(env.local(), v8_str("a")).ToLocalChecked());
v8::ScriptCompiler::Source script_source(
v8_str("result = /* y + */ x + b // + z"));
v8::Local<v8::String> arg = v8_str("b");
- v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
- CcTest::isolate(), &script_source, env.local(), 1, &arg, 1, ext);
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
+ 1, &arg, 1, ext)
+ .ToLocalChecked();
CHECK(!fun.IsEmpty());
v8::Local<v8::Value> b_value = v8::Number::New(CcTest::isolate(), 42.0);
- fun->Call(env->Global(), 1, &b_value);
- CHECK(env->Global()->Has(v8_str("result")));
- v8::Local<v8::Value> result = env->Global()->Get(v8_str("result"));
+ fun->Call(env.local(), env->Global(), 1, &b_value).ToLocalChecked();
+ CHECK(env->Global()->Has(env.local(), v8_str("result")).FromJust());
+ v8::Local<v8::Value> result =
+ env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
CHECK(result->IsNumber());
- CHECK_EQ(65.0, result->NumberValue());
+ CHECK_EQ(65.0, result->NumberValue(env.local()).FromJust());
}
@@ -615,9 +670,9 @@ TEST(CompileFunctionInContextNonIdentifierArgs) {
LocalContext env;
v8::ScriptCompiler::Source script_source(v8_str("result = 1"));
v8::Local<v8::String> arg = v8_str("b }");
- v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
- CcTest::isolate(), &script_source, env.local(), 1, &arg, 0, NULL);
- CHECK(fun.IsEmpty());
+ CHECK(v8::ScriptCompiler::CompileFunctionInContext(
+ env.local(), &script_source, 1, &arg, 0, NULL)
+ .IsEmpty());
}
@@ -629,12 +684,14 @@ TEST(CompileFunctionInContextScriptOrigin) {
v8::Integer::New(CcTest::isolate(), 22),
v8::Integer::New(CcTest::isolate(), 41));
v8::ScriptCompiler::Source script_source(v8_str("throw new Error()"), origin);
- v8::Local<v8::Function> fun = v8::ScriptCompiler::CompileFunctionInContext(
- CcTest::isolate(), &script_source, env.local(), 0, NULL, 0, NULL);
+ v8::Local<v8::Function> fun =
+ v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
+ 0, NULL, 0, NULL)
+ .ToLocalChecked();
CHECK(!fun.IsEmpty());
- v8::TryCatch try_catch;
+ v8::TryCatch try_catch(CcTest::isolate());
CcTest::isolate()->SetCaptureStackTraceForUncaughtExceptions(true);
- fun->Call(env->Global(), 0, NULL);
+ CHECK(fun->Call(env.local(), env->Global(), 0, NULL).IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(!try_catch.Exception().IsEmpty());
v8::Local<v8::StackTrace> stack =
@@ -648,10 +705,11 @@ TEST(CompileFunctionInContextScriptOrigin) {
#ifdef ENABLE_DISASSEMBLER
-static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
+static Handle<JSFunction> GetJSFunction(v8::Local<v8::Object> obj,
const char* property_name) {
- v8::Local<v8::Function> fun =
- v8::Local<v8::Function>::Cast(obj->Get(v8_str(property_name)));
+ v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
+ obj->Get(CcTest::isolate()->GetCurrentContext(), v8_str(property_name))
+ .ToLocalChecked());
return Handle<JSFunction>::cast(v8::Utils::OpenHandle(*fun));
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index ef3571a7d2..29a24e62df 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -51,8 +51,10 @@ using v8::base::SmartPointer;
// Helper methods
-static v8::Local<v8::Function> GetFunction(v8::Context* env, const char* name) {
- return v8::Local<v8::Function>::Cast(env->Global()->Get(v8_str(name)));
+static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
+ const char* name) {
+ return v8::Local<v8::Function>::Cast(
+ env->Global()->Get(env, v8_str(name)).ToLocalChecked());
}
@@ -135,7 +137,7 @@ i::Code* CreateCode(LocalContext* env) {
CompileRun(script.start());
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*GetFunction(**env, name_start)));
+ v8::Utils::OpenHandle(*GetFunction(env->local(), name_start)));
return fun->code();
}
@@ -416,10 +418,11 @@ TEST(ProfileStartEndTime) {
}
-static v8::CpuProfile* RunProfiler(
- v8::Handle<v8::Context> env, v8::Handle<v8::Function> function,
- v8::Handle<v8::Value> argv[], int argc,
- unsigned min_js_samples, bool collect_samples = false) {
+static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
+ v8::Local<v8::Function> function,
+ v8::Local<v8::Value> argv[], int argc,
+ unsigned min_js_samples,
+ bool collect_samples = false) {
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
v8::Local<v8::String> profile_name = v8_str("my_profile");
@@ -429,7 +432,7 @@ static v8::CpuProfile* RunProfiler(
reinterpret_cast<i::Isolate*>(env->GetIsolate())->logger()->sampler();
sampler->StartCountingSamples();
do {
- function->Call(env->Global(), argc, argv);
+ function->Call(env, env->Global(), argc, argv).ToLocalChecked();
} while (sampler->js_and_external_sample_count() < min_js_samples);
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
@@ -442,21 +445,23 @@ static v8::CpuProfile* RunProfiler(
}
-static bool ContainsString(v8::Handle<v8::String> string,
- const Vector<v8::Handle<v8::String> >& vector) {
+static bool ContainsString(v8::Local<v8::Context> context,
+ v8::Local<v8::String> string,
+ const Vector<v8::Local<v8::String> >& vector) {
for (int i = 0; i < vector.length(); i++) {
- if (string->Equals(vector[i])) return true;
+ if (string->Equals(context, vector[i]).FromJust()) return true;
}
return false;
}
-static void CheckChildrenNames(const v8::CpuProfileNode* node,
- const Vector<v8::Handle<v8::String> >& names) {
+static void CheckChildrenNames(v8::Local<v8::Context> context,
+ const v8::CpuProfileNode* node,
+ const Vector<v8::Local<v8::String> >& names) {
int count = node->GetChildrenCount();
for (int i = 0; i < count; i++) {
- v8::Handle<v8::String> name = node->GetChild(i)->GetFunctionName();
- if (!ContainsString(name, names)) {
+ v8::Local<v8::String> name = node->GetChild(i)->GetFunctionName();
+ if (!ContainsString(context, name, names)) {
char buffer[100];
i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
"Unexpected child '%s' found in '%s'",
@@ -467,7 +472,8 @@ static void CheckChildrenNames(const v8::CpuProfileNode* node,
// Check that there are no duplicates.
for (int j = 0; j < count; j++) {
if (j == i) continue;
- if (name->Equals(node->GetChild(j)->GetFunctionName())) {
+ if (name->Equals(context, node->GetChild(j)->GetFunctionName())
+ .FromJust()) {
char buffer[100];
i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
"Second child with the same name '%s' found in '%s'",
@@ -480,21 +486,25 @@ static void CheckChildrenNames(const v8::CpuProfileNode* node,
}
-static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
+static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
+ const v8::CpuProfileNode* node,
const char* name) {
int count = node->GetChildrenCount();
- v8::Handle<v8::String> nameHandle = v8_str(name);
+ v8::Local<v8::String> nameHandle = v8_str(name);
for (int i = 0; i < count; i++) {
const v8::CpuProfileNode* child = node->GetChild(i);
- if (nameHandle->Equals(child->GetFunctionName())) return child;
+ if (nameHandle->Equals(context, child->GetFunctionName()).FromJust()) {
+ return child;
+ }
}
return NULL;
}
-static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
+static const v8::CpuProfileNode* GetChild(v8::Local<v8::Context> context,
+ const v8::CpuProfileNode* node,
const char* name) {
- const v8::CpuProfileNode* result = FindChild(node, name);
+ const v8::CpuProfileNode* result = FindChild(context, node, name);
if (!result) {
char buffer[100];
i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
@@ -505,22 +515,24 @@ static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
}
-static void CheckSimpleBranch(const v8::CpuProfileNode* node,
+static void CheckSimpleBranch(v8::Local<v8::Context> context,
+ const v8::CpuProfileNode* node,
const char* names[], int length) {
for (int i = 0; i < length; i++) {
const char* name = names[i];
- node = GetChild(node, name);
+ node = GetChild(context, node, name);
int expectedChildrenCount = (i == length - 1) ? 0 : 1;
CHECK_EQ(expectedChildrenCount, node->GetChildrenCount());
}
}
-static const ProfileNode* GetSimpleBranch(v8::CpuProfile* profile,
+static const ProfileNode* GetSimpleBranch(v8::Local<v8::Context> context,
+ v8::CpuProfile* profile,
const char* names[], int length) {
const v8::CpuProfileNode* node = profile->GetTopDownRoot();
for (int i = 0; i < length; i++) {
- node = GetChild(node, names[i]);
+ node = GetChild(context, node, names[i]);
}
return reinterpret_cast<const ProfileNode*>(node);
}
@@ -580,36 +592,36 @@ TEST(CollectCpuProfile) {
v8::HandleScope scope(env->GetIsolate());
CompileRun(cpu_profiler_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t profiling_interval_ms = 200;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
- };
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 200);
- function->Call(env->Global(), arraysize(args), args);
+ function->Call(env.local(), env->Global(), arraysize(args), args)
+ .ToLocalChecked();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env.local(), root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* fooNode = GetChild(startNode, "foo");
+ const v8::CpuProfileNode* fooNode = GetChild(env.local(), startNode, "foo");
CHECK_EQ(3, fooNode->GetChildrenCount());
const char* barBranch[] = { "bar", "delay", "loop" };
- CheckSimpleBranch(fooNode, barBranch, arraysize(barBranch));
+ CheckSimpleBranch(env.local(), fooNode, barBranch, arraysize(barBranch));
const char* bazBranch[] = { "baz", "delay", "loop" };
- CheckSimpleBranch(fooNode, bazBranch, arraysize(bazBranch));
+ CheckSimpleBranch(env.local(), fooNode, bazBranch, arraysize(bazBranch));
const char* delayBranch[] = { "delay", "loop" };
- CheckSimpleBranch(fooNode, delayBranch, arraysize(delayBranch));
+ CheckSimpleBranch(env.local(), fooNode, delayBranch, arraysize(delayBranch));
profile->Delete();
}
@@ -648,28 +660,28 @@ TEST(HotDeoptNoFrameEntry) {
v8::HandleScope scope(env->GetIsolate());
CompileRun(hot_deopt_no_frame_entry_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t profiling_interval_ms = 200;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
- };
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 200);
- function->Call(env->Global(), arraysize(args), args);
+ function->Call(env.local(), env->Global(), arraysize(args), args)
+ .ToLocalChecked();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env.local(), root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
- GetChild(startNode, "foo");
+ GetChild(env.local(), startNode, "foo");
profile->Delete();
}
@@ -680,12 +692,11 @@ TEST(CollectCpuProfileSamples) {
v8::HandleScope scope(env->GetIsolate());
CompileRun(cpu_profiler_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t profiling_interval_ms = 200;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
- };
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 200, true);
@@ -730,36 +741,36 @@ TEST(SampleWhenFrameIsNotSetup) {
v8::HandleScope scope(env->GetIsolate());
CompileRun(cpu_profiler_test_source2);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t repeat_count = 100;
#if defined(USE_SIMULATOR)
// Simulators are much slower.
repeat_count = 1;
#endif
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), repeat_count)
- };
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), repeat_count)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env.local(), root, names);
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode = FindChild(env.local(), root, "start");
// On slow machines there may be no meaningfull samples at all, skip the
// check there.
if (startNode && startNode->GetChildrenCount() > 0) {
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* delayNode = GetChild(startNode, "delay");
+ const v8::CpuProfileNode* delayNode =
+ GetChild(env.local(), startNode, "delay");
if (delayNode->GetChildrenCount() > 0) {
CHECK_EQ(1, delayNode->GetChildrenCount());
- GetChild(delayNode, "loop");
+ GetChild(env.local(), delayNode, "loop");
}
}
@@ -842,22 +853,24 @@ TEST(NativeAccessorUninitializedIC) {
v8::External::New(isolate, &accessors);
instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
&TestApiCallbacks::Setter, data);
- v8::Local<v8::Function> func = func_template->GetFunction();
- v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8_str("instance"), instance);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ func->NewInstance(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
CompileRun(native_accessor_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t repeat_count = 1;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
+ v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 180);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "get foo");
- GetChild(startNode, "set foo");
+ const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
+ GetChild(env.local(), startNode, "get foo");
+ GetChild(env.local(), startNode, "set foo");
profile->Delete();
}
@@ -881,34 +894,36 @@ TEST(NativeAccessorMonomorphicIC) {
v8::External::New(isolate, &accessors);
instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
&TestApiCallbacks::Setter, data);
- v8::Local<v8::Function> func = func_template->GetFunction();
- v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8_str("instance"), instance);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ func->NewInstance(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
CompileRun(native_accessor_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
{
// Make sure accessors ICs are in monomorphic state before starting
// profiling.
accessors.set_warming_up(true);
int32_t warm_up_iterations = 3;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(isolate, warm_up_iterations)
- };
- function->Call(env->Global(), arraysize(args), args);
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(isolate, warm_up_iterations)};
+ function->Call(env.local(), env->Global(), arraysize(args), args)
+ .ToLocalChecked();
accessors.set_warming_up(false);
}
int32_t repeat_count = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
+ v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "get foo");
- GetChild(startNode, "set foo");
+ const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
+ GetChild(env.local(), startNode, "get foo");
+ GetChild(env.local(), startNode, "set foo");
profile->Delete();
}
@@ -942,21 +957,23 @@ TEST(NativeMethodUninitializedIC) {
v8::FunctionTemplate::New(isolate, &TestApiCallbacks::Callback, data,
signature, 0));
- v8::Local<v8::Function> func = func_template->GetFunction();
- v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8_str("instance"), instance);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ func->NewInstance(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
CompileRun(native_method_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t repeat_count = 1;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
+ v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "fooMethod");
+ const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
+ GetChild(env.local(), startNode, "fooMethod");
profile->Delete();
}
@@ -983,33 +1000,35 @@ TEST(NativeMethodMonomorphicIC) {
v8::FunctionTemplate::New(isolate, &TestApiCallbacks::Callback, data,
signature, 0));
- v8::Local<v8::Function> func = func_template->GetFunction();
- v8::Local<v8::Object> instance = func->NewInstance();
- env->Global()->Set(v8_str("instance"), instance);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ func->NewInstance(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
CompileRun(native_method_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
{
// Make sure method ICs are in monomorphic state before starting
// profiling.
callbacks.set_warming_up(true);
int32_t warm_up_iterations = 3;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(isolate, warm_up_iterations)
- };
- function->Call(env->Global(), arraysize(args), args);
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(isolate, warm_up_iterations)};
+ function->Call(env.local(), env->Global(), arraysize(args), args)
+ .ToLocalChecked();
callbacks.set_warming_up(false);
}
int32_t repeat_count = 100;
- v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
+ v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- GetChild(root, "start");
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "fooMethod");
+ GetChild(env.local(), root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
+ GetChild(env.local(), startNode, "fooMethod");
profile->Delete();
}
@@ -1031,20 +1050,20 @@ TEST(BoundFunctionCall) {
v8::Context::Scope context_scope(env);
CompileRun(bound_function_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
// Don't allow |foo| node to be at the top level.
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env, root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
- GetChild(startNode, "foo");
+ const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
+ GetChild(env, startNode, "foo");
profile->Delete();
}
@@ -1077,7 +1096,7 @@ TEST(TickLines) {
CompileRun(script.start());
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
- v8::Utils::OpenHandle(*GetFunction(*env, func_name)));
+ v8::Utils::OpenHandle(*GetFunction(env.local(), func_name)));
CHECK(func->shared());
CHECK(func->shared()->code());
i::Code* code = NULL;
@@ -1181,45 +1200,44 @@ TEST(FunctionCallSample) {
CcTest::heap()->CollectAllGarbage();
CompileRun(call_function_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t duration_ms = 100;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), duration_ms)
- };
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
- ScopedVector<v8::Handle<v8::String> > names(4);
+ ScopedVector<v8::Local<v8::String> > names(4);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
names[3] = v8_str(i::ProfileGenerator::kUnresolvedFunctionName);
// Don't allow |bar| and |call| nodes to be at the top level.
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env.local(), root, names);
}
// In case of GC stress tests all samples may be in GC phase and there
// won't be |start| node in the profiles.
bool is_gc_stress_testing =
(i::FLAG_gc_interval != -1) || i::FLAG_stress_compaction;
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode = FindChild(env.local(), root, "start");
CHECK(is_gc_stress_testing || startNode);
if (startNode) {
- ScopedVector<v8::Handle<v8::String> > names(2);
+ ScopedVector<v8::Local<v8::String> > names(2);
names[0] = v8_str("bar");
names[1] = v8_str("call");
- CheckChildrenNames(startNode, names);
+ CheckChildrenNames(env.local(), startNode, names);
}
- const v8::CpuProfileNode* unresolvedNode =
- FindChild(root, i::ProfileGenerator::kUnresolvedFunctionName);
+ const v8::CpuProfileNode* unresolvedNode = FindChild(
+ env.local(), root, i::ProfileGenerator::kUnresolvedFunctionName);
if (unresolvedNode) {
- ScopedVector<v8::Handle<v8::String> > names(1);
+ ScopedVector<v8::Local<v8::String> > names(1);
names[0] = v8_str("call");
- CheckChildrenNames(unresolvedNode, names);
+ CheckChildrenNames(env.local(), unresolvedNode, names);
}
profile->Delete();
@@ -1257,52 +1275,53 @@ TEST(FunctionApplySample) {
v8::HandleScope scope(env->GetIsolate());
CompileRun(function_apply_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
int32_t duration_ms = 100;
- v8::Handle<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), duration_ms)
- };
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), duration_ms)};
v8::CpuProfile* profile =
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
// Don't allow |test|, |bar| and |apply| nodes to be at the top level.
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env.local(), root, names);
}
- const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ const v8::CpuProfileNode* startNode = FindChild(env.local(), root, "start");
if (startNode) {
{
- ScopedVector<v8::Handle<v8::String> > names(2);
+ ScopedVector<v8::Local<v8::String> > names(2);
names[0] = v8_str("test");
names[1] = v8_str(ProfileGenerator::kUnresolvedFunctionName);
- CheckChildrenNames(startNode, names);
+ CheckChildrenNames(env.local(), startNode, names);
}
- const v8::CpuProfileNode* testNode = FindChild(startNode, "test");
+ const v8::CpuProfileNode* testNode =
+ FindChild(env.local(), startNode, "test");
if (testNode) {
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str("bar");
names[1] = v8_str("apply");
// apply calls "get length" before invoking the function itself
// and we may get hit into it.
names[2] = v8_str("get length");
- CheckChildrenNames(testNode, names);
+ CheckChildrenNames(env.local(), testNode, names);
}
if (const v8::CpuProfileNode* unresolvedNode =
- FindChild(startNode, ProfileGenerator::kUnresolvedFunctionName)) {
- ScopedVector<v8::Handle<v8::String> > names(1);
+ FindChild(env.local(), startNode,
+ ProfileGenerator::kUnresolvedFunctionName)) {
+ ScopedVector<v8::Local<v8::String> > names(1);
names[0] = v8_str("apply");
- CheckChildrenNames(unresolvedNode, names);
- GetChild(unresolvedNode, "apply");
+ CheckChildrenNames(env.local(), unresolvedNode, names);
+ GetChild(env.local(), unresolvedNode, "apply");
}
}
@@ -1339,11 +1358,11 @@ TEST(CpuProfileDeepStack) {
v8::Context::Scope context_scope(env);
CompileRun(cpu_profiler_deep_stack_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env, "start");
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
v8::Local<v8::String> profile_name = v8_str("my_profile");
- function->Call(env->Global(), 0, NULL);
+ function->Call(env, env->Global(), 0, NULL).ToLocalChecked();
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
@@ -1351,16 +1370,16 @@ TEST(CpuProfileDeepStack) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env, root, names);
}
- const v8::CpuProfileNode* node = GetChild(root, "start");
+ const v8::CpuProfileNode* node = GetChild(env, root, "start");
for (int i = 0; i < 250; ++i) {
- node = GetChild(node, "foo");
+ node = GetChild(env, node, "foo");
}
// TODO(alph):
// In theory there must be one more 'foo' and a 'startProfiling' nodes,
@@ -1384,9 +1403,11 @@ static const char* js_native_js_test_source =
"}";
static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
- v8::Handle<v8::Function> function = info[0].As<v8::Function>();
- v8::Handle<v8::Value> argv[] = { info[1] };
- function->Call(info.This(), arraysize(argv), argv);
+ v8::Local<v8::Function> function = info[0].As<v8::Function>();
+ v8::Local<v8::Value> argv[] = {info[1]};
+ function->Call(info.GetIsolate()->GetCurrentContext(), info.This(),
+ arraysize(argv), argv)
+ .ToLocalChecked();
}
@@ -1404,34 +1425,35 @@ TEST(JsNativeJsSample) {
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction);
- v8::Local<v8::Function> func = func_template->GetFunction();
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env).ToLocalChecked();
func->SetName(v8_str("CallJsFunction"));
- env->Global()->Set(v8_str("CallJsFunction"), func);
+ env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();
CompileRun(js_native_js_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
{
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env, root, names);
}
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(startNode, "CallJsFunction");
+ GetChild(env, startNode, "CallJsFunction");
CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
+ const v8::CpuProfileNode* barNode = GetChild(env, nativeFunctionNode, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- GetChild(barNode, "foo");
+ GetChild(env, barNode, "foo");
profile->Delete();
}
@@ -1466,36 +1488,37 @@ TEST(JsNativeJsRuntimeJsSample) {
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction);
- v8::Local<v8::Function> func = func_template->GetFunction();
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env).ToLocalChecked();
func->SetName(v8_str("CallJsFunction"));
- env->Global()->Set(v8_str("CallJsFunction"), func);
+ env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();
CompileRun(js_native_js_runtime_js_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env, root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(startNode, "CallJsFunction");
+ GetChild(env, startNode, "CallJsFunction");
CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
+ const v8::CpuProfileNode* barNode = GetChild(env, nativeFunctionNode, "bar");
// The child is in fact a bound foo.
// A bound function has a wrapper that may make calls to
// other functions e.g. "get length".
CHECK_LE(1, barNode->GetChildrenCount());
CHECK_GE(2, barNode->GetChildrenCount());
- GetChild(barNode, "foo");
+ GetChild(env, barNode, "foo");
profile->Delete();
}
@@ -1538,40 +1561,44 @@ TEST(JsNative1JsNative2JsSample) {
v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
env->GetIsolate(), CallJsFunction);
- v8::Local<v8::Function> func1 = func_template->GetFunction();
+ v8::Local<v8::Function> func1 =
+ func_template->GetFunction(env).ToLocalChecked();
func1->SetName(v8_str("CallJsFunction1"));
- env->Global()->Set(v8_str("CallJsFunction1"), func1);
+ env->Global()->Set(env, v8_str("CallJsFunction1"), func1).FromJust();
- v8::Local<v8::Function> func2 = v8::FunctionTemplate::New(
- env->GetIsolate(), CallJsFunction2)->GetFunction();
+ v8::Local<v8::Function> func2 =
+ v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction2)
+ ->GetFunction(env)
+ .ToLocalChecked();
func2->SetName(v8_str("CallJsFunction2"));
- env->Global()->Set(v8_str("CallJsFunction2"), func2);
+ env->Global()->Set(env, v8_str("CallJsFunction2"), func2).FromJust();
CompileRun(js_native1_js_native2_js_test_source);
- v8::Local<v8::Function> function = GetFunction(*env, "start");
+ v8::Local<v8::Function> function = GetFunction(env, "start");
v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str("start");
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env, root, names);
- const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeNode1 =
- GetChild(startNode, "CallJsFunction1");
+ GetChild(env, startNode, "CallJsFunction1");
CHECK_EQ(1, nativeNode1->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(nativeNode1, "bar");
+ const v8::CpuProfileNode* barNode = GetChild(env, nativeNode1, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeNode2 = GetChild(barNode, "CallJsFunction2");
+ const v8::CpuProfileNode* nativeNode2 =
+ GetChild(env, barNode, "CallJsFunction2");
CHECK_EQ(1, nativeNode2->GetChildrenCount());
- GetChild(nativeNode2, "foo");
+ GetChild(env, nativeNode2, "foo");
profile->Delete();
}
@@ -1609,19 +1636,19 @@ TEST(IdleTime) {
reinterpret_cast<i::CpuProfile*>(profile)->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Handle<v8::String> > names(3);
+ ScopedVector<v8::Local<v8::String> > names(3);
names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
names[1] = v8_str(ProfileGenerator::kProgramEntryName);
names[2] = v8_str(ProfileGenerator::kIdleEntryName);
- CheckChildrenNames(root, names);
+ CheckChildrenNames(env.local(), root, names);
const v8::CpuProfileNode* programNode =
- GetChild(root, ProfileGenerator::kProgramEntryName);
+ GetChild(env.local(), root, ProfileGenerator::kProgramEntryName);
CHECK_EQ(0, programNode->GetChildrenCount());
CHECK_GE(programNode->GetHitCount(), 3u);
const v8::CpuProfileNode* idleNode =
- GetChild(root, ProfileGenerator::kIdleEntryName);
+ GetChild(env.local(), root, ProfileGenerator::kIdleEntryName);
CHECK_EQ(0, idleNode->GetChildrenCount());
CHECK_GE(idleNode->GetHitCount(), 3u);
@@ -1633,8 +1660,11 @@ static void CheckFunctionDetails(v8::Isolate* isolate,
const v8::CpuProfileNode* node,
const char* name, const char* script_name,
int script_id, int line, int column) {
- CHECK(v8_str(name)->Equals(node->GetFunctionName()));
- CHECK(v8_str(script_name)->Equals(node->GetScriptResourceName()));
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ CHECK(v8_str(name)->Equals(context, node->GetFunctionName()).FromJust());
+ CHECK(v8_str(script_name)
+ ->Equals(context, node->GetScriptResourceName())
+ .FromJust());
CHECK_EQ(script_id, node->GetScriptId());
CHECK_EQ(line, node->GetLineNumber());
CHECK_EQ(column, node->GetColumnNumber());
@@ -1646,17 +1676,17 @@ TEST(FunctionDetails) {
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Handle<v8::Script> script_a = CompileWithOrigin(
- " function foo\n() { try { bar(); } catch(e) {} }\n"
- " function bar() { startProfiling(); }\n",
- "script_a");
- script_a->Run();
- v8::Handle<v8::Script> script_b = CompileWithOrigin(
- "\n\n function baz() { try { foo(); } catch(e) {} }\n"
- "\n\nbaz();\n"
- "stopProfiling();\n",
- "script_b");
- script_b->Run();
+ v8::Local<v8::Script> script_a = CompileWithOrigin(
+ " function foo\n() { try { bar(); } catch(e) {} }\n"
+ " function bar() { startProfiling(); }\n",
+ "script_a");
+ script_a->Run(env).ToLocalChecked();
+ v8::Local<v8::Script> script_b = CompileWithOrigin(
+ "\n\n function baz() { try { foo(); } catch(e) {} }\n"
+ "\n\nbaz();\n"
+ "stopProfiling();\n",
+ "script_b");
+ script_b->Run(env).ToLocalChecked();
const v8::CpuProfile* profile = i::ProfilerExtension::last_profile;
const v8::CpuProfileNode* current = profile->GetTopDownRoot();
reinterpret_cast<ProfileNode*>(
@@ -1668,16 +1698,16 @@ TEST(FunctionDetails) {
// 0 foo 18 #4 TryCatchStatement script_a:2
// 1 bar 18 #5 no reason script_a:3
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* script = GetChild(root, "");
+ const v8::CpuProfileNode* script = GetChild(env, root, "");
CheckFunctionDetails(env->GetIsolate(), script, "", "script_b",
script_b->GetUnboundScript()->GetId(), 1, 1);
- const v8::CpuProfileNode* baz = GetChild(script, "baz");
+ const v8::CpuProfileNode* baz = GetChild(env, script, "baz");
CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b",
script_b->GetUnboundScript()->GetId(), 3, 16);
- const v8::CpuProfileNode* foo = GetChild(baz, "foo");
+ const v8::CpuProfileNode* foo = GetChild(env, baz, "foo");
CheckFunctionDetails(env->GetIsolate(), foo, "foo", "script_a",
script_a->GetUnboundScript()->GetId(), 2, 1);
- const v8::CpuProfileNode* bar = GetChild(foo, "bar");
+ const v8::CpuProfileNode* bar = GetChild(env, foo, "bar");
CheckFunctionDetails(env->GetIsolate(), bar, "bar", "script_a",
script_a->GetUnboundScript()->GetId(), 3, 14);
}
@@ -1692,11 +1722,11 @@ TEST(DontStopOnFinishedProfileDelete) {
i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Handle<v8::String> outer = v8_str("outer");
+ v8::Local<v8::String> outer = v8_str("outer");
profiler->StartProfiling(outer);
CHECK_EQ(0, iprofiler->GetProfilesCount());
- v8::Handle<v8::String> inner = v8_str("inner");
+ v8::Local<v8::String> inner = v8_str("inner");
profiler->StartProfiling(inner);
CHECK_EQ(0, iprofiler->GetProfilesCount());
@@ -1716,11 +1746,12 @@ TEST(DontStopOnFinishedProfileDelete) {
}
-const char* GetBranchDeoptReason(i::CpuProfile* iprofile, const char* branch[],
+const char* GetBranchDeoptReason(v8::Local<v8::Context> context,
+ i::CpuProfile* iprofile, const char* branch[],
int length) {
v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
const ProfileNode* iopt_function = NULL;
- iopt_function = GetSimpleBranch(profile, branch, length);
+ iopt_function = GetSimpleBranch(context, profile, branch, length);
CHECK_EQ(1U, iopt_function->deopt_infos().size());
return iopt_function->deopt_infos()[0].deopt_reason;
}
@@ -1748,7 +1779,10 @@ TEST(CollectDeoptEvents) {
for (int i = 0; i < 3; ++i) {
i::EmbeddedVector<char, sizeof(opt_source) + 100> buffer;
i::SNPrintF(buffer, opt_source, i, i);
- v8::Script::Compile(v8_str(buffer.start()))->Run();
+ v8::Script::Compile(env, v8_str(buffer.start()))
+ .ToLocalChecked()
+ ->Run(env)
+ .ToLocalChecked();
}
const char* source =
@@ -1781,7 +1815,10 @@ TEST(CollectDeoptEvents) {
"stopProfiling();\n"
"\n";
- v8::Script::Compile(v8_str(source))->Run();
+ v8::Script::Compile(env, v8_str(source))
+ .ToLocalChecked()
+ ->Run(env)
+ .ToLocalChecked();
i::CpuProfile* iprofile = iprofiler->GetProfile(0);
iprofile->Print();
/* The expected profile
@@ -1805,12 +1842,12 @@ TEST(CollectDeoptEvents) {
{
const char* branch[] = {"", "opt_function0", "opt_function0"};
CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber),
- GetBranchDeoptReason(iprofile, branch, arraysize(branch)));
+ GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
}
{
const char* branch[] = {"", "opt_function1", "opt_function1"};
const char* deopt_reason =
- GetBranchDeoptReason(iprofile, branch, arraysize(branch));
+ GetBranchDeoptReason(env, iprofile, branch, arraysize(branch));
if (deopt_reason != reason(i::Deoptimizer::kNaN) &&
deopt_reason != reason(i::Deoptimizer::kLostPrecisionOrNaN)) {
FATAL(deopt_reason);
@@ -1819,7 +1856,7 @@ TEST(CollectDeoptEvents) {
{
const char* branch[] = {"", "opt_function2", "opt_function2"};
CHECK_EQ(reason(i::Deoptimizer::kDivisionByZero),
- GetBranchDeoptReason(iprofile, branch, arraysize(branch)));
+ GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
}
iprofiler->DeleteProfile(iprofile);
}
@@ -1837,7 +1874,10 @@ TEST(SourceLocation) {
"}\n"
"CompareStatementWithThis();\n";
- v8::Script::Compile(v8_str(source))->Run();
+ v8::Script::Compile(env.local(), v8_str(source))
+ .ToLocalChecked()
+ ->Run(env.local())
+ .ToLocalChecked();
}
@@ -1875,12 +1915,12 @@ TEST(DeoptAtFirstLevelInlinedSource) {
"stopProfiling();\n"
"\n";
- v8::Handle<v8::Script> inlined_script = v8_compile(inlined_source);
- inlined_script->Run();
+ v8::Local<v8::Script> inlined_script = v8_compile(inlined_source);
+ inlined_script->Run(env).ToLocalChecked();
int inlined_script_id = inlined_script->GetUnboundScript()->GetId();
- v8::Handle<v8::Script> script = v8_compile(source);
- script->Run();
+ v8::Local<v8::Script> script = v8_compile(source);
+ script->Run(env).ToLocalChecked();
int script_id = script->GetUnboundScript()->GetId();
i::CpuProfile* iprofile = iprofiler->GetProfile(0);
@@ -1899,7 +1939,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
const char* branch[] = {"", "test"};
const ProfileNode* itest_node =
- GetSimpleBranch(profile, branch, arraysize(branch));
+ GetSimpleBranch(env, profile, branch, arraysize(branch));
const std::vector<v8::CpuProfileDeoptInfo>& deopt_infos =
itest_node->deopt_infos();
CHECK_EQ(1U, deopt_infos.size());
@@ -1945,12 +1985,12 @@ TEST(DeoptAtSecondLevelInlinedSource) {
"stopProfiling();\n"
"\n";
- v8::Handle<v8::Script> inlined_script = v8_compile(inlined_source);
- inlined_script->Run();
+ v8::Local<v8::Script> inlined_script = v8_compile(inlined_source);
+ inlined_script->Run(env).ToLocalChecked();
int inlined_script_id = inlined_script->GetUnboundScript()->GetId();
- v8::Handle<v8::Script> script = v8_compile(source);
- script->Run();
+ v8::Local<v8::Script> script = v8_compile(source);
+ script->Run(env).ToLocalChecked();
int script_id = script->GetUnboundScript()->GetId();
i::CpuProfile* iprofile = iprofiler->GetProfile(0);
@@ -1972,7 +2012,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
const char* branch[] = {"", "test1"};
const ProfileNode* itest_node =
- GetSimpleBranch(profile, branch, arraysize(branch));
+ GetSimpleBranch(env, profile, branch, arraysize(branch));
const std::vector<v8::CpuProfileDeoptInfo>& deopt_infos =
itest_node->deopt_infos();
CHECK_EQ(1U, deopt_infos.size());
@@ -2018,11 +2058,11 @@ TEST(DeoptUntrackedFunction) {
"stopProfiling();\n"
"\n";
- v8::Handle<v8::Script> inlined_script = v8_compile(inlined_source);
- inlined_script->Run();
+ v8::Local<v8::Script> inlined_script = v8_compile(inlined_source);
+ inlined_script->Run(env).ToLocalChecked();
- v8::Handle<v8::Script> script = v8_compile(source);
- script->Run();
+ v8::Local<v8::Script> script = v8_compile(source);
+ script->Run(env).ToLocalChecked();
i::CpuProfile* iprofile = iprofiler->GetProfile(0);
iprofile->Print();
@@ -2030,7 +2070,7 @@ TEST(DeoptUntrackedFunction) {
const char* branch[] = {"", "test"};
const ProfileNode* itest_node =
- GetSimpleBranch(profile, branch, arraysize(branch));
+ GetSimpleBranch(env, profile, branch, arraysize(branch));
CHECK_EQ(0U, itest_node->deopt_infos().size());
iprofiler->DeleteProfile(iprofile);
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 89372c07ee..67b9501592 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(jochen): Remove this after the setting is turned on globally.
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/global-handles.h"
@@ -169,31 +166,3 @@ TEST(DaylightSavingsTime) {
CheckDST(august_20 + 2 * 3600 - 1000);
CheckDST(august_20);
}
-
-
-TEST(DateCacheVersion) {
- FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- v8::Local<v8::Array> date_cache_version =
- v8::Local<v8::Array>::Cast(CompileRun("%DateCacheVersion()"));
-
- CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
- CHECK(date_cache_version->Get(context, 0).ToLocalChecked()->IsNumber());
- CHECK_EQ(0.0, date_cache_version->Get(context, 0)
- .ToLocalChecked()
- ->NumberValue(context)
- .FromJust());
-
- v8::Date::DateTimeConfigurationChangeNotification(isolate);
-
- CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
- CHECK(date_cache_version->Get(context, 0).ToLocalChecked()->IsNumber());
- CHECK_EQ(1.0, date_cache_version->Get(context, 0)
- .ToLocalChecked()
- ->NumberValue(context)
- .FromJust());
-}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index e1e928f43a..e35227ab23 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -73,9 +73,9 @@ class DebugLocalContext {
public:
inline DebugLocalContext(
v8::Isolate* isolate, v8::ExtensionConfiguration* extensions = 0,
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::Handle<v8::ObjectTemplate>(),
- v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate>(),
+ v8::Local<v8::Value> global_object = v8::Local<v8::Value>())
: scope_(isolate),
context_(v8::Context::New(isolate, extensions, global_template,
global_object)) {
@@ -83,9 +83,9 @@ class DebugLocalContext {
}
inline DebugLocalContext(
v8::ExtensionConfiguration* extensions = 0,
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::Handle<v8::ObjectTemplate>(),
- v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate>(),
+ v8::Local<v8::Value> global_object = v8::Local<v8::Value>())
: scope_(CcTest::isolate()),
context_(v8::Context::New(CcTest::isolate(), extensions,
global_template, global_object)) {
@@ -115,7 +115,8 @@ class DebugLocalContext {
Handle<v8::internal::String> debug_string =
factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("debug"));
v8::internal::JSObject::SetOwnPropertyIgnoreAttributes(
- global, debug_string, handle(debug_context->global_proxy()), DONT_ENUM)
+ global, debug_string, handle(debug_context->global_proxy()),
+ v8::internal::DONT_ENUM)
.Check();
}
@@ -127,31 +128,29 @@ class DebugLocalContext {
// --- H e l p e r F u n c t i o n s
-
-// Compile and run the supplied source and return the fequested function.
-static v8::Local<v8::Function> CompileFunction(DebugLocalContext* env,
+// Compile and run the supplied source and return the requested function.
+static v8::Local<v8::Function> CompileFunction(v8::Isolate* isolate,
const char* source,
const char* function_name) {
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), source))
- ->Run();
- return v8::Local<v8::Function>::Cast((*env)->Global()->Get(
- v8::String::NewFromUtf8(env->GetIsolate(), function_name)));
+ CompileRunChecked(isolate, source);
+ v8::Local<v8::String> name = v8_str(isolate, function_name);
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::MaybeLocal<v8::Value> maybe_function =
+ context->Global()->Get(context, name);
+ return v8::Local<v8::Function>::Cast(maybe_function.ToLocalChecked());
}
// Compile and run the supplied source and return the requested function.
-static v8::Local<v8::Function> CompileFunction(v8::Isolate* isolate,
+static v8::Local<v8::Function> CompileFunction(DebugLocalContext* env,
const char* source,
const char* function_name) {
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, source))->Run();
- v8::Local<v8::Object> global = isolate->GetCurrentContext()->Global();
- return v8::Local<v8::Function>::Cast(
- global->Get(v8::String::NewFromUtf8(isolate, function_name)));
+ return CompileFunction(env->GetIsolate(), source, function_name);
}
// Is there any debug info for the function?
-static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
+static bool HasDebugInfo(v8::Local<v8::Function> fun) {
Handle<v8::internal::JSFunction> f =
Handle<v8::internal::JSFunction>::cast(v8::Utils::OpenHandle(*fun));
Handle<v8::internal::SharedFunctionInfo> shared(f->shared());
@@ -175,7 +174,7 @@ static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
// Set a break point in a function and return the associated break point
// number.
-static int SetBreakPoint(v8::Handle<v8::Function> fun, int position) {
+static int SetBreakPoint(v8::Local<v8::Function> fun, int position) {
return SetBreakPoint(
i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*fun)), position);
}
@@ -191,8 +190,8 @@ static int SetBreakPointFromJS(v8::Isolate* isolate,
"debug.Debug.setBreakPoint(%s,%d,%d)",
function_name, line, position);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Handle<v8::String> str = v8::String::NewFromUtf8(isolate, buffer.start());
- return v8::Script::Compile(str)->Run()->Int32Value();
+ v8::Local<v8::Value> value = CompileRunChecked(isolate, buffer.start());
+ return value->Int32Value(isolate->GetCurrentContext()).FromJust();
}
@@ -214,11 +213,9 @@ static int SetScriptBreakPointByIdFromJS(v8::Isolate* isolate, int script_id,
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
{
v8::TryCatch try_catch(isolate);
- v8::Handle<v8::String> str =
- v8::String::NewFromUtf8(isolate, buffer.start());
- v8::Handle<v8::Value> value = v8::Script::Compile(str)->Run();
+ v8::Local<v8::Value> value = CompileRunChecked(isolate, buffer.start());
CHECK(!try_catch.HasCaught());
- return value->Int32Value();
+ return value->Int32Value(isolate->GetCurrentContext()).FromJust();
}
}
@@ -243,11 +240,9 @@ static int SetScriptBreakPointByNameFromJS(v8::Isolate* isolate,
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
{
v8::TryCatch try_catch(isolate);
- v8::Handle<v8::String> str =
- v8::String::NewFromUtf8(isolate, buffer.start());
- v8::Handle<v8::Value> value = v8::Script::Compile(str)->Run();
+ v8::Local<v8::Value> value = CompileRunChecked(isolate, buffer.start());
CHECK(!try_catch.HasCaught());
- return value->Int32Value();
+ return value->Int32Value(isolate->GetCurrentContext()).FromJust();
}
}
@@ -269,7 +264,7 @@ static void ClearBreakPointFromJS(v8::Isolate* isolate,
"debug.Debug.clearBreakPoint(%d)",
break_point_number);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
+ CompileRunChecked(isolate, buffer.start());
}
@@ -280,7 +275,7 @@ static void EnableScriptBreakPointFromJS(v8::Isolate* isolate,
"debug.Debug.enableScriptBreakPoint(%d)",
break_point_number);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
+ CompileRunChecked(isolate, buffer.start());
}
@@ -291,7 +286,7 @@ static void DisableScriptBreakPointFromJS(v8::Isolate* isolate,
"debug.Debug.disableScriptBreakPoint(%d)",
break_point_number);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
+ CompileRunChecked(isolate, buffer.start());
}
@@ -303,7 +298,7 @@ static void ChangeScriptBreakPointConditionFromJS(v8::Isolate* isolate,
"debug.Debug.changeScriptBreakPointCondition(%d, \"%s\")",
break_point_number, condition);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
+ CompileRunChecked(isolate, buffer.start());
}
@@ -315,7 +310,7 @@ static void ChangeScriptBreakPointIgnoreCountFromJS(v8::Isolate* isolate,
"debug.Debug.changeScriptBreakPointIgnoreCount(%d, %d)",
break_point_number, ignoreCount);
buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, buffer.start()))->Run();
+ CompileRunChecked(isolate, buffer.start());
}
@@ -331,22 +326,14 @@ static void ChangeBreakOnException(bool caught, bool uncaught) {
static void ChangeBreakOnExceptionFromJS(v8::Isolate* isolate, bool caught,
bool uncaught) {
if (caught) {
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "debug.Debug.setBreakOnException()"))
- ->Run();
+ CompileRunChecked(isolate, "debug.Debug.setBreakOnException()");
} else {
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "debug.Debug.clearBreakOnException()"))
- ->Run();
+ CompileRunChecked(isolate, "debug.Debug.clearBreakOnException()");
}
if (uncaught) {
- v8::Script::Compile(
- v8::String::NewFromUtf8(
- isolate, "debug.Debug.setBreakOnUncaughtException()"))->Run();
+ CompileRunChecked(isolate, "debug.Debug.setBreakOnUncaughtException()");
} else {
- v8::Script::Compile(
- v8::String::NewFromUtf8(
- isolate, "debug.Debug.clearBreakOnUncaughtException()"))->Run();
+ CompileRunChecked(isolate, "debug.Debug.clearBreakOnUncaughtException()");
}
}
@@ -354,10 +341,13 @@ static void ChangeBreakOnExceptionFromJS(v8::Isolate* isolate, bool caught,
// Prepare to step to next break location.
static void PrepareStep(StepAction step_action) {
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
- debug->PrepareStep(step_action, 1, StackFrame::NO_ID);
+ debug->PrepareStep(step_action);
}
+static void ClearStepping() { CcTest::i_isolate()->debug()->ClearStepping(); }
+
+
// This function is in namespace v8::internal to be friend with class
// v8::internal::Debug.
namespace v8 {
@@ -429,9 +419,10 @@ void CheckDebuggerUnloaded(bool check_functions) {
// Check that the debugger has been fully unloaded.
-static void CheckDebuggerUnloaded(bool check_functions = false) {
+static void CheckDebuggerUnloaded(v8::Isolate* isolate,
+ bool check_functions = false) {
// Let debugger to unload itself synchronously
- v8::Debug::ProcessDebugMessages();
+ v8::Debug::ProcessDebugMessages(isolate);
v8::internal::CheckDebuggerUnloaded(check_functions);
}
@@ -520,7 +511,7 @@ static const char* frame_count_source =
"function frame_count(exec_state) {"
" return exec_state.frameCount();"
"}";
-v8::Handle<v8::Function> frame_count;
+v8::Local<v8::Function> frame_count;
// Global variable to store the last function hit - used by some tests.
@@ -539,7 +530,8 @@ int break_point_hit_count_deoptimize = 0;
static void DebugEventBreakPointHitCount(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
v8::internal::Isolate* isolate = CcTest::i_isolate();
Debug* debug = isolate->debug();
// When hitting a debug event listener there must be a break set.
@@ -551,16 +543,16 @@ static void DebugEventBreakPointHitCount(
if (!frame_function_name.IsEmpty()) {
// Get the name of the function.
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = {
- exec_state, v8::Integer::New(CcTest::isolate(), 0)
- };
- v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
- argc, argv);
+ v8::Local<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)};
+ v8::Local<v8::Value> result =
+ frame_function_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
if (result->IsUndefined()) {
last_function_hit[0] = '\0';
} else {
CHECK(result->IsString());
- v8::Handle<v8::String> function_name(result.As<v8::String>());
+ v8::Local<v8::String> function_name(result.As<v8::String>());
function_name->WriteUtf8(last_function_hit);
}
}
@@ -568,34 +560,37 @@ static void DebugEventBreakPointHitCount(
if (!frame_source_line.IsEmpty()) {
// Get the source line.
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { exec_state };
- v8::Handle<v8::Value> result = frame_source_line->Call(exec_state,
- argc, argv);
+ v8::Local<v8::Value> argv[argc] = {exec_state};
+ v8::Local<v8::Value> result =
+ frame_source_line->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
CHECK(result->IsNumber());
- last_source_line = result->Int32Value();
+ last_source_line = result->Int32Value(context).FromJust();
}
if (!frame_source_column.IsEmpty()) {
// Get the source column.
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { exec_state };
- v8::Handle<v8::Value> result = frame_source_column->Call(exec_state,
- argc, argv);
+ v8::Local<v8::Value> argv[argc] = {exec_state};
+ v8::Local<v8::Value> result =
+ frame_source_column->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
CHECK(result->IsNumber());
- last_source_column = result->Int32Value();
+ last_source_column = result->Int32Value(context).FromJust();
}
if (!frame_script_name.IsEmpty()) {
// Get the script name of the function script.
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { exec_state };
- v8::Handle<v8::Value> result = frame_script_name->Call(exec_state,
- argc, argv);
+ v8::Local<v8::Value> argv[argc] = {exec_state};
+ v8::Local<v8::Value> result =
+ frame_script_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
if (result->IsUndefined()) {
last_script_name_hit[0] = '\0';
} else {
CHECK(result->IsString());
- v8::Handle<v8::String> script_name(result.As<v8::String>());
+ v8::Local<v8::String> script_name(result.As<v8::String>());
script_name->WriteUtf8(last_script_name_hit);
}
}
@@ -614,7 +609,7 @@ static void DebugEventBreakPointHitCount(
int exception_hit_count = 0;
int uncaught_exception_hit_count = 0;
int last_js_stack_height = -1;
-v8::Handle<v8::Function> debug_event_listener_callback;
+v8::Local<v8::Function> debug_event_listener_callback;
int debug_event_listener_callback_result;
static void DebugEventCounterClear() {
@@ -626,8 +621,9 @@ static void DebugEventCounterClear() {
static void DebugEventCounter(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
- v8::Handle<v8::Object> event_data = event_details.GetEventData();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> event_data = event_details.GetEventData();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
@@ -640,11 +636,11 @@ static void DebugEventCounter(
exception_hit_count++;
// Check whether the exception was uncaught.
- v8::Local<v8::String> fun_name =
- v8::String::NewFromUtf8(CcTest::isolate(), "uncaught");
- v8::Local<v8::Function> fun =
- v8::Local<v8::Function>::Cast(event_data->Get(fun_name));
- v8::Local<v8::Value> result = fun->Call(event_data, 0, NULL);
+ v8::Local<v8::String> fun_name = v8_str(CcTest::isolate(), "uncaught");
+ v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
+ event_data->Get(context, fun_name).ToLocalChecked());
+ v8::Local<v8::Value> result =
+ fun->Call(context, event_data, 0, NULL).ToLocalChecked();
if (result->IsTrue()) {
uncaught_exception_hit_count++;
}
@@ -654,18 +650,21 @@ static void DebugEventCounter(
// compiled.
if (!frame_count.IsEmpty()) {
static const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = { exec_state };
+ v8::Local<v8::Value> argv[kArgc] = {exec_state};
// Using exec_state as receiver is just to have a receiver.
- v8::Handle<v8::Value> result = frame_count->Call(exec_state, kArgc, argv);
- last_js_stack_height = result->Int32Value();
+ v8::Local<v8::Value> result =
+ frame_count->Call(context, exec_state, kArgc, argv).ToLocalChecked();
+ last_js_stack_height = result->Int32Value(context).FromJust();
}
// Run callback from DebugEventListener and check the result.
if (!debug_event_listener_callback.IsEmpty()) {
- v8::Handle<v8::Value> result =
- debug_event_listener_callback->Call(event_data, 0, NULL);
+ v8::Local<v8::Value> result =
+ debug_event_listener_callback->Call(context, event_data, 0, NULL)
+ .ToLocalChecked();
CHECK(!result.IsEmpty());
- CHECK_EQ(debug_event_listener_callback_result, result->Int32Value());
+ CHECK_EQ(debug_event_listener_callback_result,
+ result->Int32Value(context).FromJust());
}
}
@@ -680,7 +679,7 @@ static void DebugEventCounter(
// Structure for holding checks to do.
struct EvaluateCheck {
const char* expr; // An expression to evaluate when a break point is hit.
- v8::Handle<v8::Value> expected; // The expected result.
+ v8::Local<v8::Value> expected; // The expected result.
};
@@ -698,7 +697,9 @@ v8::Local<v8::Function> evaluate_check_function;
static void DebugEventEvaluate(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -707,12 +708,12 @@ static void DebugEventEvaluate(
break_point_hit_count++;
for (int i = 0; checks[i].expr != NULL; i++) {
const int argc = 3;
- v8::Handle<v8::Value> argv[argc] = {
- exec_state,
- v8::String::NewFromUtf8(CcTest::isolate(), checks[i].expr),
- checks[i].expected};
- v8::Handle<v8::Value> result =
- evaluate_check_function->Call(exec_state, argc, argv);
+ v8::Local<v8::String> string = v8_str(isolate, checks[i].expr);
+ v8::Local<v8::Value> argv[argc] = {exec_state, string,
+ checks[i].expected};
+ v8::Local<v8::Value> result =
+ evaluate_check_function->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
if (!result->IsTrue()) {
v8::String::Utf8Value utf8(checks[i].expected);
V8_Fatal(__FILE__, __LINE__, "%s != %s", checks[i].expr, *utf8);
@@ -727,7 +728,7 @@ int debug_event_remove_break_point = 0;
static void DebugEventRemoveBreakPoint(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Value> data = event_details.GetCallbackData();
+ v8::Local<v8::Value> data = event_details.GetCallbackData();
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -772,7 +773,7 @@ const char* expected_step_sequence = NULL;
static void DebugEventStepSequence(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
// When hitting a debug event listener there must be a break set.
CHECK_NE(debug->break_id(), 0);
@@ -782,13 +783,15 @@ static void DebugEventStepSequence(
CHECK(break_point_hit_count <
StrLength(expected_step_sequence));
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = {
- exec_state, v8::Integer::New(CcTest::isolate(), 0)
- };
- v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
- argc, argv);
+ v8::Local<v8::Value> argv[argc] = {exec_state,
+ v8::Integer::New(CcTest::isolate(), 0)};
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ v8::Local<v8::Value> result =
+ frame_function_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
CHECK(result->IsString());
- v8::String::Utf8Value function_name(result->ToString(CcTest::isolate()));
+ v8::String::Utf8Value function_name(
+ result->ToString(context).ToLocalChecked());
CHECK_EQ(1, StrLength(*function_name));
CHECK_EQ((*function_name)[0],
expected_step_sequence[break_point_hit_count]);
@@ -870,7 +873,7 @@ static void DebugEventBreakMax(
} else if (terminate_after_max_break_point_hit) {
// Terminate execution after the last break if requested.
- v8::V8::TerminateExecution(v8_isolate);
+ v8_isolate->TerminateExecution();
}
// Perform a full deoptimization when the specified number of
@@ -892,8 +895,8 @@ static void MessageCallbackCountClear() {
message_callback_count = 0;
}
-static void MessageCallbackCount(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
+static void MessageCallbackCount(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
message_callback_count++;
}
@@ -914,7 +917,7 @@ TEST(DebugInfo) {
CHECK_EQ(0, v8::internal::GetDebuggedFunctions()->length());
CHECK(!HasDebugInfo(foo));
CHECK(!HasDebugInfo(bar));
- EnableDebugger();
+ EnableDebugger(env->GetIsolate());
// One function (foo) is debugged.
int bp1 = SetBreakPoint(foo, 0);
CHECK_EQ(1, v8::internal::GetDebuggedFunctions()->length());
@@ -932,7 +935,7 @@ TEST(DebugInfo) {
CHECK(HasDebugInfo(bar));
// No functions are debugged.
ClearBreakPoint(bp2);
- DisableDebugger();
+ DisableDebugger(env->GetIsolate());
CHECK_EQ(0, v8::internal::GetDebuggedFunctions()->length());
CHECK(!HasDebugInfo(foo));
CHECK(!HasDebugInfo(bar));
@@ -945,30 +948,29 @@ TEST(BreakPointICStore) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- "function foo(){bar=0;}"))->Run();
- v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){bar=0;}", "foo");
// Run without breakpoints.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
int bp = SetBreakPoint(foo, 0);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -977,33 +979,31 @@ TEST(BreakPointICLoad) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "bar=1"))
- ->Run();
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function foo(){var x=bar;}"))
- ->Run();
- v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+
+ CompileRunChecked(env->GetIsolate(), "bar=1");
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){var x=bar;}", "foo");
// Run without breakpoints.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1012,32 +1012,30 @@ TEST(BreakPointICCall) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){}"))->Run();
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- "function foo(){bar();}"))->Run();
- v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ CompileRunChecked(env->GetIsolate(), "function bar(){}");
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){bar();}", "foo");
// Run without breakpoints.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
int bp = SetBreakPoint(foo, 0);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1046,34 +1044,40 @@ TEST(BreakPointICCallWithGC) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){return 1;}"))
- ->Run();
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- "function foo(){return bar();}"))
- ->Run();
- v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointCollectGarbage);
+ CompileRunChecked(env->GetIsolate(), "function bar(){return 1;}");
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){return bar();}", "foo");
+ v8::Local<v8::Context> context = env.context();
// Run without breakpoints.
- CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
- CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1082,34 +1086,40 @@ TEST(BreakPointConstructCallWithGC) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- "function bar(){ this.x = 1;}"))
- ->Run();
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(),
- "function foo(){return new bar(1).x;}"))->Run();
- v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointCollectGarbage);
+ CompileRunChecked(env->GetIsolate(), "function bar(){ this.x = 1;}");
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){return new bar(1).x;}", "foo");
+ v8::Local<v8::Context> context = env.context();
// Run without breakpoints.
- CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
int bp = SetBreakPoint(foo, 0);
- CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
CHECK_EQ(1, break_point_hit_count);
- CHECK_EQ(1, foo->Call(env->Global(), 0, NULL)->Int32Value());
+ CHECK_EQ(1, foo->Call(context, env->Global(), 0, NULL)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
CHECK_EQ(2, break_point_hit_count);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1129,44 +1139,44 @@ TEST(BreakPointReturn) {
"frame_source_column");
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function foo(){}"))->Run();
- v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){}", "foo");
+ v8::Local<v8::Context> context = env.context();
// Run without breakpoints.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint
int bp = SetBreakPoint(foo, 0);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(0, last_source_line);
CHECK_EQ(15, last_source_column);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, last_source_line);
CHECK_EQ(15, last_source_column);
// Run without breakpoints.
ClearBreakPoint(bp);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
-static void CallWithBreakPoints(v8::Local<v8::Object> recv,
+static void CallWithBreakPoints(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> recv,
v8::Local<v8::Function> f,
- int break_point_count,
- int call_count) {
+ int break_point_count, int call_count) {
break_point_hit_count = 0;
for (int i = 0; i < call_count; i++) {
- f->Call(recv, 0, NULL);
+ f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ((i + 1) * break_point_count, break_point_hit_count);
}
}
@@ -1177,59 +1187,61 @@ TEST(GCDuringBreakPointProcessing) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointCollectGarbage);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointCollectGarbage);
v8::Local<v8::Function> foo;
// Test IC store break point with garbage collection.
foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
SetBreakPoint(foo, 0);
- CallWithBreakPoints(env->Global(), foo, 1, 10);
+ CallWithBreakPoints(context, env->Global(), foo, 1, 10);
// Test IC load break point with garbage collection.
foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
SetBreakPoint(foo, 0);
- CallWithBreakPoints(env->Global(), foo, 1, 10);
+ CallWithBreakPoints(context, env->Global(), foo, 1, 10);
// Test IC call break point with garbage collection.
foo = CompileFunction(&env, "function bar(){};function foo(){bar();}", "foo");
SetBreakPoint(foo, 0);
- CallWithBreakPoints(env->Global(), foo, 1, 10);
+ CallWithBreakPoints(context, env->Global(), foo, 1, 10);
// Test return break point with garbage collection.
foo = CompileFunction(&env, "function foo(){}", "foo");
SetBreakPoint(foo, 0);
- CallWithBreakPoints(env->Global(), foo, 1, 25);
+ CallWithBreakPoints(context, env->Global(), foo, 1, 25);
// Test debug break slot break point with garbage collection.
foo = CompileFunction(&env, "function foo(){var a;}", "foo");
SetBreakPoint(foo, 0);
- CallWithBreakPoints(env->Global(), foo, 1, 25);
+ CallWithBreakPoints(context, env->Global(), foo, 1, 25);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
// Call the function three times with different garbage collections in between
// and make sure that the break point survives.
-static void CallAndGC(v8::Local<v8::Object> recv,
- v8::Local<v8::Function> f) {
+static void CallAndGC(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> recv, v8::Local<v8::Function> f) {
break_point_hit_count = 0;
for (int i = 0; i < 3; i++) {
// Call function.
- f->Call(recv, 0, NULL);
+ f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ(1 + i * 3, break_point_hit_count);
// Scavenge and call function.
CcTest::heap()->CollectGarbage(v8::internal::NEW_SPACE);
- f->Call(recv, 0, NULL);
+ f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
CcTest::heap()->CollectAllGarbage();
- f->Call(recv, 0, NULL);
+ f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
}
@@ -1240,8 +1252,10 @@ TEST(BreakPointSurviveGC) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
v8::Local<v8::Function> foo;
// Test IC store break point with garbage collection.
@@ -1250,7 +1264,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(context, env->Global(), foo);
// Test IC load break point with garbage collection.
{
@@ -1258,7 +1272,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(context, env->Global(), foo);
// Test IC call break point with garbage collection.
{
@@ -1268,7 +1282,7 @@ TEST(BreakPointSurviveGC) {
"foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(context, env->Global(), foo);
// Test return break point with garbage collection.
{
@@ -1276,7 +1290,7 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "function foo(){}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(context, env->Global(), foo);
// Test non IC break point with garbage collection.
{
@@ -1284,11 +1298,11 @@ TEST(BreakPointSurviveGC) {
foo = CompileFunction(&env, "function foo(){var bar=0;}", "foo");
SetBreakPoint(foo, 0);
}
- CallAndGC(env->Global(), foo);
+ CallAndGC(context, env->Global(), foo);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1296,53 +1310,51 @@ TEST(BreakPointSurviveGC) {
TEST(BreakPointThroughJavaScript) {
break_point_hit_count = 0;
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){}"))->Run();
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(),
- "function foo(){bar();bar();}"))
- ->Run();
- // 012345678901234567890
- // 1 2
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
+ CompileRunChecked(isolate, "function bar(){}");
+ CompileFunction(isolate, "function foo(){bar();bar();}", "foo");
+ // 012345678901234567890
+ // 1 2
// Break points are set at position 3 and 9
+ v8::Local<v8::String> source = v8_str(env->GetIsolate(), "foo()");
v8::Local<v8::Script> foo =
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "foo()"));
+ v8::Script::Compile(context, source).ToLocalChecked();
- // Run without breakpoints.
- foo->Run();
CHECK_EQ(0, break_point_hit_count);
// Run with one breakpoint
int bp1 = SetBreakPointFromJS(env->GetIsolate(), "foo", 0, 3);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Run with two breakpoints
int bp2 = SetBreakPointFromJS(env->GetIsolate(), "foo", 0, 9);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
// Run with one breakpoint
ClearBreakPointFromJS(env->GetIsolate(), bp2);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(7, break_point_hit_count);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(8, break_point_hit_count);
// Run without breakpoints.
ClearBreakPointFromJS(env->GetIsolate(), bp1);
- foo->Run();
+ foo->Run(context).ToLocalChecked();
CHECK_EQ(8, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
// Make sure that the break point numbers are consecutive.
CHECK_EQ(1, bp1);
@@ -1355,96 +1367,99 @@ TEST(BreakPointThroughJavaScript) {
TEST(ScriptBreakPointByNameThroughJavaScript) {
break_point_hit_count = 0;
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function f() {\n"
- " function h() {\n"
- " a = 0; // line 2\n"
- " }\n"
- " b = 1; // line 4\n"
- " return h();\n"
- "}\n"
- "\n"
- "function g() {\n"
- " function h() {\n"
- " a = 0;\n"
- " }\n"
- " b = 2; // line 12\n"
- " h();\n"
- " b = 3; // line 14\n"
- " f(); // line 15\n"
- "}");
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
+
+ v8::Local<v8::String> script = v8_str(isolate,
+ "function f() {\n"
+ " function h() {\n"
+ " a = 0; // line 2\n"
+ " }\n"
+ " b = 1; // line 4\n"
+ " return h();\n"
+ "}\n"
+ "\n"
+ "function g() {\n"
+ " function h() {\n"
+ " a = 0;\n"
+ " }\n"
+ " b = 2; // line 12\n"
+ " h();\n"
+ " b = 3; // line 14\n"
+ " f(); // line 15\n"
+ "}");
// Compile the script and get the two functions.
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Script::Compile(script, &origin)->Run();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(isolate, "test"));
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()->Get(context, v8_str(isolate, "g")).ToLocalChecked());
// Call f and g without break points.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 12.
- int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 12, 0);
+ int sbp1 = SetScriptBreakPointByNameFromJS(isolate, "test", 12, 0);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Remove the break point again.
break_point_hit_count = 0;
ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 2.
int sbp2 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 2, 0);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Call f and g with break point on line 2, 4, 12, 14 and 15.
- int sbp3 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 4, 0);
- int sbp4 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 12, 0);
- int sbp5 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 14, 0);
- int sbp6 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 15, 0);
+ int sbp3 = SetScriptBreakPointByNameFromJS(isolate, "test", 4, 0);
+ int sbp4 = SetScriptBreakPointByNameFromJS(isolate, "test", 12, 0);
+ int sbp5 = SetScriptBreakPointByNameFromJS(isolate, "test", 14, 0);
+ int sbp6 = SetScriptBreakPointByNameFromJS(isolate, "test", 15, 0);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(7, break_point_hit_count);
// Remove all the break points again.
break_point_hit_count = 0;
- ClearBreakPointFromJS(env->GetIsolate(), sbp2);
- ClearBreakPointFromJS(env->GetIsolate(), sbp3);
- ClearBreakPointFromJS(env->GetIsolate(), sbp4);
- ClearBreakPointFromJS(env->GetIsolate(), sbp5);
- ClearBreakPointFromJS(env->GetIsolate(), sbp6);
- f->Call(env->Global(), 0, NULL);
+ ClearBreakPointFromJS(isolate, sbp2);
+ ClearBreakPointFromJS(isolate, sbp3);
+ ClearBreakPointFromJS(isolate, sbp4);
+ ClearBreakPointFromJS(isolate, sbp5);
+ ClearBreakPointFromJS(isolate, sbp6);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
// Make sure that the break point numbers are consecutive.
CHECK_EQ(1, sbp1);
@@ -1459,73 +1474,74 @@ TEST(ScriptBreakPointByNameThroughJavaScript) {
TEST(ScriptBreakPointByIdThroughJavaScript) {
break_point_hit_count = 0;
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> source = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function f() {\n"
- " function h() {\n"
- " a = 0; // line 2\n"
- " }\n"
- " b = 1; // line 4\n"
- " return h();\n"
- "}\n"
- "\n"
- "function g() {\n"
- " function h() {\n"
- " a = 0;\n"
- " }\n"
- " b = 2; // line 12\n"
- " h();\n"
- " b = 3; // line 14\n"
- " f(); // line 15\n"
- "}");
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
+
+ v8::Local<v8::String> source = v8_str(isolate,
+ "function f() {\n"
+ " function h() {\n"
+ " a = 0; // line 2\n"
+ " }\n"
+ " b = 1; // line 4\n"
+ " return h();\n"
+ "}\n"
+ "\n"
+ "function g() {\n"
+ " function h() {\n"
+ " a = 0;\n"
+ " }\n"
+ " b = 2; // line 12\n"
+ " h();\n"
+ " b = 3; // line 14\n"
+ " f(); // line 15\n"
+ "}");
// Compile the script and get the two functions.
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Local<v8::Script> script = v8::Script::Compile(source, &origin);
- script->Run();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(isolate, "test"));
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(context, source, &origin).ToLocalChecked();
+ script->Run(context).ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()->Get(context, v8_str(isolate, "g")).ToLocalChecked());
// Get the script id knowing that internally it is a 32 integer.
int script_id = script->GetUnboundScript()->GetId();
// Call f and g without break points.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 12.
int sbp1 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 12, 0);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Remove the break point again.
break_point_hit_count = 0;
ClearBreakPointFromJS(env->GetIsolate(), sbp1);
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Call f and g with break point on line 2.
int sbp2 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 2, 0);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Call f and g with break point on line 2, 4, 12, 14 and 15.
@@ -1534,9 +1550,9 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
int sbp5 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 14, 0);
int sbp6 = SetScriptBreakPointByIdFromJS(env->GetIsolate(), script_id, 15, 0);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(7, break_point_hit_count);
// Remove all the break points again.
@@ -1546,13 +1562,13 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
ClearBreakPointFromJS(env->GetIsolate(), sbp4);
ClearBreakPointFromJS(env->GetIsolate(), sbp5);
ClearBreakPointFromJS(env->GetIsolate(), sbp6);
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
// Make sure that the break point numbers are consecutive.
CHECK_EQ(1, sbp1);
@@ -1568,57 +1584,63 @@ TEST(ScriptBreakPointByIdThroughJavaScript) {
TEST(EnableDisableScriptBreakPoint) {
break_point_hit_count = 0;
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 1\n"
- "};");
+ v8::Local<v8::String> script = v8_str(isolate,
+ "function f() {\n"
+ " a = 0; // line 1\n"
+ "};");
// Compile the script and get function f.
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Script::Compile(script, &origin)->Run();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(isolate, "test"));
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
// Set script break point on line 1 (in function f).
- int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
+ int sbp = SetScriptBreakPointByNameFromJS(isolate, "test", 1, 0);
// Call f while enabeling and disabling the script break point.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- DisableScriptBreakPointFromJS(env->GetIsolate(), sbp);
- f->Call(env->Global(), 0, NULL);
+ DisableScriptBreakPointFromJS(isolate, sbp);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- EnableScriptBreakPointFromJS(env->GetIsolate(), sbp);
- f->Call(env->Global(), 0, NULL);
+ EnableScriptBreakPointFromJS(isolate, sbp);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- DisableScriptBreakPointFromJS(env->GetIsolate(), sbp);
- f->Call(env->Global(), 0, NULL);
+ DisableScriptBreakPointFromJS(isolate, sbp);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- // Reload the script and get f again checking that the disabeling survives.
- v8::Script::Compile(script, &origin)->Run();
+ // Reload the script and get f again checking that the disabling survives.
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- f->Call(env->Global(), 0, NULL);
+ env->Global()->Get(context, v8_str(isolate, "f")).ToLocalChecked());
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- EnableScriptBreakPointFromJS(env->GetIsolate(), sbp);
- f->Call(env->Global(), 0, NULL);
+ EnableScriptBreakPointFromJS(isolate, sbp);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -1629,24 +1651,29 @@ TEST(ConditionalScriptBreakPoint) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "count = 0;\n"
- "function f() {\n"
- " g(count++); // line 2\n"
- "};\n"
- "function g(x) {\n"
- " var a=x; // line 5\n"
- "};");
+ v8::Local<v8::String> script = v8_str(env->GetIsolate(),
+ "count = 0;\n"
+ "function f() {\n"
+ " g(count++); // line 2\n"
+ "};\n"
+ "function g(x) {\n"
+ " var a=x; // line 5\n"
+ "};");
// Compile the script and get function f.
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::Context> context = env.context();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(env->GetIsolate(), "test"));
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
// Set script break point on line 5 (in function g).
int sbp1 = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 5, 0);
@@ -1654,34 +1681,39 @@ TEST(ConditionalScriptBreakPoint) {
// Call f with different conditions on the script break point.
break_point_hit_count = 0;
ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "false");
- f->Call(env->Global(), 0, NULL);
+ f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "true");
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
ChangeScriptBreakPointConditionFromJS(env->GetIsolate(), sbp1, "x % 2 == 0");
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
- f->Call(env->Global(), 0, NULL);
+ f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
}
CHECK_EQ(5, break_point_hit_count);
// Reload the script and get f again checking that the condition survives.
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
- f->Call(env->Global(), 0, NULL);
+ f->Call(env.context(), env->Global(), 0, NULL).ToLocalChecked();
}
CHECK_EQ(5, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1692,20 +1724,25 @@ TEST(ScriptBreakPointIgnoreCount) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
- v8::Local<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 1\n"
- "};");
+ v8::Local<v8::String> script = v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " a = 0; // line 1\n"
+ "};");
// Compile the script and get function f.
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
- v8::Script::Compile(script, &origin)->Run();
+ v8::Local<v8::Context> context = env.context();
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(env->GetIsolate(), "test"));
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
// Set script break point on line 1 (in function f).
int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
@@ -1713,31 +1750,36 @@ TEST(ScriptBreakPointIgnoreCount) {
// Call f with different ignores on the script break point.
break_point_hit_count = 0;
ChangeScriptBreakPointIgnoreCountFromJS(env->GetIsolate(), sbp, 1);
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
ChangeScriptBreakPointIgnoreCountFromJS(env->GetIsolate(), sbp, 5);
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
}
CHECK_EQ(5, break_point_hit_count);
// Reload the script and get f again checking that the ignore survives.
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
break_point_hit_count = 0;
for (int i = 0; i < 10; i++) {
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
}
CHECK_EQ(5, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1748,60 +1790,74 @@ TEST(ScriptBreakPointReload) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> f;
- v8::Local<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function f() {\n"
- " function h() {\n"
- " a = 0; // line 2\n"
- " }\n"
- " b = 1; // line 4\n"
- " return h();\n"
- "}");
-
- v8::ScriptOrigin origin_1 =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "1"));
- v8::ScriptOrigin origin_2 =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "2"));
+ v8::Local<v8::String> script = v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " function h() {\n"
+ " a = 0; // line 2\n"
+ " }\n"
+ " b = 1; // line 4\n"
+ " return h();\n"
+ "}");
+
+ v8::ScriptOrigin origin_1 = v8::ScriptOrigin(v8_str(env->GetIsolate(), "1"));
+ v8::ScriptOrigin origin_2 = v8::ScriptOrigin(v8_str(env->GetIsolate(), "2"));
// Set a script break point before the script is loaded.
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "1", 2, 0);
// Compile the script and get the function.
- v8::Script::Compile(script, &origin_1)->Run();
+ v8::Script::Compile(context, script, &origin_1)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
// Call f and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Compile the script again with a different script data and get the
// function.
- v8::Script::Compile(script, &origin_2)->Run();
+ v8::Script::Compile(context, script, &origin_2)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
// Call f and check that no break points are set.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Compile the script again and get the function.
- v8::Script::Compile(script, &origin_1)->Run();
+ v8::Script::Compile(context, script, &origin_1)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
// Call f and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1812,41 +1868,50 @@ TEST(ScriptBreakPointMultiple) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> f;
- v8::Local<v8::String> script_f =
- v8::String::NewFromUtf8(env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 1\n"
- "}");
+ v8::Local<v8::String> script_f = v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " a = 0; // line 1\n"
+ "}");
v8::Local<v8::Function> g;
- v8::Local<v8::String> script_g =
- v8::String::NewFromUtf8(env->GetIsolate(),
- "function g() {\n"
- " b = 0; // line 1\n"
- "}");
+ v8::Local<v8::String> script_g = v8_str(env->GetIsolate(),
+ "function g() {\n"
+ " b = 0; // line 1\n"
+ "}");
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
+ v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(env->GetIsolate(), "test"));
// Set a script break point before the scripts are loaded.
int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
// Compile the scripts with same script data and get the functions.
- v8::Script::Compile(script_f, &origin)->Run();
+ v8::Script::Compile(context, script_f, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- v8::Script::Compile(script_g, &origin)->Run();
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
+ v8::Script::Compile(context, script_g, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "g"))
+ .ToLocalChecked());
// Call f and g and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Clear the script break point.
@@ -1854,9 +1919,9 @@ TEST(ScriptBreakPointMultiple) {
// Call f and g and check that the script break point is no longer active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Set script break point with the scripts loaded.
@@ -1864,13 +1929,13 @@ TEST(ScriptBreakPointMultiple) {
// Call f and g and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1881,20 +1946,21 @@ TEST(ScriptBreakPointLineOffset) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> f;
- v8::Local<v8::String> script = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 8 as this script has line offset 7\n"
- " b = 0; // line 9 as this script has line offset 7\n"
- "}");
+ v8::Local<v8::String> script =
+ v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " a = 0; // line 8 as this script has line offset 7\n"
+ " b = 0; // line 9 as this script has line offset 7\n"
+ "}");
// Create script origin both name and line offset.
- v8::ScriptOrigin origin(
- v8::String::NewFromUtf8(env->GetIsolate(), "test.html"),
- v8::Integer::New(env->GetIsolate(), 7));
+ v8::ScriptOrigin origin(v8_str(env->GetIsolate(), "test.html"),
+ v8::Integer::New(env->GetIsolate(), 7));
// Set two script break points before the script is loaded.
int sbp1 =
@@ -1903,13 +1969,18 @@ TEST(ScriptBreakPointLineOffset) {
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 9, 0);
// Compile the script and get the function.
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
// Call f and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Clear the script break points.
@@ -1918,7 +1989,7 @@ TEST(ScriptBreakPointLineOffset) {
// Call f and check that no script break points are active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Set a script break point with the script loaded.
@@ -1926,11 +1997,11 @@ TEST(ScriptBreakPointLineOffset) {
// Call f and check that the script break point is active.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -1945,25 +2016,27 @@ TEST(ScriptBreakPointLine) {
frame_function_name_source,
"frame_function_name");
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> f;
v8::Local<v8::Function> g;
v8::Local<v8::String> script =
- v8::String::NewFromUtf8(env->GetIsolate(),
- "a = 0 // line 0\n"
- "function f() {\n"
- " a = 1; // line 2\n"
- "}\n"
- " a = 2; // line 4\n"
- " /* xx */ function g() { // line 5\n"
- " function h() { // line 6\n"
- " a = 3; // line 7\n"
- " }\n"
- " h(); // line 9\n"
- " a = 4; // line 10\n"
- " }\n"
- " a=5; // line 12");
+ v8_str(env->GetIsolate(),
+ "a = 0 // line 0\n"
+ "function f() {\n"
+ " a = 1; // line 2\n"
+ "}\n"
+ " a = 2; // line 4\n"
+ " /* xx */ function g() { // line 5\n"
+ " function h() { // line 6\n"
+ " a = 3; // line 7\n"
+ " }\n"
+ " h(); // line 9\n"
+ " a = 4; // line 10\n"
+ " }\n"
+ " a=5; // line 12");
// Set a couple script break point before the script is loaded.
int sbp1 =
@@ -1975,26 +2048,32 @@ TEST(ScriptBreakPointLine) {
// Compile the script and get the function.
break_point_hit_count = 0;
- v8::ScriptOrigin origin(
- v8::String::NewFromUtf8(env->GetIsolate(), "test.html"),
- v8::Integer::New(env->GetIsolate(), 0));
- v8::Script::Compile(script, &origin)->Run();
+ v8::ScriptOrigin origin(v8_str(env->GetIsolate(), "test.html"),
+ v8::Integer::New(env->GetIsolate(), 0));
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "g"))
+ .ToLocalChecked());
// Check that a break point was hit when the script was run.
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(0, StrLength(last_function_hit));
// Call f and check that the script break point.
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, strcmp("f", last_function_hit));
// Call g and check that the script break point.
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ(0, strcmp("g", last_function_hit));
@@ -2004,7 +2083,7 @@ TEST(ScriptBreakPointLine) {
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 6, -1);
// Call g and check that the script break point in h is hit.
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
CHECK_EQ(0, strcmp("h", last_function_hit));
@@ -2016,13 +2095,16 @@ TEST(ScriptBreakPointLine) {
int sbp5 =
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 4, -1);
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
- g->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Reload the script which should hit two break points.
break_point_hit_count = 0;
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, StrLength(last_function_hit));
@@ -2032,7 +2114,10 @@ TEST(ScriptBreakPointLine) {
// Reload the script which should hit three break points.
break_point_hit_count = 0;
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ(0, StrLength(last_function_hit));
@@ -2042,11 +2127,14 @@ TEST(ScriptBreakPointLine) {
ClearBreakPointFromJS(env->GetIsolate(), sbp5);
ClearBreakPointFromJS(env->GetIsolate(), sbp6);
break_point_hit_count = 0;
- v8::Script::Compile(script, &origin)->Run();
+ v8::Script::Compile(context, script, &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2056,21 +2144,25 @@ TEST(ScriptBreakPointLineTopLevel) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::String> script =
- v8::String::NewFromUtf8(env->GetIsolate(),
- "function f() {\n"
- " a = 1; // line 1\n"
- "}\n"
- "a = 2; // line 3\n");
+ v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " a = 1; // line 1\n"
+ "}\n"
+ "a = 2; // line 3\n");
v8::Local<v8::Function> f;
{
v8::HandleScope scope(env->GetIsolate());
CompileRunWithOrigin(script, "test.html");
}
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
CcTest::heap()->CollectAllGarbage();
@@ -2078,7 +2170,7 @@ TEST(ScriptBreakPointLineTopLevel) {
// Call f and check that there was no break points.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Recompile and run script and check that break point was hit.
@@ -2089,11 +2181,13 @@ TEST(ScriptBreakPointLineTopLevel) {
// Call f and check that there are still no break points.
break_point_hit_count = 0;
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
CHECK_EQ(0, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2104,14 +2198,14 @@ TEST(ScriptBreakPointTopLevelCrash) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
- v8::Local<v8::String> script_source =
- v8::String::NewFromUtf8(env->GetIsolate(),
- "function f() {\n"
- " return 0;\n"
- "}\n"
- "f()");
+ v8::Local<v8::String> script_source = v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " return 0;\n"
+ "}\n"
+ "f()");
int sbp1 =
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
@@ -2127,8 +2221,8 @@ TEST(ScriptBreakPointTopLevelCrash) {
ClearBreakPointFromJS(env->GetIsolate(), sbp1);
ClearBreakPointFromJS(env->GetIsolate(), sbp2);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2138,24 +2232,26 @@ TEST(RemoveBreakPointInBreak) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> foo =
CompileFunction(&env, "function foo(){a=1;}", "foo");
// Register the debug event listener pasing the function
- v8::Debug::SetDebugEventListener(DebugEventRemoveBreakPoint, foo);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventRemoveBreakPoint, foo);
debug_event_remove_break_point = SetBreakPoint(foo, 0);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2164,28 +2260,38 @@ TEST(DebuggerStatement) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function bar(){debugger}"))
- ->Run();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
+ v8::Script::Compile(context,
+ v8_str(env->GetIsolate(), "function bar(){debugger}"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(),
- "function foo(){debugger;debugger;}"))->Run();
+ context, v8_str(env->GetIsolate(), "function foo(){debugger;debugger;}"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "foo"))
+ .ToLocalChecked());
v8::Local<v8::Function> bar = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "bar")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "bar"))
+ .ToLocalChecked());
// Run function with debugger statement
- bar->Call(env->Global(), 0, NULL);
+ bar->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
// Run function with two debugger statement
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2194,26 +2300,32 @@ TEST(DebuggerStatementBreakpoint) {
break_point_hit_count = 0;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "function foo(){debugger;}"))
- ->Run();
+ v8::Local<v8::Context> context = env.context();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Script::Compile(context,
+ v8_str(env->GetIsolate(), "function foo(){debugger;}"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Function> foo = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "foo"))
+ .ToLocalChecked());
// The debugger statement triggers breakpoint hit
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
int bp = SetBreakPoint(foo, 0);
// Set breakpoint does not duplicate hits
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
ClearBreakPoint(bp);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2230,25 +2342,21 @@ TEST(DebugEvaluate) {
evaluate_check_source,
"evaluate_check");
// Register the debug event listener
- v8::Debug::SetDebugEventListener(DebugEventEvaluate);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventEvaluate);
// Different expected vaules of x and a when in a break point (u = undefined,
// d = Hello, world!).
- struct EvaluateCheck checks_uu[] = {
- {"x", v8::Undefined(isolate)},
- {"a", v8::Undefined(isolate)},
- {NULL, v8::Handle<v8::Value>()}
- };
+ struct EvaluateCheck checks_uu[] = {{"x", v8::Undefined(isolate)},
+ {"a", v8::Undefined(isolate)},
+ {NULL, v8::Local<v8::Value>()}};
struct EvaluateCheck checks_hu[] = {
- {"x", v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")},
- {"a", v8::Undefined(isolate)},
- {NULL, v8::Handle<v8::Value>()}
- };
+ {"x", v8_str(env->GetIsolate(), "Hello, world!")},
+ {"a", v8::Undefined(isolate)},
+ {NULL, v8::Local<v8::Value>()}};
struct EvaluateCheck checks_hh[] = {
- {"x", v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")},
- {"a", v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")},
- {NULL, v8::Handle<v8::Value>()}
- };
+ {"x", v8_str(env->GetIsolate(), "Hello, world!")},
+ {"a", v8_str(env->GetIsolate(), "Hello, world!")},
+ {NULL, v8::Local<v8::Value>()}};
// Simple test function. The "y=0" is in the function foo to provide a break
// location. For "y=0" the "y" is at position 15 in the foo function
@@ -2265,24 +2373,25 @@ TEST(DebugEvaluate) {
const int foo_break_position_1 = 15;
const int foo_break_position_2 = 29;
+ v8::Local<v8::Context> context = env.context();
// Arguments with one parameter "Hello, world!"
- v8::Handle<v8::Value> argv_foo[1] = {
- v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")};
+ v8::Local<v8::Value> argv_foo[1] = {
+ v8_str(env->GetIsolate(), "Hello, world!")};
// Call foo with breakpoint set before a=x and undefined as parameter.
int bp = SetBreakPoint(foo, foo_break_position_1);
checks = checks_uu;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Call foo with breakpoint set before a=x and parameter "Hello, world!".
checks = checks_hu;
- foo->Call(env->Global(), 1, argv_foo);
+ foo->Call(context, env->Global(), 1, argv_foo).ToLocalChecked();
// Call foo with breakpoint set after a=x and parameter "Hello, world!".
ClearBreakPoint(bp);
SetBreakPoint(foo, foo_break_position_2);
checks = checks_hh;
- foo->Call(env->Global(), 1, argv_foo);
+ foo->Call(context, env->Global(), 1, argv_foo).ToLocalChecked();
// Test that overriding Object.prototype will not interfere into evaluation
// on call frame.
@@ -2301,14 +2410,14 @@ TEST(DebugEvaluate) {
const int zoo_break_position = 50;
// Arguments with one parameter "Hello, world!"
- v8::Handle<v8::Value> argv_zoo[1] = {
- v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")};
+ v8::Local<v8::Value> argv_zoo[1] = {
+ v8_str(env->GetIsolate(), "Hello, world!")};
// Call zoo with breakpoint set at y=0.
DebugEventCounterClear();
bp = SetBreakPoint(zoo, zoo_break_position);
checks = checks_hu;
- zoo->Call(env->Global(), 1, argv_zoo);
+ zoo->Call(context, env->Global(), 1, argv_zoo).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
ClearBreakPoint(bp);
@@ -2335,32 +2444,28 @@ TEST(DebugEvaluate) {
// Call bar setting breakpoint before a=x in barbar and undefined as
// parameter.
checks = checks_uu;
- v8::Handle<v8::Value> argv_bar_1[2] = {
- v8::Undefined(isolate),
- v8::Number::New(isolate, barbar_break_position)
- };
- bar->Call(env->Global(), 2, argv_bar_1);
+ v8::Local<v8::Value> argv_bar_1[2] = {
+ v8::Undefined(isolate), v8::Number::New(isolate, barbar_break_position)};
+ bar->Call(context, env->Global(), 2, argv_bar_1).ToLocalChecked();
// Call bar setting breakpoint before a=x in barbar and parameter
// "Hello, world!".
checks = checks_hu;
- v8::Handle<v8::Value> argv_bar_2[2] = {
- v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!"),
- v8::Number::New(env->GetIsolate(), barbar_break_position)
- };
- bar->Call(env->Global(), 2, argv_bar_2);
+ v8::Local<v8::Value> argv_bar_2[2] = {
+ v8_str(env->GetIsolate(), "Hello, world!"),
+ v8::Number::New(env->GetIsolate(), barbar_break_position)};
+ bar->Call(context, env->Global(), 2, argv_bar_2).ToLocalChecked();
// Call bar setting breakpoint after a=x in barbar and parameter
// "Hello, world!".
checks = checks_hh;
- v8::Handle<v8::Value> argv_bar_3[2] = {
- v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!"),
- v8::Number::New(env->GetIsolate(), barbar_break_position + 1)
- };
- bar->Call(env->Global(), 2, argv_bar_3);
+ v8::Local<v8::Value> argv_bar_3[2] = {
+ v8_str(env->GetIsolate(), "Hello, world!"),
+ v8::Number::New(env->GetIsolate(), barbar_break_position + 1)};
+ bar->Call(context, env->Global(), 2, argv_bar_3).ToLocalChecked();
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -2377,8 +2482,9 @@ TEST(ConditionalBreakpointWithCodeGenerationDisallowed) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(CheckDebugEvent);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), CheckDebugEvent);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> foo = CompileFunction(&env,
"function foo(x) {\n"
" var s = 'String value2';\n"
@@ -2391,27 +2497,34 @@ TEST(ConditionalBreakpointWithCodeGenerationDisallowed) {
debugEventCount = 0;
env->AllowCodeGenerationFromStrings(false);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, debugEventCount);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
bool checkedDebugEvals = true;
-v8::Handle<v8::Function> checkGlobalEvalFunction;
-v8::Handle<v8::Function> checkFrameEvalFunction;
+v8::Local<v8::Function> checkGlobalEvalFunction;
+v8::Local<v8::Function> checkFrameEvalFunction;
static void CheckDebugEval(const v8::Debug::EventDetails& eventDetails) {
if (eventDetails.GetEvent() == v8::Break) {
++debugEventCount;
v8::HandleScope handleScope(CcTest::isolate());
- v8::Handle<v8::Value> args[] = { eventDetails.GetExecutionState() };
- CHECK(checkGlobalEvalFunction->Call(
- eventDetails.GetEventContext()->Global(), 1, args)->IsTrue());
- CHECK(checkFrameEvalFunction->Call(
- eventDetails.GetEventContext()->Global(), 1, args)->IsTrue());
+ v8::Local<v8::Value> args[] = {eventDetails.GetExecutionState()};
+ CHECK(
+ checkGlobalEvalFunction->Call(eventDetails.GetEventContext(),
+ eventDetails.GetEventContext()->Global(),
+ 1, args)
+ .ToLocalChecked()
+ ->IsTrue());
+ CHECK(checkFrameEvalFunction->Call(eventDetails.GetEventContext(),
+ eventDetails.GetEventContext()->Global(),
+ 1, args)
+ .ToLocalChecked()
+ ->IsTrue());
}
}
@@ -2424,8 +2537,9 @@ TEST(DebugEvaluateWithCodeGenerationDisallowed) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(CheckDebugEval);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), CheckDebugEval);
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> foo = CompileFunction(&env,
"var global = 'Global';\n"
"function foo(x) {\n"
@@ -2447,13 +2561,13 @@ TEST(DebugEvaluateWithCodeGenerationDisallowed) {
"checkFrameEval");
debugEventCount = 0;
env->AllowCodeGenerationFromStrings(false);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, debugEventCount);
checkGlobalEvalFunction.Clear();
checkFrameEvalFunction.Clear();
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2541,7 +2655,7 @@ DebugProcessDebugMessagesData process_debug_messages_data;
static void DebugProcessDebugMessagesHandler(
const v8::Debug::Message& message) {
- v8::Handle<v8::String> json = message.GetJSON();
+ v8::Local<v8::String> json = message.GetJSON();
v8::String::Utf8Value utf8(json);
EvaluateResult* array_item = process_debug_messages_data.current();
@@ -2557,18 +2671,21 @@ static void DebugProcessDebugMessagesHandler(
// Test that the evaluation of expressions works even from ProcessDebugMessages
// i.e. with empty stack.
TEST(DebugEvaluateWithoutStack) {
- v8::Debug::SetMessageHandler(DebugProcessDebugMessagesHandler);
-
DebugLocalContext env;
+ v8::Debug::SetMessageHandler(env->GetIsolate(),
+ DebugProcessDebugMessagesHandler);
v8::HandleScope scope(env->GetIsolate());
const char* source =
"var v1 = 'Pinguin';\n function getAnimal() { return 'Capy' + 'bara'; }";
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), source))
- ->Run();
+ v8::Local<v8::Context> context = env.context();
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), source))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
- v8::Debug::ProcessDebugMessages();
+ v8::Debug::ProcessDebugMessages(env->GetIsolate());
const int kBufferSize = 1000;
uint16_t buffer[kBufferSize];
@@ -2604,7 +2721,7 @@ TEST(DebugEvaluateWithoutStack) {
v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_113, buffer));
- v8::Debug::ProcessDebugMessages();
+ v8::Debug::ProcessDebugMessages(isolate);
CHECK_EQ(3, process_debug_messages_data.counter);
@@ -2613,9 +2730,9 @@ TEST(DebugEvaluateWithoutStack) {
0);
CHECK_EQ(strcmp("805", process_debug_messages_data.results[2].buffer), 0);
- v8::Debug::SetMessageHandler(NULL);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2633,32 +2750,34 @@ TEST(DebugStepLinear) {
CompileRun("a=0; b=0; c=0; foo();");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
SetBreakPoint(foo, 3);
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ v8::Local<v8::Context> context = env.context();
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(4, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
// Register a debug event listener which just counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
SetBreakPoint(foo, 3);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Without stepping only active break points are hit.
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2668,7 +2787,7 @@ TEST(DebugStepKeyedLoadLoop) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
// Create a function for testing stepping of keyed load. The statement 'y=1'
// is there to have more than one breakable statement in the loop, TODO(315).
@@ -2685,29 +2804,31 @@ TEST(DebugStepKeyedLoadLoop) {
"y=0\n",
"foo");
+ v8::Local<v8::Context> context = env.context();
// Create array [0,1,2,3,4,5,6,7,8,9]
v8::Local<v8::Array> a = v8::Array::New(env->GetIsolate(), 10);
for (int i = 0; i < 10; i++) {
- a->Set(v8::Number::New(env->GetIsolate(), i),
- v8::Number::New(env->GetIsolate(), i));
+ CHECK(a->Set(context, v8::Number::New(env->GetIsolate(), i),
+ v8::Number::New(env->GetIsolate(), i))
+ .FromJust());
}
// Call function without any break points to ensure inlining is in place.
const int kArgc = 1;
- v8::Handle<v8::Value> args[kArgc] = { a };
- foo->Call(env->Global(), kArgc, args);
+ v8::Local<v8::Value> args[kArgc] = {a};
+ foo->Call(context, env->Global(), kArgc, args).ToLocalChecked();
// Set up break point and step through the function.
SetBreakPoint(foo, 3);
step_action = StepNext;
break_point_hit_count = 0;
- foo->Call(env->Global(), kArgc, args);
+ foo->Call(context, env->Global(), kArgc, args).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(45, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2717,7 +2838,7 @@ TEST(DebugStepKeyedStoreLoop) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
// Create a function for testing stepping of keyed store. The statement 'y=1'
// is there to have more than one breakable statement in the loop, TODO(315).
@@ -2733,29 +2854,31 @@ TEST(DebugStepKeyedStoreLoop) {
"y=0\n",
"foo");
+ v8::Local<v8::Context> context = env.context();
// Create array [0,1,2,3,4,5,6,7,8,9]
v8::Local<v8::Array> a = v8::Array::New(env->GetIsolate(), 10);
for (int i = 0; i < 10; i++) {
- a->Set(v8::Number::New(env->GetIsolate(), i),
- v8::Number::New(env->GetIsolate(), i));
+ CHECK(a->Set(context, v8::Number::New(env->GetIsolate(), i),
+ v8::Number::New(env->GetIsolate(), i))
+ .FromJust());
}
// Call function without any break points to ensure inlining is in place.
const int kArgc = 1;
- v8::Handle<v8::Value> args[kArgc] = { a };
- foo->Call(env->Global(), kArgc, args);
+ v8::Local<v8::Value> args[kArgc] = {a};
+ foo->Call(context, env->Global(), kArgc, args).ToLocalChecked();
// Set up break point and step through the function.
SetBreakPoint(foo, 3);
step_action = StepNext;
break_point_hit_count = 0;
- foo->Call(env->Global(), kArgc, args);
+ foo->Call(context, env->Global(), kArgc, args).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(44, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2765,8 +2888,9 @@ TEST(DebugStepNamedLoadLoop) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping of named load.
v8::Local<v8::Function> foo = CompileFunction(
&env,
@@ -2787,19 +2911,19 @@ TEST(DebugStepNamedLoadLoop) {
"foo");
// Call function without any break points to ensure inlining is in place.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Set up break point and step through the function.
SetBreakPoint(foo, 4);
step_action = StepNext;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(65, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2808,9 +2932,10 @@ static void DoDebugStepNamedStoreLoop(int expected) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
// Create a function for testing stepping of named store.
+ v8::Local<v8::Context> context = env.context();
v8::Local<v8::Function> foo = CompileFunction(
&env,
"function foo() {\n"
@@ -2822,19 +2947,19 @@ static void DoDebugStepNamedStoreLoop(int expected) {
"foo");
// Call function without any break points to ensure inlining is in place.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Set up break point and step through the function.
SetBreakPoint(foo, 3);
step_action = StepNext;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all expected break locations are hit.
CHECK_EQ(expected, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2848,8 +2973,9 @@ TEST(DebugStepLinearMixedICs) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping.
v8::Local<v8::Function> foo = CompileFunction(&env,
"function bar() {};"
@@ -2866,26 +2992,27 @@ TEST(DebugStepLinearMixedICs) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(11, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
// Register a debug event listener which just counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
SetBreakPoint(foo, 0);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Without stepping only active break points are hit.
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2894,8 +3021,9 @@ TEST(DebugStepDeclarations) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function foo() { "
@@ -2913,12 +3041,12 @@ TEST(DebugStepDeclarations) {
// Stepping through the declarations.
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2927,8 +3055,9 @@ TEST(DebugStepLocals) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function foo() { "
@@ -2946,12 +3075,12 @@ TEST(DebugStepLocals) {
// Stepping through the declarations.
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -2961,8 +3090,9 @@ TEST(DebugStepIf) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -2982,20 +3112,20 @@ TEST(DebugStepIf) {
// Stepping through the true part.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_true[argc] = { v8::True(isolate) };
- foo->Call(env->Global(), argc, argv_true);
+ v8::Local<v8::Value> argv_true[argc] = {v8::True(isolate)};
+ foo->Call(context, env->Global(), argc, argv_true).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
// Stepping through the false part.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_false[argc] = { v8::False(isolate) };
- foo->Call(env->Global(), argc, argv_false);
+ v8::Local<v8::Value> argv_false[argc] = {v8::False(isolate)};
+ foo->Call(context, env->Global(), argc, argv_false).ToLocalChecked();
CHECK_EQ(5, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3005,8 +3135,9 @@ TEST(DebugStepSwitch) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -3032,27 +3163,27 @@ TEST(DebugStepSwitch) {
// One case with fall-through.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_1[argc] = { v8::Number::New(isolate, 1) };
- foo->Call(env->Global(), argc, argv_1);
+ v8::Local<v8::Value> argv_1[argc] = {v8::Number::New(isolate, 1)};
+ foo->Call(context, env->Global(), argc, argv_1).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
// Another case.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_2[argc] = { v8::Number::New(isolate, 2) };
- foo->Call(env->Global(), argc, argv_2);
+ v8::Local<v8::Value> argv_2[argc] = {v8::Number::New(isolate, 2)};
+ foo->Call(context, env->Global(), argc, argv_2).ToLocalChecked();
CHECK_EQ(5, break_point_hit_count);
// Last case.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_3[argc] = { v8::Number::New(isolate, 3) };
- foo->Call(env->Global(), argc, argv_3);
+ v8::Local<v8::Value> argv_3[argc] = {v8::Number::New(isolate, 3)};
+ foo->Call(context, env->Global(), argc, argv_3).ToLocalChecked();
CHECK_EQ(7, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3062,8 +3193,9 @@ TEST(DebugStepWhile) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -3080,27 +3212,27 @@ TEST(DebugStepWhile) {
// Looping 0 times. We still should break at the while-condition once.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_0[argc] = { v8::Number::New(isolate, 0) };
- foo->Call(env->Global(), argc, argv_0);
+ v8::Local<v8::Value> argv_0[argc] = {v8::Number::New(isolate, 0)};
+ foo->Call(context, env->Global(), argc, argv_0).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
- foo->Call(env->Global(), argc, argv_10);
+ v8::Local<v8::Value> argv_10[argc] = {v8::Number::New(isolate, 10)};
+ foo->Call(context, env->Global(), argc, argv_10).ToLocalChecked();
CHECK_EQ(23, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
- foo->Call(env->Global(), argc, argv_100);
+ v8::Local<v8::Value> argv_100[argc] = {v8::Number::New(isolate, 100)};
+ foo->Call(context, env->Global(), argc, argv_100).ToLocalChecked();
CHECK_EQ(203, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3110,8 +3242,9 @@ TEST(DebugStepDoWhile) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -3128,27 +3261,27 @@ TEST(DebugStepDoWhile) {
// Looping 0 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_0[argc] = {v8::Number::New(isolate, 0)};
- foo->Call(env->Global(), argc, argv_0);
+ v8::Local<v8::Value> argv_0[argc] = {v8::Number::New(isolate, 0)};
+ foo->Call(context, env->Global(), argc, argv_0).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
- foo->Call(env->Global(), argc, argv_10);
+ v8::Local<v8::Value> argv_10[argc] = {v8::Number::New(isolate, 10)};
+ foo->Call(context, env->Global(), argc, argv_10).ToLocalChecked();
CHECK_EQ(22, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
- foo->Call(env->Global(), argc, argv_100);
+ v8::Local<v8::Value> argv_100[argc] = {v8::Number::New(isolate, 100)};
+ foo->Call(context, env->Global(), argc, argv_100).ToLocalChecked();
CHECK_EQ(202, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3158,8 +3291,9 @@ TEST(DebugStepFor) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -3177,27 +3311,27 @@ TEST(DebugStepFor) {
// Looping 0 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_0[argc] = {v8::Number::New(isolate, 0)};
- foo->Call(env->Global(), argc, argv_0);
+ v8::Local<v8::Value> argv_0[argc] = {v8::Number::New(isolate, 0)};
+ foo->Call(context, env->Global(), argc, argv_0).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
- foo->Call(env->Global(), argc, argv_10);
+ v8::Local<v8::Value> argv_10[argc] = {v8::Number::New(isolate, 10)};
+ foo->Call(context, env->Global(), argc, argv_10).ToLocalChecked();
CHECK_EQ(34, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
- foo->Call(env->Global(), argc, argv_100);
+ v8::Local<v8::Value> argv_100[argc] = {v8::Number::New(isolate, 100)};
+ foo->Call(context, env->Global(), argc, argv_100).ToLocalChecked();
CHECK_EQ(304, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3207,8 +3341,9 @@ TEST(DebugStepForContinue) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -3226,7 +3361,7 @@ TEST(DebugStepForContinue) {
"}"
"foo()";
v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
- v8::Handle<v8::Value> result;
+ v8::Local<v8::Value> result;
SetBreakPoint(foo, 8); // "var a = 0;"
// Each loop generates 4 or 5 steps depending on whether a is equal.
@@ -3234,22 +3369,22 @@ TEST(DebugStepForContinue) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
- result = foo->Call(env->Global(), argc, argv_10);
- CHECK_EQ(5, result->Int32Value());
+ v8::Local<v8::Value> argv_10[argc] = {v8::Number::New(isolate, 10)};
+ result = foo->Call(context, env->Global(), argc, argv_10).ToLocalChecked();
+ CHECK_EQ(5, result->Int32Value(context).FromJust());
CHECK_EQ(62, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
- result = foo->Call(env->Global(), argc, argv_100);
- CHECK_EQ(50, result->Int32Value());
+ v8::Local<v8::Value> argv_100[argc] = {v8::Number::New(isolate, 100)};
+ result = foo->Call(context, env->Global(), argc, argv_100).ToLocalChecked();
+ CHECK_EQ(50, result->Int32Value(context).FromJust());
CHECK_EQ(557, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3259,8 +3394,9 @@ TEST(DebugStepForBreak) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const int argc = 1;
@@ -3278,7 +3414,7 @@ TEST(DebugStepForBreak) {
"}"
"foo()";
v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
- v8::Handle<v8::Value> result;
+ v8::Local<v8::Value> result;
SetBreakPoint(foo, 8); // "var a = 0;"
// Each loop generates 5 steps except for the last (when break is executed)
@@ -3287,22 +3423,22 @@ TEST(DebugStepForBreak) {
// Looping 10 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
- result = foo->Call(env->Global(), argc, argv_10);
- CHECK_EQ(9, result->Int32Value());
+ v8::Local<v8::Value> argv_10[argc] = {v8::Number::New(isolate, 10)};
+ result = foo->Call(context, env->Global(), argc, argv_10).ToLocalChecked();
+ CHECK_EQ(9, result->Int32Value(context).FromJust());
CHECK_EQ(64, break_point_hit_count);
// Looping 100 times.
step_action = StepIn;
break_point_hit_count = 0;
- v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
- result = foo->Call(env->Global(), argc, argv_100);
- CHECK_EQ(99, result->Int32Value());
+ v8::Local<v8::Value> argv_100[argc] = {v8::Number::New(isolate, 100)};
+ result = foo->Call(context, env->Global(), argc, argv_100).ToLocalChecked();
+ CHECK_EQ(99, result->Int32Value(context).FromJust());
CHECK_EQ(604, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3311,8 +3447,9 @@ TEST(DebugStepForIn) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
v8::Local<v8::Function> foo;
@@ -3328,7 +3465,7 @@ TEST(DebugStepForIn) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(8, break_point_hit_count);
// Create a function for testing stepping. Run it to allow it to get
@@ -3345,12 +3482,12 @@ TEST(DebugStepForIn) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(10, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3359,8 +3496,9 @@ TEST(DebugStepWith) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function foo(x) { "
@@ -3369,20 +3507,22 @@ TEST(DebugStepWith) {
" with (b) {}"
"}"
"foo()";
- env->Global()->Set(v8::String::NewFromUtf8(env->GetIsolate(), "b"),
- v8::Object::New(env->GetIsolate()));
+ CHECK(env->Global()
+ ->Set(context, v8_str(env->GetIsolate(), "b"),
+ v8::Object::New(env->GetIsolate()))
+ .FromJust());
v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
- v8::Handle<v8::Value> result;
+ v8::Local<v8::Value> result;
SetBreakPoint(foo, 8); // "var a = {};"
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3392,8 +3532,9 @@ TEST(DebugConditional) {
v8::HandleScope scope(isolate);
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function foo(x) { "
@@ -3407,19 +3548,19 @@ TEST(DebugConditional) {
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
step_action = StepIn;
break_point_hit_count = 0;
const int argc = 1;
- v8::Handle<v8::Value> argv_true[argc] = { v8::True(isolate) };
- foo->Call(env->Global(), argc, argv_true);
+ v8::Local<v8::Value> argv_true[argc] = {v8::True(isolate)};
+ foo->Call(context, env->Global(), argc, argv_true).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3433,8 +3574,9 @@ TEST(StepInOutSimple) {
"frame_function_name");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStepSequence);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStepSequence);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function a() {b();c();}; "
@@ -3448,7 +3590,7 @@ TEST(StepInOutSimple) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "abcbaca";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3456,7 +3598,7 @@ TEST(StepInOutSimple) {
step_action = StepNext;
break_point_hit_count = 0;
expected_step_sequence = "aaa";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3464,13 +3606,13 @@ TEST(StepInOutSimple) {
step_action = StepOut;
break_point_hit_count = 0;
expected_step_sequence = "a";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3484,8 +3626,9 @@ TEST(StepInOutTree) {
"frame_function_name");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStepSequence);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStepSequence);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function a() {b(c(d()),d());c(d());d()}; "
@@ -3500,7 +3643,7 @@ TEST(StepInOutTree) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "adacadabcbadacada";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3508,7 +3651,7 @@ TEST(StepInOutTree) {
step_action = StepNext;
break_point_hit_count = 0;
expected_step_sequence = "aaaa";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -3516,13 +3659,13 @@ TEST(StepInOutTree) {
step_action = StepOut;
break_point_hit_count = 0;
expected_step_sequence = "a";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded(true);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate(), true);
}
@@ -3536,8 +3679,9 @@ TEST(StepInOutBranch) {
"frame_function_name");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStepSequence);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStepSequence);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping. Run it to allow it to get
// optimized.
const char* src = "function a() {b(false);c();}; "
@@ -3551,13 +3695,13 @@ TEST(StepInOutBranch) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "abbaca";
- a->Call(env->Global(), 0, NULL);
+ a->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3573,29 +3717,31 @@ TEST(DebugStepNatives) {
"foo");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(3, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
// Register a debug event listener which just counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Without stepping only active break points are hit.
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3612,29 +3758,31 @@ TEST(DebugStepFunctionApply) {
"foo");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStep);
+ v8::Local<v8::Context> context = env.context();
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// With stepping all break locations are hit.
CHECK_EQ(7, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
// Register a debug event listener which just counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Without stepping only the debugger statement is hit.
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3644,6 +3792,7 @@ TEST(DebugStepFunctionCall) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping.
v8::Local<v8::Function> foo = CompileFunction(
&env,
@@ -3658,35 +3807,35 @@ TEST(DebugStepFunctionCall) {
"foo");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
step_action = StepIn;
// Check stepping where the if condition in bar is false.
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(6, break_point_hit_count);
// Check stepping where the if condition in bar is true.
break_point_hit_count = 0;
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { v8::True(isolate) };
- foo->Call(env->Global(), argc, argv);
+ v8::Local<v8::Value> argv[argc] = {v8::True(isolate)};
+ foo->Call(context, env->Global(), argc, argv).ToLocalChecked();
CHECK_EQ(8, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
// Register a debug event listener which just counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Without stepping only the debugger statement is hit.
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3696,6 +3845,7 @@ TEST(DebugStepFunctionCallApply) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping.
v8::Local<v8::Function> foo =
CompileFunction(&env,
@@ -3708,27 +3858,27 @@ TEST(DebugStepFunctionCallApply) {
"foo");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStep);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventStep);
step_action = StepIn;
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
- CHECK_EQ(5, break_point_hit_count);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ CHECK_EQ(6, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
// Register a debug event listener which just counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreakPointHitCount);
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Without stepping only the debugger statement is hit.
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -3739,8 +3889,9 @@ TEST(PauseInScript) {
env.ExposeDebug();
// Register a debug event listener which counts.
- v8::Debug::SetDebugEventListener(DebugEventCounter);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventCounter);
+ v8::Local<v8::Context> context = env.context();
// Create a script that returns a function.
const char* src = "(function (evt) {})";
const char* script_name = "StepInHandlerTest";
@@ -3749,19 +3900,19 @@ TEST(PauseInScript) {
SetScriptBreakPointByNameFromJS(env->GetIsolate(), script_name, 0, -1);
break_point_hit_count = 0;
- v8::ScriptOrigin origin(
- v8::String::NewFromUtf8(env->GetIsolate(), script_name),
- v8::Integer::New(env->GetIsolate(), 0));
- v8::Handle<v8::Script> script = v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), src), &origin);
- v8::Local<v8::Value> r = script->Run();
+ v8::ScriptOrigin origin(v8_str(env->GetIsolate(), script_name),
+ v8::Integer::New(env->GetIsolate(), 0));
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), src), &origin)
+ .ToLocalChecked();
+ v8::Local<v8::Value> r = script->Run(context).ToLocalChecked();
CHECK(r->IsFunction());
CHECK_EQ(1, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -3783,6 +3934,7 @@ TEST(BreakOnException) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
+ v8::Local<v8::Context> context = env.context();
// Create functions for testing break on exception.
CompileFunction(&env, "function throws(){throw 1;}", "throws");
v8::Local<v8::Function> caught =
@@ -3801,135 +3953,136 @@ TEST(BreakOnException) {
&env, "function caughtFinally(){L:try{throws();}finally{break L;}}",
"caughtFinally");
- v8::V8::AddMessageListener(MessageCallbackCount);
- v8::Debug::SetDebugEventListener(DebugEventCounter);
+ env->GetIsolate()->AddMessageListener(MessageCallbackCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventCounter);
// Initial state should be no break on exceptions.
DebugEventCounterClear();
MessageCallbackCountClear();
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// No break on exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(false, false);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// Break on uncaught exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(false, true);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(1, 1, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(2, 2, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(3, 3, 2);
// Break on exception and uncaught exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(true, true);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// Break on exception
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnException(true, false);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// No break on exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), false, false);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(0, 0, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(0, 0, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 2);
// Break on uncaught exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), false, true);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(0, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(1, 1, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(2, 2, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(3, 3, 2);
// Break on exception and uncaught exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), true, true);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
// Break on exception using JavaScript
DebugEventCounterClear();
MessageCallbackCountClear();
ChangeBreakOnExceptionFromJS(env->GetIsolate(), true, false);
- caught->Call(env->Global(), 0, NULL);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(1, 0, 0);
- notCaught->Call(env->Global(), 0, NULL);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(2, 1, 1);
- notCaughtFinally->Call(env->Global(), 0, NULL);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
DebugEventCounterCheck(3, 2, 2);
- edgeCaseFinally->Call(env->Global(), 0, NULL);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
- v8::V8::RemoveMessageListeners(MessageCallbackCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
+ env->GetIsolate()->RemoveMessageListeners(MessageCallbackCount);
}
-static void try_finally_original_message(v8::Handle<v8::Message> message,
- v8::Handle<v8::Value> data) {
- CHECK_EQ(2, message->GetLineNumber());
- CHECK_EQ(2, message->GetStartColumn());
+static void try_finally_original_message(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ CHECK_EQ(2, message->GetLineNumber(context).FromJust());
+ CHECK_EQ(2, message->GetStartColumn(context).FromJust());
message_callback_count++;
}
@@ -3938,11 +4091,11 @@ TEST(TryFinallyOriginalMessage) {
// Test that the debugger plays nicely with the pending message.
message_callback_count = 0;
DebugEventCounterClear();
- v8::V8::AddMessageListener(try_finally_original_message);
- v8::Debug::SetDebugEventListener(DebugEventCounter);
- ChangeBreakOnException(true, true);
DebugLocalContext env;
v8::Isolate* isolate = CcTest::isolate();
+ isolate->AddMessageListener(try_finally_original_message);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventCounter);
+ ChangeBreakOnException(true, true);
v8::HandleScope scope(isolate);
CompileRun(
"try {\n"
@@ -3950,8 +4103,8 @@ TEST(TryFinallyOriginalMessage) {
"} finally {\n"
"}\n");
DebugEventCounterCheck(1, 1, 1);
- v8::Debug::SetDebugEventListener(NULL);
- v8::V8::RemoveMessageListeners(try_finally_original_message);
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ isolate->RemoveMessageListeners(try_finally_original_message);
}
@@ -3968,8 +4121,8 @@ TEST(EvalJSInDebugEventListenerOnNativeReThrownException) {
debug_event_listener_callback = noThrowJS;
debug_event_listener_callback_result = 2;
- v8::V8::AddMessageListener(MessageCallbackCount);
- v8::Debug::SetDebugEventListener(DebugEventCounter);
+ env->GetIsolate()->AddMessageListener(MessageCallbackCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventCounter);
// Break on uncaught exception
ChangeBreakOnException(false, true);
DebugEventCounterClear();
@@ -3978,8 +4131,8 @@ TEST(EvalJSInDebugEventListenerOnNativeReThrownException) {
// ReThrow native error
{
v8::TryCatch tryCatch(env->GetIsolate());
- env->GetIsolate()->ThrowException(v8::Exception::TypeError(
- v8::String::NewFromUtf8(env->GetIsolate(), "Type error")));
+ env->GetIsolate()->ThrowException(
+ v8::Exception::TypeError(v8_str(env->GetIsolate(), "Type error")));
CHECK(tryCatch.HasCaught());
tryCatch.ReThrow();
}
@@ -3999,14 +4152,15 @@ TEST(BreakOnCompileException) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
// For this test, we want to break on uncaught exceptions:
ChangeBreakOnException(false, true);
// Create a function for checking the function when hitting a break point.
frame_count = CompileFunction(&env, frame_count_source, "frame_count");
- v8::V8::AddMessageListener(MessageCallbackCount);
- v8::Debug::SetDebugEventListener(DebugEventCounter);
+ env->GetIsolate()->AddMessageListener(MessageCallbackCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventCounter);
DebugEventCounterClear();
MessageCallbackCountClear();
@@ -4018,30 +4172,36 @@ TEST(BreakOnCompileException) {
CHECK_EQ(-1, last_js_stack_height);
// Throws SyntaxError: Unexpected end of input
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "+++"));
+ CHECK(
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), "+++")).IsEmpty());
CHECK_EQ(1, exception_hit_count);
CHECK_EQ(1, uncaught_exception_hit_count);
CHECK_EQ(1, message_callback_count);
CHECK_EQ(0, last_js_stack_height); // No JavaScript stack.
// Throws SyntaxError: Unexpected identifier
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "x x"));
+ CHECK(
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), "x x")).IsEmpty());
CHECK_EQ(2, exception_hit_count);
CHECK_EQ(2, uncaught_exception_hit_count);
CHECK_EQ(2, message_callback_count);
CHECK_EQ(0, last_js_stack_height); // No JavaScript stack.
// Throws SyntaxError: Unexpected end of input
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "eval('+++')"))
- ->Run();
+ CHECK(v8::Script::Compile(context, v8_str(env->GetIsolate(), "eval('+++')"))
+ .ToLocalChecked()
+ ->Run(context)
+ .IsEmpty());
CHECK_EQ(3, exception_hit_count);
CHECK_EQ(3, uncaught_exception_hit_count);
CHECK_EQ(3, message_callback_count);
CHECK_EQ(1, last_js_stack_height);
// Throws SyntaxError: Unexpected identifier
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "eval('x x')"))
- ->Run();
+ CHECK(v8::Script::Compile(context, v8_str(env->GetIsolate(), "eval('x x')"))
+ .ToLocalChecked()
+ ->Run(context)
+ .IsEmpty());
CHECK_EQ(4, exception_hit_count);
CHECK_EQ(4, uncaught_exception_hit_count);
CHECK_EQ(4, message_callback_count);
@@ -4062,8 +4222,9 @@ TEST(StepWithException) {
"frame_function_name");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventStepSequence);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStepSequence);
+ v8::Local<v8::Context> context = env.context();
// Create functions for testing stepping.
const char* src = "function a() { n(); }; "
"function b() { c(); }; "
@@ -4075,32 +4236,36 @@ TEST(StepWithException) {
"function h() { x = 1; throw 1; }; ";
// Step through invocation of a.
+ ClearStepping();
v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
SetBreakPoint(a, 0);
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "aa";
- a->Call(env->Global(), 0, NULL);
+ CHECK(a->Call(context, env->Global(), 0, NULL).IsEmpty());
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
// Step through invocation of b + c.
+ ClearStepping();
v8::Local<v8::Function> b = CompileFunction(&env, src, "b");
SetBreakPoint(b, 0);
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "bcc";
- b->Call(env->Global(), 0, NULL);
+ CHECK(b->Call(context, env->Global(), 0, NULL).IsEmpty());
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
+
// Step through invocation of d + e.
+ ClearStepping();
v8::Local<v8::Function> d = CompileFunction(&env, src, "d");
SetBreakPoint(d, 0);
ChangeBreakOnException(false, true);
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ddedd";
- d->Call(env->Global(), 0, NULL);
+ d->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -4109,18 +4274,19 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ddeedd";
- d->Call(env->Global(), 0, NULL);
+ d->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
// Step through invocation of f + g + h.
+ ClearStepping();
v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
SetBreakPoint(f, 0);
ChangeBreakOnException(false, true);
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ffghhff";
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
@@ -4129,13 +4295,13 @@ TEST(StepWithException) {
step_action = StepIn;
break_point_hit_count = 0;
expected_step_sequence = "ffghhhff";
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(StrLength(expected_step_sequence),
break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -4149,8 +4315,9 @@ TEST(DebugBreak) {
v8::HandleScope scope(isolate);
// Register a debug event listener which sets the break flag and counts.
- v8::Debug::SetDebugEventListener(DebugEventBreak);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventBreak);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping.
const char* src = "function f0() {}"
"function f1(x1) {}"
@@ -4162,16 +4329,15 @@ TEST(DebugBreak) {
v8::Local<v8::Function> f3 = CompileFunction(&env, src, "f3");
// Call the function to make sure it is compiled.
- v8::Handle<v8::Value> argv[] = { v8::Number::New(isolate, 1),
- v8::Number::New(isolate, 1),
- v8::Number::New(isolate, 1),
- v8::Number::New(isolate, 1) };
+ v8::Local<v8::Value> argv[] = {
+ v8::Number::New(isolate, 1), v8::Number::New(isolate, 1),
+ v8::Number::New(isolate, 1), v8::Number::New(isolate, 1)};
// Call all functions to make sure that they are compiled.
- f0->Call(env->Global(), 0, NULL);
- f1->Call(env->Global(), 0, NULL);
- f2->Call(env->Global(), 0, NULL);
- f3->Call(env->Global(), 0, NULL);
+ f0->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f1->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f2->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ f3->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Set the debug break flag.
v8::Debug::DebugBreak(env->GetIsolate());
@@ -4180,18 +4346,18 @@ TEST(DebugBreak) {
// Call all functions with different argument count.
break_point_hit_count = 0;
for (unsigned int i = 0; i < arraysize(argv); i++) {
- f0->Call(env->Global(), i, argv);
- f1->Call(env->Global(), i, argv);
- f2->Call(env->Global(), i, argv);
- f3->Call(env->Global(), i, argv);
+ f0->Call(context, env->Global(), i, argv).ToLocalChecked();
+ f1->Call(context, env->Global(), i, argv).ToLocalChecked();
+ f2->Call(context, env->Global(), i, argv).ToLocalChecked();
+ f3->Call(context, env->Global(), i, argv).ToLocalChecked();
}
// One break for each function called.
CHECK(4 * arraysize(argv) == break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -4202,8 +4368,9 @@ TEST(DisableBreak) {
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which sets the break flag and counts.
- v8::Debug::SetDebugEventListener(DebugEventCounter);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventCounter);
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing stepping.
const char* src = "function f() {g()};function g(){i=0; while(i<10){i++}}";
v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
@@ -4219,23 +4386,23 @@ TEST(DisableBreak) {
// Call all functions with different argument count.
break_point_hit_count = 0;
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
{
v8::Debug::DebugBreak(env->GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::internal::DisableBreak disable_break(isolate->debug(), true);
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
}
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
static const char* kSimpleExtensionSource =
@@ -4250,7 +4417,7 @@ TEST(NoBreakWhenBootstrapping) {
v8::HandleScope scope(isolate);
// Register a debug event listener which sets the break flag and counts.
- v8::Debug::SetDebugEventListener(DebugEventCounter);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventCounter);
// Set the debug break flag.
v8::Debug::DebugBreak(isolate);
@@ -4269,28 +4436,37 @@ TEST(NoBreakWhenBootstrapping) {
CHECK_EQ(0, break_point_hit_count);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
static void NamedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
- v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
- result->Set(v8::Integer::New(info.GetIsolate(), 0),
- v8::String::NewFromUtf8(info.GetIsolate(), "a"));
- result->Set(v8::Integer::New(info.GetIsolate(), 1),
- v8::String::NewFromUtf8(info.GetIsolate(), "b"));
- result->Set(v8::Integer::New(info.GetIsolate(), 2),
- v8::String::NewFromUtf8(info.GetIsolate(), "c"));
+ v8::Local<v8::Array> result = v8::Array::New(info.GetIsolate(), 3);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 0),
+ v8_str(info.GetIsolate(), "a"))
+ .FromJust());
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 1),
+ v8_str(info.GetIsolate(), "b"))
+ .FromJust());
+ CHECK(result->Set(context, v8::Integer::New(info.GetIsolate(), 2),
+ v8_str(info.GetIsolate(), "c"))
+ .FromJust());
info.GetReturnValue().Set(result);
}
static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
v8::Isolate* isolate = info.GetIsolate();
- v8::Handle<v8::Array> result = v8::Array::New(isolate, 2);
- result->Set(v8::Integer::New(isolate, 0), v8::Number::New(isolate, 1));
- result->Set(v8::Integer::New(isolate, 1), v8::Number::New(isolate, 10));
+ v8::Local<v8::Array> result = v8::Array::New(isolate, 2);
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(result->Set(context, v8::Integer::New(isolate, 0),
+ v8::Number::New(isolate, 1))
+ .FromJust());
+ CHECK(result->Set(context, v8::Integer::New(isolate, 1),
+ v8::Number::New(isolate, 10))
+ .FromJust());
info.GetReturnValue().Set(result);
}
@@ -4300,13 +4476,13 @@ static void NamedGetter(v8::Local<v8::Name> name,
if (name->IsSymbol()) return;
v8::String::Utf8Value n(v8::Local<v8::String>::Cast(name));
if (strcmp(*n, "a") == 0) {
- info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "AA"));
+ info.GetReturnValue().Set(v8_str(info.GetIsolate(), "AA"));
return;
} else if (strcmp(*n, "b") == 0) {
- info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "BB"));
+ info.GetReturnValue().Set(v8_str(info.GetIsolate(), "BB"));
return;
} else if (strcmp(*n, "c") == 0) {
- info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "CC"));
+ info.GetReturnValue().Set(v8_str(info.GetIsolate(), "CC"));
return;
} else {
info.GetReturnValue().SetUndefined();
@@ -4329,75 +4505,71 @@ TEST(InterceptorPropertyMirror) {
v8::HandleScope scope(isolate);
env.ExposeDebug();
+ v8::Local<v8::Context> context = env.context();
// Create object with named interceptor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
named->SetHandler(v8::NamedPropertyHandlerConfiguration(
NamedGetter, NULL, NULL, NULL, NamedEnum));
- env->Global()->Set(
- v8::String::NewFromUtf8(isolate, "intercepted_named"),
- named->NewInstance());
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "intercepted_named"),
+ named->NewInstance(context).ToLocalChecked())
+ .FromJust());
// Create object with indexed interceptor.
- v8::Handle<v8::ObjectTemplate> indexed = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> indexed = v8::ObjectTemplate::New(isolate);
indexed->SetHandler(v8::IndexedPropertyHandlerConfiguration(
IndexedGetter, NULL, NULL, NULL, IndexedEnum));
- env->Global()->Set(
- v8::String::NewFromUtf8(isolate, "intercepted_indexed"),
- indexed->NewInstance());
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "intercepted_indexed"),
+ indexed->NewInstance(context).ToLocalChecked())
+ .FromJust());
// Create object with both named and indexed interceptor.
- v8::Handle<v8::ObjectTemplate> both = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> both = v8::ObjectTemplate::New(isolate);
both->SetHandler(v8::NamedPropertyHandlerConfiguration(
NamedGetter, NULL, NULL, NULL, NamedEnum));
both->SetHandler(v8::IndexedPropertyHandlerConfiguration(
IndexedGetter, NULL, NULL, NULL, IndexedEnum));
- env->Global()->Set(
- v8::String::NewFromUtf8(isolate, "intercepted_both"),
- both->NewInstance());
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "intercepted_both"),
+ both->NewInstance(context).ToLocalChecked())
+ .FromJust());
// Get mirrors for the three objects with interceptor.
CompileRun(
"var named_mirror = debug.MakeMirror(intercepted_named);"
"var indexed_mirror = debug.MakeMirror(intercepted_indexed);"
"var both_mirror = debug.MakeMirror(intercepted_both)");
- CHECK(CompileRun(
- "named_mirror instanceof debug.ObjectMirror")->BooleanValue());
- CHECK(CompileRun(
- "indexed_mirror instanceof debug.ObjectMirror")->BooleanValue());
- CHECK(CompileRun(
- "both_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CHECK(CompileRun("named_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("indexed_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("both_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
// Get the property names from the interceptors
CompileRun(
"named_names = named_mirror.propertyNames();"
"indexed_names = indexed_mirror.propertyNames();"
"both_names = both_mirror.propertyNames()");
- CHECK_EQ(3, CompileRun("named_names.length")->Int32Value());
- CHECK_EQ(2, CompileRun("indexed_names.length")->Int32Value());
- CHECK_EQ(5, CompileRun("both_names.length")->Int32Value());
+ CHECK_EQ(3, CompileRun("named_names.length")->Int32Value(context).FromJust());
+ CHECK_EQ(2,
+ CompileRun("indexed_names.length")->Int32Value(context).FromJust());
+ CHECK_EQ(5, CompileRun("both_names.length")->Int32Value(context).FromJust());
// Check the expected number of properties.
const char* source;
source = "named_mirror.properties().length";
- CHECK_EQ(3, CompileRun(source)->Int32Value());
+ CHECK_EQ(3, CompileRun(source)->Int32Value(context).FromJust());
source = "indexed_mirror.properties().length";
- CHECK_EQ(2, CompileRun(source)->Int32Value());
+ CHECK_EQ(2, CompileRun(source)->Int32Value(context).FromJust());
source = "both_mirror.properties().length";
- CHECK_EQ(5, CompileRun(source)->Int32Value());
-
- // 1 is PropertyKind.Named;
- source = "both_mirror.properties(1).length";
- CHECK_EQ(3, CompileRun(source)->Int32Value());
-
- // 2 is PropertyKind.Indexed;
- source = "both_mirror.properties(2).length";
- CHECK_EQ(2, CompileRun(source)->Int32Value());
-
- // 3 is PropertyKind.Named | PropertyKind.Indexed;
- source = "both_mirror.properties(3).length";
- CHECK_EQ(5, CompileRun(source)->Int32Value());
+ CHECK_EQ(5, CompileRun(source)->Int32Value(context).FromJust());
// Get the interceptor properties for the object with only named interceptor.
CompileRun("var named_values = named_mirror.properties()");
@@ -4407,10 +4579,10 @@ TEST(InterceptorPropertyMirror) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
SNPrintF(buffer,
"named_values[%d] instanceof debug.PropertyMirror", i);
- CHECK(CompileRun(buffer.start())->BooleanValue());
+ CHECK(CompileRun(buffer.start())->BooleanValue(context).FromJust());
SNPrintF(buffer, "named_values[%d].isNative()", i);
- CHECK(CompileRun(buffer.start())->BooleanValue());
+ CHECK(CompileRun(buffer.start())->BooleanValue(context).FromJust());
}
// Get the interceptor properties for the object with only indexed
@@ -4422,7 +4594,7 @@ TEST(InterceptorPropertyMirror) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
SNPrintF(buffer,
"indexed_values[%d] instanceof debug.PropertyMirror", i);
- CHECK(CompileRun(buffer.start())->BooleanValue());
+ CHECK(CompileRun(buffer.start())->BooleanValue(context).FromJust());
}
// Get the interceptor properties for the object with both types of
@@ -4433,24 +4605,24 @@ TEST(InterceptorPropertyMirror) {
for (int i = 0; i < 5; i++) {
EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
SNPrintF(buffer, "both_values[%d] instanceof debug.PropertyMirror", i);
- CHECK(CompileRun(buffer.start())->BooleanValue());
+ CHECK(CompileRun(buffer.start())->BooleanValue(context).FromJust());
}
// Check the property names.
- source = "both_values[0].name() == 'a'";
- CHECK(CompileRun(source)->BooleanValue());
+ source = "both_values[0].name() == '1'";
+ CHECK(CompileRun(source)->BooleanValue(context).FromJust());
- source = "both_values[1].name() == 'b'";
- CHECK(CompileRun(source)->BooleanValue());
+ source = "both_values[1].name() == '10'";
+ CHECK(CompileRun(source)->BooleanValue(context).FromJust());
- source = "both_values[2].name() == 'c'";
- CHECK(CompileRun(source)->BooleanValue());
+ source = "both_values[2].name() == 'a'";
+ CHECK(CompileRun(source)->BooleanValue(context).FromJust());
- source = "both_values[3].name() == 1";
- CHECK(CompileRun(source)->BooleanValue());
+ source = "both_values[3].name() == 'b'";
+ CHECK(CompileRun(source)->BooleanValue(context).FromJust());
- source = "both_values[4].name() == 10";
- CHECK(CompileRun(source)->BooleanValue());
+ source = "both_values[4].name() == 'c'";
+ CHECK(CompileRun(source)->BooleanValue(context).FromJust());
}
@@ -4461,30 +4633,43 @@ TEST(HiddenPrototypePropertyMirror) {
v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
- t0->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "x"),
+ v8::Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
+ t0->InstanceTemplate()->Set(v8_str(isolate, "x"),
v8::Number::New(isolate, 0));
- v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->SetHiddenPrototype(true);
- t1->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "y"),
+ t1->InstanceTemplate()->Set(v8_str(isolate, "y"),
v8::Number::New(isolate, 1));
- v8::Handle<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->SetHiddenPrototype(true);
- t2->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "z"),
+ t2->InstanceTemplate()->Set(v8_str(isolate, "z"),
v8::Number::New(isolate, 2));
- v8::Handle<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
- t3->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "u"),
+ v8::Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
+ t3->InstanceTemplate()->Set(v8_str(isolate, "u"),
v8::Number::New(isolate, 3));
+ v8::Local<v8::Context> context = env.context();
// Create object and set them on the global object.
- v8::Handle<v8::Object> o0 = t0->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "o0"), o0);
- v8::Handle<v8::Object> o1 = t1->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "o1"), o1);
- v8::Handle<v8::Object> o2 = t2->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "o2"), o2);
- v8::Handle<v8::Object> o3 = t3->GetFunction()->NewInstance();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "o3"), o3);
+ v8::Local<v8::Object> o0 = t0->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
+ CHECK(env->Global()->Set(context, v8_str(isolate, "o0"), o0).FromJust());
+ v8::Local<v8::Object> o1 = t1->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
+ CHECK(env->Global()->Set(context, v8_str(isolate, "o1"), o1).FromJust());
+ v8::Local<v8::Object> o2 = t2->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
+ CHECK(env->Global()->Set(context, v8_str(isolate, "o2"), o2).FromJust());
+ v8::Local<v8::Object> o3 = t3->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
+ CHECK(env->Global()->Set(context, v8_str(isolate, "o3"), o3).FromJust());
// Get mirrors for the four objects.
CompileRun(
@@ -4492,43 +4677,62 @@ TEST(HiddenPrototypePropertyMirror) {
"var o1_mirror = debug.MakeMirror(o1);"
"var o2_mirror = debug.MakeMirror(o2);"
"var o3_mirror = debug.MakeMirror(o3)");
- CHECK(CompileRun("o0_mirror instanceof debug.ObjectMirror")->BooleanValue());
- CHECK(CompileRun("o1_mirror instanceof debug.ObjectMirror")->BooleanValue());
- CHECK(CompileRun("o2_mirror instanceof debug.ObjectMirror")->BooleanValue());
- CHECK(CompileRun("o3_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CHECK(CompileRun("o0_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("o1_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("o2_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("o3_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
// Check that each object has one property.
- CHECK_EQ(1, CompileRun(
- "o0_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o1_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o2_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o3_mirror.propertyNames().length")->Int32Value());
+ CHECK_EQ(1, CompileRun("o0_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o1_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o2_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o3_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
// Set o1 as prototype for o0. o1 has the hidden prototype flag so all
// properties on o1 should be seen on o0.
- o0->Set(v8::String::NewFromUtf8(isolate, "__proto__"), o1);
- CHECK_EQ(2, CompileRun(
- "o0_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(0, CompileRun(
- "o0_mirror.property('x').value().value()")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o0_mirror.property('y').value().value()")->Int32Value());
+ CHECK(o0->Set(context, v8_str(isolate, "__proto__"), o1).FromJust());
+ CHECK_EQ(2, CompileRun("o0_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(0, CompileRun("o0_mirror.property('x').value().value()")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o0_mirror.property('y').value().value()")
+ ->Int32Value(context)
+ .FromJust());
// Set o2 as prototype for o0 (it will end up after o1 as o1 has the hidden
// prototype flag. o2 also has the hidden prototype flag so all properties
// on o2 should be seen on o0 as well as properties on o1.
- o0->Set(v8::String::NewFromUtf8(isolate, "__proto__"), o2);
- CHECK_EQ(3, CompileRun(
- "o0_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(0, CompileRun(
- "o0_mirror.property('x').value().value()")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o0_mirror.property('y').value().value()")->Int32Value());
- CHECK_EQ(2, CompileRun(
- "o0_mirror.property('z').value().value()")->Int32Value());
+ CHECK(o0->Set(context, v8_str(isolate, "__proto__"), o2).FromJust());
+ CHECK_EQ(3, CompileRun("o0_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(0, CompileRun("o0_mirror.property('x').value().value()")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o0_mirror.property('y').value().value()")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(2, CompileRun("o0_mirror.property('z').value().value()")
+ ->Int32Value(context)
+ .FromJust());
// Set o3 as prototype for o0 (it will end up after o1 and o2 as both o1 and
// o2 has the hidden prototype flag. o3 does not have the hidden prototype
@@ -4536,21 +4740,30 @@ TEST(HiddenPrototypePropertyMirror) {
// from o1 and o2 should still be seen on o0.
// Final prototype chain: o0 -> o1 -> o2 -> o3
// Hidden prototypes: ^^ ^^
- o0->Set(v8::String::NewFromUtf8(isolate, "__proto__"), o3);
- CHECK_EQ(3, CompileRun(
- "o0_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o3_mirror.propertyNames().length")->Int32Value());
- CHECK_EQ(0, CompileRun(
- "o0_mirror.property('x').value().value()")->Int32Value());
- CHECK_EQ(1, CompileRun(
- "o0_mirror.property('y').value().value()")->Int32Value());
- CHECK_EQ(2, CompileRun(
- "o0_mirror.property('z').value().value()")->Int32Value());
- CHECK(CompileRun("o0_mirror.property('u').isUndefined()")->BooleanValue());
+ CHECK(o0->Set(context, v8_str(isolate, "__proto__"), o3).FromJust());
+ CHECK_EQ(3, CompileRun("o0_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o3_mirror.propertyNames().length")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(0, CompileRun("o0_mirror.property('x').value().value()")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(1, CompileRun("o0_mirror.property('y').value().value()")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK_EQ(2, CompileRun("o0_mirror.property('z').value().value()")
+ ->Int32Value(context)
+ .FromJust());
+ CHECK(CompileRun("o0_mirror.property('u').isUndefined()")
+ ->BooleanValue(context)
+ .FromJust());
// The prototype (__proto__) for o0 should be o3 as o1 and o2 are hidden.
- CHECK(CompileRun("o0_mirror.protoObject() == o3_mirror")->BooleanValue());
+ CHECK(CompileRun("o0_mirror.protoObject() == o3_mirror")
+ ->BooleanValue(context)
+ .FromJust());
}
@@ -4568,29 +4781,35 @@ TEST(NativeGetterPropertyMirror) {
v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Handle<v8::String> name = v8::String::NewFromUtf8(isolate, "x");
+ v8::Local<v8::Context> context = env.context();
+ v8::Local<v8::String> name = v8_str(isolate, "x");
// Create object with named accessor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
- named->SetAccessor(name, &ProtperyXNativeGetter, NULL,
- v8::Handle<v8::Value>(), v8::DEFAULT, v8::None);
+ v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
+ named->SetAccessor(name, &ProtperyXNativeGetter, NULL, v8::Local<v8::Value>(),
+ v8::DEFAULT, v8::None);
// Create object with named property getter.
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
- named->NewInstance());
- CHECK_EQ(10, CompileRun("instance.x")->Int32Value());
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "instance"),
+ named->NewInstance(context).ToLocalChecked())
+ .FromJust());
+ CHECK_EQ(10, CompileRun("instance.x")->Int32Value(context).FromJust());
// Get mirror for the object with property getter.
CompileRun("var instance_mirror = debug.MakeMirror(instance);");
- CHECK(CompileRun(
- "instance_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CHECK(CompileRun("instance_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
CompileRun("var named_names = instance_mirror.propertyNames();");
- CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
- CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue());
- CHECK(CompileRun(
- "instance_mirror.property('x').value().isNumber()")->BooleanValue());
- CHECK(CompileRun(
- "instance_mirror.property('x').value().value() == 10")->BooleanValue());
+ CHECK_EQ(1, CompileRun("named_names.length")->Int32Value(context).FromJust());
+ CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue(context).FromJust());
+ CHECK(CompileRun("instance_mirror.property('x').value().isNumber()")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("instance_mirror.property('x').value().value() == 10")
+ ->BooleanValue(context)
+ .FromJust());
}
@@ -4608,30 +4827,37 @@ TEST(NativeGetterThrowingErrorPropertyMirror) {
v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Handle<v8::String> name = v8::String::NewFromUtf8(isolate, "x");
+ v8::Local<v8::Context> context = env.context();
+ v8::Local<v8::String> name = v8_str(isolate, "x");
// Create object with named accessor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
named->SetAccessor(name, &ProtperyXNativeGetterThrowingError, NULL,
- v8::Handle<v8::Value>(), v8::DEFAULT, v8::None);
+ v8::Local<v8::Value>(), v8::DEFAULT, v8::None);
// Create object with named property getter.
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "instance"),
- named->NewInstance());
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "instance"),
+ named->NewInstance(context).ToLocalChecked())
+ .FromJust());
// Get mirror for the object with property getter.
CompileRun("var instance_mirror = debug.MakeMirror(instance);");
- CHECK(CompileRun(
- "instance_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CHECK(CompileRun("instance_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
CompileRun("named_names = instance_mirror.propertyNames();");
- CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
- CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue());
- CHECK(CompileRun(
- "instance_mirror.property('x').value().isError()")->BooleanValue());
+ CHECK_EQ(1, CompileRun("named_names.length")->Int32Value(context).FromJust());
+ CHECK(CompileRun("named_names[0] == 'x'")->BooleanValue(context).FromJust());
+ CHECK(CompileRun("instance_mirror.property('x').value().isError()")
+ ->BooleanValue(context)
+ .FromJust());
// Check that the message is that passed to the Error constructor.
- CHECK(CompileRun(
- "instance_mirror.property('x').value().message() == 'Error message'")->
- BooleanValue());
+ CHECK(
+ CompileRun(
+ "instance_mirror.property('x').value().message() == 'Error message'")
+ ->BooleanValue(context)
+ .FromJust());
}
@@ -4645,81 +4871,97 @@ TEST(NoHiddenProperties) {
v8::HandleScope scope(isolate);
env.ExposeDebug();
+ v8::Local<v8::Context> context = env.context();
// Create an object in the global scope.
const char* source = "var obj = {a: 1};";
- v8::Script::Compile(v8::String::NewFromUtf8(isolate, source))
- ->Run();
+ v8::Script::Compile(context, v8_str(isolate, source))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(isolate, "obj")));
+ env->Global()->Get(context, v8_str(isolate, "obj")).ToLocalChecked());
// Set a hidden property on the object.
- obj->SetPrivate(env.context(),
- v8::Private::New(isolate, v8::String::NewFromUtf8(
- isolate, "v8::test-debug::a")),
- v8::Int32::New(isolate, 11))
+ obj->SetPrivate(
+ env.context(),
+ v8::Private::New(isolate, v8_str(isolate, "v8::test-debug::a")),
+ v8::Int32::New(isolate, 11))
.FromJust();
// Get mirror for the object with property getter.
CompileRun("var obj_mirror = debug.MakeMirror(obj);");
- CHECK(CompileRun(
- "obj_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CHECK(CompileRun("obj_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
CompileRun("var named_names = obj_mirror.propertyNames();");
// There should be exactly one property. But there is also an unnamed
// property whose value is hidden properties dictionary. The latter
// property should not be in the list of reguar properties.
- CHECK_EQ(1, CompileRun("named_names.length")->Int32Value());
- CHECK(CompileRun("named_names[0] == 'a'")->BooleanValue());
- CHECK(CompileRun(
- "obj_mirror.property('a').value().value() == 1")->BooleanValue());
+ CHECK_EQ(1, CompileRun("named_names.length")->Int32Value(context).FromJust());
+ CHECK(CompileRun("named_names[0] == 'a'")->BooleanValue(context).FromJust());
+ CHECK(CompileRun("obj_mirror.property('a').value().value() == 1")
+ ->BooleanValue(context)
+ .FromJust());
// Object created by t0 will become hidden prototype of object 'obj'.
- v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
- t0->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "b"),
+ v8::Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
+ t0->InstanceTemplate()->Set(v8_str(isolate, "b"),
v8::Number::New(isolate, 2));
t0->SetHiddenPrototype(true);
- v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
- t1->InstanceTemplate()->Set(v8::String::NewFromUtf8(isolate, "c"),
+ v8::Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
+ t1->InstanceTemplate()->Set(v8_str(isolate, "c"),
v8::Number::New(isolate, 3));
// Create proto objects, add hidden properties to them and set them on
// the global object.
- v8::Handle<v8::Object> protoObj = t0->GetFunction()->NewInstance();
+ v8::Local<v8::Object> protoObj = t0->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
protoObj->SetPrivate(
env.context(),
- v8::Private::New(isolate, v8::String::NewFromUtf8(
- isolate, "v8::test-debug::b")),
+ v8::Private::New(isolate, v8_str(isolate, "v8::test-debug::b")),
v8::Int32::New(isolate, 12))
.FromJust();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "protoObj"),
- protoObj);
- v8::Handle<v8::Object> grandProtoObj = t1->GetFunction()->NewInstance();
- grandProtoObj->SetPrivate(
- env.context(),
- v8::Private::New(isolate, v8::String::NewFromUtf8(
- isolate, "v8::test-debug::c")),
- v8::Int32::New(isolate, 13))
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "protoObj"), protoObj)
+ .FromJust());
+ v8::Local<v8::Object> grandProtoObj = t1->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
+ grandProtoObj->SetPrivate(env.context(),
+ v8::Private::New(
+ isolate, v8_str(isolate, "v8::test-debug::c")),
+ v8::Int32::New(isolate, 13))
.FromJust();
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "grandProtoObj"),
- grandProtoObj);
+ CHECK(env->Global()
+ ->Set(context, v8_str(isolate, "grandProtoObj"), grandProtoObj)
+ .FromJust());
// Setting prototypes: obj->protoObj->grandProtoObj
- protoObj->Set(v8::String::NewFromUtf8(isolate, "__proto__"),
- grandProtoObj);
- obj->Set(v8::String::NewFromUtf8(isolate, "__proto__"), protoObj);
+ CHECK(protoObj->Set(context, v8_str(isolate, "__proto__"), grandProtoObj)
+ .FromJust());
+ CHECK(obj->Set(context, v8_str(isolate, "__proto__"), protoObj).FromJust());
// Get mirror for the object with property getter.
CompileRun("var obj_mirror = debug.MakeMirror(obj);");
- CHECK(CompileRun(
- "obj_mirror instanceof debug.ObjectMirror")->BooleanValue());
+ CHECK(CompileRun("obj_mirror instanceof debug.ObjectMirror")
+ ->BooleanValue(context)
+ .FromJust());
CompileRun("var named_names = obj_mirror.propertyNames();");
// There should be exactly two properties - one from the object itself and
// another from its hidden prototype.
- CHECK_EQ(2, CompileRun("named_names.length")->Int32Value());
+ CHECK_EQ(2, CompileRun("named_names.length")->Int32Value(context).FromJust());
CHECK(CompileRun("named_names.sort(); named_names[0] == 'a' &&"
- "named_names[1] == 'b'")->BooleanValue());
- CHECK(CompileRun(
- "obj_mirror.property('a').value().value() == 1")->BooleanValue());
- CHECK(CompileRun(
- "obj_mirror.property('b').value().value() == 2")->BooleanValue());
+ "named_names[1] == 'b'")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("obj_mirror.property('a').value().value() == 1")
+ ->BooleanValue(context)
+ .FromJust());
+ CHECK(CompileRun("obj_mirror.property('b').value().value() == 2")
+ ->BooleanValue(context)
+ .FromJust());
}
@@ -4889,7 +5131,7 @@ class MessageQueueDebuggerThread : public v8::base::Thread {
static void MessageHandler(const v8::Debug::Message& message) {
- v8::Handle<v8::String> json = message.GetJSON();
+ v8::Local<v8::String> json = message.GetJSON();
v8::String::Utf8Value utf8(json);
if (IsBreakEventMessage(*utf8)) {
// Lets test script wait until break occurs to send commands.
@@ -4995,7 +5237,7 @@ TEST(MessageQueues) {
// Create a V8 environment
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetMessageHandler(MessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), MessageHandler);
message_queue_debugger_thread.Start();
const char* source_1 = "a = 3; b = 4; c = new Object(); c.d = 5;";
@@ -5089,7 +5331,7 @@ TEST(SendClientDataToHandler) {
v8::HandleScope scope(isolate);
TestClientData::ResetCounters();
handled_client_data_instances_count = 0;
- v8::Debug::SetMessageHandler(MessageHandlerCountingClientData);
+ v8::Debug::SetMessageHandler(isolate, MessageHandlerCountingClientData);
const char* source_1 = "a = 3; b = 4; c = new Object(); c.d = 5;";
const int kBufferSize = 1000;
uint16_t buffer[kBufferSize];
@@ -5171,7 +5413,7 @@ static void ThreadedMessageHandler(const v8::Debug::Message& message) {
if (IsBreakEventMessage(print_buffer)) {
// Check that we are inside the while loop.
int source_line = GetSourceLineFromBreakEventMessage(print_buffer);
- CHECK(8 <= source_line && source_line <= 13);
+ CHECK(4 <= source_line && source_line <= 10);
threaded_debugging_barriers.barrier_2.Wait();
}
}
@@ -5180,10 +5422,6 @@ static void ThreadedMessageHandler(const v8::Debug::Message& message) {
void V8Thread::Run() {
const char* source =
"flag = true;\n"
- "function bar( new_value ) {\n"
- " flag = new_value;\n"
- " return \"Return from bar(\" + new_value + \")\";\n"
- "}\n"
"\n"
"function foo() {\n"
" var x = 1;\n"
@@ -5205,13 +5443,13 @@ void V8Thread::Run() {
v8::Isolate::Scope isolate_scope(isolate_);
DebugLocalContext env(isolate_);
v8::HandleScope scope(isolate_);
- v8::Debug::SetMessageHandler(&ThreadedMessageHandler);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Debug::SetMessageHandler(isolate_, &ThreadedMessageHandler);
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(env->GetIsolate());
global_template->Set(
- v8::String::NewFromUtf8(env->GetIsolate(), "ThreadedAtBarrier1"),
+ v8_str(env->GetIsolate(), "ThreadedAtBarrier1"),
v8::FunctionTemplate::New(isolate_, ThreadedAtBarrier1));
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(isolate_, NULL, global_template);
v8::Context::Scope context_scope(context);
@@ -5226,10 +5464,11 @@ void DebuggerThread::Run() {
const int kBufSize = 1000;
uint16_t buffer[kBufSize];
- const char* command_1 = "{\"seq\":102,"
+ const char* command_1 =
+ "{\"seq\":102,"
"\"type\":\"request\","
"\"command\":\"evaluate\","
- "\"arguments\":{\"expression\":\"bar(false)\"}}";
+ "\"arguments\":{\"expression\":\"flag = false\"}}";
const char* command_2 = "{\"seq\":103,"
"\"type\":\"request\","
"\"command\":\"continue\"}";
@@ -5338,7 +5577,7 @@ void BreakpointsV8Thread::Run() {
v8::Isolate::Scope isolate_scope(isolate_);
DebugLocalContext env(isolate_);
v8::HandleScope scope(isolate_);
- v8::Debug::SetMessageHandler(&BreakpointsMessageHandler);
+ v8::Debug::SetMessageHandler(isolate_, &BreakpointsMessageHandler);
CompileRun(source_1);
breakpoints_barriers->barrier_1.Wait();
@@ -5494,7 +5733,7 @@ TEST(RecursiveBreakpointsGlobal) {
TEST(SetDebugEventListenerOnUninitializedVM) {
- v8::Debug::SetDebugEventListener(DummyDebugEventListener);
+ v8::Debug::SetDebugEventListener(CcTest::isolate(), DummyDebugEventListener);
}
@@ -5503,7 +5742,7 @@ static void DummyMessageHandler(const v8::Debug::Message& message) {
TEST(SetMessageHandlerOnUninitializedVM) {
- v8::Debug::SetMessageHandler(DummyMessageHandler);
+ v8::Debug::SetMessageHandler(CcTest::isolate(), DummyMessageHandler);
}
@@ -5515,7 +5754,7 @@ static const char* debugger_call_with_data_source =
" if (data) return data;"
" throw 'No data!'"
"}";
-v8::Handle<v8::Function> debugger_call_with_data;
+v8::Local<v8::Function> debugger_call_with_data;
// Source for a JavaScript function which returns the data parameter of a
@@ -5528,23 +5767,32 @@ static const char* debugger_call_with_closure_source =
" exec_state.y = x;"
" return exec_state.y"
"})";
-v8::Handle<v8::Function> debugger_call_with_closure;
+v8::Local<v8::Function> debugger_call_with_closure;
// Function to retrieve the number of JavaScript frames by calling a JavaScript
// in the debugger.
static void CheckFrameCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(v8::Debug::Call(frame_count)->IsNumber());
- CHECK_EQ(args[0]->Int32Value(),
- v8::Debug::Call(frame_count)->Int32Value());
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ CHECK(v8::Debug::Call(context, frame_count).ToLocalChecked()->IsNumber());
+ CHECK_EQ(args[0]->Int32Value(context).FromJust(),
+ v8::Debug::Call(context, frame_count)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
}
// Function to retrieve the source line of the top JavaScript frame by calling a
// JavaScript function in the debugger.
static void CheckSourceLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(v8::Debug::Call(frame_source_line)->IsNumber());
- CHECK_EQ(args[0]->Int32Value(),
- v8::Debug::Call(frame_source_line)->Int32Value());
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ CHECK(
+ v8::Debug::Call(context, frame_source_line).ToLocalChecked()->IsNumber());
+ CHECK_EQ(args[0]->Int32Value(context).FromJust(),
+ v8::Debug::Call(context, frame_source_line)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
}
@@ -5553,13 +5801,15 @@ static void CheckSourceLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
// can throw exceptions.
static void CheckDataParameter(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Handle<v8::String> data =
- v8::String::NewFromUtf8(args.GetIsolate(), "Test");
- CHECK(v8::Debug::Call(debugger_call_with_data, data)->IsString());
+ v8::Local<v8::String> data = v8_str(args.GetIsolate(), "Test");
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ CHECK(v8::Debug::Call(context, debugger_call_with_data, data)
+ .ToLocalChecked()
+ ->IsString());
for (int i = 0; i < 3; i++) {
v8::TryCatch catcher(args.GetIsolate());
- CHECK(v8::Debug::Call(debugger_call_with_data).IsEmpty());
+ CHECK(v8::Debug::Call(context, debugger_call_with_data).IsEmpty());
CHECK(catcher.HasCaught());
CHECK(catcher.Exception()->IsString());
}
@@ -5568,8 +5818,14 @@ static void CheckDataParameter(
// Function to test using a JavaScript with closure in the debugger.
static void CheckClosure(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(v8::Debug::Call(debugger_call_with_closure)->IsNumber());
- CHECK_EQ(3, v8::Debug::Call(debugger_call_with_closure)->Int32Value());
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ CHECK(v8::Debug::Call(context, debugger_call_with_closure)
+ .ToLocalChecked()
+ ->IsNumber());
+ CHECK_EQ(3, v8::Debug::Call(context, debugger_call_with_closure)
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust());
}
@@ -5579,94 +5835,122 @@ TEST(CallFunctionInDebugger) {
// CheckSourceLine and CheckDataParameter installed.
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->Set(
- v8::String::NewFromUtf8(isolate, "CheckFrameCount"),
- v8::FunctionTemplate::New(isolate, CheckFrameCount));
- global_template->Set(
- v8::String::NewFromUtf8(isolate, "CheckSourceLine"),
- v8::FunctionTemplate::New(isolate, CheckSourceLine));
- global_template->Set(
- v8::String::NewFromUtf8(isolate, "CheckDataParameter"),
- v8::FunctionTemplate::New(isolate, CheckDataParameter));
- global_template->Set(
- v8::String::NewFromUtf8(isolate, "CheckClosure"),
- v8::FunctionTemplate::New(isolate, CheckClosure));
- v8::Handle<v8::Context> context = v8::Context::New(isolate,
- NULL,
- global_template);
+ global_template->Set(v8_str(isolate, "CheckFrameCount"),
+ v8::FunctionTemplate::New(isolate, CheckFrameCount));
+ global_template->Set(v8_str(isolate, "CheckSourceLine"),
+ v8::FunctionTemplate::New(isolate, CheckSourceLine));
+ global_template->Set(v8_str(isolate, "CheckDataParameter"),
+ v8::FunctionTemplate::New(isolate, CheckDataParameter));
+ global_template->Set(v8_str(isolate, "CheckClosure"),
+ v8::FunctionTemplate::New(isolate, CheckClosure));
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate, NULL, global_template);
v8::Context::Scope context_scope(context);
// Compile a function for checking the number of JavaScript frames.
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, frame_count_source))->Run();
- frame_count = v8::Local<v8::Function>::Cast(context->Global()->Get(
- v8::String::NewFromUtf8(isolate, "frame_count")));
+ v8::Script::Compile(context, v8_str(isolate, frame_count_source))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ frame_count = v8::Local<v8::Function>::Cast(
+ context->Global()
+ ->Get(context, v8_str(isolate, "frame_count"))
+ .ToLocalChecked());
// Compile a function for returning the source line for the top frame.
- v8::Script::Compile(v8::String::NewFromUtf8(isolate,
- frame_source_line_source))->Run();
- frame_source_line = v8::Local<v8::Function>::Cast(context->Global()->Get(
- v8::String::NewFromUtf8(isolate, "frame_source_line")));
+ v8::Script::Compile(context, v8_str(isolate, frame_source_line_source))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ frame_source_line = v8::Local<v8::Function>::Cast(
+ context->Global()
+ ->Get(context, v8_str(isolate, "frame_source_line"))
+ .ToLocalChecked());
// Compile a function returning the data parameter.
- v8::Script::Compile(v8::String::NewFromUtf8(isolate,
- debugger_call_with_data_source))
- ->Run();
+ v8::Script::Compile(context, v8_str(isolate, debugger_call_with_data_source))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
debugger_call_with_data = v8::Local<v8::Function>::Cast(
- context->Global()->Get(v8::String::NewFromUtf8(
- isolate, "debugger_call_with_data")));
+ context->Global()
+ ->Get(context, v8_str(isolate, "debugger_call_with_data"))
+ .ToLocalChecked());
// Compile a function capturing closure.
- debugger_call_with_closure =
- v8::Local<v8::Function>::Cast(v8::Script::Compile(
- v8::String::NewFromUtf8(isolate,
- debugger_call_with_closure_source))->Run());
+ debugger_call_with_closure = v8::Local<v8::Function>::Cast(
+ v8::Script::Compile(context,
+ v8_str(isolate, debugger_call_with_closure_source))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked());
// Calling a function through the debugger returns 0 frames if there are
// no JavaScript frames.
- CHECK(v8::Integer::New(isolate, 0)->Equals(v8::Debug::Call(frame_count)));
+ CHECK(v8::Integer::New(isolate, 0)
+ ->Equals(context,
+ v8::Debug::Call(context, frame_count).ToLocalChecked())
+ .FromJust());
// Test that the number of frames can be retrieved.
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "CheckFrameCount(1)"))->Run();
- v8::Script::Compile(v8::String::NewFromUtf8(isolate,
- "function f() {"
- " CheckFrameCount(2);"
- "}; f()"))->Run();
+ v8::Script::Compile(context, v8_str(isolate, "CheckFrameCount(1)"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Script::Compile(context, v8_str(isolate,
+ "function f() {"
+ " CheckFrameCount(2);"
+ "}; f()"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
// Test that the source line can be retrieved.
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "CheckSourceLine(0)"))->Run();
- v8::Script::Compile(v8::String::NewFromUtf8(isolate,
- "function f() {\n"
- " CheckSourceLine(1)\n"
- " CheckSourceLine(2)\n"
- " CheckSourceLine(3)\n"
- "}; f()"))->Run();
+ v8::Script::Compile(context, v8_str(isolate, "CheckSourceLine(0)"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Script::Compile(context, v8_str(isolate,
+ "function f() {\n"
+ " CheckSourceLine(1)\n"
+ " CheckSourceLine(2)\n"
+ " CheckSourceLine(3)\n"
+ "}; f()"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
// Test that a parameter can be passed to a function called in the debugger.
- v8::Script::Compile(v8::String::NewFromUtf8(isolate,
- "CheckDataParameter()"))->Run();
+ v8::Script::Compile(context, v8_str(isolate, "CheckDataParameter()"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
// Test that a function with closure can be run in the debugger.
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "CheckClosure()"))->Run();
+ v8::Script::Compile(context, v8_str(isolate, "CheckClosure()"))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
// Test that the source line is correct when there is a line offset.
- v8::ScriptOrigin origin(v8::String::NewFromUtf8(isolate, "test"),
+ v8::ScriptOrigin origin(v8_str(isolate, "test"),
v8::Integer::New(isolate, 7));
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "CheckSourceLine(7)"), &origin)
- ->Run();
- v8::Script::Compile(v8::String::NewFromUtf8(isolate,
- "function f() {\n"
- " CheckSourceLine(8)\n"
- " CheckSourceLine(9)\n"
- " CheckSourceLine(10)\n"
- "}; f()"),
- &origin)->Run();
+ v8::Script::Compile(context, v8_str(isolate, "CheckSourceLine(7)"), &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Script::Compile(context, v8_str(isolate,
+ "function f() {\n"
+ " CheckSourceLine(8)\n"
+ " CheckSourceLine(9)\n"
+ " CheckSourceLine(10)\n"
+ "}; f()"),
+ &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
}
@@ -5689,11 +5973,13 @@ TEST(DebuggerUnload) {
DebugLocalContext env;
// Check debugger is unloaded before it is used.
- CheckDebuggerUnloaded();
+ CheckDebuggerUnloaded(env->GetIsolate());
// Set a debug event listener.
break_point_hit_count = 0;
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
{
v8::HandleScope scope(env->GetIsolate());
// Create a couple of functions for the test.
@@ -5710,41 +5996,44 @@ TEST(DebuggerUnload) {
// Make sure that the break points are there.
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
- bar->Call(env->Global(), 0, NULL);
+ bar->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
}
// Remove the debug event listener without clearing breakpoints. Do this
// outside a handle scope.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded(true);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate(), true);
// Now set a debug message handler.
break_point_hit_count = 0;
- v8::Debug::SetMessageHandler(MessageHandlerBreakPointHitCount);
+ v8::Debug::SetMessageHandler(env->GetIsolate(),
+ MessageHandlerBreakPointHitCount);
{
v8::HandleScope scope(env->GetIsolate());
// Get the test functions again.
v8::Local<v8::Function> foo(v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "foo"))));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "foo"))
+ .ToLocalChecked()));
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(0, break_point_hit_count);
// Set break points and run again.
SetBreakPoint(foo, 0);
SetBreakPoint(foo, 4);
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
}
// Remove the debug message handler without clearing breakpoints. Do this
// outside a handle scope.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded(true);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate(), true);
}
@@ -5783,10 +6072,10 @@ TEST(DebuggerClearMessageHandler) {
v8::HandleScope scope(env->GetIsolate());
// Check debugger is unloaded before it is used.
- CheckDebuggerUnloaded();
+ CheckDebuggerUnloaded(env->GetIsolate());
// Set a debug message handler.
- v8::Debug::SetMessageHandler(MessageHandlerHitCount);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), MessageHandlerHitCount);
// Run code to throw a unhandled exception. This should end up in the message
// handler.
@@ -5797,7 +6086,7 @@ TEST(DebuggerClearMessageHandler) {
// Clear debug message handler.
message_handler_hit_count = 0;
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
// Run code to throw a unhandled exception. This should end up in the message
// handler.
@@ -5806,7 +6095,7 @@ TEST(DebuggerClearMessageHandler) {
// The message handler should not be called more.
CHECK_EQ(0, message_handler_hit_count);
- CheckDebuggerUnloaded(true);
+ CheckDebuggerUnloaded(env->GetIsolate(), true);
}
@@ -5816,7 +6105,7 @@ static void MessageHandlerClearingMessageHandler(
message_handler_hit_count++;
// Clear debug message handler.
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(message.GetIsolate(), nullptr);
}
@@ -5826,10 +6115,11 @@ TEST(DebuggerClearMessageHandlerWhileActive) {
v8::HandleScope scope(env->GetIsolate());
// Check debugger is unloaded before it is used.
- CheckDebuggerUnloaded();
+ CheckDebuggerUnloaded(env->GetIsolate());
// Set a debug message handler.
- v8::Debug::SetMessageHandler(MessageHandlerClearingMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(),
+ MessageHandlerClearingMessageHandler);
// Run code to throw a unhandled exception. This should end up in the message
// handler.
@@ -5838,7 +6128,7 @@ TEST(DebuggerClearMessageHandlerWhileActive) {
// The message handler should be called.
CHECK_EQ(1, message_handler_hit_count);
- CheckDebuggerUnloaded(true);
+ CheckDebuggerUnloaded(env->GetIsolate(), true);
}
@@ -5861,12 +6151,12 @@ TEST(DebugGetLoadedScripts) {
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
+ v8::Local<v8::Context> context = env.context();
EmptyExternalStringResource source_ext_str;
v8::Local<v8::String> source =
- v8::String::NewExternal(env->GetIsolate(), &source_ext_str);
- v8::Handle<v8::Script> evil_script(v8::Script::Compile(source));
- // "use" evil_script to make the compiler happy.
- USE(evil_script);
+ v8::String::NewExternalTwoByte(env->GetIsolate(), &source_ext_str)
+ .ToLocalChecked();
+ CHECK(v8::Script::Compile(context, source).IsEmpty());
Handle<i::ExternalTwoByteString> i_source(
i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
// This situation can happen if source was an external string disposed
@@ -5875,7 +6165,7 @@ TEST(DebugGetLoadedScripts) {
bool allow_natives_syntax = i::FLAG_allow_natives_syntax;
i::FLAG_allow_natives_syntax = true;
- EnableDebugger();
+ EnableDebugger(env->GetIsolate());
v8::MaybeLocal<v8::Value> result =
CompileRun(env.context(),
"var scripts = %DebugGetLoadedScripts();"
@@ -5891,15 +6181,16 @@ TEST(DebugGetLoadedScripts) {
" }"
"}");
CHECK(!result.IsEmpty());
- DisableDebugger();
+ DisableDebugger(env->GetIsolate());
// Must not crash while accessing line_ends.
i::FLAG_allow_natives_syntax = allow_natives_syntax;
// Some scripts are retrieved - at least the number of native scripts.
- CHECK_GT((*env)
- ->Global()
- ->Get(v8::String::NewFromUtf8(env->GetIsolate(), "count"))
- ->Int32Value(),
+ CHECK_GT(env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "count"))
+ .ToLocalChecked()
+ ->Int32Value(context)
+ .FromJust(),
8);
}
@@ -5916,63 +6207,82 @@ TEST(ScriptNameAndData) {
frame_script_name_source,
"frame_script_name");
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context = env.context();
// Test function source.
- v8::Local<v8::String> script = v8::String::NewFromUtf8(env->GetIsolate(),
- "function f() {\n"
- " debugger;\n"
- "}\n");
+ v8::Local<v8::String> script = v8_str(env->GetIsolate(),
+ "function f() {\n"
+ " debugger;\n"
+ "}\n");
v8::ScriptOrigin origin1 =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "name"));
- v8::Handle<v8::Script> script1 = v8::Script::Compile(script, &origin1);
- script1->Run();
+ v8::ScriptOrigin(v8_str(env->GetIsolate(), "name"));
+ v8::Local<v8::Script> script1 =
+ v8::Script::Compile(context, script, &origin1).ToLocalChecked();
+ script1->Run(context).ToLocalChecked();
v8::Local<v8::Function> f;
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
CHECK_EQ(0, strcmp("name", last_script_name_hit));
// Compile the same script again without setting data. As the compilation
// cache is disabled when debugging expect the data to be missing.
- v8::Script::Compile(script, &origin1)->Run();
+ v8::Script::Compile(context, script, &origin1)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- f->Call(env->Global(), 0, NULL);
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, break_point_hit_count);
CHECK_EQ(0, strcmp("name", last_script_name_hit));
- v8::Local<v8::String> data_obj_source = v8::String::NewFromUtf8(
- env->GetIsolate(),
- "({ a: 'abc',\n"
- " b: 123,\n"
- " toString: function() { return this.a + ' ' + this.b; }\n"
- "})\n");
- v8::Script::Compile(data_obj_source)->Run();
+ v8::Local<v8::String> data_obj_source =
+ v8_str(env->GetIsolate(),
+ "({ a: 'abc',\n"
+ " b: 123,\n"
+ " toString: function() { return this.a + ' ' + this.b; }\n"
+ "})\n");
+ v8::Script::Compile(context, data_obj_source)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
v8::ScriptOrigin origin2 =
- v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "new name"));
- v8::Handle<v8::Script> script2 = v8::Script::Compile(script, &origin2);
- script2->Run();
+ v8::ScriptOrigin(v8_str(env->GetIsolate(), "new name"));
+ v8::Local<v8::Script> script2 =
+ v8::Script::Compile(context, script, &origin2).ToLocalChecked();
+ script2->Run(context).ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- f->Call(env->Global(), 0, NULL);
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(3, break_point_hit_count);
CHECK_EQ(0, strcmp("new name", last_script_name_hit));
- v8::Handle<v8::Script> script3 = v8::Script::Compile(script, &origin2);
- script3->Run();
+ v8::Local<v8::Script> script3 =
+ v8::Script::Compile(context, script, &origin2).ToLocalChecked();
+ script3->Run(context).ToLocalChecked();
f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- f->Call(env->Global(), 0, NULL);
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(4, break_point_hit_count);
}
-static v8::Handle<v8::Context> expected_context;
-static v8::Handle<v8::Value> expected_context_data;
+static v8::Local<v8::Context> expected_context;
+static v8::Local<v8::Value> expected_context_data;
// Check that the expected context is the one generating the debug event.
@@ -6001,23 +6311,23 @@ TEST(ContextData) {
v8::HandleScope scope(isolate);
// Create two contexts.
- v8::Handle<v8::Context> context_1;
- v8::Handle<v8::Context> context_2;
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::Handle<v8::ObjectTemplate>();
- v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
+ v8::Local<v8::Context> context_1;
+ v8::Local<v8::Context> context_2;
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate>();
+ v8::Local<v8::Value> global_object = v8::Local<v8::Value>();
context_1 = v8::Context::New(isolate, NULL, global_template, global_object);
context_2 = v8::Context::New(isolate, NULL, global_template, global_object);
- v8::Debug::SetMessageHandler(ContextCheckMessageHandler);
+ v8::Debug::SetMessageHandler(isolate, ContextCheckMessageHandler);
// Default data value is undefined.
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
CHECK(context_2->GetEmbedderData(0)->IsUndefined());
// Set and check different data values.
- v8::Handle<v8::String> data_1 = v8::String::NewFromUtf8(isolate, "1");
- v8::Handle<v8::String> data_2 = v8::String::NewFromUtf8(isolate, "2");
+ v8::Local<v8::String> data_1 = v8_str(isolate, "1");
+ v8::Local<v8::String> data_2 = v8_str(isolate, "2");
context_1->SetEmbedderData(0, data_1);
context_2->SetEmbedderData(0, data_2);
CHECK(context_1->GetEmbedderData(0)->StrictEquals(data_1));
@@ -6032,7 +6342,7 @@ TEST(ContextData) {
expected_context = context_1;
expected_context_data = data_1;
v8::Local<v8::Function> f = CompileFunction(isolate, source, "f");
- f->Call(context_1->Global(), 0, NULL);
+ f->Call(context_1, context_1->Global(), 0, NULL).ToLocalChecked();
}
@@ -6042,14 +6352,14 @@ TEST(ContextData) {
expected_context = context_2;
expected_context_data = data_2;
v8::Local<v8::Function> f = CompileFunction(isolate, source, "f");
- f->Call(context_2->Global(), 0, NULL);
+ f->Call(context_2, context_2->Global(), 0, NULL).ToLocalChecked();
}
// Two times compile event and two times break event.
CHECK_GT(message_handler_hit_count, 4);
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -6078,22 +6388,27 @@ TEST(DebugBreakInMessageHandler) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetMessageHandler(DebugBreakMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), DebugBreakMessageHandler);
+ v8::Local<v8::Context> context = env.context();
// Test functions.
const char* script = "function f() { debugger; g(); } function g() { }";
CompileRun(script);
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
v8::Local<v8::Function> g = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "g")));
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "g"))
+ .ToLocalChecked());
// Call f then g. The debugger statement in f will cause a break which will
// cause another break.
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, message_handler_break_hit_count);
// Calling g will not cause any additional breaks.
- g->Call(env->Global(), 0, NULL);
+ g->Call(context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(2, message_handler_break_hit_count);
}
@@ -6104,8 +6419,8 @@ TEST(DebugBreakInMessageHandler) {
static void DebugEventDebugBreak(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
-
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
if (event == v8::Break) {
break_point_hit_count++;
@@ -6113,17 +6428,17 @@ static void DebugEventDebugBreak(
if (!frame_function_name.IsEmpty()) {
// Get the name of the function.
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = {
- exec_state, v8::Integer::New(CcTest::isolate(), 0)
- };
- v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
- argc, argv);
+ v8::Local<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)};
+ v8::Local<v8::Value> result =
+ frame_function_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
if (result->IsUndefined()) {
last_function_hit[0] = '\0';
} else {
CHECK(result->IsString());
- v8::Handle<v8::String> function_name(
- result->ToString(CcTest::isolate()));
+ v8::Local<v8::String> function_name(
+ result->ToString(context).ToLocalChecked());
function_name->WriteUtf8(last_function_hit);
}
}
@@ -6140,7 +6455,7 @@ TEST(RegExpDebugBreak) {
// This test only applies to native regexps.
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
-
+ v8::Local<v8::Context> context = env.context();
// Create a function for checking the function when hitting a break point.
frame_function_name = CompileFunction(&env,
frame_function_name_source,
@@ -6154,14 +6469,15 @@ TEST(RegExpDebugBreak) {
v8::Local<v8::Function> f = CompileFunction(env->GetIsolate(), script, "f");
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = {
- v8::String::NewFromUtf8(env->GetIsolate(), " /* xxx */ a=0;")};
- v8::Local<v8::Value> result = f->Call(env->Global(), argc, argv);
- CHECK_EQ(12, result->Int32Value());
+ v8::Local<v8::Value> argv[argc] = {
+ v8_str(env->GetIsolate(), " /* xxx */ a=0;")};
+ v8::Local<v8::Value> result =
+ f->Call(context, env->Global(), argc, argv).ToLocalChecked();
+ CHECK_EQ(12, result->Int32Value(context).FromJust());
- v8::Debug::SetDebugEventListener(DebugEventDebugBreak);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventDebugBreak);
v8::Debug::DebugBreak(env->GetIsolate());
- result = f->Call(env->Global(), argc, argv);
+ result = f->Call(context, env->Global(), argc, argv).ToLocalChecked();
// Check that there was only one break event. Matching RegExp should not
// cause Break events.
@@ -6175,20 +6491,19 @@ TEST(RegExpDebugBreak) {
static void ExecuteScriptForContextCheck(
v8::Debug::MessageHandler message_handler) {
// Create a context.
- v8::Handle<v8::Context> context_1;
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::Handle<v8::ObjectTemplate>();
+ v8::Local<v8::Context> context_1;
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate>();
context_1 =
v8::Context::New(CcTest::isolate(), NULL, global_template);
- v8::Debug::SetMessageHandler(message_handler);
+ v8::Debug::SetMessageHandler(CcTest::isolate(), message_handler);
// Default data value is undefined.
CHECK(context_1->GetEmbedderData(0)->IsUndefined());
// Set and check a data value.
- v8::Handle<v8::String> data_1 =
- v8::String::NewFromUtf8(CcTest::isolate(), "1");
+ v8::Local<v8::String> data_1 = v8_str(CcTest::isolate(), "1");
context_1->SetEmbedderData(0, data_1);
CHECK(context_1->GetEmbedderData(0)->StrictEquals(data_1));
@@ -6201,10 +6516,10 @@ static void ExecuteScriptForContextCheck(
expected_context = context_1;
expected_context_data = data_1;
v8::Local<v8::Function> f = CompileFunction(CcTest::isolate(), source, "f");
- f->Call(context_1->Global(), 0, NULL);
+ f->Call(context_1, context_1->Global(), 0, NULL).ToLocalChecked();
}
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(CcTest::isolate(), nullptr);
}
@@ -6219,7 +6534,7 @@ TEST(EvalContextData) {
// One time compile event and one time break event.
CHECK_GT(message_handler_hit_count, 2);
- CheckDebuggerUnloaded();
+ CheckDebuggerUnloaded(CcTest::isolate());
}
@@ -6287,7 +6602,7 @@ TEST(NestedBreakEventContextData) {
// One break from the source and another from the evaluate request.
CHECK_EQ(break_count, 2);
- CheckDebuggerUnloaded();
+ CheckDebuggerUnloaded(CcTest::isolate());
}
@@ -6310,22 +6625,27 @@ static void AfterCompileMessageHandler(const v8::Debug::Message& message) {
TEST(AfterCompileMessageWhenMessageHandlerIsReset) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
after_compile_message_count = 0;
const char* script = "var a=1";
- v8::Debug::SetMessageHandler(AfterCompileMessageHandler);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
- ->Run();
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), AfterCompileMessageHandler);
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), script))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
- v8::Debug::SetMessageHandler(AfterCompileMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), AfterCompileMessageHandler);
v8::Debug::DebugBreak(env->GetIsolate());
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
- ->Run();
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), script))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
// Setting listener to NULL should cause debugger unload.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
// Compilation cache should be disabled when debugger is active.
CHECK_EQ(2, after_compile_message_count);
@@ -6358,7 +6678,8 @@ TEST(SyntaxErrorMessageOnSyntaxException) {
// For this test, we want to break on uncaught exceptions:
ChangeBreakOnException(false, true);
- v8::Debug::SetDebugEventListener(CompileErrorEventCounter);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), CompileErrorEventCounter);
+ v8::Local<v8::Context> context = env.context();
CompileErrorEventCounterClear();
@@ -6366,24 +6687,29 @@ TEST(SyntaxErrorMessageOnSyntaxException) {
CHECK_EQ(0, compile_error_event_count);
// Throws SyntaxError: Unexpected end of input
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "+++"));
+ CHECK(
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), "+++")).IsEmpty());
CHECK_EQ(1, compile_error_event_count);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "/sel\\/: \\"));
+ CHECK(v8::Script::Compile(context, v8_str(env->GetIsolate(), "/sel\\/: \\"))
+ .IsEmpty());
CHECK_EQ(2, compile_error_event_count);
- v8::Local<v8::Script> script = v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "JSON.parse('1234:')"));
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(context,
+ v8_str(env->GetIsolate(), "JSON.parse('1234:')"))
+ .ToLocalChecked();
CHECK_EQ(2, compile_error_event_count);
- script->Run();
+ CHECK(script->Run(context).IsEmpty());
CHECK_EQ(3, compile_error_event_count);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "new RegExp('/\\/\\\\');"));
+ v8::Script::Compile(context,
+ v8_str(env->GetIsolate(), "new RegExp('/\\/\\\\');"))
+ .ToLocalChecked();
CHECK_EQ(3, compile_error_event_count);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "throw 1;"));
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), "throw 1;"))
+ .ToLocalChecked();
CHECK_EQ(3, compile_error_event_count);
}
@@ -6392,23 +6718,28 @@ TEST(SyntaxErrorMessageOnSyntaxException) {
TEST(BreakMessageWhenMessageHandlerIsReset) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
after_compile_message_count = 0;
const char* script = "function f() {};";
- v8::Debug::SetMessageHandler(AfterCompileMessageHandler);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
- ->Run();
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), AfterCompileMessageHandler);
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), script))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
- v8::Debug::SetMessageHandler(AfterCompileMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), AfterCompileMessageHandler);
v8::Debug::DebugBreak(env->GetIsolate());
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- f->Call(env->Global(), 0, NULL);
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// Setting message handler to NULL should cause debugger unload.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
// Compilation cache should be disabled when debugger is active.
CHECK_EQ(1, after_compile_message_count);
@@ -6429,25 +6760,30 @@ TEST(ExceptionMessageWhenMessageHandlerIsReset) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
// For this test, we want to break on uncaught exceptions:
ChangeBreakOnException(false, true);
exception_event_count = 0;
const char* script = "function f() {throw new Error()};";
- v8::Debug::SetMessageHandler(AfterCompileMessageHandler);
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script))
- ->Run();
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), AfterCompileMessageHandler);
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), script))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
- v8::Debug::SetMessageHandler(ExceptionMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), ExceptionMessageHandler);
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "f")));
- f->Call(env->Global(), 0, NULL);
+ env->Global()
+ ->Get(context, v8_str(env->GetIsolate(), "f"))
+ .ToLocalChecked());
+ CHECK(f->Call(context, env->Global(), 0, NULL).IsEmpty());
// Setting message handler to NULL should cause debugger unload.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
CHECK_EQ(1, exception_event_count);
}
@@ -6462,7 +6798,8 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
const char* script = "function f() {};";
const char* resource_name = "test_resource";
- v8::Debug::SetMessageHandler(AfterCompileMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), AfterCompileMessageHandler);
+ v8::Local<v8::Context> context = env.context();
// Set a couple of provisional breakpoint on lines out of the script lines
// range.
@@ -6473,14 +6810,15 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
after_compile_message_count = 0;
- v8::ScriptOrigin origin(
- v8::String::NewFromUtf8(env->GetIsolate(), resource_name),
- v8::Integer::New(env->GetIsolate(), 10),
- v8::Integer::New(env->GetIsolate(), 1));
+ v8::ScriptOrigin origin(v8_str(env->GetIsolate(), resource_name),
+ v8::Integer::New(env->GetIsolate(), 10),
+ v8::Integer::New(env->GetIsolate(), 1));
// Compile a script whose first line number is greater than the breakpoints'
// lines.
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), script),
- &origin)->Run();
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), script), &origin)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
// If the script is compiled successfully there is exactly one after compile
// event. In case of an exception in debugger code after compile event is not
@@ -6489,7 +6827,7 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
ClearBreakPointFromJS(env->GetIsolate(), sbp1);
ClearBreakPointFromJS(env->GetIsolate(), sbp2);
- v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
}
@@ -6521,9 +6859,10 @@ static void BreakMessageHandler(const v8::Debug::Message& message) {
TEST(NoDebugBreakInAfterCompileMessageHandler) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
// Register a debug event listener which sets the break flag and counts.
- v8::Debug::SetMessageHandler(BreakMessageHandler);
+ v8::Debug::SetMessageHandler(env->GetIsolate(), BreakMessageHandler);
// Set the debug break flag.
v8::Debug::DebugBreak(env->GetIsolate());
@@ -6537,13 +6876,13 @@ TEST(NoDebugBreakInAfterCompileMessageHandler) {
// Set the debug break flag again.
v8::Debug::DebugBreak(env->GetIsolate());
- f->Call(env->Global(), 0, NULL);
+ f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// There should be one more break event when the script is evaluated in 'f'.
CHECK_EQ(2, break_point_hit_count);
// Get rid of the debug message handler.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -6562,7 +6901,7 @@ TEST(ProcessDebugMessages) {
counting_message_handler_counter = 0;
- v8::Debug::SetMessageHandler(CountingMessageHandler);
+ v8::Debug::SetMessageHandler(isolate, CountingMessageHandler);
const int kBufferSize = 1000;
uint16_t buffer[kBufferSize];
@@ -6576,7 +6915,7 @@ TEST(ProcessDebugMessages) {
isolate, buffer, AsciiToUtf16(scripts_command, buffer));
CHECK_EQ(0, counting_message_handler_counter);
- v8::Debug::ProcessDebugMessages();
+ v8::Debug::ProcessDebugMessages(isolate);
// At least one message should come
CHECK_GE(counting_message_handler_counter, 1);
@@ -6587,13 +6926,13 @@ TEST(ProcessDebugMessages) {
v8::Debug::SendCommand(
isolate, buffer, AsciiToUtf16(scripts_command, buffer));
CHECK_EQ(0, counting_message_handler_counter);
- v8::Debug::ProcessDebugMessages();
+ v8::Debug::ProcessDebugMessages(isolate);
// At least two messages should come
CHECK_GE(counting_message_handler_counter, 2);
// Get rid of the debug message handler.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -6639,7 +6978,7 @@ class SendCommandThread : public v8::base::Thread {
timer.Elapsed().InMillisecondsF());
}
- v8::V8::TerminateExecution(isolate_);
+ isolate_->TerminateExecution();
}
void StartSending() { semaphore_.Signal(); }
@@ -6660,24 +6999,28 @@ TEST(ProcessDebugMessagesThreaded) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
counting_message_handler_counter = 0;
v8::Debug::SetMessageHandler(
- SendCommandThread::CountingAndSignallingMessageHandler);
+ isolate, SendCommandThread::CountingAndSignallingMessageHandler);
send_command_thread_ = new SendCommandThread(isolate);
send_command_thread_->Start();
- v8::Handle<v8::FunctionTemplate> start =
+ v8::Local<v8::FunctionTemplate> start =
v8::FunctionTemplate::New(isolate, StartSendingCommands);
- env->Global()->Set(v8_str("start"), start->GetFunction());
+ CHECK(env->Global()
+ ->Set(context, v8_str("start"),
+ start->GetFunction(context).ToLocalChecked())
+ .FromJust());
CompileRun("start(); while (true) { }");
CHECK_EQ(20, counting_message_handler_counter);
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -6703,8 +7046,9 @@ TEST(Backtrace) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.context();
- v8::Debug::SetMessageHandler(BacktraceData::MessageHandler);
+ v8::Debug::SetMessageHandler(isolate, BacktraceData::MessageHandler);
const int kBufferSize = 1000;
uint16_t buffer[kBufferSize];
@@ -6720,12 +7064,11 @@ TEST(Backtrace) {
buffer,
AsciiToUtf16(scripts_command, buffer),
NULL);
- v8::Debug::ProcessDebugMessages();
+ v8::Debug::ProcessDebugMessages(isolate);
CHECK_EQ(BacktraceData::frame_counter, 0);
- v8::Handle<v8::String> void0 =
- v8::String::NewFromUtf8(env->GetIsolate(), "void(0)");
- v8::Handle<v8::Script> script = CompileWithOrigin(void0, void0);
+ v8::Local<v8::String> void0 = v8_str(env->GetIsolate(), "void(0)");
+ v8::Local<v8::Script> script = CompileWithOrigin(void0, void0);
// Check backtrace from "void(0)" script.
BacktraceData::frame_counter = -10;
@@ -6734,12 +7077,12 @@ TEST(Backtrace) {
buffer,
AsciiToUtf16(scripts_command, buffer),
NULL);
- script->Run();
+ script->Run(context).ToLocalChecked();
CHECK_EQ(BacktraceData::frame_counter, 1);
// Get rid of the debug message handler.
- v8::Debug::SetMessageHandler(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetMessageHandler(isolate, nullptr);
+ CheckDebuggerUnloaded(isolate);
}
@@ -6747,19 +7090,23 @@ TEST(GetMirror) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Value> obj =
- v8::Debug::GetMirror(v8::String::NewFromUtf8(isolate, "hodja"));
+ v8::Local<v8::Context> context = env.context();
+ v8::Local<v8::Value> obj =
+ v8::Debug::GetMirror(context, v8_str(isolate, "hodja")).ToLocalChecked();
v8::ScriptCompiler::Source source(v8_str(
"function runTest(mirror) {"
" return mirror.isString() && (mirror.length() == 5);"
"}"
""
"runTest;"));
- v8::Handle<v8::Function> run_test = v8::Handle<v8::Function>::Cast(
- v8::ScriptCompiler::CompileUnbound(isolate, &source)
+ v8::Local<v8::Function> run_test = v8::Local<v8::Function>::Cast(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &source)
+ .ToLocalChecked()
->BindToCurrentContext()
- ->Run());
- v8::Handle<v8::Value> result = run_test->Call(env->Global(), 1, &obj);
+ ->Run(context)
+ .ToLocalChecked());
+ v8::Local<v8::Value> result =
+ run_test->Call(context, env->Global(), 1, &obj).ToLocalChecked();
CHECK(result->IsTrue());
}
@@ -6768,6 +7115,7 @@ TEST(GetMirror) {
TEST(DebugBreakFunctionApply) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
// Create a function for testing breaking in apply.
v8::Local<v8::Function> foo = CompileFunction(
@@ -6778,7 +7126,7 @@ TEST(DebugBreakFunctionApply) {
"foo");
// Register a debug event listener which steps and counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakMax);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventBreakMax);
// Set the debug break flag before calling the code using function.apply.
v8::Debug::DebugBreak(env->GetIsolate());
@@ -6787,18 +7135,18 @@ TEST(DebugBreakFunctionApply) {
// where this test would enter an infinite loop.
break_point_hit_count = 0;
max_break_point_hit_count = 10000; // 10000 => infinite loop.
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(context, env->Global(), 0, NULL).ToLocalChecked();
// When keeping the debug break several break will happen.
CHECK_GT(break_point_hit_count, 1);
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
-v8::Handle<v8::Context> debugee_context;
-v8::Handle<v8::Context> debugger_context;
+v8::Local<v8::Context> debugee_context;
+v8::Local<v8::Context> debugger_context;
// Property getter that checks that current and calling contexts
@@ -6807,7 +7155,7 @@ static void NamedGetterWithCallingContextCheck(
v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
CHECK_EQ(0, strcmp(*v8::String::Utf8Value(name), "a"));
- v8::Handle<v8::Context> current = info.GetIsolate()->GetCurrentContext();
+ v8::Local<v8::Context> current = info.GetIsolate()->GetCurrentContext();
CHECK(current == debugee_context);
CHECK(current != debugger_context);
info.GetReturnValue().Set(1);
@@ -6820,18 +7168,19 @@ static void NamedGetterWithCallingContextCheck(
static void DebugEventGetAtgumentPropertyValue(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
if (event == v8::Break) {
break_point_hit_count++;
CHECK(debugger_context == CcTest::isolate()->GetCurrentContext());
- v8::Handle<v8::Function> func = v8::Handle<v8::Function>::Cast(CompileRun(
+ v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(CompileRun(
"(function(exec_state) {\n"
" return (exec_state.frame(0).argumentValue(0).property('a').\n"
" value().value() == 1);\n"
"})"));
const int argc = 1;
- v8::Handle<v8::Value> argv[argc] = { exec_state };
- v8::Handle<v8::Value> result = func->Call(exec_state, argc, argv);
+ v8::Local<v8::Value> argv[argc] = {exec_state};
+ v8::Local<v8::Value> result =
+ func->Call(debugger_context, exec_state, argc, argv).ToLocalChecked();
CHECK(result->IsTrue());
}
}
@@ -6851,14 +7200,15 @@ TEST(CallingContextIsNotDebugContext) {
debugger_context = v8::Utils::ToLocal(debug->debug_context());
// Create object with 'a' property accessor.
- v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
- named->SetAccessor(v8::String::NewFromUtf8(isolate, "a"),
- NamedGetterWithCallingContextCheck);
- env->Global()->Set(v8::String::NewFromUtf8(isolate, "obj"),
- named->NewInstance());
+ v8::Local<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
+ named->SetAccessor(v8_str(isolate, "a"), NamedGetterWithCallingContextCheck);
+ CHECK(env->Global()
+ ->Set(debugee_context, v8_str(isolate, "obj"),
+ named->NewInstance(debugee_context).ToLocalChecked())
+ .FromJust());
// Register the debug event listener
- v8::Debug::SetDebugEventListener(DebugEventGetAtgumentPropertyValue);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventGetAtgumentPropertyValue);
// Create a function that invokes debugger.
v8::Local<v8::Function> foo = CompileFunction(
@@ -6868,38 +7218,44 @@ TEST(CallingContextIsNotDebugContext) {
"foo");
break_point_hit_count = 0;
- foo->Call(env->Global(), 0, NULL);
+ foo->Call(debugee_context, env->Global(), 0, NULL).ToLocalChecked();
CHECK_EQ(1, break_point_hit_count);
- v8::Debug::SetDebugEventListener(NULL);
- debugee_context = v8::Handle<v8::Context>();
- debugger_context = v8::Handle<v8::Context>();
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ debugee_context = v8::Local<v8::Context>();
+ debugger_context = v8::Local<v8::Context>();
+ CheckDebuggerUnloaded(isolate);
}
TEST(DebugContextIsPreservedBetweenAccesses) {
v8::HandleScope scope(CcTest::isolate());
- v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount);
- v8::Local<v8::Context> context1 = v8::Debug::GetDebugContext();
- v8::Local<v8::Context> context2 = v8::Debug::GetDebugContext();
+ v8::Debug::SetDebugEventListener(CcTest::isolate(),
+ DebugEventBreakPointHitCount);
+ v8::Local<v8::Context> context1 =
+ v8::Debug::GetDebugContext(CcTest::isolate());
+ v8::Local<v8::Context> context2 =
+ v8::Debug::GetDebugContext(CcTest::isolate());
CHECK(v8::Utils::OpenHandle(*context1).is_identical_to(
v8::Utils::OpenHandle(*context2)));
- v8::Debug::SetDebugEventListener(NULL);
+ v8::Debug::SetDebugEventListener(CcTest::isolate(), nullptr);
}
TEST(NoDebugContextWhenDebuggerDisabled) {
v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Context> context = v8::Debug::GetDebugContext();
+ v8::Local<v8::Context> context =
+ v8::Debug::GetDebugContext(CcTest::isolate());
CHECK(context.IsEmpty());
}
-static v8::Handle<v8::Value> expected_callback_data;
+static v8::Local<v8::Value> expected_callback_data;
static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
CHECK(details.GetEventContext() == expected_context);
- CHECK(expected_callback_data->Equals(details.GetCallbackData()));
+ CHECK(expected_callback_data->Equals(details.GetEventContext(),
+ details.GetCallbackData())
+ .FromJust());
}
@@ -6909,15 +7265,18 @@ TEST(DebugEventContext) {
v8::HandleScope scope(isolate);
expected_context = v8::Context::New(isolate);
expected_callback_data = v8::Int32::New(isolate, 2010);
- v8::Debug::SetDebugEventListener(DebugEventContextChecker,
- expected_callback_data);
+ v8::Debug::SetDebugEventListener(isolate, DebugEventContextChecker,
+ expected_callback_data);
v8::Context::Scope context_scope(expected_context);
- v8::Script::Compile(
- v8::String::NewFromUtf8(isolate, "(function(){debugger;})();"))->Run();
+ v8::Script::Compile(expected_context,
+ v8_str(isolate, "(function(){debugger;})();"))
+ .ToLocalChecked()
+ ->Run(expected_context)
+ .ToLocalChecked();
expected_context.Clear();
- v8::Debug::SetDebugEventListener(NULL);
- expected_context_data = v8::Handle<v8::Value>();
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(isolate, nullptr);
+ expected_context_data = v8::Local<v8::Value>();
+ CheckDebuggerUnloaded(isolate);
}
@@ -6926,21 +7285,22 @@ static bool debug_event_break_deoptimize_done = false;
static void DebugEventBreakDeoptimize(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
if (event == v8::Break) {
if (!frame_function_name.IsEmpty()) {
// Get the name of the function.
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = {
- exec_state, v8::Integer::New(CcTest::isolate(), 0)
- };
- v8::Handle<v8::Value> result =
- frame_function_name->Call(exec_state, argc, argv);
+ v8::Local<v8::Value> argv[argc] = {
+ exec_state, v8::Integer::New(CcTest::isolate(), 0)};
+ v8::Local<v8::Value> result =
+ frame_function_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
if (!result->IsUndefined()) {
char fn[80];
CHECK(result->IsString());
- v8::Handle<v8::String> function_name(
- result->ToString(CcTest::isolate()));
+ v8::Local<v8::String> function_name(
+ result->ToString(context).ToLocalChecked());
function_name->WriteUtf8(fn);
if (strcmp(fn, "bar") == 0) {
i::Deoptimizer::DeoptimizeAll(CcTest::i_isolate());
@@ -6960,6 +7320,7 @@ TEST(DeoptimizeDuringDebugBreak) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
+ v8::Local<v8::Context> context = env.context();
// Create a function for checking the function when hitting a break point.
frame_function_name = CompileFunction(&env,
@@ -6971,19 +7332,20 @@ TEST(DeoptimizeDuringDebugBreak) {
// This tests lazy deoptimization bailout for the stack check, as the first
// time in function bar when using debug break and no break points will be at
// the initial stack check.
- v8::Debug::SetDebugEventListener(DebugEventBreakDeoptimize);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugEventBreakDeoptimize);
// Compile and run function bar which will optimize it for some flag settings.
v8::Local<v8::Function> f = CompileFunction(&env, "function bar(){}", "bar");
- f->Call(v8::Undefined(env->GetIsolate()), 0, NULL);
+ f->Call(context, v8::Undefined(env->GetIsolate()), 0, NULL).ToLocalChecked();
// Set debug break and call bar again.
v8::Debug::DebugBreak(env->GetIsolate());
- f->Call(v8::Undefined(env->GetIsolate()), 0, NULL);
+ f->Call(context, v8::Undefined(env->GetIsolate()), 0, NULL).ToLocalChecked();
CHECK(debug_event_break_deoptimize_done);
- v8::Debug::SetDebugEventListener(NULL);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
}
@@ -6991,46 +7353,58 @@ static void DebugEventBreakWithOptimizedStack(
const v8::Debug::EventDetails& event_details) {
v8::Isolate* isolate = event_details.GetEventContext()->GetIsolate();
v8::DebugEvent event = event_details.GetEvent();
- v8::Handle<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Object> exec_state = event_details.GetExecutionState();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
if (event == v8::Break) {
if (!frame_function_name.IsEmpty()) {
for (int i = 0; i < 2; i++) {
const int argc = 2;
- v8::Handle<v8::Value> argv[argc] = {
- exec_state, v8::Integer::New(isolate, i)
- };
+ v8::Local<v8::Value> argv[argc] = {exec_state,
+ v8::Integer::New(isolate, i)};
// Get the name of the function in frame i.
- v8::Handle<v8::Value> result =
- frame_function_name->Call(exec_state, argc, argv);
+ v8::Local<v8::Value> result =
+ frame_function_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
CHECK(result->IsString());
- v8::Handle<v8::String> function_name(result->ToString(isolate));
- CHECK(function_name->Equals(v8::String::NewFromUtf8(isolate, "loop")));
+ v8::Local<v8::String> function_name(
+ result->ToString(context).ToLocalChecked());
+ CHECK(
+ function_name->Equals(context, v8_str(isolate, "loop")).FromJust());
// Get the name of the first argument in frame i.
- result = frame_argument_name->Call(exec_state, argc, argv);
+ result = frame_argument_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
CHECK(result->IsString());
- v8::Handle<v8::String> argument_name(result->ToString(isolate));
- CHECK(argument_name->Equals(v8::String::NewFromUtf8(isolate, "count")));
+ v8::Local<v8::String> argument_name(
+ result->ToString(context).ToLocalChecked());
+ CHECK(argument_name->Equals(context, v8_str(isolate, "count"))
+ .FromJust());
// Get the value of the first argument in frame i. If the
// funtion is optimized the value will be undefined, otherwise
// the value will be '1 - i'.
//
// TODO(3141533): We should be able to get the real value for
// optimized frames.
- result = frame_argument_value->Call(exec_state, argc, argv);
- CHECK(result->IsUndefined() || (result->Int32Value() == 1 - i));
+ result = frame_argument_value->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
+ CHECK(result->IsUndefined() ||
+ (result->Int32Value(context).FromJust() == 1 - i));
// Get the name of the first local variable.
- result = frame_local_name->Call(exec_state, argc, argv);
+ result = frame_local_name->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
CHECK(result->IsString());
- v8::Handle<v8::String> local_name(result->ToString(isolate));
- CHECK(local_name->Equals(v8::String::NewFromUtf8(isolate, "local")));
+ v8::Local<v8::String> local_name(
+ result->ToString(context).ToLocalChecked());
+ CHECK(local_name->Equals(context, v8_str(isolate, "local")).FromJust());
// Get the value of the first local variable. If the function
// is optimized the value will be undefined, otherwise it will
// be 42.
//
// TODO(3141533): We should be able to get the real value for
// optimized frames.
- result = frame_local_value->Call(exec_state, argc, argv);
- CHECK(result->IsUndefined() || (result->Int32Value() == 42));
+ result = frame_local_value->Call(context, exec_state, argc, argv)
+ .ToLocalChecked();
+ CHECK(result->IsUndefined() ||
+ (result->Int32Value(context).FromJust() == 42));
}
}
}
@@ -7038,7 +7412,8 @@ static void DebugEventBreakWithOptimizedStack(
static void ScheduleBreak(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Debug::SetDebugEventListener(DebugEventBreakWithOptimizedStack);
+ v8::Debug::SetDebugEventListener(args.GetIsolate(),
+ DebugEventBreakWithOptimizedStack);
v8::Debug::DebugBreak(args.GetIsolate());
}
@@ -7046,6 +7421,7 @@ static void ScheduleBreak(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(DebugBreakStackInspection) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
frame_function_name =
CompileFunction(&env, frame_function_name_source, "frame_function_name");
@@ -7059,11 +7435,13 @@ TEST(DebugBreakStackInspection) {
frame_local_value =
CompileFunction(&env, frame_local_value_source, "frame_local_value");
- v8::Handle<v8::FunctionTemplate> schedule_break_template =
+ v8::Local<v8::FunctionTemplate> schedule_break_template =
v8::FunctionTemplate::New(env->GetIsolate(), ScheduleBreak);
- v8::Handle<v8::Function> schedule_break =
- schedule_break_template->GetFunction();
- env->Global()->Set(v8_str("scheduleBreak"), schedule_break);
+ v8::Local<v8::Function> schedule_break =
+ schedule_break_template->GetFunction(context).ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(context, v8_str("scheduleBreak"), schedule_break)
+ .FromJust());
const char* src =
"function loop(count) {"
@@ -7071,7 +7449,10 @@ TEST(DebugBreakStackInspection) {
" if (count < 1) { scheduleBreak(); loop(count + 1); }"
"}"
"loop(0);";
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), src))->Run();
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), src))
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
}
@@ -7112,7 +7493,7 @@ static void TestDebugBreakInLoop(const char* loop_head,
CompileRun("f();");
CHECK_EQ(kBreaksPerTest, break_point_hit_count);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
}
}
}
@@ -7142,7 +7523,7 @@ void DebugBreakLoop(const char* loop_header, const char** loop_bodies,
v8::HandleScope scope(env->GetIsolate());
// Register a debug event listener which sets the break flag and counts.
- v8::Debug::SetDebugEventListener(DebugEventBreakMax);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventBreakMax);
CompileRun(
"var a = 1;\n"
@@ -7152,8 +7533,8 @@ void DebugBreakLoop(const char* loop_header, const char** loop_bodies,
TestDebugBreakInLoop(loop_header, loop_bodies, loop_footer);
// Get rid of the debug event listener.
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -7218,6 +7599,7 @@ v8::Local<v8::Script> inline_script;
static void DebugBreakInlineListener(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
if (event != v8::Break) return;
int expected_frame_count = 4;
@@ -7233,7 +7615,7 @@ static void DebugBreakInlineListener(
SNPrintF(script_vector, "%%GetFrameCount(%d)", break_id);
v8::Local<v8::Value> result = CompileRun(script);
- int frame_count = result->Int32Value();
+ int frame_count = result->Int32Value(context).FromJust();
CHECK_EQ(expected_frame_count, frame_count);
for (int i = 0; i < frame_count; i++) {
@@ -7242,10 +7624,11 @@ static void DebugBreakInlineListener(
SNPrintF(script_vector, "%%GetFrameDetails(%d, %d)[5]", break_id, i);
v8::Local<v8::Value> result = CompileRun(script);
CHECK_EQ(expected_line_number[i],
- i::Script::GetLineNumber(source_script, result->Int32Value()));
+ i::Script::GetLineNumber(source_script,
+ result->Int32Value(context).FromJust()));
}
- v8::Debug::SetDebugEventListener(NULL);
- v8::V8::TerminateExecution(CcTest::isolate());
+ v8::Debug::SetDebugEventListener(CcTest::isolate(), nullptr);
+ CcTest::isolate()->TerminateExecution();
}
@@ -7253,6 +7636,7 @@ TEST(DebugBreakInline) {
i::FLAG_allow_natives_syntax = true;
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ v8::Local<v8::Context> context = env.context();
const char* source =
"function debug(b) { \n"
" if (b) debugger; \n"
@@ -7267,10 +7651,11 @@ TEST(DebugBreakInline) {
"g(false); \n"
"%OptimizeFunctionOnNextCall(g); \n"
"g(true);";
- v8::Debug::SetDebugEventListener(DebugBreakInlineListener);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugBreakInlineListener);
inline_script =
- v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), source));
- inline_script->Run();
+ v8::Script::Compile(context, v8_str(env->GetIsolate(), source))
+ .ToLocalChecked();
+ inline_script->Run(context).ToLocalChecked();
}
@@ -7301,7 +7686,7 @@ TEST(Regress131642) {
// on the stack.
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugEventStepNext);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugEventStepNext);
// We step through the first script. It exits through an exception. We run
// this inside a new frame to record a different FP than the second script
@@ -7313,7 +7698,7 @@ TEST(Regress131642) {
const char* script_2 = "[0].forEach(function() { });";
CompileRun(script_2);
- v8::Debug::SetDebugEventListener(NULL);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
}
@@ -7335,15 +7720,15 @@ TEST(DebuggerCreatesContextIffActive) {
v8::HandleScope scope(env->GetIsolate());
CHECK_EQ(1, v8::internal::CountNativeContexts());
- v8::Debug::SetDebugEventListener(NULL);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
CompileRun("debugger;");
CHECK_EQ(1, v8::internal::CountNativeContexts());
- v8::Debug::SetDebugEventListener(NopListener);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), NopListener);
CompileRun("debugger;");
CHECK_EQ(2, v8::internal::CountNativeContexts());
- v8::Debug::SetDebugEventListener(NULL);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
}
@@ -7373,7 +7758,7 @@ TEST(PrecompiledFunction) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
env.ExposeDebug();
- v8::Debug::SetDebugEventListener(DebugBreakInlineListener);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), DebugBreakInlineListener);
v8::Local<v8::Function> break_here =
CompileFunction(&env, "function break_here(){}", "break_here");
@@ -7395,8 +7780,8 @@ TEST(PrecompiledFunction) {
v8::String::Utf8Value utf8(result);
CHECK_EQ(0, strcmp("bar", *utf8));
- v8::Debug::SetDebugEventListener(NULL);
- CheckDebuggerUnloaded();
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded(env->GetIsolate());
}
@@ -7414,12 +7799,16 @@ static void AddDebugBreak(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(DebugBreakStackTrace) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::Debug::SetDebugEventListener(DebugBreakStackTraceListener);
- v8::Handle<v8::FunctionTemplate> add_debug_break_template =
+ v8::Debug::SetDebugEventListener(env->GetIsolate(),
+ DebugBreakStackTraceListener);
+ v8::Local<v8::Context> context = env.context();
+ v8::Local<v8::FunctionTemplate> add_debug_break_template =
v8::FunctionTemplate::New(env->GetIsolate(), AddDebugBreak);
- v8::Handle<v8::Function> add_debug_break =
- add_debug_break_template->GetFunction();
- env->Global()->Set(v8_str("add_debug_break"), add_debug_break);
+ v8::Local<v8::Function> add_debug_break =
+ add_debug_break_template->GetFunction(context).ToLocalChecked();
+ CHECK(env->Global()
+ ->Set(context, v8_str("add_debug_break"), add_debug_break)
+ .FromJust());
CompileRun("(function loop() {"
" for (var j = 0; j < 1000; j++) {"
@@ -7453,7 +7842,7 @@ class TerminationThread : public v8::base::Thread {
virtual void Run() {
terminate_requested_semaphore.Wait();
- v8::V8::TerminateExecution(isolate_);
+ isolate_->TerminateExecution();
terminate_fired_semaphore.Signal();
}
@@ -7466,7 +7855,7 @@ TEST(DebugBreakOffThreadTerminate) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Debug::SetDebugEventListener(DebugBreakTriggerTerminate);
+ v8::Debug::SetDebugEventListener(isolate, DebugBreakTriggerTerminate);
TerminationThread terminator(isolate);
terminator.Start();
v8::TryCatch try_catch(env->GetIsolate());
@@ -7495,19 +7884,23 @@ TEST(DebugPromiseInterceptedByTryCatch) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Debug::SetDebugEventListener(&DebugEventExpectNoException);
+ v8::Debug::SetDebugEventListener(isolate, &DebugEventExpectNoException);
+ v8::Local<v8::Context> context = env.context();
ChangeBreakOnException(false, true);
- v8::Handle<v8::FunctionTemplate> fun =
+ v8::Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(isolate, TryCatchWrappedThrowCallback);
- env->Global()->Set(v8_str("fun"), fun->GetFunction());
+ CHECK(env->Global()
+ ->Set(context, v8_str("fun"),
+ fun->GetFunction(context).ToLocalChecked())
+ .FromJust());
CompileRun("var p = new Promise(function(res, rej) { fun(); res(); });");
CompileRun(
"var r;"
"p.chain(function() { r = 'resolved'; },"
" function() { r = 'rejected'; });");
- CHECK(CompileRun("r")->Equals(v8_str("resolved")));
+ CHECK(CompileRun("r")->Equals(context, v8_str("resolved")).FromJust());
}
@@ -7530,36 +7923,46 @@ TEST(DebugPromiseRejectedByCallback) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Debug::SetDebugEventListener(&DebugEventCountException);
+ v8::Debug::SetDebugEventListener(isolate, &DebugEventCountException);
+ v8::Local<v8::Context> context = env.context();
ChangeBreakOnException(false, true);
exception_event_counter = 0;
- v8::Handle<v8::FunctionTemplate> fun =
+ v8::Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(isolate, ThrowCallback);
- env->Global()->Set(v8_str("fun"), fun->GetFunction());
+ CHECK(env->Global()
+ ->Set(context, v8_str("fun"),
+ fun->GetFunction(context).ToLocalChecked())
+ .FromJust());
CompileRun("var p = new Promise(function(res, rej) { fun(); res(); });");
CompileRun(
"var r;"
"p.chain(function() { r = 'resolved'; },"
" function(e) { r = 'rejected' + e; });");
- CHECK(CompileRun("r")->Equals(v8_str("rejectedrejection")));
+ CHECK(
+ CompileRun("r")->Equals(context, v8_str("rejectedrejection")).FromJust());
CHECK_EQ(1, exception_event_counter);
}
TEST(DebugBreakOnExceptionInObserveCallback) {
+ i::FLAG_harmony_object_observe = true;
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Debug::SetDebugEventListener(&DebugEventCountException);
+ v8::Debug::SetDebugEventListener(isolate, &DebugEventCountException);
+ v8::Local<v8::Context> context = env.context();
// Break on uncaught exception
ChangeBreakOnException(false, true);
exception_event_counter = 0;
- v8::Handle<v8::FunctionTemplate> fun =
+ v8::Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(isolate, ThrowCallback);
- env->Global()->Set(v8_str("fun"), fun->GetFunction());
+ CHECK(env->Global()
+ ->Set(context, v8_str("fun"),
+ fun->GetFunction(context).ToLocalChecked())
+ .FromJust());
CompileRun(
"var obj = {};"
@@ -7569,7 +7972,7 @@ TEST(DebugBreakOnExceptionInObserveCallback) {
" throw Error('foo');"
"});"
"obj.prop = 1");
- CHECK(CompileRun("callbackRan")->BooleanValue());
+ CHECK(CompileRun("callbackRan")->BooleanValue(context).FromJust());
CHECK_EQ(1, exception_event_counter);
}
@@ -7612,7 +8015,7 @@ TEST(DebugBreakInLexicalScopes) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
- v8::Debug::SetDebugEventListener(DebugHarmonyScopingListener);
+ v8::Debug::SetDebugEventListener(isolate, DebugHarmonyScopingListener);
CompileRun(
"'use strict'; \n"
@@ -7650,6 +8053,6 @@ static void NoInterruptsOnDebugEvent(
TEST(NoInterruptsInDebugListener) {
DebugLocalContext env;
- v8::Debug::SetDebugEventListener(NoInterruptsOnDebugEvent);
+ v8::Debug::SetDebugEventListener(env->GetIsolate(), NoInterruptsOnDebugEvent);
CompileRun("void(0);");
}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 347ca9a6bc..4e9595258a 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -60,25 +60,24 @@ class DeclarationContext {
}
}
- void Check(const char* source,
- int get, int set, int has,
+ void Check(const char* source, int get, int set, int has,
Expectations expectations,
- v8::Handle<Value> value = Local<Value>());
+ v8::Local<Value> value = Local<Value>());
int get_count() const { return get_count_; }
int set_count() const { return set_count_; }
int query_count() const { return query_count_; }
protected:
- virtual v8::Handle<Value> Get(Local<Name> key);
- virtual v8::Handle<Value> Set(Local<Name> key, Local<Value> value);
- virtual v8::Handle<Integer> Query(Local<Name> key);
+ virtual v8::Local<Value> Get(Local<Name> key);
+ virtual v8::Local<Value> Set(Local<Name> key, Local<Value> value);
+ virtual v8::Local<Integer> Query(Local<Name> key);
void InitializeIfNeeded();
// Perform optional initialization steps on the context after it has
// been created. Defaults to none but may be overwritten.
- virtual void PostInitializeContext(Handle<Context> context) {}
+ virtual void PostInitializeContext(Local<Context> context) {}
// Get the holder for the interceptor. Default to the instance template
// but may be overwritten.
@@ -138,10 +137,9 @@ void DeclarationContext::InitializeIfNeeded() {
}
-void DeclarationContext::Check(const char* source,
- int get, int set, int query,
+void DeclarationContext::Check(const char* source, int get, int set, int query,
Expectations expectations,
- v8::Handle<Value> value) {
+ v8::Local<Value> value) {
InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now
// to avoid that.
@@ -149,27 +147,30 @@ void DeclarationContext::Check(const char* source,
HandleScope scope(CcTest::isolate());
TryCatch catcher(CcTest::isolate());
catcher.SetVerbose(true);
- Local<Script> script =
- Script::Compile(String::NewFromUtf8(CcTest::isolate(), source));
+ Local<Context> context = CcTest::isolate()->GetCurrentContext();
+ MaybeLocal<Script> script = Script::Compile(
+ context,
+ String::NewFromUtf8(CcTest::isolate(), source, v8::NewStringType::kNormal)
+ .ToLocalChecked());
if (expectations == EXPECT_ERROR) {
CHECK(script.IsEmpty());
return;
}
CHECK(!script.IsEmpty());
- Local<Value> result = script->Run();
+ MaybeLocal<Value> result = script.ToLocalChecked()->Run(context);
CHECK_EQ(get, get_count());
CHECK_EQ(set, set_count());
CHECK_EQ(query, query_count());
if (expectations == EXPECT_RESULT) {
CHECK(!catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK(value->Equals(result));
+ CHECK(value->Equals(context, result.ToLocalChecked()).FromJust());
}
} else {
CHECK(expectations == EXPECT_EXCEPTION);
CHECK(catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK(value->Equals(catcher.Exception()));
+ CHECK(value->Equals(context, catcher.Exception()).FromJust());
}
}
// Clean slate for the next test.
@@ -208,24 +209,25 @@ DeclarationContext* DeclarationContext::GetInstance(Local<Value> data) {
}
-v8::Handle<Value> DeclarationContext::Get(Local<Name> key) {
- return v8::Handle<Value>();
+v8::Local<Value> DeclarationContext::Get(Local<Name> key) {
+ return v8::Local<Value>();
}
-v8::Handle<Value> DeclarationContext::Set(Local<Name> key, Local<Value> value) {
- return v8::Handle<Value>();
+v8::Local<Value> DeclarationContext::Set(Local<Name> key, Local<Value> value) {
+ return v8::Local<Value>();
}
-v8::Handle<Integer> DeclarationContext::Query(Local<Name> key) {
- return v8::Handle<Integer>();
+v8::Local<Integer> DeclarationContext::Query(Local<Name> key) {
+ return v8::Local<Integer>();
}
// Test global declaration of a property the interceptor doesn't know
// about and doesn't handle.
TEST(Unknown) {
+ i::FLAG_legacy_const = true;
HandleScope scope(CcTest::isolate());
v8::V8::Initialize();
@@ -268,13 +270,14 @@ TEST(Unknown) {
class AbsentPropertyContext: public DeclarationContext {
protected:
- virtual v8::Handle<Integer> Query(Local<Name> key) {
- return v8::Handle<Integer>();
+ virtual v8::Local<Integer> Query(Local<Name> key) {
+ return v8::Local<Integer>();
}
};
TEST(Absent) {
+ i::FLAG_legacy_const = true;
v8::Isolate* isolate = CcTest::isolate();
v8::V8::Initialize();
HandleScope scope(isolate);
@@ -332,13 +335,13 @@ class AppearingPropertyContext: public DeclarationContext {
AppearingPropertyContext() : state_(DECLARE) { }
protected:
- virtual v8::Handle<Integer> Query(Local<Name> key) {
+ virtual v8::Local<Integer> Query(Local<Name> key) {
switch (state_) {
case DECLARE:
// Force declaration by returning that the
// property is absent.
state_ = INITIALIZE_IF_ASSIGN;
- return Handle<Integer>();
+ return Local<Integer>();
case INITIALIZE_IF_ASSIGN:
// Return that the property is present so we only get the
// setter called when initializing with a value.
@@ -349,7 +352,7 @@ class AppearingPropertyContext: public DeclarationContext {
break;
}
// Do the lookup in the object.
- return v8::Handle<Integer>();
+ return v8::Local<Integer>();
}
private:
@@ -358,6 +361,7 @@ class AppearingPropertyContext: public DeclarationContext {
TEST(Appearing) {
+ i::FLAG_legacy_const = true;
v8::V8::Initialize();
HandleScope scope(CcTest::isolate());
@@ -401,7 +405,7 @@ class ExistsInPrototypeContext: public DeclarationContext {
public:
ExistsInPrototypeContext() { InitializeIfNeeded(); }
protected:
- virtual v8::Handle<Integer> Query(Local<Name> key) {
+ virtual v8::Local<Integer> Query(Local<Name> key) {
// Let it seem that the property exists in the prototype object.
return Integer::New(isolate(), v8::None);
}
@@ -414,6 +418,7 @@ class ExistsInPrototypeContext: public DeclarationContext {
TEST(ExistsInPrototype) {
+ i::FLAG_legacy_const = true;
HandleScope scope(CcTest::isolate());
// Sanity check to make sure that the holder of the interceptor
@@ -460,9 +465,9 @@ TEST(ExistsInPrototype) {
class AbsentInPrototypeContext: public DeclarationContext {
protected:
- virtual v8::Handle<Integer> Query(Local<Name> key) {
+ virtual v8::Local<Integer> Query(Local<Name> key) {
// Let it seem that the property is absent in the prototype object.
- return Handle<Integer>();
+ return Local<Integer>();
}
// Use the prototype as the holder for the interceptors.
@@ -495,18 +500,21 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
}
protected:
- virtual v8::Handle<Integer> Query(Local<Name> key) {
+ virtual v8::Local<Integer> Query(Local<Name> key) {
// Let it seem that the property exists in the hidden prototype object.
return Integer::New(isolate(), v8::None);
}
// Install the hidden prototype after the global object has been created.
- virtual void PostInitializeContext(Handle<Context> context) {
+ virtual void PostInitializeContext(Local<Context> context) {
Local<Object> global_object = context->Global();
- Local<Object> hidden_proto = hidden_proto_->GetFunction()->NewInstance();
+ Local<Object> hidden_proto = hidden_proto_->GetFunction(context)
+ .ToLocalChecked()
+ ->NewInstance(context)
+ .ToLocalChecked();
Local<Object> inner_global =
Local<Object>::Cast(global_object->GetPrototype());
- inner_global->SetPrototype(hidden_proto);
+ inner_global->SetPrototype(context, hidden_proto).FromJust();
}
// Use the hidden prototype as the holder for the interceptors.
@@ -520,6 +528,7 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
TEST(ExistsInHiddenPrototype) {
+ i::FLAG_legacy_const = true;
HandleScope scope(CcTest::isolate());
{ ExistsInHiddenPrototypeContext context;
@@ -567,30 +576,31 @@ class SimpleContext {
context_->Exit();
}
- void Check(const char* source,
- Expectations expectations,
- v8::Handle<Value> value = Local<Value>()) {
+ void Check(const char* source, Expectations expectations,
+ v8::Local<Value> value = Local<Value>()) {
HandleScope scope(context_->GetIsolate());
TryCatch catcher(context_->GetIsolate());
catcher.SetVerbose(true);
- Local<Script> script =
- Script::Compile(String::NewFromUtf8(context_->GetIsolate(), source));
+ MaybeLocal<Script> script = Script::Compile(
+ context_, String::NewFromUtf8(context_->GetIsolate(), source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked());
if (expectations == EXPECT_ERROR) {
CHECK(script.IsEmpty());
return;
}
CHECK(!script.IsEmpty());
- Local<Value> result = script->Run();
+ MaybeLocal<Value> result = script.ToLocalChecked()->Run(context_);
if (expectations == EXPECT_RESULT) {
CHECK(!catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK(value->Equals(result));
+ CHECK(value->Equals(context_, result.ToLocalChecked()).FromJust());
}
} else {
CHECK(expectations == EXPECT_EXCEPTION);
CHECK(catcher.HasCaught());
if (!value.IsEmpty()) {
- CHECK(value->Equals(catcher.Exception()));
+ CHECK(value->Equals(context_, catcher.Exception()).FromJust());
}
}
}
@@ -602,6 +612,7 @@ class SimpleContext {
TEST(CrossScriptReferences) {
+ i::FLAG_legacy_const = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
@@ -901,10 +912,14 @@ TEST(CrossScriptDynamicLookup) {
{
SimpleContext context;
- Local<String> undefined_string = String::NewFromUtf8(
- CcTest::isolate(), "undefined", String::kInternalizedString);
- Local<String> number_string = String::NewFromUtf8(
- CcTest::isolate(), "number", String::kInternalizedString);
+ Local<String> undefined_string =
+ String::NewFromUtf8(CcTest::isolate(), "undefined",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> number_string =
+ String::NewFromUtf8(CcTest::isolate(), "number",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
context.Check(
"function f(o) { with(o) { return x; } }"
@@ -974,10 +989,14 @@ TEST(CrossScriptStaticLookupUndeclared) {
{
SimpleContext context;
- Local<String> undefined_string = String::NewFromUtf8(
- CcTest::isolate(), "undefined", String::kInternalizedString);
- Local<String> number_string = String::NewFromUtf8(
- CcTest::isolate(), "number", String::kInternalizedString);
+ Local<String> undefined_string =
+ String::NewFromUtf8(CcTest::isolate(), "undefined",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> number_string =
+ String::NewFromUtf8(CcTest::isolate(), "number",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
context.Check(
"function f(o) { return x; }"
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index e6604a379e..7ba16b59f8 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -113,10 +113,10 @@ static void NonIncrementalGC(i::Isolate* isolate) {
}
-static Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
+static Handle<JSFunction> GetJSFunction(v8::Local<v8::Context> context,
const char* property_name) {
- v8::Local<v8::Function> fun =
- v8::Local<v8::Function>::Cast(obj->Get(v8_str(property_name)));
+ v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
+ context->Global()->Get(context, v8_str(property_name)).ToLocalChecked());
return i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*fun));
}
@@ -137,8 +137,12 @@ TEST(DeoptimizeSimple) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
// Test lazy deoptimization of a simple function. Call the function after the
@@ -153,8 +157,12 @@ TEST(DeoptimizeSimple) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -175,8 +183,12 @@ TEST(DeoptimizeSimpleWithArguments) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
// Test lazy deoptimization of a simple function with some arguments. Call the
@@ -192,8 +204,12 @@ TEST(DeoptimizeSimpleWithArguments) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -215,9 +231,17 @@ TEST(DeoptimizeSimpleNested) {
"result = f(1, 2, 3);");
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(6, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
}
@@ -240,12 +264,22 @@ TEST(DeoptimizeRecursive) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(11, env->Global()
+ ->Get(env.local(), v8_str("calls"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::NewFromUtf8(CcTest::isolate(), "f")));
+ env->Global()
+ ->Get(env.local(), v8_str(CcTest::isolate(), "f"))
+ .ToLocalChecked());
CHECK(!fun.IsEmpty());
}
@@ -272,8 +306,16 @@ TEST(DeoptimizeMultiple) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(14, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -293,8 +335,15 @@ TEST(DeoptimizeConstructor) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->IsTrue());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
{
@@ -310,8 +359,16 @@ TEST(DeoptimizeConstructor) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(3, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -339,8 +396,16 @@ TEST(DeoptimizeConstructorMultiple) {
}
NonIncrementalGC(CcTest::i_isolate());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(14, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
}
@@ -382,7 +447,7 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
CompileRun(f_source);
CompileRun("f('a+', new X());");
CHECK(!i_isolate->use_crankshaft() ||
- GetJSFunction(env->Global(), "f")->IsOptimized());
+ GetJSFunction(env.local(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
CompileRun(
@@ -391,9 +456,14 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
}
NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ v8::Local<v8::Value> result =
+ env->Global()->Get(env.local(), v8_str("result")).ToLocalChecked();
CHECK(result->IsString());
v8::String::Utf8Value utf8(result);
CHECK_EQ(0, strcmp("a+an X", *utf8));
@@ -439,13 +509,13 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
CompileRun(f_source);
CompileRun("f(7, new X());");
CHECK(!i_isolate->use_crankshaft() ||
- GetJSFunction((*env)->Global(), "f")->IsOptimized());
+ GetJSFunction((*env).local(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
CompileRun("deopt = true;"
"var result = f(7, new X());");
NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
+ CHECK(!GetJSFunction((*env).local(), "f")->IsOptimized());
}
@@ -462,8 +532,16 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationADD) {
TestDeoptimizeBinaryOpHelper(&env, "+");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(15, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -484,8 +562,16 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationSUB) {
TestDeoptimizeBinaryOpHelper(&env, "-");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(-1, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -506,8 +592,16 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationMUL) {
TestDeoptimizeBinaryOpHelper(&env, "*");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(56, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -528,8 +622,16 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationDIV) {
TestDeoptimizeBinaryOpHelper(&env, "/");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(0, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -550,8 +652,16 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationMOD) {
TestDeoptimizeBinaryOpHelper(&env, "%");
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(7, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -596,7 +706,7 @@ UNINITIALIZED_TEST(DeoptimizeCompare) {
CompileRun(f_source);
CompileRun("f('a', new X());");
CHECK(!i_isolate->use_crankshaft() ||
- GetJSFunction(env->Global(), "f")->IsOptimized());
+ GetJSFunction(env.local(), "f")->IsOptimized());
// Call f and force deoptimization while processing the comparison.
CompileRun(
@@ -605,9 +715,17 @@ UNINITIALIZED_TEST(DeoptimizeCompare) {
}
NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
+ CHECK(!GetJSFunction(env.local(), "f")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(true, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->BooleanValue(env.local())
+ .FromJust());
CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
}
isolate->Exit();
@@ -679,10 +797,10 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreIC) {
CompileRun("f2(new X(), 'z');");
CompileRun("g2(new X(), 'z');");
if (i_isolate->use_crankshaft()) {
- CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "f1")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "g1")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "f2")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "g2")->IsOptimized());
}
// Call functions and force deoptimization while processing the ics.
@@ -695,12 +813,20 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreIC) {
}
NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
- CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK(!GetJSFunction(env.local(), "f1")->IsOptimized());
+ CHECK(!GetJSFunction(env.local(), "g1")->IsOptimized());
+ CHECK(!GetJSFunction(env.local(), "f2")->IsOptimized());
+ CHECK(!GetJSFunction(env.local(), "g2")->IsOptimized());
+ CHECK_EQ(4, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(13, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
isolate->Exit();
isolate->Dispose();
@@ -775,10 +901,10 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreICNested) {
CompileRun("f2(new X(), 'z');");
CompileRun("g2(new X(), 'z');");
if (i_isolate->use_crankshaft()) {
- CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "f1")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "g1")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "f2")->IsOptimized());
+ CHECK(GetJSFunction(env.local(), "g2")->IsOptimized());
}
// Call functions and force deoptimization while processing the ics.
@@ -788,12 +914,20 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreICNested) {
}
NonIncrementalGC(i_isolate);
- CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
- CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
- CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
- CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+ CHECK(!GetJSFunction(env.local(), "f1")->IsOptimized());
+ CHECK(!GetJSFunction(env.local(), "g1")->IsOptimized());
+ CHECK(!GetJSFunction(env.local(), "f2")->IsOptimized());
+ CHECK(!GetJSFunction(env.local(), "g2")->IsOptimized());
+ CHECK_EQ(1, env->Global()
+ ->Get(env.local(), v8_str("count"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK_EQ(13, env->Global()
+ ->Get(env.local(), v8_str("result"))
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
}
isolate->Exit();
isolate->Dispose();
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 68c35f958e..0d4edf5dd4 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -35,6 +35,7 @@
#include "src/global-handles.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
+#include "test/cctest/heap/utils-inl.h"
using namespace v8::internal;
@@ -227,4 +228,18 @@ TEST(ObjectHashTableCausesGC) {
}
#endif
+TEST(SetRequiresCopyOnCapacityChange) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Handle<NameDictionary> dict = NameDictionary::New(isolate, 0, TENURED);
+ dict->SetRequiresCopyOnCapacityChange();
+ Handle<Name> key = isolate->factory()->InternalizeString(
+ v8::Utils::OpenHandle(*v8_str("key")));
+ Handle<Object> value = handle(Smi::FromInt(0), isolate);
+ Handle<NameDictionary> new_dict =
+ NameDictionary::Add(dict, key, value, PropertyDetails::Empty());
+ CHECK_NE(*dict, *new_dict);
+}
+
} // namespace
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index 357bfc5ad9..beca93ede2 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -45,20 +45,31 @@ using namespace v8::internal;
#define EXP_SIZE (256)
#define INSTR_SIZE (1024)
-#define SET_UP_CLASS(ASMCLASS) \
+#define SET_UP_MASM() \
+ InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
+ uint32_t encoding = 0; \
+ MacroAssembler* assm = new MacroAssembler( \
+ isolate, buf, INSTR_SIZE, v8::internal::CodeObjectRequired::kYes); \
+ Decoder<DispatchingDecoderVisitor>* decoder = \
+ new Decoder<DispatchingDecoderVisitor>(); \
+ DisassemblingDecoder* disasm = new DisassemblingDecoder(); \
+ decoder->AppendVisitor(disasm)
+
+#define SET_UP_ASM() \
InitializeVM(); \
- Isolate* isolate = Isolate::Current(); \
+ Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
byte* buf = static_cast<byte*>(malloc(INSTR_SIZE)); \
uint32_t encoding = 0; \
- ASMCLASS* assm = new ASMCLASS(isolate, buf, INSTR_SIZE); \
+ Assembler* assm = new Assembler(isolate, buf, INSTR_SIZE); \
Decoder<DispatchingDecoderVisitor>* decoder = \
new Decoder<DispatchingDecoderVisitor>(); \
DisassemblingDecoder* disasm = new DisassemblingDecoder(); \
decoder->AppendVisitor(disasm)
-#define SET_UP() SET_UP_CLASS(Assembler)
-
#define COMPARE(ASM, EXP) \
assm->Reset(); \
assm->ASM; \
@@ -102,7 +113,7 @@ static void InitializeVM() {
TEST_(bootstrap) {
- SET_UP();
+ SET_UP_ASM();
// Instructions generated by C compiler, disassembled by objdump, and
// reformatted to suit our disassembly style.
@@ -132,7 +143,7 @@ TEST_(bootstrap) {
TEST_(mov_mvn) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
COMPARE(Mov(w0, Operand(0x1234)), "movz w0, #0x1234");
COMPARE(Mov(x1, Operand(0x1234)), "movz x1, #0x1234");
@@ -166,7 +177,7 @@ TEST_(mov_mvn) {
TEST_(move_immediate) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(movz(w0, 0x1234), "movz w0, #0x1234");
COMPARE(movz(x1, 0xabcd0000), "movz x1, #0xabcd0000");
@@ -203,7 +214,7 @@ TEST_(move_immediate) {
TEST(move_immediate_2) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
// Move instructions expected for certain immediates. This is really a macro
// assembler test, to ensure it generates immediates efficiently.
@@ -259,7 +270,7 @@ TEST(move_immediate_2) {
TEST_(add_immediate) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(add(w0, w1, Operand(0xff)), "add w0, w1, #0xff (255)");
COMPARE(add(x2, x3, Operand(0x3ff)), "add x2, x3, #0x3ff (1023)");
@@ -289,7 +300,7 @@ TEST_(add_immediate) {
TEST_(sub_immediate) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(sub(w0, w1, Operand(0xff)), "sub w0, w1, #0xff (255)");
COMPARE(sub(x2, x3, Operand(0x3ff)), "sub x2, x3, #0x3ff (1023)");
@@ -317,7 +328,7 @@ TEST_(sub_immediate) {
TEST_(add_shifted) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(add(w0, w1, Operand(w2)), "add w0, w1, w2");
COMPARE(add(x3, x4, Operand(x5)), "add x3, x4, x5");
@@ -343,7 +354,7 @@ TEST_(add_shifted) {
TEST_(sub_shifted) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(sub(w0, w1, Operand(w2)), "sub w0, w1, w2");
COMPARE(sub(x3, x4, Operand(x5)), "sub x3, x4, x5");
@@ -373,7 +384,7 @@ TEST_(sub_shifted) {
TEST_(add_extended) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(add(w0, w1, Operand(w2, UXTB)), "add w0, w1, w2, uxtb");
COMPARE(adds(x3, x4, Operand(w5, UXTB, 1)), "adds x3, x4, w5, uxtb #1");
@@ -399,7 +410,7 @@ TEST_(add_extended) {
TEST_(sub_extended) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(sub(w0, w1, Operand(w2, UXTB)), "sub w0, w1, w2, uxtb");
COMPARE(subs(x3, x4, Operand(w5, UXTB, 1)), "subs x3, x4, w5, uxtb #1");
@@ -425,7 +436,7 @@ TEST_(sub_extended) {
TEST_(adc_subc_ngc) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(adc(w0, w1, Operand(w2)), "adc w0, w1, w2");
COMPARE(adc(x3, x4, Operand(x5)), "adc x3, x4, x5");
@@ -445,7 +456,7 @@ TEST_(adc_subc_ngc) {
TEST_(mul_and_div) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(mul(w0, w1, w2), "mul w0, w1, w2");
COMPARE(mul(x3, x4, x5), "mul x3, x4, x5");
@@ -478,7 +489,7 @@ TEST_(mul_and_div) {
TEST(maddl_msubl) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(smaddl(x0, w1, w2, x3), "smaddl x0, w1, w2, x3");
COMPARE(smaddl(x25, w21, w22, x16), "smaddl x25, w21, w22, x16");
@@ -495,7 +506,7 @@ TEST(maddl_msubl) {
TEST_(dp_1_source) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(rbit(w0, w1), "rbit w0, w1");
COMPARE(rbit(x2, x3), "rbit x2, x3");
@@ -514,7 +525,7 @@ TEST_(dp_1_source) {
TEST_(bitfield) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(sxtb(w0, w1), "sxtb w0, w1");
COMPARE(sxtb(x2, x3), "sxtb x2, w3");
@@ -556,7 +567,7 @@ TEST_(bitfield) {
TEST_(extract) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(extr(w0, w1, w2, 0), "extr w0, w1, w2, #0");
COMPARE(extr(x3, x4, x5, 1), "extr x3, x4, x5, #1");
@@ -570,7 +581,7 @@ TEST_(extract) {
TEST_(logical_immediate) {
- SET_UP();
+ SET_UP_ASM();
#define RESULT_SIZE (256)
char result[RESULT_SIZE];
@@ -696,7 +707,7 @@ TEST_(logical_immediate) {
TEST_(logical_shifted) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(and_(w0, w1, Operand(w2)), "and w0, w1, w2");
COMPARE(and_(x3, x4, Operand(x5, LSL, 1)), "and x3, x4, x5, lsl #1");
@@ -766,7 +777,7 @@ TEST_(logical_shifted) {
TEST_(dp_2_source) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(lslv(w0, w1, w2), "lsl w0, w1, w2");
COMPARE(lslv(x3, x4, x5), "lsl x3, x4, x5");
@@ -782,7 +793,7 @@ TEST_(dp_2_source) {
TEST_(adr) {
- SET_UP();
+ SET_UP_ASM();
COMPARE_PREFIX(adr(x0, 0), "adr x0, #+0x0");
COMPARE_PREFIX(adr(x1, 1), "adr x1, #+0x1");
@@ -798,7 +809,7 @@ TEST_(adr) {
TEST_(branch) {
- SET_UP();
+ SET_UP_ASM();
#define INST_OFF(x) ((x) >> kInstructionSizeLog2)
COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
@@ -835,7 +846,7 @@ TEST_(branch) {
TEST_(load_store) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldr(w0, MemOperand(x1)), "ldr w0, [x1]");
COMPARE(ldr(w2, MemOperand(x3, 4)), "ldr w2, [x3, #4]");
@@ -892,7 +903,7 @@ TEST_(load_store) {
TEST_(load_store_regoffset) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldr(w0, MemOperand(x1, w2, UXTW)), "ldr w0, [x1, w2, uxtw]");
COMPARE(ldr(w3, MemOperand(x4, w5, UXTW, 2)), "ldr w3, [x4, w5, uxtw #2]");
@@ -977,7 +988,7 @@ TEST_(load_store_regoffset) {
TEST_(load_store_byte) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldrb(w0, MemOperand(x1)), "ldrb w0, [x1]");
COMPARE(ldrb(x2, MemOperand(x3)), "ldrb w2, [x3]");
@@ -1009,7 +1020,7 @@ TEST_(load_store_byte) {
TEST_(load_store_half) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldrh(w0, MemOperand(x1)), "ldrh w0, [x1]");
COMPARE(ldrh(x2, MemOperand(x3)), "ldrh w2, [x3]");
@@ -1045,7 +1056,7 @@ TEST_(load_store_half) {
TEST_(load_store_fp) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldr(s0, MemOperand(x1)), "ldr s0, [x1]");
COMPARE(ldr(s2, MemOperand(x3, 4)), "ldr s2, [x3, #4]");
@@ -1097,7 +1108,7 @@ TEST_(load_store_fp) {
TEST_(load_store_unscaled) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldr(w0, MemOperand(x1, 1)), "ldur w0, [x1, #1]");
COMPARE(ldr(w2, MemOperand(x3, -1)), "ldur w2, [x3, #-1]");
@@ -1130,7 +1141,7 @@ TEST_(load_store_unscaled) {
TEST_(load_store_pair) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ldp(w0, w1, MemOperand(x2)), "ldp w0, w1, [x2]");
COMPARE(ldp(x3, x4, MemOperand(x5)), "ldp x3, x4, [x5]");
@@ -1251,7 +1262,7 @@ TEST_(load_store_pair) {
#if 0 // TODO(all): enable.
TEST_(load_literal) {
- SET_UP();
+ SET_UP_ASM();
COMPARE_PREFIX(ldr(x10, 0x1234567890abcdefUL), "ldr x10, pc+8");
COMPARE_PREFIX(ldr(w20, 0xfedcba09), "ldr w20, pc+8");
@@ -1263,7 +1274,7 @@ TEST_(load_literal) {
#endif
TEST_(cond_select) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(csel(w0, w1, w2, eq), "csel w0, w1, w2, eq");
COMPARE(csel(x3, x4, x5, ne), "csel x3, x4, x5, ne");
@@ -1298,7 +1309,7 @@ TEST_(cond_select) {
TEST(cond_select_macro) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
COMPARE(Csel(w0, w1, -1, eq), "csinv w0, w1, wzr, eq");
COMPARE(Csel(w2, w3, 0, ne), "csel w2, w3, wzr, ne");
@@ -1312,7 +1323,7 @@ TEST(cond_select_macro) {
TEST_(cond_cmp) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(ccmn(w0, w1, NZCVFlag, eq), "ccmn w0, w1, #NZCV, eq");
COMPARE(ccmn(x2, x3, NZCFlag, ne), "ccmn x2, x3, #NZCv, ne");
@@ -1330,7 +1341,7 @@ TEST_(cond_cmp) {
TEST_(cond_cmp_macro) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
COMPARE(Ccmp(w0, -1, VFlag, hi), "ccmn w0, #1, #nzcV, hi");
COMPARE(Ccmp(x1, -31, CFlag, ge), "ccmn x1, #31, #nzCv, ge");
@@ -1342,7 +1353,7 @@ TEST_(cond_cmp_macro) {
TEST_(fmov_imm) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fmov(s0, 1.0f), "fmov s0, #0x70 (1.0000)");
COMPARE(fmov(s31, -13.0f), "fmov s31, #0xaa (-13.0000)");
@@ -1354,7 +1365,7 @@ TEST_(fmov_imm) {
TEST_(fmov_reg) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fmov(w3, s13), "fmov w3, s13");
COMPARE(fmov(x6, d26), "fmov x6, d26");
@@ -1368,7 +1379,7 @@ TEST_(fmov_reg) {
TEST_(fp_dp1) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fabs(s0, s1), "fabs s0, s1");
COMPARE(fabs(s31, s30), "fabs s31, s30");
@@ -1406,7 +1417,7 @@ TEST_(fp_dp1) {
TEST_(fp_dp2) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fadd(s0, s1, s2), "fadd s0, s1, s2");
COMPARE(fadd(d3, d4, d5), "fadd d3, d4, d5");
@@ -1430,7 +1441,7 @@ TEST_(fp_dp2) {
TEST(fp_dp3) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fmadd(s7, s8, s9, s10), "fmadd s7, s8, s9, s10");
COMPARE(fmadd(d10, d11, d12, d10), "fmadd d10, d11, d12, d10");
@@ -1447,7 +1458,7 @@ TEST(fp_dp3) {
TEST_(fp_compare) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fcmp(s0, s1), "fcmp s0, s1");
COMPARE(fcmp(s31, s30), "fcmp s31, s30");
@@ -1461,7 +1472,7 @@ TEST_(fp_compare) {
TEST_(fp_cond_compare) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fccmp(s0, s1, NoFlag, eq), "fccmp s0, s1, #nzcv, eq");
COMPARE(fccmp(s2, s3, ZVFlag, ne), "fccmp s2, s3, #nZcV, ne");
@@ -1479,7 +1490,7 @@ TEST_(fp_cond_compare) {
TEST_(fp_select) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fcsel(s0, s1, s2, eq), "fcsel s0, s1, s2, eq")
COMPARE(fcsel(s31, s31, s30, ne), "fcsel s31, s31, s30, ne");
@@ -1493,7 +1504,7 @@ TEST_(fp_select) {
TEST_(fcvt_scvtf_ucvtf) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(fcvtas(w0, s1), "fcvtas w0, s1");
COMPARE(fcvtas(x2, s3), "fcvtas x2, s3");
@@ -1555,7 +1566,7 @@ TEST_(fcvt_scvtf_ucvtf) {
TEST_(system_mrs) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(mrs(x0, NZCV), "mrs x0, nzcv");
COMPARE(mrs(lr, NZCV), "mrs lr, nzcv");
@@ -1566,7 +1577,7 @@ TEST_(system_mrs) {
TEST_(system_msr) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(msr(NZCV, x0), "msr nzcv, x0");
COMPARE(msr(NZCV, x30), "msr nzcv, lr");
@@ -1577,7 +1588,7 @@ TEST_(system_msr) {
TEST_(system_nop) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(nop(), "nop");
@@ -1586,9 +1597,9 @@ TEST_(system_nop) {
TEST_(debug) {
- SET_UP();
+ SET_UP_ASM();
- DCHECK(kImmExceptionIsDebug == 0xdeb0);
+ CHECK(kImmExceptionIsDebug == 0xdeb0);
// All debug codes should produce the same instruction, and the debug code
// can be any uint32_t.
@@ -1605,7 +1616,7 @@ TEST_(debug) {
TEST_(hlt) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(hlt(0), "hlt #0x0");
COMPARE(hlt(1), "hlt #0x1");
@@ -1616,7 +1627,7 @@ TEST_(hlt) {
TEST_(brk) {
- SET_UP();
+ SET_UP_ASM();
COMPARE(brk(0), "brk #0x0");
COMPARE(brk(1), "brk #0x1");
@@ -1627,7 +1638,7 @@ TEST_(brk) {
TEST_(add_sub_negative) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
COMPARE(Add(x10, x0, -42), "sub x10, x0, #0x2a (42)");
COMPARE(Add(x11, x1, -687), "sub x11, x1, #0x2af (687)");
@@ -1658,7 +1669,7 @@ TEST_(add_sub_negative) {
TEST_(logical_immediate_move) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
COMPARE(And(w0, w1, 0), "movz w0, #0x0");
COMPARE(And(x0, x1, 0), "movz x0, #0x0");
@@ -1697,7 +1708,7 @@ TEST_(logical_immediate_move) {
TEST_(barriers) {
- SET_UP_CLASS(MacroAssembler);
+ SET_UP_MASM();
// DMB
COMPARE(Dmb(FullSystem, BarrierAll), "dmb sy");
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 63b2b11aa9..9a7d8ae431 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -565,6 +565,11 @@ TEST(Type0) {
COMPARE(lui(v0, 0xffff),
"3c02ffff lui v0, 0xffff");
+ if (IsMipsArchVariant(kMips32r6)) {
+ COMPARE(aui(a0, a1, 0x1), "3ca40001 aui a0, a1, 0x1");
+ COMPARE(aui(v0, v1, 0xffff), "3c62ffff aui v0, v1, 0xffff");
+ }
+
COMPARE(sll(a0, a1, 0),
"00052000 sll a0, a1, 0");
COMPARE(sll(s0, s1, 8),
@@ -890,27 +895,33 @@ TEST(Type1) {
COMPARE(maxa_s(f3, f4, f5), "460520df maxa.s f3, f4, f5");
}
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ COMPARE(trunc_l_d(f8, f6), "46203209 trunc.l.d f8, f6");
+ COMPARE(trunc_l_s(f8, f6), "46003209 trunc.l.s f8, f6");
+
+ COMPARE(round_l_s(f8, f6), "46003208 round.l.s f8, f6");
+ COMPARE(round_l_d(f8, f6), "46203208 round.l.d f8, f6");
+
+ COMPARE(floor_l_s(f8, f6), "4600320b floor.l.s f8, f6");
+ COMPARE(floor_l_d(f8, f6), "4620320b floor.l.d f8, f6");
+
+ COMPARE(ceil_l_s(f8, f6), "4600320a ceil.l.s f8, f6");
+ COMPARE(ceil_l_d(f8, f6), "4620320a ceil.l.d f8, f6");
+ }
+
COMPARE(trunc_w_d(f8, f6), "4620320d trunc.w.d f8, f6");
COMPARE(trunc_w_s(f8, f6), "4600320d trunc.w.s f8, f6");
COMPARE(round_w_s(f8, f6), "4600320c round.w.s f8, f6");
COMPARE(round_w_d(f8, f6), "4620320c round.w.d f8, f6");
- COMPARE(round_l_s(f8, f6), "46003208 round.l.s f8, f6");
- COMPARE(round_l_d(f8, f6), "46203208 round.l.d f8, f6");
-
COMPARE(floor_w_s(f8, f6), "4600320f floor.w.s f8, f6");
COMPARE(floor_w_d(f8, f6), "4620320f floor.w.d f8, f6");
- COMPARE(floor_l_s(f8, f6), "4600320b floor.l.s f8, f6");
- COMPARE(floor_l_d(f8, f6), "4620320b floor.l.d f8, f6");
-
COMPARE(ceil_w_s(f8, f6), "4600320e ceil.w.s f8, f6");
COMPARE(ceil_w_d(f8, f6), "4620320e ceil.w.d f8, f6");
- COMPARE(ceil_l_s(f8, f6), "4600320a ceil.l.s f8, f6");
- COMPARE(ceil_l_d(f8, f6), "4620320a ceil.l.d f8, f6");
-
COMPARE(sub_s(f10, f8, f6), "46064281 sub.s f10, f8, f6");
COMPARE(sub_d(f10, f8, f6), "46264281 sub.d f10, f8, f6");
@@ -935,9 +946,6 @@ TEST(Type1) {
COMPARE(mov_d(f6, f4), "46202186 mov.d f6, f4");
if (IsMipsArchVariant(kMips32r2)) {
- COMPARE(trunc_l_d(f8, f6), "46203209 trunc.l.d f8, f6");
- COMPARE(trunc_l_s(f8, f6), "46003209 trunc.l.s f8, f6");
-
COMPARE(movz_s(f6, f4, t0), "46082192 movz.s f6, f4, t0");
COMPARE(movz_d(f6, f4, t0), "46282192 movz.d f6, f4, t0");
@@ -1045,23 +1053,28 @@ TEST(CVT_DISSASM) {
SET_UP();
COMPARE(cvt_d_s(f22, f24), "4600c5a1 cvt.d.s f22, f24");
COMPARE(cvt_d_w(f22, f24), "4680c5a1 cvt.d.w f22, f24");
- if (IsMipsArchVariant(kMips32r6) || IsMipsArchVariant(kMips32r2)) {
- COMPARE(cvt_d_l(f22, f24), "46a0c5a1 cvt.d.l f22, f24");
- }
-
- if (IsMipsArchVariant(kMips32r6) || IsMipsArchVariant(kMips32r2)) {
- COMPARE(cvt_l_s(f22, f24), "4600c5a5 cvt.l.s f22, f24");
- COMPARE(cvt_l_d(f22, f24), "4620c5a5 cvt.l.d f22, f24");
- }
COMPARE(cvt_s_d(f22, f24), "4620c5a0 cvt.s.d f22, f24");
COMPARE(cvt_s_w(f22, f24), "4680c5a0 cvt.s.w f22, f24");
- if (IsMipsArchVariant(kMips32r6) || IsMipsArchVariant(kMips32r2)) {
+
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ COMPARE(cvt_d_l(f22, f24), "46a0c5a1 cvt.d.l f22, f24");
+ COMPARE(cvt_l_d(f22, f24), "4620c5a5 cvt.l.d f22, f24");
+
COMPARE(cvt_s_l(f22, f24), "46a0c5a0 cvt.s.l f22, f24");
+ COMPARE(cvt_l_s(f22, f24), "4600c5a5 cvt.l.s f22, f24");
}
- COMPARE(cvt_s_d(f22, f24), "4620c5a0 cvt.s.d f22, f24");
- COMPARE(cvt_s_w(f22, f24), "4680c5a0 cvt.s.w f22, f24");
+ VERIFY_RUN();
+}
+
+TEST(ctc1_cfc1_disasm) {
+ SET_UP();
+ COMPARE(abs_d(f10, f31), "4620fa85 abs.d f10, f31");
+ COMPARE(ceil_w_s(f8, f31), "4600fa0e ceil.w.s f8, f31");
+ COMPARE(ctc1(a0, FCSR), "44c4f800 ctc1 a0, FCSR");
+ COMPARE(cfc1(a0, FCSR), "4444f800 cfc1 a0, FCSR");
VERIFY_RUN();
}
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 7cf6397886..8a1e0e7db0 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -38,12 +38,18 @@
using namespace v8::internal;
+bool prev_instr_compact_branch = false;
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
+ if (prev_instr_compact_branch) {
+ disasm.InstructionDecode(disasm_buffer, pc);
+ pc += 4;
+ }
+
disasm.InstructionDecode(disasm_buffer, pc);
if (strcmp(compare_string, disasm_buffer.start()) != 0) {
@@ -97,8 +103,14 @@ if (failure) { \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
- snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 4 + (offset * 4)); \
+ prev_instr_compact_branch = assm.IsPrevInstrCompactBranch(); \
+ if (prev_instr_compact_branch) { \
+ snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
+ compare_string, progcounter + 8 + (offset * 4)); \
+ } else { \
+ snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
+ compare_string, progcounter + 4 + (offset * 4)); \
+ } \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
@@ -402,6 +414,20 @@ TEST(Type0) {
COMPARE(lui(v0, 0xffff),
"3c02ffff lui v0, 0xffff");
+ if (kArchVariant == (kMips64r6)) {
+ COMPARE(aui(a0, a1, 0x1), "3ca40001 aui a0, a1, 0x1");
+ COMPARE(aui(v0, v1, 0xffff), "3c62ffff aui v0, v1, 0xffff");
+
+ COMPARE(daui(a0, a1, 0x1), "74a40001 daui a0, a1, 0x1");
+ COMPARE(daui(v0, v1, 0xffff), "7462ffff daui v0, v1, 0xffff");
+
+ COMPARE(dahi(a0, 0x1), "04860001 dahi a0, 0x1");
+ COMPARE(dahi(v0, 0xffff), "0446ffff dahi v0, 0xffff");
+
+ COMPARE(dati(a0, 0x1), "049e0001 dati a0, 0x1");
+ COMPARE(dati(v0, 0xffff), "045effff dati v0, 0xffff");
+ }
+
COMPARE(sll(a0, a1, 0),
"00052000 sll a0, a1, 0");
COMPARE(sll(s0, s1, 8),
@@ -756,14 +782,6 @@ TEST(Type0) {
}
if (kArchVariant == kMips64r6) {
- COMPARE_PC_REL_COMPACT(beqzc(a0, 16), "d8800010 beqzc a0, 0x10",
- 16);
- COMPARE_PC_REL_COMPACT(beqzc(a0, 4), "d8800004 beqzc a0, 0x4", 4);
- COMPARE_PC_REL_COMPACT(beqzc(a0, -32),
- "d89fffe0 beqzc a0, 0x1fffe0", -32);
- }
-
- if (kArchVariant == kMips64r6) {
COMPARE(ldpc(v0, 256), "ec580100 ldpc v0, 256");
COMPARE(ldpc(a0, -1), "ec9bffff ldpc a0, -1");
COMPARE(ldpc(a1, 0), "ecb80000 ldpc a1, 0");
@@ -778,11 +796,11 @@ TEST(Type0) {
}
if (kArchVariant == kMips64r6) {
- COMPARE(jialc(a0, -32768), "f8048000 jialc a0, 0x8000");
- COMPARE(jialc(a0, -1), "f804ffff jialc a0, 0xffff");
- COMPARE(jialc(v0, 0), "f8020000 jialc v0, 0x0");
- COMPARE(jialc(s1, 1), "f8110001 jialc s1, 0x1");
- COMPARE(jialc(a0, 32767), "f8047fff jialc a0, 0x7fff");
+ COMPARE(jialc(a0, -32768), "f8048000 jialc a0, -32768");
+ COMPARE(jialc(a0, -1), "f804ffff jialc a0, -1");
+ COMPARE(jialc(v0, 0), "f8020000 jialc v0, 0");
+ COMPARE(jialc(s1, 1), "f8110001 jialc s1, 1");
+ COMPARE(jialc(a0, 32767), "f8047fff jialc a0, 32767");
}
VERIFY_RUN();
@@ -915,17 +933,17 @@ TEST(Type3) {
COMPARE_PC_REL_COMPACT(bnvc(a1, a0, -32768),
"60a48000 bnvc a1, a0, -32768", -32768);
- COMPARE_PC_REL_COMPACT(beqzc(a0, 0), "d8800000 beqzc a0, 0x0", 0);
- COMPARE_PC_REL_COMPACT(beqzc(a0, 0xfffff), // 0x0fffff == 1048575.
- "d88fffff beqzc a0, 0xfffff", 1048575);
- COMPARE_PC_REL_COMPACT(beqzc(a0, 0x100000), // 0x100000 == -1048576.
- "d8900000 beqzc a0, 0x100000", -1048576);
+ COMPARE_PC_REL_COMPACT(beqzc(a0, 0), "d8800000 beqzc a0, 0", 0);
+ COMPARE_PC_REL_COMPACT(beqzc(a0, 1048575), // 0x0fffff == 1048575.
+ "d88fffff beqzc a0, 1048575", 1048575);
+ COMPARE_PC_REL_COMPACT(beqzc(a0, -1048576), // 0x100000 == -1048576.
+ "d8900000 beqzc a0, -1048576", -1048576);
- COMPARE_PC_REL_COMPACT(bnezc(a0, 0), "f8800000 bnezc a0, 0x0", 0);
- COMPARE_PC_REL_COMPACT(bnezc(a0, 0xfffff), // 0x0fffff == 1048575.
- "f88fffff bnezc a0, 0xfffff", 1048575);
- COMPARE_PC_REL_COMPACT(bnezc(a0, 0x100000), // 0x100000 == -1048576.
- "f8900000 bnezc a0, 0x100000", -1048576);
+ COMPARE_PC_REL_COMPACT(bnezc(a0, 0), "f8800000 bnezc a0, 0", 0);
+ COMPARE_PC_REL_COMPACT(bnezc(a0, 1048575), // int21 maximal value.
+ "f88fffff bnezc a0, 1048575", 1048575);
+ COMPARE_PC_REL_COMPACT(bnezc(a0, -1048576), // int21 minimal value.
+ "f8900000 bnezc a0, -1048576", -1048576);
COMPARE_PC_REL_COMPACT(bc(-33554432), "ca000000 bc -33554432",
-33554432);
@@ -944,29 +962,29 @@ TEST(Type3) {
33554431);
COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, -32768),
- "18858000 bgeuc a0, a1, -32768", -32768);
+ "18858000 bgeuc a0, a1, -32768", -32768);
COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, -1),
- "1885ffff bgeuc a0, a1, -1", -1);
- COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, 1),
- "18850001 bgeuc a0, a1, 1", 1);
+ "1885ffff bgeuc a0, a1, -1", -1);
+ COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, 1), "18850001 bgeuc a0, a1, 1",
+ 1);
COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, 32767),
- "18857fff bgeuc a0, a1, 32767", 32767);
+ "18857fff bgeuc a0, a1, 32767", 32767);
COMPARE_PC_REL_COMPACT(bgezalc(a0, -32768),
- "18848000 bgezalc a0, -32768", -32768);
- COMPARE_PC_REL_COMPACT(bgezalc(a0, -1), "1884ffff bgezalc a0, -1",
+ "18848000 bgezalc a0, -32768", -32768);
+ COMPARE_PC_REL_COMPACT(bgezalc(a0, -1), "1884ffff bgezalc a0, -1",
-1);
- COMPARE_PC_REL_COMPACT(bgezalc(a0, 1), "18840001 bgezalc a0, 1", 1);
+ COMPARE_PC_REL_COMPACT(bgezalc(a0, 1), "18840001 bgezalc a0, 1", 1);
COMPARE_PC_REL_COMPACT(bgezalc(a0, 32767),
- "18847fff bgezalc a0, 32767", 32767);
+ "18847fff bgezalc a0, 32767", 32767);
COMPARE_PC_REL_COMPACT(blezalc(a0, -32768),
- "18048000 blezalc a0, -32768", -32768);
- COMPARE_PC_REL_COMPACT(blezalc(a0, -1), "1804ffff blezalc a0, -1",
+ "18048000 blezalc a0, -32768", -32768);
+ COMPARE_PC_REL_COMPACT(blezalc(a0, -1), "1804ffff blezalc a0, -1",
-1);
- COMPARE_PC_REL_COMPACT(blezalc(a0, 1), "18040001 blezalc a0, 1", 1);
+ COMPARE_PC_REL_COMPACT(blezalc(a0, 1), "18040001 blezalc a0, 1", 1);
COMPARE_PC_REL_COMPACT(blezalc(a0, 32767),
- "18047fff blezalc a0, 32767", 32767);
+ "18047fff blezalc a0, 32767", 32767);
COMPARE_PC_REL_COMPACT(bltuc(a0, a1, -32768),
"1c858000 bltuc a0, a1, -32768", -32768);
@@ -1024,13 +1042,13 @@ TEST(Type3) {
"5c847fff bltzc a0, 32767", 32767);
COMPARE_PC_REL_COMPACT(bltc(a0, a1, -32768),
- "5c858000 bltc a0, a1, -32768", -32768);
+ "5c858000 bltc a0, a1, -32768", -32768);
COMPARE_PC_REL_COMPACT(bltc(a0, a1, -1),
- "5c85ffff bltc a0, a1, -1", -1);
- COMPARE_PC_REL_COMPACT(bltc(a0, a1, 1), "5c850001 bltc a0, a1, 1",
+ "5c85ffff bltc a0, a1, -1", -1);
+ COMPARE_PC_REL_COMPACT(bltc(a0, a1, 1), "5c850001 bltc a0, a1, 1",
1);
COMPARE_PC_REL_COMPACT(bltc(a0, a1, 32767),
- "5c857fff bltc a0, a1, 32767", 32767);
+ "5c857fff bltc a0, a1, 32767", 32767);
COMPARE_PC_REL_COMPACT(bgtzc(a0, -32768),
"5c048000 bgtzc a0, -32768", -32768);
@@ -1061,13 +1079,13 @@ TEST(Type3) {
1);
COMPARE_PC_REL_COMPACT(beqc(a0, a1, -32768),
- "20858000 beqc a0, a1, -32768", -32768);
- COMPARE_PC_REL_COMPACT(beqc(a0, a1, -1), "2085ffff beqc a0, a1, -1",
- -1);
- COMPARE_PC_REL_COMPACT(beqc(a0, a1, 1), "20850001 beqc a0, a1, 1",
+ "20858000 beqc a0, a1, -32768", -32768);
+ COMPARE_PC_REL_COMPACT(beqc(a0, a1, -1),
+ "2085ffff beqc a0, a1, -1", -1);
+ COMPARE_PC_REL_COMPACT(beqc(a0, a1, 1), "20850001 beqc a0, a1, 1",
1);
COMPARE_PC_REL_COMPACT(beqc(a0, a1, 32767),
- "20857fff beqc a0, a1, 32767", 32767);
+ "20857fff beqc a0, a1, 32767", 32767);
COMPARE_PC_REL_COMPACT(bnec(a0, a1, -32768),
"60858000 bnec a0, a1, -32768", -32768);
@@ -1239,3 +1257,13 @@ TEST(CVT_DISSASM) {
VERIFY_RUN();
}
+
+
+TEST(ctc1_cfc1_disasm) {
+ SET_UP();
+ COMPARE(abs_d(f10, f31), "4620fa85 abs.d f10, f31");
+ COMPARE(ceil_w_s(f8, f31), "4600fa0e ceil.w.s f8, f31");
+ COMPARE(ctc1(a0, FCSR), "44c4f800 ctc1 a0, FCSR");
+ COMPARE(cfc1(a0, FCSR), "4444f800 cfc1 a0, FCSR");
+ VERIFY_RUN();
+}
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
index ec02a251dd..9290c46c78 100644
--- a/deps/v8/test/cctest/test-disasm-ppc.cc
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -64,7 +64,7 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// in the rest of the macros.
#define SET_UP() \
CcTest::InitializeVM(); \
- Isolate* isolate = Isolate::Current(); \
+ Isolate* isolate = CcTest::i_isolate(); \
HandleScope scope(isolate); \
byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
Assembler assm(isolate, buffer, 4 * 1024); \
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index f5630ab54e..ee1f09d0a0 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -311,7 +311,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) {
// `delete array[0]` does not alter length, but changes the elments_kind
name = MakeString("0");
- JSReceiver::DeletePropertyOrElement(array, name).Check();
+ CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false));
CHECK_NE(array->map(), *previous_map);
CHECK_EQ(array->map()->elements_kind(), FAST_HOLEY_SMI_ELEMENTS);
CHECK_EQ(1, Smi::cast(array->length())->value());
@@ -384,7 +384,7 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) {
// `delete array[0]` does not alter length, but changes the elments_kind
name = MakeString("0");
- JSReceiver::DeletePropertyOrElement(array, name).Check();
+ CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false));
CHECK_NE(array->map(), *previous_map);
CHECK_EQ(array->map()->elements_kind(), FAST_HOLEY_ELEMENTS);
CHECK_EQ(1, Smi::cast(array->length())->value());
@@ -441,7 +441,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
// `delete array[0]` does not alter length, but changes the elments_kind
name = MakeString("0");
- JSReceiver::DeletePropertyOrElement(array, name).Check();
+ CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false));
CHECK_NE(array->map(), *previous_map);
CHECK_EQ(array->map()->elements_kind(), FAST_HOLEY_DOUBLE_ELEMENTS);
CHECK_EQ(2, Smi::cast(array->length())->value());
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 26a7191d9e..c06e5b9124 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mvstanton): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -47,8 +44,6 @@ TEST(VectorStructure) {
CHECK(Handle<FixedArray>::cast(vector)
.is_identical_to(factory->empty_fixed_array()));
// Which can nonetheless be queried.
- CHECK_EQ(0, vector->ic_with_type_info_count());
- CHECK_EQ(0, vector->ic_generic_count());
CHECK(vector->is_empty());
{
@@ -135,8 +130,6 @@ TEST(VectorICMetadata) {
// Meanwhile set some feedback values and type feedback values to
// verify the data structure remains intact.
- vector->change_ic_with_type_info_count(100);
- vector->change_ic_generic_count(3333);
vector->Set(FeedbackVectorSlot(0), *vector);
// Verify the metadata is correctly set up from the spec.
@@ -200,60 +193,6 @@ TEST(VectorSlotClearing) {
}
-TEST(VectorICProfilerStatistics) {
- if (i::FLAG_always_opt) return;
- CcTest::InitializeVM();
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
-
- // Make sure function f has a call that uses a type feedback slot.
- CompileRun(
- "function fun() {};"
- "function f(a) { a(); } f(fun);");
- Handle<JSFunction> f = GetFunction("f");
- // There should be one IC.
- Handle<Code> code = handle(f->shared()->code(), isolate);
- TypeFeedbackInfo* feedback_info =
- TypeFeedbackInfo::cast(code->type_feedback_info());
- CHECK_EQ(1, feedback_info->ic_total_count());
- CHECK_EQ(0, feedback_info->ic_with_type_info_count());
- CHECK_EQ(0, feedback_info->ic_generic_count());
- Handle<TypeFeedbackVector> feedback_vector =
- handle(f->shared()->feedback_vector(), isolate);
- FeedbackVectorHelper helper(feedback_vector);
- CallICNexus nexus(feedback_vector, helper.slot(0));
- CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(0, feedback_vector->ic_generic_count());
-
- // Now send the information generic.
- CompileRun("f(Object);");
- CHECK_EQ(0, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(1, feedback_vector->ic_generic_count());
-
- // A collection will not affect the site.
- heap->CollectAllGarbage();
- CHECK_EQ(0, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(1, feedback_vector->ic_generic_count());
-
- // The Array function is special. A call to array remains monomorphic
- // and isn't cleared by gc because an AllocationSite is being held.
- // Clear the IC manually in order to test this case.
- nexus.Clear(*code);
- CompileRun("f(Array);");
- CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(0, feedback_vector->ic_generic_count());
-
-
- CHECK(nexus.GetFeedback()->IsAllocationSite());
- heap->CollectAllGarbage();
- CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
- CHECK_EQ(0, feedback_vector->ic_generic_count());
- CHECK(nexus.GetFeedback()->IsAllocationSite());
-}
-
-
TEST(VectorCallICStates) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -460,17 +399,11 @@ TEST(ReferenceContextAllocatesNoSlots) {
Handle<TypeFeedbackVector> feedback_vector =
handle(f->shared()->feedback_vector(), isolate);
FeedbackVectorHelper helper(feedback_vector);
- if (FLAG_vector_stores) {
- CHECK_EQ(4, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::LOAD_IC);
- } else {
- CHECK_EQ(2, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
- }
+ CHECK_EQ(4, helper.slot_count());
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::LOAD_IC);
}
{
@@ -485,11 +418,7 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be one LOAD_IC, for the load of a.
Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
- if (FLAG_vector_stores) {
- CHECK_EQ(2, helper.slot_count());
- } else {
- CHECK_EQ(1, helper.slot_count());
- }
+ CHECK_EQ(2, helper.slot_count());
}
{
@@ -506,20 +435,12 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be 2 LOAD_ICs and 2 CALL_ICs.
Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
- if (FLAG_vector_stores) {
- CHECK_EQ(5, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::CALL_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::CALL_IC);
- CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
- } else {
- CHECK_EQ(4, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::CALL_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::CALL_IC);
- CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::LOAD_IC);
- }
+ CHECK_EQ(5, helper.slot_count());
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::CALL_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::CALL_IC);
+ CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
}
{
@@ -536,16 +457,10 @@ TEST(ReferenceContextAllocatesNoSlots) {
// the load of x[0] in the return statement.
Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
- if (FLAG_vector_stores) {
- CHECK_EQ(3, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::KEYED_STORE_IC);
- CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::KEYED_LOAD_IC);
- } else {
- CHECK_EQ(2, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::KEYED_LOAD_IC);
- }
+ CHECK_EQ(3, helper.slot_count());
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::KEYED_STORE_IC);
+ CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::KEYED_LOAD_IC);
}
{
@@ -561,27 +476,19 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be 3 LOAD_ICs, for load of a and load of x.old and x.young.
Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
- if (FLAG_vector_stores) {
- CHECK_EQ(6, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 5, FeedbackVectorSlotKind::LOAD_IC);
- } else {
- CHECK_EQ(3, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
- CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::LOAD_IC);
- }
+ CHECK_EQ(6, helper.slot_count());
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 5, FeedbackVectorSlotKind::LOAD_IC);
}
}
TEST(VectorStoreICBasic) {
if (i::FLAG_always_opt) return;
- if (!i::FLAG_vector_stores) return;
CcTest::InitializeVM();
LocalContext context;
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index fe1170d9b6..89456bd6ba 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -101,7 +101,7 @@ class Expectations {
void Init(int index, PropertyType type, PropertyAttributes attributes,
Representation representation, Handle<Object> value) {
- DCHECK(index < MAX_PROPERTIES);
+ CHECK(index < MAX_PROPERTIES);
types_[index] = type;
attributes_[index] = attributes;
representations_[index] = representation;
@@ -621,6 +621,17 @@ static void TestGeneralizeRepresentation(
CHECK_EQ(expected_field_type_dependency, info.dependencies()->HasAborted());
}
+ {
+ // Check that all previous maps are not stable.
+ Map* tmp = *new_map;
+ while (true) {
+ Object* back = tmp->GetBackPointer();
+ if (back->IsUndefined()) break;
+ tmp = Map::cast(back);
+ CHECK(!tmp->is_stable());
+ }
+ }
+
info.dependencies()->Rollback(); // Properly cleanup compilation info.
// Update all deprecated maps and check that they are now the same.
@@ -1380,9 +1391,10 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
void UpdateExpectations(int property_index, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> function_type =
+ HeapType::Class(isolate->sloppy_function_map(), isolate);
expectations.SetDataField(property_index, Representation::HeapObject(),
- any_type);
+ function_type);
}
};
@@ -2136,7 +2148,8 @@ TEST(TransitionDataConstantToAnotherDataConstant) {
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<HeapType> function_type =
+ HeapType::Class(isolate->sloppy_function_map(), isolate);
Handle<JSFunction> js_func1 = factory->NewFunction(factory->empty_string());
TransitionToDataConstantOperator transition_op1(js_func1);
@@ -2144,8 +2157,8 @@ TEST(TransitionDataConstantToAnotherDataConstant) {
Handle<JSFunction> js_func2 = factory->NewFunction(factory->empty_string());
TransitionToDataConstantOperator transition_op2(js_func2);
- FieldGeneralizationChecker checker(kPropCount - 1,
- Representation::HeapObject(), any_type);
+ FieldGeneralizationChecker checker(
+ kPropCount - 1, Representation::HeapObject(), function_type);
TestTransitionTo(transition_op1, transition_op2, checker);
}
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 6c7aa030bc..77ba2f2243 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -49,7 +49,7 @@ using ::v8::internal::String;
using ::v8::internal::Vector;
-static void CheckFunctionName(v8::Handle<v8::Script> script,
+static void CheckFunctionName(v8::Local<v8::Script> script,
const char* func_pos_src,
const char* ref_inferred_name) {
Isolate* isolate = CcTest::i_isolate();
@@ -93,8 +93,12 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
}
-static v8::Handle<v8::Script> Compile(v8::Isolate* isolate, const char* src) {
- return v8::Script::Compile(v8::String::NewFromUtf8(isolate, src));
+static v8::Local<v8::Script> Compile(v8::Isolate* isolate, const char* src) {
+ return v8::Script::Compile(
+ isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, src, v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked();
}
@@ -102,10 +106,9 @@ TEST(GlobalProperty) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "fun1 = function() { return 1; }\n"
- "fun2 = function() { return 2; }\n");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "fun1 = function() { return 1; }\n"
+ "fun2 = function() { return 2; }\n");
CheckFunctionName(script, "return 1", "fun1");
CheckFunctionName(script, "return 2", "fun2");
}
@@ -115,10 +118,10 @@ TEST(GlobalVar) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "var fun1 = function() { return 1; }\n"
- "var fun2 = function() { return 2; }\n");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "var fun1 = function() { return 1; }\n"
+ "var fun2 = function() { return 2; }\n");
CheckFunctionName(script, "return 1", "fun1");
CheckFunctionName(script, "return 2", "fun2");
}
@@ -128,12 +131,12 @@ TEST(LocalVar) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function outer() {\n"
- " var fun1 = function() { return 1; }\n"
- " var fun2 = function() { return 2; }\n"
- "}");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function outer() {\n"
+ " var fun1 = function() { return 1; }\n"
+ " var fun2 = function() { return 2; }\n"
+ "}");
CheckFunctionName(script, "return 1", "fun1");
CheckFunctionName(script, "return 2", "fun2");
}
@@ -143,12 +146,12 @@ TEST(InConstructor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function MyClass() {\n"
- " this.method1 = function() { return 1; }\n"
- " this.method2 = function() { return 2; }\n"
- "}");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function MyClass() {\n"
+ " this.method1 = function() { return 1; }\n"
+ " this.method2 = function() { return 2; }\n"
+ "}");
CheckFunctionName(script, "return 1", "MyClass.method1");
CheckFunctionName(script, "return 2", "MyClass.method2");
}
@@ -158,14 +161,14 @@ TEST(Factory) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function createMyObj() {\n"
- " var obj = {};\n"
- " obj.method1 = function() { return 1; }\n"
- " obj.method2 = function() { return 2; }\n"
- " return obj;\n"
- "}");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function createMyObj() {\n"
+ " var obj = {};\n"
+ " obj.method1 = function() { return 1; }\n"
+ " obj.method2 = function() { return 2; }\n"
+ " return obj;\n"
+ "}");
CheckFunctionName(script, "return 1", "obj.method1");
CheckFunctionName(script, "return 2", "obj.method2");
}
@@ -175,14 +178,14 @@ TEST(Static) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function MyClass() {}\n"
- "MyClass.static1 = function() { return 1; }\n"
- "MyClass.static2 = function() { return 2; }\n"
- "MyClass.MyInnerClass = {}\n"
- "MyClass.MyInnerClass.static3 = function() { return 3; }\n"
- "MyClass.MyInnerClass.static4 = function() { return 4; }");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function MyClass() {}\n"
+ "MyClass.static1 = function() { return 1; }\n"
+ "MyClass.static2 = function() { return 2; }\n"
+ "MyClass.MyInnerClass = {}\n"
+ "MyClass.MyInnerClass.static3 = function() { return 3; }\n"
+ "MyClass.MyInnerClass.static4 = function() { return 4; }");
CheckFunctionName(script, "return 1", "MyClass.static1");
CheckFunctionName(script, "return 2", "MyClass.static2");
CheckFunctionName(script, "return 3", "MyClass.MyInnerClass.static3");
@@ -194,7 +197,7 @@ TEST(Prototype) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
+ v8::Local<v8::Script> script = Compile(
CcTest::isolate(),
"function MyClass() {}\n"
"MyClass.prototype.method1 = function() { return 1; }\n"
@@ -213,12 +216,12 @@ TEST(ObjectLiteral) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function MyClass() {}\n"
- "MyClass.prototype = {\n"
- " method1: function() { return 1; },\n"
- " method2: function() { return 2; } }");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function MyClass() {}\n"
+ "MyClass.prototype = {\n"
+ " method1: function() { return 1; },\n"
+ " method2: function() { return 2; } }");
CheckFunctionName(script, "return 1", "MyClass.method1");
CheckFunctionName(script, "return 2", "MyClass.method2");
}
@@ -228,16 +231,16 @@ TEST(UpperCaseClass) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(CcTest::isolate(),
- "'use strict';\n"
- "class MyClass {\n"
- " constructor() {\n"
- " this.value = 1;\n"
- " }\n"
- " method() {\n"
- " this.value = 2;\n"
- " }\n"
- "}");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "'use strict';\n"
+ "class MyClass {\n"
+ " constructor() {\n"
+ " this.value = 1;\n"
+ " }\n"
+ " method() {\n"
+ " this.value = 2;\n"
+ " }\n"
+ "}");
CheckFunctionName(script, "this.value = 1", "MyClass");
CheckFunctionName(script, "this.value = 2", "MyClass.method");
}
@@ -247,16 +250,16 @@ TEST(LowerCaseClass) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(CcTest::isolate(),
- "'use strict';\n"
- "class myclass {\n"
- " constructor() {\n"
- " this.value = 1;\n"
- " }\n"
- " method() {\n"
- " this.value = 2;\n"
- " }\n"
- "}");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "'use strict';\n"
+ "class myclass {\n"
+ " constructor() {\n"
+ " this.value = 1;\n"
+ " }\n"
+ " method() {\n"
+ " this.value = 2;\n"
+ " }\n"
+ "}");
CheckFunctionName(script, "this.value = 1", "myclass");
CheckFunctionName(script, "this.value = 2", "myclass.method");
}
@@ -266,7 +269,7 @@ TEST(AsParameter) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
+ v8::Local<v8::Script> script = Compile(
CcTest::isolate(),
"function f1(a) { return a(); }\n"
"function f2(a, b) { return a() + b(); }\n"
@@ -283,11 +286,10 @@ TEST(MultipleFuncsConditional) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "fun1 = 0 ?\n"
- " function() { return 1; } :\n"
- " function() { return 2; }");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "fun1 = 0 ?\n"
+ " function() { return 1; } :\n"
+ " function() { return 2; }");
CheckFunctionName(script, "return 1", "fun1");
CheckFunctionName(script, "return 2", "fun1");
}
@@ -297,12 +299,12 @@ TEST(MultipleFuncsInLiteral) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function MyClass() {}\n"
- "MyClass.prototype = {\n"
- " method1: 0 ? function() { return 1; } :\n"
- " function() { return 2; } }");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function MyClass() {}\n"
+ "MyClass.prototype = {\n"
+ " method1: 0 ? function() { return 1; } :\n"
+ " function() { return 2; } }");
CheckFunctionName(script, "return 1", "MyClass.method1");
CheckFunctionName(script, "return 2", "MyClass.method1");
}
@@ -312,18 +314,17 @@ TEST(AnonymousInAnonymousClosure1) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "(function() {\n"
- " (function() {\n"
- " var a = 1;\n"
- " return;\n"
- " })();\n"
- " var b = function() {\n"
- " var c = 1;\n"
- " return;\n"
- " };\n"
- "})();");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "(function() {\n"
+ " (function() {\n"
+ " var a = 1;\n"
+ " return;\n"
+ " })();\n"
+ " var b = function() {\n"
+ " var c = 1;\n"
+ " return;\n"
+ " };\n"
+ "})();");
CheckFunctionName(script, "return", "");
}
@@ -332,15 +333,14 @@ TEST(AnonymousInAnonymousClosure2) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "(function() {\n"
- " (function() {\n"
- " var a = 1;\n"
- " return;\n"
- " })();\n"
- " var c = 1;\n"
- "})();");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "(function() {\n"
+ " (function() {\n"
+ " var a = 1;\n"
+ " return;\n"
+ " })();\n"
+ " var c = 1;\n"
+ "})();");
CheckFunctionName(script, "return", "");
}
@@ -349,15 +349,14 @@ TEST(NamedInAnonymousClosure) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "var foo = function() {\n"
- " (function named() {\n"
- " var a = 1;\n"
- " })();\n"
- " var c = 1;\n"
- " return;\n"
- "};");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "var foo = function() {\n"
+ " (function named() {\n"
+ " var a = 1;\n"
+ " })();\n"
+ " var c = 1;\n"
+ " return;\n"
+ "};");
CheckFunctionName(script, "return", "foo");
}
@@ -367,12 +366,12 @@ TEST(Issue380) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function a() {\n"
- "var result = function(p,a,c,k,e,d)"
- "{return p}(\"if blah blah\",62,1976,\'a|b\'.split(\'|\'),0,{})\n"
- "}");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function a() {\n"
+ "var result = function(p,a,c,k,e,d)"
+ "{return p}(\"if blah blah\",62,1976,\'a|b\'.split(\'|\'),0,{})\n"
+ "}");
CheckFunctionName(script, "return p", "");
}
@@ -381,12 +380,12 @@ TEST(MultipleAssignments) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "var fun1 = fun2 = function () { return 1; }\n"
- "var bar1 = bar2 = bar3 = function () { return 2; }\n"
- "foo1 = foo2 = function () { return 3; }\n"
- "baz1 = baz2 = baz3 = function () { return 4; }");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "var fun1 = fun2 = function () { return 1; }\n"
+ "var bar1 = bar2 = bar3 = function () { return 2; }\n"
+ "foo1 = foo2 = function () { return 3; }\n"
+ "baz1 = baz2 = baz3 = function () { return 4; }");
CheckFunctionName(script, "return 1", "fun2");
CheckFunctionName(script, "return 2", "bar3");
CheckFunctionName(script, "return 3", "foo2");
@@ -398,7 +397,7 @@ TEST(AsConstructorParameter) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
+ v8::Local<v8::Script> script = Compile(
CcTest::isolate(),
"function Foo() {}\n"
"var foo = new Foo(function() { return 1; })\n"
@@ -413,14 +412,14 @@ TEST(FactoryHashmap) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function createMyObj() {\n"
- " var obj = {};\n"
- " obj[\"method1\"] = function() { return 1; }\n"
- " obj[\"method2\"] = function() { return 2; }\n"
- " return obj;\n"
- "}");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function createMyObj() {\n"
+ " var obj = {};\n"
+ " obj[\"method1\"] = function() { return 1; }\n"
+ " obj[\"method2\"] = function() { return 2; }\n"
+ " return obj;\n"
+ "}");
CheckFunctionName(script, "return 1", "obj.method1");
CheckFunctionName(script, "return 2", "obj.method2");
}
@@ -430,16 +429,16 @@ TEST(FactoryHashmapVariable) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "function createMyObj() {\n"
- " var obj = {};\n"
- " var methodName = \"method1\";\n"
- " obj[methodName] = function() { return 1; }\n"
- " methodName = \"method2\";\n"
- " obj[methodName] = function() { return 2; }\n"
- " return obj;\n"
- "}");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "function createMyObj() {\n"
+ " var obj = {};\n"
+ " var methodName = \"method1\";\n"
+ " obj[methodName] = function() { return 1; }\n"
+ " methodName = \"method2\";\n"
+ " obj[methodName] = function() { return 2; }\n"
+ " return obj;\n"
+ "}");
// Can't infer function names statically.
CheckFunctionName(script, "return 1", "obj.(anonymous function)");
CheckFunctionName(script, "return 2", "obj.(anonymous function)");
@@ -450,7 +449,7 @@ TEST(FactoryHashmapConditional) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
+ v8::Local<v8::Script> script = Compile(
CcTest::isolate(),
"function createMyObj() {\n"
" var obj = {};\n"
@@ -466,14 +465,13 @@ TEST(GlobalAssignmentAndCall) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "var Foo = function() {\n"
- " return 1;\n"
- "}();\n"
- "var Baz = Bar = function() {\n"
- " return 2;\n"
- "}");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "var Foo = function() {\n"
+ " return 1;\n"
+ "}();\n"
+ "var Baz = Bar = function() {\n"
+ " return 2;\n"
+ "}");
// The inferred name is empty, because this is an assignment of a result.
CheckFunctionName(script, "return 1", "");
// See MultipleAssignments test.
@@ -485,17 +483,16 @@ TEST(AssignmentAndCall) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "(function Enclosing() {\n"
- " var Foo;\n"
- " Foo = function() {\n"
- " return 1;\n"
- " }();\n"
- " var Baz = Bar = function() {\n"
- " return 2;\n"
- " }\n"
- "})();");
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "(function Enclosing() {\n"
+ " var Foo;\n"
+ " Foo = function() {\n"
+ " return 1;\n"
+ " }();\n"
+ " var Baz = Bar = function() {\n"
+ " return 2;\n"
+ " }\n"
+ "})();");
// The inferred name is empty, because this is an assignment of a result.
CheckFunctionName(script, "return 1", "");
// See MultipleAssignments test.
@@ -509,15 +506,15 @@ TEST(MethodAssignmentInAnonymousFunctionCall) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "(function () {\n"
- " var EventSource = function () { };\n"
- " EventSource.prototype.addListener = function () {\n"
- " return 2012;\n"
- " };\n"
- " this.PublicEventSource = EventSource;\n"
- "})();");
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "(function () {\n"
+ " var EventSource = function () { };\n"
+ " EventSource.prototype.addListener = function () {\n"
+ " return 2012;\n"
+ " };\n"
+ " this.PublicEventSource = EventSource;\n"
+ "})();");
CheckFunctionName(script, "return 2012", "EventSource.addListener");
}
@@ -526,20 +523,19 @@ TEST(ReturnAnonymousFunction) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Script> script = Compile(
- CcTest::isolate(),
- "(function() {\n"
- " function wrapCode() {\n"
- " return function () {\n"
- " return 2012;\n"
- " };\n"
- " };\n"
- " var foo = 10;\n"
- " function f() {\n"
- " return wrapCode();\n"
- " }\n"
- " this.ref = f;\n"
- "})()");
- script->Run();
+ v8::Local<v8::Script> script = Compile(CcTest::isolate(),
+ "(function() {\n"
+ " function wrapCode() {\n"
+ " return function () {\n"
+ " return 2012;\n"
+ " };\n"
+ " };\n"
+ " var foo = 10;\n"
+ " function f() {\n"
+ " return wrapCode();\n"
+ " }\n"
+ " this.ref = f;\n"
+ "})()");
+ script->Run(CcTest::isolate()->GetCurrentContext()).ToLocalChecked();
CheckFunctionName(script, "return 2012", "");
}
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index cc95df21cf..22fd785566 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -56,7 +56,7 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
bool has_been_disposed() { return has_been_disposed_; }
virtual void Dispose() {
- DCHECK(!has_been_disposed_);
+ CHECK(!has_been_disposed_);
has_been_disposed_ = true;
}
@@ -121,16 +121,16 @@ TEST(IterateObjectGroupsOldApi) {
global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
// CanSkipCallback was called for all objects.
- DCHECK(can_skip_called_objects.length() == 4);
- DCHECK(can_skip_called_objects.Contains(*g1s1.location()));
- DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
+ CHECK(can_skip_called_objects.length() == 4);
+ CHECK(can_skip_called_objects.Contains(*g1s1.location()));
+ CHECK(can_skip_called_objects.Contains(*g1s2.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s1.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s2.location()));
// Nothing was visited.
- DCHECK(visitor.visited.length() == 0);
- DCHECK(!info1.has_been_disposed());
- DCHECK(!info2.has_been_disposed());
+ CHECK(visitor.visited.length() == 0);
+ CHECK(!info1.has_been_disposed());
+ CHECK(!info2.has_been_disposed());
}
// Iterate again, now only skip the second object group.
@@ -145,18 +145,18 @@ TEST(IterateObjectGroupsOldApi) {
global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
// CanSkipCallback was called for all objects.
- DCHECK(can_skip_called_objects.length() == 3 ||
- can_skip_called_objects.length() == 4);
- DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
+ CHECK(can_skip_called_objects.length() == 3 ||
+ can_skip_called_objects.length() == 4);
+ CHECK(can_skip_called_objects.Contains(*g1s2.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s1.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s2.location()));
// The first group was visited.
- DCHECK(visitor.visited.length() == 2);
- DCHECK(visitor.visited.Contains(*g1s1.location()));
- DCHECK(visitor.visited.Contains(*g1s2.location()));
- DCHECK(info1.has_been_disposed());
- DCHECK(!info2.has_been_disposed());
+ CHECK(visitor.visited.length() == 2);
+ CHECK(visitor.visited.Contains(*g1s1.location()));
+ CHECK(visitor.visited.Contains(*g1s2.location()));
+ CHECK(info1.has_been_disposed());
+ CHECK(!info2.has_been_disposed());
}
// Iterate again, don't skip anything.
@@ -166,15 +166,15 @@ TEST(IterateObjectGroupsOldApi) {
global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
// CanSkipCallback was called for all objects.
- DCHECK(can_skip_called_objects.length() == 1);
- DCHECK(can_skip_called_objects.Contains(*g2s1.location()) ||
- can_skip_called_objects.Contains(*g2s2.location()));
+ CHECK(can_skip_called_objects.length() == 1);
+ CHECK(can_skip_called_objects.Contains(*g2s1.location()) ||
+ can_skip_called_objects.Contains(*g2s2.location()));
// The second group was visited.
- DCHECK(visitor.visited.length() == 2);
- DCHECK(visitor.visited.Contains(*g2s1.location()));
- DCHECK(visitor.visited.Contains(*g2s2.location()));
- DCHECK(info2.has_been_disposed());
+ CHECK(visitor.visited.length() == 2);
+ CHECK(visitor.visited.Contains(*g2s1.location()));
+ CHECK(visitor.visited.Contains(*g2s2.location()));
+ CHECK(info2.has_been_disposed());
}
}
@@ -216,16 +216,16 @@ TEST(IterateObjectGroups) {
global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
// CanSkipCallback was called for all objects.
- DCHECK(can_skip_called_objects.length() == 4);
- DCHECK(can_skip_called_objects.Contains(*g1s1.location()));
- DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
+ CHECK(can_skip_called_objects.length() == 4);
+ CHECK(can_skip_called_objects.Contains(*g1s1.location()));
+ CHECK(can_skip_called_objects.Contains(*g1s2.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s1.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s2.location()));
// Nothing was visited.
- DCHECK(visitor.visited.length() == 0);
- DCHECK(!info1.has_been_disposed());
- DCHECK(!info2.has_been_disposed());
+ CHECK(visitor.visited.length() == 0);
+ CHECK(!info1.has_been_disposed());
+ CHECK(!info2.has_been_disposed());
}
// Iterate again, now only skip the second object group.
@@ -240,18 +240,18 @@ TEST(IterateObjectGroups) {
global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
// CanSkipCallback was called for all objects.
- DCHECK(can_skip_called_objects.length() == 3 ||
- can_skip_called_objects.length() == 4);
- DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
- DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
+ CHECK(can_skip_called_objects.length() == 3 ||
+ can_skip_called_objects.length() == 4);
+ CHECK(can_skip_called_objects.Contains(*g1s2.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s1.location()));
+ CHECK(can_skip_called_objects.Contains(*g2s2.location()));
// The first group was visited.
- DCHECK(visitor.visited.length() == 2);
- DCHECK(visitor.visited.Contains(*g1s1.location()));
- DCHECK(visitor.visited.Contains(*g1s2.location()));
- DCHECK(info1.has_been_disposed());
- DCHECK(!info2.has_been_disposed());
+ CHECK(visitor.visited.length() == 2);
+ CHECK(visitor.visited.Contains(*g1s1.location()));
+ CHECK(visitor.visited.Contains(*g1s2.location()));
+ CHECK(info1.has_been_disposed());
+ CHECK(!info2.has_been_disposed());
}
// Iterate again, don't skip anything.
@@ -261,15 +261,15 @@ TEST(IterateObjectGroups) {
global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
// CanSkipCallback was called for all objects.
- DCHECK(can_skip_called_objects.length() == 1);
- DCHECK(can_skip_called_objects.Contains(*g2s1.location()) ||
- can_skip_called_objects.Contains(*g2s2.location()));
+ CHECK(can_skip_called_objects.length() == 1);
+ CHECK(can_skip_called_objects.Contains(*g2s1.location()) ||
+ can_skip_called_objects.Contains(*g2s2.location()));
// The second group was visited.
- DCHECK(visitor.visited.length() == 2);
- DCHECK(visitor.visited.Contains(*g2s1.location()));
- DCHECK(visitor.visited.Contains(*g2s2.location()));
- DCHECK(info2.has_been_disposed());
+ CHECK(visitor.visited.length() == 2);
+ CHECK(visitor.visited.Contains(*g2s1.location()));
+ CHECK(visitor.visited.Contains(*g2s2.location()));
+ CHECK(info2.has_been_disposed());
}
}
@@ -306,16 +306,16 @@ TEST(ImplicitReferences) {
List<ImplicitRefGroup*>* implicit_refs =
global_handles->implicit_ref_groups();
USE(implicit_refs);
- DCHECK(implicit_refs->length() == 2);
- DCHECK(implicit_refs->at(0)->parent ==
- reinterpret_cast<HeapObject**>(g1s1.location()));
- DCHECK(implicit_refs->at(0)->length == 2);
- DCHECK(implicit_refs->at(0)->children[0] == g1c1.location());
- DCHECK(implicit_refs->at(0)->children[1] == g1c2.location());
- DCHECK(implicit_refs->at(1)->parent ==
- reinterpret_cast<HeapObject**>(g2s1.location()));
- DCHECK(implicit_refs->at(1)->length == 1);
- DCHECK(implicit_refs->at(1)->children[0] == g2c1.location());
+ CHECK(implicit_refs->length() == 2);
+ CHECK(implicit_refs->at(0)->parent ==
+ reinterpret_cast<HeapObject**>(g1s1.location()));
+ CHECK(implicit_refs->at(0)->length == 2);
+ CHECK(implicit_refs->at(0)->children[0] == g1c1.location());
+ CHECK(implicit_refs->at(0)->children[1] == g1c2.location());
+ CHECK(implicit_refs->at(1)->parent ==
+ reinterpret_cast<HeapObject**>(g2s1.location()));
+ CHECK(implicit_refs->at(1)->length == 1);
+ CHECK(implicit_refs->at(1)->children[0] == g2c1.location());
global_handles->RemoveObjectGroups();
global_handles->RemoveImplicitRefGroups();
}
@@ -337,7 +337,9 @@ TEST(EternalHandles) {
indices[i] = -1;
HandleScope scope(isolate);
v8::Local<v8::Object> object = v8::Object::New(v8_isolate);
- object->Set(i, v8::Integer::New(v8_isolate, i));
+ object->Set(v8_isolate->GetCurrentContext(), i,
+ v8::Integer::New(v8_isolate, i))
+ .FromJust();
// Create with internal api
eternal_handles->Create(
isolate, *v8::Utils::OpenHandle(*object), &indices[i]);
@@ -360,10 +362,12 @@ TEST(EternalHandles) {
// Test external api
local = eternals[i].Get(v8_isolate);
}
- v8::Local<v8::Object> object = v8::Handle<v8::Object>::Cast(local);
- v8::Local<v8::Value> value = object->Get(i);
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(local);
+ v8::Local<v8::Value> value =
+ object->Get(v8_isolate->GetCurrentContext(), i).ToLocalChecked();
CHECK(value->IsInt32());
- CHECK_EQ(i, value->Int32Value());
+ CHECK_EQ(i,
+ value->Int32Value(v8_isolate->GetCurrentContext()).FromJust());
}
}
@@ -397,3 +401,19 @@ TEST(PersistentBaseGetLocal) {
CHECK(o == g.Get(isolate));
CHECK(v8::Local<v8::Object>::New(isolate, g) == g.Get(isolate));
}
+
+
+void WeakCallback(const v8::WeakCallbackInfo<void>& data) {}
+
+
+TEST(WeakPersistentSmi) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Number> n = v8::Number::New(isolate, 0);
+ v8::Global<v8::Number> g(isolate, n);
+
+ // Should not crash.
+ g.SetWeak<void>(nullptr, &WeakCallback, v8::WeakCallbackType::kParameter);
+}
diff --git a/deps/v8/test/cctest/test-global-object.cc b/deps/v8/test/cctest/test-global-object.cc
index c696086061..d6713208bf 100644
--- a/deps/v8/test/cctest/test-global-object.cc
+++ b/deps/v8/test/cctest/test-global-object.cc
@@ -42,12 +42,12 @@ TEST(StrictUndeclaredGlobalVariable) {
LocalContext context;
v8::TryCatch try_catch(CcTest::isolate());
v8::Local<v8::Script> script = v8_compile("\"use strict\"; x = 42;");
- v8::Handle<v8::Object> proto = v8::Object::New(CcTest::isolate());
- v8::Handle<v8::Object> global =
+ v8::Local<v8::Object> proto = v8::Object::New(CcTest::isolate());
+ v8::Local<v8::Object> global =
context->Global()->GetPrototype().As<v8::Object>();
- proto->Set(var_name, v8_num(100));
- global->SetPrototype(proto);
- script->Run();
+ proto->Set(context.local(), var_name, v8_num(100)).FromJust();
+ global->SetPrototype(context.local(), proto).FromJust();
+ CHECK(script->Run(context.local()).IsEmpty());
CHECK(try_catch.HasCaught());
v8::String::Utf8Value exception(try_catch.Exception());
CHECK_EQ(0, strcmp("ReferenceError: x is not defined", *exception));
@@ -59,7 +59,7 @@ TEST(KeysGlobalObject_Regress2764) {
v8::HandleScope scope(env1->GetIsolate());
// Create second environment.
- v8::Handle<Context> env2 = Context::New(env1->GetIsolate());
+ v8::Local<Context> env2 = Context::New(env1->GetIsolate());
Local<Value> token = v8_str("foo");
@@ -68,17 +68,25 @@ TEST(KeysGlobalObject_Regress2764) {
env2->SetSecurityToken(token);
// Create a reference to env2 global from env1 global.
- env1->Global()->Set(v8_str("global2"), env2->Global());
+ env1->Global()
+ ->Set(env1.local(), v8_str("global2"), env2->Global())
+ .FromJust();
// Set some global variables in global2
- env2->Global()->Set(v8_str("a"), v8_str("a"));
- env2->Global()->Set(v8_str("42"), v8_str("42"));
+ env2->Global()->Set(env2, v8_str("a"), v8_str("a")).FromJust();
+ env2->Global()->Set(env2, v8_str("42"), v8_str("42")).FromJust();
// List all entries from global2.
Local<Array> result;
result = Local<Array>::Cast(CompileRun("Object.keys(global2)"));
CHECK_EQ(2u, result->Length());
- CHECK(v8_str("42")->Equals(result->Get(0)));
- CHECK(v8_str("a")->Equals(result->Get(1)));
+ CHECK(
+ v8_str("42")
+ ->Equals(env1.local(), result->Get(env1.local(), 0).ToLocalChecked())
+ .FromJust());
+ CHECK(
+ v8_str("a")
+ ->Equals(env1.local(), result->Get(env1.local(), 1).ToLocalChecked())
+ .FromJust());
result =
Local<Array>::Cast(CompileRun("Object.getOwnPropertyNames(global2)"));
diff --git a/deps/v8/test/cctest/test-hashing.cc b/deps/v8/test/cctest/test-hashing.cc
index 9e5de2e05a..25a8e5c527 100644
--- a/deps/v8/test/cctest/test-hashing.cc
+++ b/deps/v8/test/cctest/test-hashing.cc
@@ -110,7 +110,8 @@ void check(uint32_t key) {
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
- MacroAssembler masm(CcTest::i_isolate(), buffer, sizeof buffer);
+ MacroAssembler masm(CcTest::i_isolate(), buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
generate(&masm, key);
@@ -124,8 +125,8 @@ void check(uint32_t key) {
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
#ifdef USE_SIMULATOR
- uint32_t codegen_hash = static_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
+ uint32_t codegen_hash = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
+ CALL_GENERATED_CODE(isolate, hash, 0, 0, 0, 0, 0)));
#else
uint32_t codegen_hash = hash();
#endif
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 9bdd3b81e6..a2fd09e9f5 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -96,11 +96,10 @@ class NamedEntriesDetector {
static const v8::HeapGraphNode* GetGlobalObject(
const v8::HeapSnapshot* snapshot) {
- CHECK_EQ(3, snapshot->GetRoot()->GetChildrenCount());
- // The 0th-child is (GC Roots), 1st is code stubs context, 2nd is the user
- // root.
+ CHECK_EQ(2, snapshot->GetRoot()->GetChildrenCount());
+ // The 0th-child is (GC Roots), 1st is the user root.
const v8::HeapGraphNode* global_obj =
- snapshot->GetRoot()->GetChild(2)->GetToNode();
+ snapshot->GetRoot()->GetChild(1)->GetToNode();
CHECK_EQ(0, strncmp("Object", const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(global_obj))->name(), 6));
return global_obj;
@@ -247,13 +246,12 @@ TEST(BoundFunctionInSnapshot) {
const v8::HeapGraphNode* f =
GetProperty(global, v8::HeapGraphEdge::kProperty, "boundFunction");
CHECK(f);
- CHECK(v8::String::NewFromUtf8(env->GetIsolate(), "native_bind")
- ->Equals(f->GetName()));
+ CHECK(v8_str("native_bind")->Equals(env.local(), f->GetName()).FromJust());
const v8::HeapGraphNode* bindings =
GetProperty(f, v8::HeapGraphEdge::kInternal, "bindings");
CHECK(bindings);
CHECK_EQ(v8::HeapGraphNode::kArray, bindings->GetType());
- CHECK_EQ(3, bindings->GetChildrenCount());
+ CHECK_EQ(1, bindings->GetChildrenCount());
const v8::HeapGraphNode* bound_this = GetProperty(
f, v8::HeapGraphEdge::kShortcut, "bound_this");
@@ -428,8 +426,8 @@ TEST(HeapSnapshotConsString) {
v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(1);
LocalContext env(NULL, global_template);
- v8::Handle<v8::Object> global_proxy = env->Global();
- v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
+ v8::Local<v8::Object> global_proxy = env->Global();
+ v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(1, global->InternalFieldCount());
i::Factory* factory = CcTest::i_isolate()->factory();
@@ -475,11 +473,11 @@ TEST(HeapSnapshotSymbol) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "a");
CHECK(a);
CHECK_EQ(a->GetType(), v8::HeapGraphNode::kSymbol);
- CHECK(v8_str("symbol")->Equals(a->GetName()));
+ CHECK(v8_str("symbol")->Equals(env.local(), a->GetName()).FromJust());
const v8::HeapGraphNode* name =
GetProperty(a, v8::HeapGraphEdge::kInternal, "name");
CHECK(name);
- CHECK(v8_str("mySymbol")->Equals(name->GetName()));
+ CHECK(v8_str("mySymbol")->Equals(env.local(), name->GetName()).FromJust());
}
@@ -540,7 +538,7 @@ TEST(HeapSnapshotWeakCollection) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "ws");
CHECK(ws);
CHECK_EQ(v8::HeapGraphNode::kObject, ws->GetType());
- CHECK(v8_str("WeakSet")->Equals(ws->GetName()));
+ CHECK(v8_str("WeakSet")->Equals(env.local(), ws->GetName()).FromJust());
const v8::HeapGraphNode* ws_table =
GetProperty(ws, v8::HeapGraphEdge::kInternal, "table");
@@ -564,7 +562,7 @@ TEST(HeapSnapshotWeakCollection) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "wm");
CHECK(wm);
CHECK_EQ(v8::HeapGraphNode::kObject, wm->GetType());
- CHECK(v8_str("WeakMap")->Equals(wm->GetName()));
+ CHECK(v8_str("WeakMap")->Equals(env.local(), wm->GetName()).FromJust());
const v8::HeapGraphNode* wm_table =
GetProperty(wm, v8::HeapGraphEdge::kInternal, "table");
@@ -613,7 +611,7 @@ TEST(HeapSnapshotCollection) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "set");
CHECK(set);
CHECK_EQ(v8::HeapGraphNode::kObject, set->GetType());
- CHECK(v8_str("Set")->Equals(set->GetName()));
+ CHECK(v8_str("Set")->Equals(env.local(), set->GetName()).FromJust());
const v8::HeapGraphNode* set_table =
GetProperty(set, v8::HeapGraphEdge::kInternal, "table");
@@ -637,7 +635,7 @@ TEST(HeapSnapshotCollection) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "map");
CHECK(map);
CHECK_EQ(v8::HeapGraphNode::kObject, map->GetType());
- CHECK(v8_str("Map")->Equals(map->GetName()));
+ CHECK(v8_str("Map")->Equals(env.local(), map->GetName()).FromJust());
const v8::HeapGraphNode* map_table =
GetProperty(map, v8::HeapGraphEdge::kInternal, "table");
@@ -666,8 +664,8 @@ TEST(HeapSnapshotInternalReferences) {
v8::ObjectTemplate::New(isolate);
global_template->SetInternalFieldCount(2);
LocalContext env(NULL, global_template);
- v8::Handle<v8::Object> global_proxy = env->Global();
- v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
+ v8::Local<v8::Object> global_proxy = env->Global();
+ v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
CHECK_EQ(2, global->InternalFieldCount());
v8::Local<v8::Object> obj = v8::Object::New(isolate);
global->SetInternalField(0, v8_num(17));
@@ -863,7 +861,7 @@ class TestJSONStream : public v8::OutputStream {
return kContinue;
}
virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
- DCHECK(false);
+ CHECK(false);
return kAbort;
}
void WriteTo(i::Vector<char> dest) { buffer_.WriteTo(dest); }
@@ -916,19 +914,26 @@ TEST(HeapSnapshotJSONSerialization) {
// Verify that snapshot string is valid JSON.
OneByteResource* json_res = new OneByteResource(json);
v8::Local<v8::String> json_string =
- v8::String::NewExternal(env->GetIsolate(), json_res);
- env->Global()->Set(v8_str("json_snapshot"), json_string);
+ v8::String::NewExternalOneByte(env->GetIsolate(), json_res)
+ .ToLocalChecked();
+ env->Global()
+ ->Set(env.local(), v8_str("json_snapshot"), json_string)
+ .FromJust();
v8::Local<v8::Value> snapshot_parse_result = CompileRun(
"var parsed = JSON.parse(json_snapshot); true;");
CHECK(!snapshot_parse_result.IsEmpty());
// Verify that snapshot object has required fields.
v8::Local<v8::Object> parsed_snapshot =
- env->Global()->Get(v8_str("parsed"))->ToObject(isolate);
- CHECK(parsed_snapshot->Has(v8_str("snapshot")));
- CHECK(parsed_snapshot->Has(v8_str("nodes")));
- CHECK(parsed_snapshot->Has(v8_str("edges")));
- CHECK(parsed_snapshot->Has(v8_str("strings")));
+ env->Global()
+ ->Get(env.local(), v8_str("parsed"))
+ .ToLocalChecked()
+ ->ToObject(env.local())
+ .ToLocalChecked();
+ CHECK(parsed_snapshot->Has(env.local(), v8_str("snapshot")).FromJust());
+ CHECK(parsed_snapshot->Has(env.local(), v8_str("nodes")).FromJust());
+ CHECK(parsed_snapshot->Has(env.local(), v8_str("edges")).FromJust());
+ CHECK(parsed_snapshot->Has(env.local(), v8_str("strings")).FromJust());
// Get node and edge "member" offsets.
v8::Local<v8::Value> meta_analysis_result = CompileRun(
@@ -979,21 +984,34 @@ TEST(HeapSnapshotJSONSerialization) {
" \"x\", property_type),"
" \"s\", property_type)");
CHECK(!string_obj_pos_val.IsEmpty());
- int string_obj_pos =
- static_cast<int>(string_obj_pos_val->ToNumber(isolate)->Value());
+ int string_obj_pos = static_cast<int>(
+ string_obj_pos_val->ToNumber(env.local()).ToLocalChecked()->Value());
v8::Local<v8::Object> nodes_array =
- parsed_snapshot->Get(v8_str("nodes"))->ToObject(isolate);
- int string_index = static_cast<int>(
- nodes_array->Get(string_obj_pos + 1)->ToNumber(isolate)->Value());
+ parsed_snapshot->Get(env.local(), v8_str("nodes"))
+ .ToLocalChecked()
+ ->ToObject(env.local())
+ .ToLocalChecked();
+ int string_index =
+ static_cast<int>(nodes_array->Get(env.local(), string_obj_pos + 1)
+ .ToLocalChecked()
+ ->ToNumber(env.local())
+ .ToLocalChecked()
+ ->Value());
CHECK_GT(string_index, 0);
v8::Local<v8::Object> strings_array =
- parsed_snapshot->Get(v8_str("strings"))->ToObject(isolate);
- v8::Local<v8::String> string =
- strings_array->Get(string_index)->ToString(isolate);
- v8::Local<v8::String> ref_string =
- CompileRun(STRING_LITERAL_FOR_TEST)->ToString(isolate);
+ parsed_snapshot->Get(env.local(), v8_str("strings"))
+ .ToLocalChecked()
+ ->ToObject(env.local())
+ .ToLocalChecked();
+ v8::Local<v8::String> string = strings_array->Get(env.local(), string_index)
+ .ToLocalChecked()
+ ->ToString(env.local())
+ .ToLocalChecked();
+ v8::Local<v8::String> ref_string = CompileRun(STRING_LITERAL_FOR_TEST)
+ ->ToString(env.local())
+ .ToLocalChecked();
#undef STRING_LITERAL_FOR_TEST
- CHECK_LT(0, strcmp(*v8::String::Utf8Value(ref_string),
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(ref_string),
*v8::String::Utf8Value(string)));
}
@@ -1032,13 +1050,13 @@ class TestStatsStream : public v8::OutputStream {
virtual ~TestStatsStream() {}
virtual void EndOfStream() { ++eos_signaled_; }
virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
- DCHECK(false);
+ CHECK(false);
return kAbort;
}
virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* buffer,
int updates_written) {
++intervals_count_;
- DCHECK(updates_written);
+ CHECK(updates_written);
updates_written_ += updates_written;
entries_count_ = 0;
if (first_interval_index_ == -1 && updates_written != 0)
@@ -1190,7 +1208,7 @@ TEST(HeapSnapshotObjectsStats) {
v8::Local<v8::Array> array = v8::Array::New(env->GetIsolate());
CHECK_EQ(0u, array->Length());
// Force array's buffer allocation.
- array->Set(2, v8_num(7));
+ array->Set(env.local(), 2, v8_num(7)).FromJust();
uint32_t entries_size;
{
@@ -1205,7 +1223,7 @@ TEST(HeapSnapshotObjectsStats) {
}
for (int i = 0; i < 100; ++i)
- array->Set(i, v8_num(i));
+ array->Set(env.local(), i, v8_num(i)).FromJust();
{
// Single chunk of data with 1 entry expected in update.
@@ -1230,7 +1248,7 @@ TEST(HeapObjectIds) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
const int kLength = 10;
- v8::Handle<v8::Object> objects[kLength];
+ v8::Local<v8::Object> objects[kLength];
v8::SnapshotObjectId ids[kLength];
heap_profiler->StartTrackingHeapObjects(false);
@@ -1252,15 +1270,15 @@ TEST(HeapObjectIds) {
for (int i = 0; i < kLength; i++) {
v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
CHECK_EQ(ids[i], id);
- v8::Handle<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
- CHECK(objects[i]->Equals(obj));
+ v8::Local<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
+ CHECK(objects[i]->Equals(env.local(), obj).FromJust());
}
heap_profiler->ClearObjectIds();
for (int i = 0; i < kLength; i++) {
v8::SnapshotObjectId id = heap_profiler->GetObjectId(objects[i]);
CHECK_EQ(v8::HeapProfiler::kUnknownObjectId, id);
- v8::Handle<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
+ v8::Local<v8::Value> obj = heap_profiler->FindObjectById(ids[i]);
CHECK(obj.IsEmpty());
}
}
@@ -1308,8 +1326,8 @@ TEST(HeapSnapshotGetSnapshotObjectId) {
GetProperty(global, v8::HeapGraphEdge::kProperty, "globalObject");
CHECK(global_object);
- v8::Local<v8::Value> globalObjectHandle = env->Global()->Get(
- v8::String::NewFromUtf8(env->GetIsolate(), "globalObject"));
+ v8::Local<v8::Value> globalObjectHandle =
+ env->Global()->Get(env.local(), v8_str("globalObject")).ToLocalChecked();
CHECK(!globalObjectHandle.IsEmpty());
CHECK(globalObjectHandle->IsObject());
@@ -1412,7 +1430,7 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
bool disposed() { return disposed_; }
static v8::RetainedObjectInfo* WrapperInfoCallback(
- uint16_t class_id, v8::Handle<v8::Value> wrapper) {
+ uint16_t class_id, v8::Local<v8::Value> wrapper) {
if (class_id == 1) {
if (wrapper->IsString()) {
v8::String::Utf8Value utf8(wrapper);
@@ -1532,14 +1550,18 @@ class GraphWithImplicitRefs {
for (int i = 0; i < kObjectsCount; i++) {
objects_[i].Reset(isolate_, v8::Object::New(isolate_));
}
- (*env)->Global()->Set(v8_str("root_object"),
- v8::Local<v8::Value>::New(isolate_, objects_[0]));
+ (*env)
+ ->Global()
+ ->Set(isolate_->GetCurrentContext(), v8_str("root_object"),
+ v8::Local<v8::Value>::New(isolate_, objects_[0]))
+ .FromJust();
}
~GraphWithImplicitRefs() {
instance_ = NULL;
}
- static void gcPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
+ static void gcPrologue(v8::Isolate* isolate, v8::GCType type,
+ v8::GCCallbackFlags flags) {
instance_->AddImplicitReferences();
}
@@ -1571,7 +1593,7 @@ TEST(HeapSnapshotImplicitReferences) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
GraphWithImplicitRefs graph(&env);
- v8::V8::AddGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
+ env->GetIsolate()->AddGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
@@ -1594,7 +1616,8 @@ TEST(HeapSnapshotImplicitReferences) {
}
}
CHECK_EQ(2, implicit_targets_count);
- v8::V8::RemoveGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
+ env->GetIsolate()->RemoveGCPrologueCallback(
+ &GraphWithImplicitRefs::gcPrologue);
}
@@ -1664,7 +1687,7 @@ TEST(DeleteHeapSnapshot) {
class NameResolver : public v8::HeapProfiler::ObjectNameResolver {
public:
- virtual const char* GetName(v8::Handle<v8::Object> object) {
+ virtual const char* GetName(v8::Local<v8::Object> object) {
return "Global object name";
}
};
@@ -1757,17 +1780,21 @@ TEST(GetHeapValueForNode) {
const v8::HeapGraphNode* obj = GetProperty(
global, v8::HeapGraphEdge::kProperty, "a");
CHECK(heap_profiler->FindObjectById(obj->GetId())->IsObject());
- v8::Local<v8::Object> js_obj = js_global->Get(v8_str("a")).As<v8::Object>();
+ v8::Local<v8::Object> js_obj = js_global->Get(env.local(), v8_str("a"))
+ .ToLocalChecked()
+ .As<v8::Object>();
CHECK(js_obj == heap_profiler->FindObjectById(obj->GetId()));
const v8::HeapGraphNode* s_prop =
GetProperty(obj, v8::HeapGraphEdge::kProperty, "s_prop");
- v8::Local<v8::String> js_s_prop =
- js_obj->Get(v8_str("s_prop")).As<v8::String>();
+ v8::Local<v8::String> js_s_prop = js_obj->Get(env.local(), v8_str("s_prop"))
+ .ToLocalChecked()
+ .As<v8::String>();
CHECK(js_s_prop == heap_profiler->FindObjectById(s_prop->GetId()));
const v8::HeapGraphNode* n_prop =
GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
- v8::Local<v8::String> js_n_prop =
- js_obj->Get(v8_str("n_prop")).As<v8::String>();
+ v8::Local<v8::String> js_n_prop = js_obj->Get(env.local(), v8_str("n_prop"))
+ .ToLocalChecked()
+ .As<v8::String>();
CHECK(js_n_prop == heap_profiler->FindObjectById(n_prop->GetId()));
}
@@ -1818,42 +1845,56 @@ TEST(GetConstructorName) {
"var Constructor2 = function() {};\n"
"var obj2 = new Constructor2();\n"
"var obj3 = {};\n"
- "obj3.constructor = function Constructor3() {};\n"
+ "obj3.__proto__ = { constructor: function Constructor3() {} };\n"
"var obj4 = {};\n"
"// Slow properties\n"
"for (var i=0; i<2000; ++i) obj4[\"p\" + i] = i;\n"
- "obj4.constructor = function Constructor4() {};\n"
+ "obj4.__proto__ = { constructor: function Constructor4() {} };\n"
"var obj5 = {};\n"
"var obj6 = {};\n"
"obj6.constructor = 6;");
v8::Local<v8::Object> js_global =
env->Global()->GetPrototype().As<v8::Object>();
- v8::Local<v8::Object> obj1 = js_global->Get(v8_str("obj1")).As<v8::Object>();
- i::Handle<i::JSObject> js_obj1 = v8::Utils::OpenHandle(*obj1);
+ v8::Local<v8::Object> obj1 = js_global->Get(env.local(), v8_str("obj1"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj1 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj1));
CHECK_EQ(0, StringCmp(
"Constructor1", i::V8HeapExplorer::GetConstructorName(*js_obj1)));
- v8::Local<v8::Object> obj2 = js_global->Get(v8_str("obj2")).As<v8::Object>();
- i::Handle<i::JSObject> js_obj2 = v8::Utils::OpenHandle(*obj2);
+ v8::Local<v8::Object> obj2 = js_global->Get(env.local(), v8_str("obj2"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj2 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj2));
CHECK_EQ(0, StringCmp(
"Constructor2", i::V8HeapExplorer::GetConstructorName(*js_obj2)));
- v8::Local<v8::Object> obj3 = js_global->Get(v8_str("obj3")).As<v8::Object>();
- i::Handle<i::JSObject> js_obj3 = v8::Utils::OpenHandle(*obj3);
- // TODO(verwaest): Restore to Constructor3 once supported by the
- // heap-snapshot-generator.
- CHECK_EQ(
- 0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(*js_obj3)));
- v8::Local<v8::Object> obj4 = js_global->Get(v8_str("obj4")).As<v8::Object>();
- i::Handle<i::JSObject> js_obj4 = v8::Utils::OpenHandle(*obj4);
- // TODO(verwaest): Restore to Constructor4 once supported by the
- // heap-snapshot-generator.
- CHECK_EQ(
- 0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(*js_obj4)));
- v8::Local<v8::Object> obj5 = js_global->Get(v8_str("obj5")).As<v8::Object>();
- i::Handle<i::JSObject> js_obj5 = v8::Utils::OpenHandle(*obj5);
+ v8::Local<v8::Object> obj3 = js_global->Get(env.local(), v8_str("obj3"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj3 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj3));
+ CHECK_EQ(0, StringCmp("Constructor3",
+ i::V8HeapExplorer::GetConstructorName(*js_obj3)));
+ v8::Local<v8::Object> obj4 = js_global->Get(env.local(), v8_str("obj4"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj4 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj4));
+ CHECK_EQ(0, StringCmp("Constructor4",
+ i::V8HeapExplorer::GetConstructorName(*js_obj4)));
+ v8::Local<v8::Object> obj5 = js_global->Get(env.local(), v8_str("obj5"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj5 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj5));
CHECK_EQ(0, StringCmp(
"Object", i::V8HeapExplorer::GetConstructorName(*js_obj5)));
- v8::Local<v8::Object> obj6 = js_global->Get(v8_str("obj6")).As<v8::Object>();
- i::Handle<i::JSObject> js_obj6 = v8::Utils::OpenHandle(*obj6);
+ v8::Local<v8::Object> obj6 = js_global->Get(env.local(), v8_str("obj6"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ i::Handle<i::JSObject> js_obj6 =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj6));
CHECK_EQ(0, StringCmp(
"Object", i::V8HeapExplorer::GetConstructorName(*js_obj6)));
}
@@ -1912,8 +1953,10 @@ TEST(FastCaseRedefinedAccessors) {
"});\n");
v8::Local<v8::Object> js_global =
env->Global()->GetPrototype().As<v8::Object>();
- i::Handle<i::JSObject> js_obj1 =
- v8::Utils::OpenHandle(*js_global->Get(v8_str("obj1")).As<v8::Object>());
+ i::Handle<i::JSReceiver> js_obj1 =
+ v8::Utils::OpenHandle(*js_global->Get(env.local(), v8_str("obj1"))
+ .ToLocalChecked()
+ .As<v8::Object>());
USE(js_obj1);
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
@@ -1983,10 +2026,11 @@ TEST(HiddenPropertiesFastCase) {
GetProperty(c, v8::HeapGraphEdge::kProperty, "<symbol>");
CHECK(!hidden_props);
- v8::Handle<v8::Value> cHandle =
- env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "c"));
+ v8::Local<v8::Value> cHandle =
+ env->Global()->Get(env.local(), v8_str("c")).ToLocalChecked();
CHECK(!cHandle.IsEmpty() && cHandle->IsObject());
- cHandle->ToObject(isolate)
+ cHandle->ToObject(env.local())
+ .ToLocalChecked()
->SetPrivate(env.local(),
v8::Private::ForApi(env->GetIsolate(), v8_str("key")),
v8_str("val"))
@@ -2117,7 +2161,6 @@ TEST(NoDebugObjectInSnapshot) {
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root = snapshot->GetRoot();
int globals_count = 0;
- bool found = false;
for (int i = 0; i < root->GetChildrenCount(); ++i) {
const v8::HeapGraphEdge* edge = root->GetChild(i);
if (edge->GetType() == v8::HeapGraphEdge::kShortcut) {
@@ -2125,13 +2168,10 @@ TEST(NoDebugObjectInSnapshot) {
const v8::HeapGraphNode* global = edge->GetToNode();
const v8::HeapGraphNode* foo =
GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
- if (foo != nullptr) {
- found = true;
- }
+ CHECK(foo);
}
}
- CHECK_EQ(2, globals_count);
- CHECK(found);
+ CHECK_EQ(1, globals_count);
}
@@ -2288,18 +2328,27 @@ TEST(AllocationSitesAreVisible) {
CHECK_EQ(v8::internal::FixedArray::SizeFor(3),
static_cast<int>(elements->GetShallowSize()));
- v8::Handle<v8::Value> array_val =
+ v8::Local<v8::Value> array_val =
heap_profiler->FindObjectById(transition_info->GetId());
CHECK(array_val->IsArray());
- v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(array_val);
+ v8::Local<v8::Array> array = v8::Local<v8::Array>::Cast(array_val);
// Verify the array is "a" in the code above.
CHECK_EQ(3u, array->Length());
CHECK(v8::Integer::New(isolate, 3)
- ->Equals(array->Get(v8::Integer::New(isolate, 0))));
+ ->Equals(env.local(),
+ array->Get(env.local(), v8::Integer::New(isolate, 0))
+ .ToLocalChecked())
+ .FromJust());
CHECK(v8::Integer::New(isolate, 2)
- ->Equals(array->Get(v8::Integer::New(isolate, 1))));
+ ->Equals(env.local(),
+ array->Get(env.local(), v8::Integer::New(isolate, 1))
+ .ToLocalChecked())
+ .FromJust());
CHECK(v8::Integer::New(isolate, 1)
- ->Equals(array->Get(v8::Integer::New(isolate, 2))));
+ ->Equals(env.local(),
+ array->Get(env.local(), v8::Integer::New(isolate, 2))
+ .ToLocalChecked())
+ .FromJust());
}
@@ -2583,7 +2632,7 @@ TEST(TrackV8ApiAllocation) {
const char* names[] = { "(V8 API)" };
heap_profiler->StartTrackingHeapObjects(true);
- v8::Handle<v8::Object> o1 = v8::Object::New(env->GetIsolate());
+ v8::Local<v8::Object> o1 = v8::Object::New(env->GetIsolate());
o1->Clone();
AllocationTracker* tracker =
@@ -2653,15 +2702,15 @@ TEST(ArrayBufferSharedBackingStore) {
CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
void* data = ab_contents.Data();
- DCHECK(data != NULL);
+ CHECK(data != NULL);
v8::Local<v8::ArrayBuffer> ab2 =
v8::ArrayBuffer::New(isolate, data, ab_contents.ByteLength());
CHECK(ab2->IsExternal());
- env->Global()->Set(v8_str("ab1"), ab);
- env->Global()->Set(v8_str("ab2"), ab2);
+ env->Global()->Set(env.local(), v8_str("ab1"), ab).FromJust();
+ env->Global()->Set(env.local(), v8_str("ab2"), ab2).FromJust();
- v8::Handle<v8::Value> result = CompileRun("ab2.byteLength");
- CHECK_EQ(1024, result->Int32Value());
+ v8::Local<v8::Value> result = CompileRun("ab2.byteLength");
+ CHECK_EQ(1024, result->Int32Value(env.local()).FromJust());
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
@@ -2688,13 +2737,13 @@ TEST(BoxObject) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
- v8::Handle<v8::Object> global_proxy = env->Global();
- v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
+ v8::Local<v8::Object> global_proxy = env->Global();
+ v8::Local<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
i::Factory* factory = CcTest::i_isolate()->factory();
i::Handle<i::String> string = factory->NewStringFromStaticChars("string");
i::Handle<i::Object> box = factory->NewBox(string);
- global->Set(0, v8::ToApiHandle<v8::Object>(box));
+ global->Set(env.local(), 0, v8::ToApiHandle<v8::Object>(box)).FromJust();
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
new file mode 100644
index 0000000000..6ce77c9416
--- /dev/null
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -0,0 +1,1111 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+#include <sstream>
+#include <utility>
+
+#include "src/api.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+
+
+static const int kMaxInobjectProperties =
+ (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
+
+
+template <typename T>
+static Handle<T> OpenHandle(v8::Local<v8::Value> value) {
+ Handle<Object> obj = v8::Utils::OpenHandle(*value);
+ return Handle<T>::cast(obj);
+}
+
+
+static inline v8::Local<v8::Value> Run(v8::Local<v8::Script> script) {
+ v8::Local<v8::Value> result;
+ if (script->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .ToLocal(&result)) {
+ return result;
+ }
+ return v8::Local<v8::Value>();
+}
+
+
+template <typename T = Object>
+Handle<T> GetGlobal(const char* name) {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Handle<String> str_name = factory->InternalizeUtf8String(name);
+
+ Handle<Object> value =
+ Object::GetProperty(isolate->global_object(), str_name).ToHandleChecked();
+ return Handle<T>::cast(value);
+}
+
+
+template <typename T = Object>
+Handle<T> GetLexical(const char* name) {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+
+ Handle<String> str_name = factory->InternalizeUtf8String(name);
+ Handle<ScriptContextTable> script_contexts(
+ isolate->native_context()->script_context_table());
+
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+ Handle<Object> result =
+ FixedArray::get(ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index),
+ lookup_result.slot_index);
+ return Handle<T>::cast(result);
+ }
+ return Handle<T>();
+}
+
+
+template <typename T = Object>
+Handle<T> GetLexical(const std::string& name) {
+ return GetLexical<T>(name.c_str());
+}
+
+
+template <typename T>
+static inline Handle<T> Run(v8::Local<v8::Script> script) {
+ return OpenHandle<T>(Run(script));
+}
+
+
+template <typename T>
+static inline Handle<T> CompileRun(const char* script) {
+ return OpenHandle<T>(CompileRun(script));
+}
+
+
+static Object* GetFieldValue(JSObject* obj, int property_index) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(obj->map(), property_index);
+ return obj->RawFastPropertyAt(index);
+}
+
+
+static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
+ if (obj->IsUnboxedDoubleField(field_index)) {
+ return obj->RawFastDoublePropertyAt(field_index);
+ } else {
+ Object* value = obj->RawFastPropertyAt(field_index);
+ CHECK(value->IsMutableHeapNumber());
+ return HeapNumber::cast(value)->value();
+ }
+}
+
+
+static double GetDoubleFieldValue(JSObject* obj, int property_index) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(obj->map(), property_index);
+ return GetDoubleFieldValue(obj, index);
+}
+
+
+bool IsObjectShrinkable(JSObject* obj) {
+ Handle<Map> filler_map =
+ CcTest::i_isolate()->factory()->one_pointer_filler_map();
+
+ int inobject_properties = obj->map()->GetInObjectProperties();
+ int unused = obj->map()->unused_property_fields();
+ if (unused == 0) return false;
+
+ for (int i = inobject_properties - unused; i < inobject_properties; i++) {
+ if (*filler_map != GetFieldValue(obj, i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+TEST(JSObjectBasic) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ const char* source =
+ "function A() {"
+ " this.a = 42;"
+ " this.d = 4.2;"
+ " this.o = this;"
+ "}";
+ CompileRun(source);
+
+ Handle<JSFunction> func = GetGlobal<JSFunction>("A");
+
+ // Zero instances were created so far.
+ CHECK(!func->has_initial_map());
+
+ v8::Local<v8::Script> new_A_script = v8_compile("new A();");
+
+ Handle<JSObject> obj = Run<JSObject>(new_A_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ // One instance created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // There must be at least some slack.
+ CHECK_LT(5, obj->map()->GetInObjectProperties());
+ CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, 0));
+ CHECK_EQ(4.2, GetDoubleFieldValue(*obj, 1));
+ CHECK_EQ(*obj, GetFieldValue(*obj, 2));
+ CHECK(IsObjectShrinkable(*obj));
+
+ // Create several objects to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_A_script);
+ CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // No slack left.
+ CHECK_EQ(3, obj->map()->GetInObjectProperties());
+}
+
+
+TEST(JSObjectBasicNoInlineNew) {
+ FLAG_inline_new = false;
+ TestJSObjectBasic();
+}
+
+
+TEST(JSObjectComplex) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ const char* source =
+ "function A(n) {"
+ " if (n > 0) this.a = 42;"
+ " if (n > 1) this.d = 4.2;"
+ " if (n > 2) this.o1 = this;"
+ " if (n > 3) this.o2 = this;"
+ " if (n > 4) this.o3 = this;"
+ " if (n > 5) this.o4 = this;"
+ "}";
+ CompileRun(source);
+
+ Handle<JSFunction> func = GetGlobal<JSFunction>("A");
+
+ // Zero instances were created so far.
+ CHECK(!func->has_initial_map());
+
+ Handle<JSObject> obj1 = CompileRun<JSObject>("new A(1);");
+ Handle<JSObject> obj3 = CompileRun<JSObject>("new A(3);");
+ Handle<JSObject> obj5 = CompileRun<JSObject>("new A(5);");
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ // Three instances created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 3,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // There must be at least some slack.
+ CHECK_LT(5, obj3->map()->GetInObjectProperties());
+ CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj3, 0));
+ CHECK_EQ(4.2, GetDoubleFieldValue(*obj3, 1));
+ CHECK_EQ(*obj3, GetFieldValue(*obj3, 2));
+ CHECK(IsObjectShrinkable(*obj1));
+ CHECK(IsObjectShrinkable(*obj3));
+ CHECK(IsObjectShrinkable(*obj5));
+
+ // Create several objects to complete the tracking.
+ for (int i = 3; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ CompileRun("new A(3);");
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+
+ // obj1 and obj2 stays shrinkable because we don't clear unused fields.
+ CHECK(IsObjectShrinkable(*obj1));
+ CHECK(IsObjectShrinkable(*obj3));
+ CHECK(!IsObjectShrinkable(*obj5));
+
+ CHECK_EQ(5, obj1->map()->GetInObjectProperties());
+ CHECK_EQ(4, obj1->map()->unused_property_fields());
+
+ CHECK_EQ(5, obj3->map()->GetInObjectProperties());
+ CHECK_EQ(2, obj3->map()->unused_property_fields());
+
+ CHECK_EQ(5, obj5->map()->GetInObjectProperties());
+ CHECK_EQ(0, obj5->map()->unused_property_fields());
+
+ // Since slack tracking is complete, the new objects should not be shrinkable.
+ obj1 = CompileRun<JSObject>("new A(1);");
+ obj3 = CompileRun<JSObject>("new A(3);");
+ obj5 = CompileRun<JSObject>("new A(5);");
+
+ CHECK(!IsObjectShrinkable(*obj1));
+ CHECK(!IsObjectShrinkable(*obj3));
+ CHECK(!IsObjectShrinkable(*obj5));
+}
+
+
+TEST(JSObjectComplexNoInlineNew) {
+ FLAG_inline_new = false;
+ TestJSObjectComplex();
+}
+
+
+TEST(JSGeneratorObjectBasic) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ const char* source =
+ "function* A() {"
+ " var i = 0;"
+ " while(true) {"
+ " yield i++;"
+ " }"
+ "};"
+ "function CreateGenerator() {"
+ " var o = A();"
+ " o.a = 42;"
+ " o.d = 4.2;"
+ " o.o = o;"
+ " return o;"
+ "}";
+ CompileRun(source);
+
+ Handle<JSFunction> func = GetGlobal<JSFunction>("A");
+
+ // Zero instances were created so far.
+ CHECK(!func->has_initial_map());
+
+ v8::Local<v8::Script> new_A_script = v8_compile("CreateGenerator();");
+
+ Handle<JSObject> obj = Run<JSObject>(new_A_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ // One instance created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // There must be at least some slack.
+ CHECK_LT(5, obj->map()->GetInObjectProperties());
+ CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, 0));
+ CHECK_EQ(4.2, GetDoubleFieldValue(*obj, 1));
+ CHECK_EQ(*obj, GetFieldValue(*obj, 2));
+ CHECK(IsObjectShrinkable(*obj));
+
+ // Create several objects to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_A_script);
+ CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // No slack left.
+ CHECK_EQ(3, obj->map()->GetInObjectProperties());
+}
+
+
+TEST(JSGeneratorObjectBasicNoInlineNew) {
+ FLAG_inline_new = false;
+ TestJSGeneratorObjectBasic();
+}
+
+
+TEST(SubclassBasicNoBaseClassInstances) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // Check that base class' and subclass' slack tracking do not interfere with
+ // each other.
+ // In this test we never create base class instances.
+
+ const char* source =
+ "'use strict';"
+ "class A {"
+ " constructor(...args) {"
+ " this.aa = 42;"
+ " this.ad = 4.2;"
+ " this.ao = this;"
+ " }"
+ "};"
+ "class B extends A {"
+ " constructor(...args) {"
+ " super(...args);"
+ " this.ba = 142;"
+ " this.bd = 14.2;"
+ " this.bo = this;"
+ " }"
+ "};";
+ CompileRun(source);
+
+ Handle<JSFunction> a_func = GetLexical<JSFunction>("A");
+ Handle<JSFunction> b_func = GetLexical<JSFunction>("B");
+
+ // Zero instances were created so far.
+ CHECK(!a_func->has_initial_map());
+ CHECK(!b_func->has_initial_map());
+
+ v8::Local<v8::Script> new_B_script = v8_compile("new B();");
+
+ Handle<JSObject> obj = Run<JSObject>(new_B_script);
+
+ CHECK(a_func->has_initial_map());
+ Handle<Map> a_initial_map(a_func->initial_map());
+
+ CHECK(b_func->has_initial_map());
+ Handle<Map> b_initial_map(b_func->initial_map());
+
+ // Zero instances of A created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart,
+ a_initial_map->construction_counter());
+ CHECK(a_initial_map->IsInobjectSlackTrackingInProgress());
+
+ // One instance of B created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ b_initial_map->construction_counter());
+ CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
+
+ // There must be at least some slack.
+ CHECK_LT(10, obj->map()->GetInObjectProperties());
+ CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, 0));
+ CHECK_EQ(4.2, GetDoubleFieldValue(*obj, 1));
+ CHECK_EQ(*obj, GetFieldValue(*obj, 2));
+ CHECK_EQ(Smi::FromInt(142), GetFieldValue(*obj, 3));
+ CHECK_EQ(14.2, GetDoubleFieldValue(*obj, 4));
+ CHECK_EQ(*obj, GetFieldValue(*obj, 5));
+ CHECK(IsObjectShrinkable(*obj));
+
+ // Create several subclass instances to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_B_script);
+ CHECK_EQ(b_initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!b_initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // Zero instances of A created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart,
+ a_initial_map->construction_counter());
+ CHECK(a_initial_map->IsInobjectSlackTrackingInProgress());
+
+ // No slack left.
+ CHECK_EQ(6, obj->map()->GetInObjectProperties());
+}
+
+
+TEST(SubclassBasicNoBaseClassInstancesNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassBasicNoBaseClassInstances();
+}
+
+
+TEST(SubclassBasic) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // Check that base class' and subclass' slack tracking do not interfere with
+ // each other.
+ // In this test we first create enough base class instances to complete
+ // the slack tracking and then proceed creating subclass instances.
+
+ const char* source =
+ "'use strict';"
+ "class A {"
+ " constructor(...args) {"
+ " this.aa = 42;"
+ " this.ad = 4.2;"
+ " this.ao = this;"
+ " }"
+ "};"
+ "class B extends A {"
+ " constructor(...args) {"
+ " super(...args);"
+ " this.ba = 142;"
+ " this.bd = 14.2;"
+ " this.bo = this;"
+ " }"
+ "};";
+ CompileRun(source);
+
+ Handle<JSFunction> a_func = GetLexical<JSFunction>("A");
+ Handle<JSFunction> b_func = GetLexical<JSFunction>("B");
+
+ // Zero instances were created so far.
+ CHECK(!a_func->has_initial_map());
+ CHECK(!b_func->has_initial_map());
+
+ v8::Local<v8::Script> new_A_script = v8_compile("new A();");
+ v8::Local<v8::Script> new_B_script = v8_compile("new B();");
+
+ Handle<JSObject> a_obj = Run<JSObject>(new_A_script);
+ Handle<JSObject> b_obj = Run<JSObject>(new_B_script);
+
+ CHECK(a_func->has_initial_map());
+ Handle<Map> a_initial_map(a_func->initial_map());
+
+ CHECK(b_func->has_initial_map());
+ Handle<Map> b_initial_map(b_func->initial_map());
+
+ // One instance of a base class created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ a_initial_map->construction_counter());
+ CHECK(a_initial_map->IsInobjectSlackTrackingInProgress());
+
+ // One instance of a subclass created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ b_initial_map->construction_counter());
+ CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
+
+ // Create several base class instances to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(a_initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_A_script);
+ CHECK_EQ(a_initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!a_initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*a_obj));
+
+ // No slack left.
+ CHECK_EQ(3, a_obj->map()->GetInObjectProperties());
+
+ // There must be at least some slack.
+ CHECK_LT(10, b_obj->map()->GetInObjectProperties());
+ CHECK_EQ(Smi::FromInt(42), GetFieldValue(*b_obj, 0));
+ CHECK_EQ(4.2, GetDoubleFieldValue(*b_obj, 1));
+ CHECK_EQ(*b_obj, GetFieldValue(*b_obj, 2));
+ CHECK_EQ(Smi::FromInt(142), GetFieldValue(*b_obj, 3));
+ CHECK_EQ(14.2, GetDoubleFieldValue(*b_obj, 4));
+ CHECK_EQ(*b_obj, GetFieldValue(*b_obj, 5));
+ CHECK(IsObjectShrinkable(*b_obj));
+
+ // Create several subclass instances to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_B_script);
+ CHECK_EQ(b_initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!b_initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*b_obj));
+
+ // No slack left.
+ CHECK_EQ(6, b_obj->map()->GetInObjectProperties());
+}
+
+
+TEST(SubclassBasicNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassBasic();
+}
+
+
+// Creates class hierachy of length matching the |hierarchy_desc| length and
+// with the number of fields at i'th level equal to |hierarchy_desc[i]|.
+static void CreateClassHierarchy(const std::vector<int>& hierarchy_desc) {
+ std::ostringstream os;
+ os << "'use strict';\n\n";
+
+ int n = static_cast<int>(hierarchy_desc.size());
+ for (int cur_class = 0; cur_class < n; cur_class++) {
+ os << "class A" << cur_class;
+ if (cur_class > 0) {
+ os << " extends A" << (cur_class - 1);
+ }
+ os << " {\n"
+ " constructor(...args) {\n";
+ if (cur_class > 0) {
+ os << " super(...args);\n";
+ }
+ int fields_count = hierarchy_desc[cur_class];
+ for (int k = 0; k < fields_count; k++) {
+ os << " this.f" << cur_class << "_" << k << " = " << k << ";\n";
+ }
+ os << " }\n"
+ "};\n\n";
+ }
+ CompileRun(os.str().c_str());
+}
+
+
+static std::string GetClassName(int class_index) {
+ std::ostringstream os;
+ os << "A" << class_index;
+ return os.str();
+}
+
+
+static v8::Local<v8::Script> GetNewObjectScript(const std::string& class_name) {
+ std::ostringstream os;
+ os << "new " << class_name << "();";
+ return v8_compile(os.str().c_str());
+}
+
+
+// Test that in-object slack tracking works as expected for first |n| classes
+// in the hierarchy.
+// This test works only for if the total property count is less than maximum
+// in-object properties count.
+static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
+ int fields_count = 0;
+ for (int cur_class = 0; cur_class < n; cur_class++) {
+ std::string class_name = GetClassName(cur_class);
+ int fields_count_at_current_level = hierarchy_desc[cur_class];
+ fields_count += fields_count_at_current_level;
+
+ // This test is not suitable for in-object properties count overflow case.
+ CHECK_LT(fields_count, kMaxInobjectProperties);
+
+ // Create |class_name| objects and check slack tracking.
+ v8::Local<v8::Script> new_script = GetNewObjectScript(class_name);
+
+ Handle<JSFunction> func = GetLexical<JSFunction>(class_name);
+
+ Handle<JSObject> obj = Run<JSObject>(new_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ // There must be at least some slack.
+ CHECK_LT(fields_count, obj->map()->GetInObjectProperties());
+
+ // One instance was created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // Create several instances to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_script);
+ CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // No slack left.
+ CHECK_EQ(fields_count, obj->map()->GetInObjectProperties());
+ }
+}
+
+
+static void TestSubclassChain(const std::vector<int>& hierarchy_desc) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CreateClassHierarchy(hierarchy_desc);
+ TestClassHierarchy(hierarchy_desc, static_cast<int>(hierarchy_desc.size()));
+}
+
+
+TEST(LongSubclassChain1) {
+ std::vector<int> hierarchy_desc;
+ for (int i = 0; i < 7; i++) {
+ hierarchy_desc.push_back(i * 10);
+ }
+ TestSubclassChain(hierarchy_desc);
+}
+
+
+TEST(LongSubclassChain2) {
+ std::vector<int> hierarchy_desc;
+ hierarchy_desc.push_back(10);
+ for (int i = 0; i < 42; i++) {
+ hierarchy_desc.push_back(0);
+ }
+ hierarchy_desc.push_back(230);
+ TestSubclassChain(hierarchy_desc);
+}
+
+
+TEST(LongSubclassChain3) {
+ std::vector<int> hierarchy_desc;
+ for (int i = 0; i < 42; i++) {
+ hierarchy_desc.push_back(5);
+ }
+ TestSubclassChain(hierarchy_desc);
+}
+
+
+TEST(InobjectPropetiesCountOverflowInSubclass) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ std::vector<int> hierarchy_desc;
+ const int kNoOverflowCount = 5;
+ for (int i = 0; i < kNoOverflowCount; i++) {
+ hierarchy_desc.push_back(50);
+ }
+ // In this class we are going to have properties in the backing store.
+ hierarchy_desc.push_back(100);
+
+ CreateClassHierarchy(hierarchy_desc);
+
+ // For the last class in the hierarchy we need different checks.
+ {
+ int cur_class = kNoOverflowCount;
+ std::string class_name = GetClassName(cur_class);
+
+ // Create |class_name| objects and check slack tracking.
+ v8::Local<v8::Script> new_script = GetNewObjectScript(class_name);
+
+ Handle<JSFunction> func = GetLexical<JSFunction>(class_name);
+
+ Handle<JSObject> obj = Run<JSObject>(new_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ // There must be no slack left.
+ CHECK_EQ(JSObject::kMaxInstanceSize, obj->map()->instance_size());
+ CHECK_EQ(kMaxInobjectProperties, obj->map()->GetInObjectProperties());
+
+ // One instance was created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // Create several instances to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_script);
+ CHECK(!IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // No slack left.
+ CHECK_EQ(kMaxInobjectProperties, obj->map()->GetInObjectProperties());
+ }
+
+ // The other classes in the hierarchy are not affected.
+ TestClassHierarchy(hierarchy_desc, kNoOverflowCount);
+}
+
+
+TEST(SlowModeSubclass) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ std::vector<int> hierarchy_desc;
+ const int kNoOverflowCount = 5;
+ for (int i = 0; i < kNoOverflowCount; i++) {
+ hierarchy_desc.push_back(50);
+ }
+ // This class should go dictionary mode.
+ hierarchy_desc.push_back(1000);
+
+ CreateClassHierarchy(hierarchy_desc);
+
+ // For the last class in the hierarchy we need different checks.
+ {
+ int cur_class = kNoOverflowCount;
+ std::string class_name = GetClassName(cur_class);
+
+ // Create |class_name| objects and check slack tracking.
+ v8::Local<v8::Script> new_script = GetNewObjectScript(class_name);
+
+ Handle<JSFunction> func = GetLexical<JSFunction>(class_name);
+
+ Handle<JSObject> obj = Run<JSObject>(new_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ // Object should go dictionary mode.
+ CHECK_EQ(JSObject::kHeaderSize, obj->map()->instance_size());
+ CHECK(obj->map()->is_dictionary_map());
+
+ // One instance was created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // Create several instances to complete the tracking.
+ for (int i = 1; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_script);
+ CHECK(!IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // Object should stay in dictionary mode.
+ CHECK_EQ(JSObject::kHeaderSize, obj->map()->instance_size());
+ CHECK(obj->map()->is_dictionary_map());
+ }
+
+ // The other classes in the hierarchy are not affected.
+ TestClassHierarchy(hierarchy_desc, kNoOverflowCount);
+}
+
+
+static void TestSubclassBuiltin(const char* subclass_name,
+ InstanceType instance_type,
+ const char* builtin_name,
+ const char* ctor_arguments = "",
+ int builtin_properties_count = 0) {
+ {
+ std::ostringstream os;
+ os << "'use strict';\n"
+ "class "
+ << subclass_name << " extends " << builtin_name
+ << " {\n"
+ " constructor(...args) {\n"
+ " super(...args);\n"
+ " this.a = 42;\n"
+ " this.d = 4.2;\n"
+ " this.o = this;\n"
+ " }\n"
+ "};\n";
+ CompileRun(os.str().c_str());
+ }
+
+ Handle<JSFunction> func = GetLexical<JSFunction>(subclass_name);
+
+ // Zero instances were created so far.
+ CHECK(!func->has_initial_map());
+
+ v8::Local<v8::Script> new_script;
+ {
+ std::ostringstream os;
+ os << "new " << subclass_name << "(" << ctor_arguments << ");";
+ new_script = v8_compile(os.str().c_str());
+ }
+
+ Run<JSObject>(new_script);
+
+ CHECK(func->has_initial_map());
+ Handle<Map> initial_map(func->initial_map());
+
+ CHECK_EQ(instance_type, initial_map->instance_type());
+
+ // One instance of a subclass created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // Create two instances in order to ensure that |obj|.o is a data field
+ // in case of Function subclassing.
+ Handle<JSObject> obj = Run<JSObject>(new_script);
+
+ // Two instances of a subclass created.
+ CHECK_EQ(Map::kSlackTrackingCounterStart - 2,
+ initial_map->construction_counter());
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+
+ // There must be at least some slack.
+ CHECK_LT(builtin_properties_count + 5, obj->map()->GetInObjectProperties());
+ CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, builtin_properties_count + 0));
+ CHECK_EQ(4.2, GetDoubleFieldValue(*obj, builtin_properties_count + 1));
+ CHECK_EQ(*obj, GetFieldValue(*obj, builtin_properties_count + 2));
+ CHECK(IsObjectShrinkable(*obj));
+
+ // Create several subclass instances to complete the tracking.
+ for (int i = 2; i < Map::kGenerousAllocationCount; i++) {
+ CHECK(initial_map->IsInobjectSlackTrackingInProgress());
+ Handle<JSObject> tmp = Run<JSObject>(new_script);
+ CHECK_EQ(initial_map->IsInobjectSlackTrackingInProgress(),
+ IsObjectShrinkable(*tmp));
+ }
+ CHECK(!initial_map->IsInobjectSlackTrackingInProgress());
+ CHECK(!IsObjectShrinkable(*obj));
+
+ // No slack left.
+ CHECK_EQ(builtin_properties_count + 3, obj->map()->GetInObjectProperties());
+
+ CHECK_EQ(instance_type, obj->map()->instance_type());
+}
+
+
+TEST(SubclassObjectBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_OBJECT_TYPE, "Object", "true");
+ TestSubclassBuiltin("A2", JS_OBJECT_TYPE, "Object", "42");
+ TestSubclassBuiltin("A3", JS_OBJECT_TYPE, "Object", "'some string'");
+}
+
+
+TEST(SubclassObjectBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassObjectBuiltin();
+}
+
+
+TEST(SubclassFunctionBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_FUNCTION_TYPE, "Function", "'return 153;'");
+ TestSubclassBuiltin("A2", JS_FUNCTION_TYPE, "Function", "'this.a = 44;'");
+}
+
+
+TEST(SubclassFunctionBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassFunctionBuiltin();
+}
+
+
+TEST(SubclassBooleanBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_VALUE_TYPE, "Boolean", "true");
+ TestSubclassBuiltin("A2", JS_VALUE_TYPE, "Boolean", "false");
+}
+
+
+TEST(SubclassBooleanBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassBooleanBuiltin();
+}
+
+
+TEST(SubclassErrorBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ const int first_field = 2;
+ TestSubclassBuiltin("A1", JS_OBJECT_TYPE, "Error", "'err'", first_field);
+ TestSubclassBuiltin("A2", JS_OBJECT_TYPE, "EvalError", "'err'", first_field);
+ TestSubclassBuiltin("A3", JS_OBJECT_TYPE, "RangeError", "'err'", first_field);
+ TestSubclassBuiltin("A4", JS_OBJECT_TYPE, "ReferenceError", "'err'",
+ first_field);
+ TestSubclassBuiltin("A5", JS_OBJECT_TYPE, "SyntaxError", "'err'",
+ first_field);
+ TestSubclassBuiltin("A6", JS_OBJECT_TYPE, "TypeError", "'err'", first_field);
+ TestSubclassBuiltin("A7", JS_OBJECT_TYPE, "URIError", "'err'", first_field);
+}
+
+
+TEST(SubclassErrorBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassErrorBuiltin();
+}
+
+
+TEST(SubclassNumberBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_VALUE_TYPE, "Number", "42");
+ TestSubclassBuiltin("A2", JS_VALUE_TYPE, "Number", "4.2");
+}
+
+
+TEST(SubclassNumberBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassNumberBuiltin();
+}
+
+
+TEST(SubclassDateBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_DATE_TYPE, "Date", "123456789");
+}
+
+
+TEST(SubclassDateBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassDateBuiltin();
+}
+
+
+TEST(SubclassStringBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_VALUE_TYPE, "String", "'some string'");
+ TestSubclassBuiltin("A2", JS_VALUE_TYPE, "String", "");
+}
+
+
+TEST(SubclassStringBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassStringBuiltin();
+}
+
+
+TEST(SubclassRegExpBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ const int first_field = 1;
+ TestSubclassBuiltin("A1", JS_REGEXP_TYPE, "RegExp", "'o(..)h', 'g'",
+ first_field);
+}
+
+
+TEST(SubclassRegExpBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassRegExpBuiltin();
+}
+
+
+TEST(SubclassArrayBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_ARRAY_TYPE, "Array", "42");
+}
+
+
+TEST(SubclassArrayBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassArrayBuiltin();
+}
+
+
+TEST(SubclassTypedArrayBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+#define TYPED_ARRAY_TEST(Type, type, TYPE, elementType, size) \
+ TestSubclassBuiltin("A" #Type, JS_TYPED_ARRAY_TYPE, #Type "Array", "42");
+
+ TYPED_ARRAYS(TYPED_ARRAY_TEST)
+
+#undef TYPED_ARRAY_TEST
+}
+
+
+TEST(SubclassTypedArrayBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassTypedArrayBuiltin();
+}
+
+
+TEST(SubclassCollectionBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_SET_TYPE, "Set", "");
+ TestSubclassBuiltin("A2", JS_MAP_TYPE, "Map", "");
+ TestSubclassBuiltin("A3", JS_WEAK_SET_TYPE, "WeakSet", "");
+ TestSubclassBuiltin("A4", JS_WEAK_MAP_TYPE, "WeakMap", "");
+}
+
+
+TEST(SubclassCollectionBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassCollectionBuiltin();
+}
+
+
+TEST(SubclassArrayBufferBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ TestSubclassBuiltin("A1", JS_ARRAY_BUFFER_TYPE, "ArrayBuffer", "42");
+ TestSubclassBuiltin("A2", JS_DATA_VIEW_TYPE, "DataView",
+ "new ArrayBuffer(42)");
+}
+
+
+TEST(SubclassArrayBufferBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassArrayBufferBuiltin();
+}
+
+
+TEST(SubclassPromiseBuiltin) {
+ // Avoid eventual completion of in-object slack tracking.
+ FLAG_inline_construct = false;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ const int first_field = 4;
+ TestSubclassBuiltin("A1", JS_PROMISE_TYPE, "Promise",
+ "function(resolve, reject) { resolve('ok'); }",
+ first_field);
+}
+
+
+TEST(SubclassPromiseBuiltinNoInlineNew) {
+ FLAG_inline_new = false;
+ TestSubclassPromiseBuiltin();
+}
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
index cbbbf3c22a..3f7d9d17c3 100644
--- a/deps/v8/test/cctest/test-javascript-arm64.cc
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -34,7 +34,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -43,7 +43,6 @@ using ::v8::Context;
using ::v8::Extension;
using ::v8::Function;
using ::v8::FunctionTemplate;
-using ::v8::Handle;
using ::v8::HandleScope;
using ::v8::Local;
using ::v8::Message;
@@ -59,21 +58,24 @@ using ::v8::Undefined;
using ::v8::V8;
using ::v8::Value;
-static void ExpectBoolean(bool expected, Local<Value> result) {
+static void ExpectBoolean(Local<Context> context, bool expected,
+ Local<Value> result) {
CHECK(result->IsBoolean());
- CHECK_EQ(expected, result->BooleanValue());
+ CHECK_EQ(expected, result->BooleanValue(context).FromJust());
}
-static void ExpectInt32(int32_t expected, Local<Value> result) {
+static void ExpectInt32(Local<Context> context, int32_t expected,
+ Local<Value> result) {
CHECK(result->IsInt32());
- CHECK_EQ(expected, result->Int32Value());
+ CHECK_EQ(expected, result->Int32Value(context).FromJust());
}
-static void ExpectNumber(double expected, Local<Value> result) {
+static void ExpectNumber(Local<Context> context, double expected,
+ Local<Value> result) {
CHECK(result->IsNumber());
- CHECK_EQ(expected, result->NumberValue());
+ CHECK_EQ(expected, result->NumberValue(context).FromJust());
}
@@ -88,7 +90,7 @@ TEST(simple_value) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<Value> result = CompileRun("0x271828;");
- ExpectInt32(0x271828, result);
+ ExpectInt32(env.local(), 0x271828, result);
}
@@ -96,7 +98,7 @@ TEST(global_variable) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
Local<Value> result = CompileRun("var my_global_var = 0x123; my_global_var;");
- ExpectInt32(0x123, result);
+ ExpectInt32(env.local(), 0x123, result);
}
@@ -106,7 +108,7 @@ TEST(simple_function_call) {
Local<Value> result = CompileRun(
"function foo() { return 0x314; }"
"foo();");
- ExpectInt32(0x314, result);
+ ExpectInt32(env.local(), 0x314, result);
}
@@ -120,14 +122,12 @@ TEST(binary_op) {
" return 2 * (a + b - 1);"
"}"
"foo();");
- ExpectInt32(0x2468, result);
+ ExpectInt32(env.local(), 0x2468, result);
}
-static void if_comparison_testcontext_helper(
- char const * op,
- char const * lhs,
- char const * rhs,
- int expect) {
+static void if_comparison_testcontext_helper(Local<Context> context,
+ char const* op, char const* lhs,
+ char const* rhs, int expect) {
char buffer[256];
snprintf(buffer, sizeof(buffer),
"var lhs = %s;"
@@ -136,14 +136,12 @@ static void if_comparison_testcontext_helper(
"else { 0; }",
lhs, rhs, op);
Local<Value> result = CompileRun(buffer);
- ExpectInt32(expect, result);
+ ExpectInt32(context, expect, result);
}
-static void if_comparison_effectcontext_helper(
- char const * op,
- char const * lhs,
- char const * rhs,
- int expect) {
+static void if_comparison_effectcontext_helper(Local<Context> context,
+ char const* op, char const* lhs,
+ char const* rhs, int expect) {
char buffer[256];
snprintf(buffer, sizeof(buffer),
"var lhs = %s;"
@@ -153,23 +151,21 @@ static void if_comparison_effectcontext_helper(
"else { 0; }",
lhs, rhs, op);
Local<Value> result = CompileRun(buffer);
- ExpectInt32(expect, result);
+ ExpectInt32(context, expect, result);
}
-static void if_comparison_helper(
- char const * op,
- int expect_when_lt,
- int expect_when_eq,
- int expect_when_gt) {
+static void if_comparison_helper(Local<Context> context, char const* op,
+ int expect_when_lt, int expect_when_eq,
+ int expect_when_gt) {
// TODO(all): Non-SMI tests.
- if_comparison_testcontext_helper(op, "1", "3", expect_when_lt);
- if_comparison_testcontext_helper(op, "5", "5", expect_when_eq);
- if_comparison_testcontext_helper(op, "9", "7", expect_when_gt);
+ if_comparison_testcontext_helper(context, op, "1", "3", expect_when_lt);
+ if_comparison_testcontext_helper(context, op, "5", "5", expect_when_eq);
+ if_comparison_testcontext_helper(context, op, "9", "7", expect_when_gt);
- if_comparison_effectcontext_helper(op, "1", "3", expect_when_lt);
- if_comparison_effectcontext_helper(op, "5", "5", expect_when_eq);
- if_comparison_effectcontext_helper(op, "9", "7", expect_when_gt);
+ if_comparison_effectcontext_helper(context, op, "1", "3", expect_when_lt);
+ if_comparison_effectcontext_helper(context, op, "5", "5", expect_when_eq);
+ if_comparison_effectcontext_helper(context, op, "9", "7", expect_when_gt);
}
@@ -177,14 +173,14 @@ TEST(if_comparison) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- if_comparison_helper("<", 1, 0, 0);
- if_comparison_helper("<=", 1, 1, 0);
- if_comparison_helper("==", 0, 1, 0);
- if_comparison_helper("===", 0, 1, 0);
- if_comparison_helper(">=", 0, 1, 1);
- if_comparison_helper(">", 0, 0, 1);
- if_comparison_helper("!=", 1, 0, 1);
- if_comparison_helper("!==", 1, 0, 1);
+ if_comparison_helper(env.local(), "<", 1, 0, 0);
+ if_comparison_helper(env.local(), "<=", 1, 1, 0);
+ if_comparison_helper(env.local(), "==", 0, 1, 0);
+ if_comparison_helper(env.local(), "===", 0, 1, 0);
+ if_comparison_helper(env.local(), ">=", 0, 1, 1);
+ if_comparison_helper(env.local(), ">", 0, 0, 1);
+ if_comparison_helper(env.local(), "!=", 1, 0, 1);
+ if_comparison_helper(env.local(), "!==", 1, 0, 1);
}
@@ -194,19 +190,19 @@ TEST(unary_plus) {
Local<Value> result;
// SMI
result = CompileRun("var a = 1234; +a");
- ExpectInt32(1234, result);
+ ExpectInt32(env.local(), 1234, result);
// Number
result = CompileRun("var a = 1234.5; +a");
- ExpectNumber(1234.5, result);
+ ExpectNumber(env.local(), 1234.5, result);
// String (SMI)
result = CompileRun("var a = '1234'; +a");
- ExpectInt32(1234, result);
+ ExpectInt32(env.local(), 1234, result);
// String (Number)
result = CompileRun("var a = '1234.5'; +a");
- ExpectNumber(1234.5, result);
+ ExpectNumber(env.local(), 1234.5, result);
// Check side effects.
result = CompileRun("var a = 1234; +(a = 4321); a");
- ExpectInt32(4321, result);
+ ExpectInt32(env.local(), 4321, result);
}
@@ -215,15 +211,15 @@ TEST(unary_minus) {
v8::HandleScope scope(env->GetIsolate());
Local<Value> result;
result = CompileRun("var a = 1234; -a");
- ExpectInt32(-1234, result);
+ ExpectInt32(env.local(), -1234, result);
result = CompileRun("var a = 1234.5; -a");
- ExpectNumber(-1234.5, result);
+ ExpectNumber(env.local(), -1234.5, result);
result = CompileRun("var a = 1234; -(a = 4321); a");
- ExpectInt32(4321, result);
+ ExpectInt32(env.local(), 4321, result);
result = CompileRun("var a = '1234'; -a");
- ExpectInt32(-1234, result);
+ ExpectInt32(env.local(), -1234, result);
result = CompileRun("var a = '1234.5'; -a");
- ExpectNumber(-1234.5, result);
+ ExpectNumber(env.local(), -1234.5, result);
}
@@ -234,7 +230,7 @@ TEST(unary_void) {
result = CompileRun("var a = 1234; void (a);");
ExpectUndefined(result);
result = CompileRun("var a = 0; void (a = 42); a");
- ExpectInt32(42, result);
+ ExpectInt32(env.local(), 42, result);
result = CompileRun("var a = 0; void (a = 42);");
ExpectUndefined(result);
}
@@ -245,21 +241,21 @@ TEST(unary_not) {
v8::HandleScope scope(env->GetIsolate());
Local<Value> result;
result = CompileRun("var a = 1234; !a");
- ExpectBoolean(false, result);
+ ExpectBoolean(env.local(), false, result);
result = CompileRun("var a = 0; !a");
- ExpectBoolean(true, result);
+ ExpectBoolean(env.local(), true, result);
result = CompileRun("var a = 0; !(a = 1234); a");
- ExpectInt32(1234, result);
+ ExpectInt32(env.local(), 1234, result);
result = CompileRun("var a = '1234'; !a");
- ExpectBoolean(false, result);
+ ExpectBoolean(env.local(), false, result);
result = CompileRun("var a = ''; !a");
- ExpectBoolean(true, result);
+ ExpectBoolean(env.local(), true, result);
result = CompileRun("var a = 1234; !!a");
- ExpectBoolean(true, result);
+ ExpectBoolean(env.local(), true, result);
result = CompileRun("var a = 0; !!a");
- ExpectBoolean(false, result);
+ ExpectBoolean(env.local(), false, result);
result = CompileRun("var a = 0; if ( !a ) { 1; } else { 0; }");
- ExpectInt32(1, result);
+ ExpectInt32(env.local(), 1, result);
result = CompileRun("var a = 1; if ( !a ) { 1; } else { 0; }");
- ExpectInt32(0, result);
+ ExpectInt32(env.local(), 0, result);
}
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
index 98d3365b87..38b22f9b1b 100644
--- a/deps/v8/test/cctest/test-js-arm64-variables.cc
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -36,7 +36,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -45,7 +45,6 @@ using ::v8::Context;
using ::v8::Extension;
using ::v8::Function;
using ::v8::FunctionTemplate;
-using ::v8::Handle;
using ::v8::HandleScope;
using ::v8::Local;
using ::v8::Message;
@@ -61,9 +60,10 @@ using ::v8::Undefined;
using ::v8::V8;
using ::v8::Value;
-static void ExpectInt32(int32_t expected, Local<Value> result) {
+static void ExpectInt32(Local<Context> context, int32_t expected,
+ Local<Value> result) {
CHECK(result->IsInt32());
- CHECK_EQ(expected, result->Int32Value());
+ CHECK_EQ(expected, result->Int32Value(context).FromJust());
}
@@ -75,7 +75,7 @@ TEST(global_variables) {
"var x = 0;"
"function f0() { return x; }"
"f0();");
- ExpectInt32(0, result);
+ ExpectInt32(env.local(), 0, result);
}
@@ -86,7 +86,7 @@ TEST(parameters) {
Local<Value> result = CompileRun(
"function f1(x) { return x; }"
"f1(1);");
- ExpectInt32(1, result);
+ ExpectInt32(env.local(), 1, result);
}
@@ -97,7 +97,7 @@ TEST(stack_allocated_locals) {
Local<Value> result = CompileRun(
"function f2() { var x = 2; return x; }"
"f2();");
- ExpectInt32(2, result);
+ ExpectInt32(env.local(), 2, result);
}
@@ -111,7 +111,7 @@ TEST(context_allocated_locals) {
" return x;"
"}"
"f3(3);");
- ExpectInt32(3, result);
+ ExpectInt32(env.local(), 3, result);
}
@@ -125,7 +125,7 @@ TEST(read_from_outer_context) {
" return g();"
"}"
"f4(4);");
- ExpectInt32(4, result);
+ ExpectInt32(env.local(), 4, result);
}
@@ -138,5 +138,5 @@ TEST(lookup_slots) {
" with ({}) return x;"
"}"
"f5(5);");
- ExpectInt32(5, result);
+ ExpectInt32(env.local(), 5, result);
}
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index fdda3f53c6..b8cf406130 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -117,12 +117,12 @@ void CompareStringsOneWay(const char* s1, const char* s2,
int similar_part_length = diff_pos1 - pos1;
int diff_pos2 = pos2 + similar_part_length;
- DCHECK_EQ(diff_pos2, chunk->pos2);
+ CHECK_EQ(diff_pos2, chunk->pos2);
for (int j = 0; j < similar_part_length; j++) {
- DCHECK(pos1 + j < len1);
- DCHECK(pos2 + j < len2);
- DCHECK_EQ(s1[pos1 + j], s2[pos2 + j]);
+ CHECK(pos1 + j < len1);
+ CHECK(pos2 + j < len2);
+ CHECK_EQ(s1[pos1 + j], s2[pos2 + j]);
}
diff_parameter += chunk->len1 + chunk->len2;
pos1 = diff_pos1 + chunk->len1;
@@ -131,17 +131,17 @@ void CompareStringsOneWay(const char* s1, const char* s2,
{
// After last chunk.
int similar_part_length = len1 - pos1;
- DCHECK_EQ(similar_part_length, len2 - pos2);
+ CHECK_EQ(similar_part_length, len2 - pos2);
USE(len2);
for (int j = 0; j < similar_part_length; j++) {
- DCHECK(pos1 + j < len1);
- DCHECK(pos2 + j < len2);
- DCHECK_EQ(s1[pos1 + j], s2[pos2 + j]);
+ CHECK(pos1 + j < len1);
+ CHECK(pos2 + j < len2);
+ CHECK_EQ(s1[pos1 + j], s2[pos2 + j]);
}
}
if (expected_diff_parameter != -1) {
- DCHECK_EQ(expected_diff_parameter, diff_parameter);
+ CHECK_EQ(expected_diff_parameter, diff_parameter);
}
}
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index f1dc5a28b4..385366aa24 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -35,7 +35,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -57,7 +57,7 @@ using ::v8::V8;
// Migrating an isolate
class KangarooThread : public v8::base::Thread {
public:
- KangarooThread(v8::Isolate* isolate, v8::Handle<v8::Context> context)
+ KangarooThread(v8::Isolate* isolate, v8::Local<v8::Context> context)
: Thread(Options("KangarooThread")),
isolate_(isolate),
context_(isolate, context) {}
@@ -66,15 +66,14 @@ class KangarooThread : public v8::base::Thread {
{
v8::Locker locker(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
- CHECK_EQ(reinterpret_cast<v8::internal::Isolate*>(isolate_),
- v8::internal::Isolate::Current());
+ CHECK_EQ(isolate_, v8::Isolate::GetCurrent());
v8::HandleScope scope(isolate_);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("getValue()");
CHECK(v->IsNumber());
- CHECK_EQ(30, static_cast<int>(v->NumberValue()));
+ CHECK_EQ(30, static_cast<int>(v->NumberValue(context).FromJust()));
}
{
v8::Locker locker(isolate_);
@@ -85,7 +84,7 @@ class KangarooThread : public v8::base::Thread {
v8::Context::Scope context_scope(context);
Local<Value> v = CompileRun("getValue()");
CHECK(v->IsNumber());
- CHECK_EQ(30, static_cast<int>(v->NumberValue()));
+ CHECK_EQ(30, static_cast<int>(v->NumberValue(context).FromJust()));
}
isolate_->Dispose();
}
@@ -108,8 +107,7 @@ TEST(KangarooIsolates) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- CHECK_EQ(reinterpret_cast<v8::internal::Isolate*>(isolate),
- v8::internal::Isolate::Current());
+ CHECK_EQ(isolate, v8::Isolate::GetCurrent());
CompileRun("function getValue() { return 30; }");
thread1.Reset(new KangarooThread(isolate, context));
}
@@ -118,14 +116,14 @@ TEST(KangarooIsolates) {
}
-static void CalcFibAndCheck() {
+static void CalcFibAndCheck(v8::Local<v8::Context> context) {
Local<Value> v = CompileRun("function fib(n) {"
" if (n <= 2) return 1;"
" return fib(n-1) + fib(n-2);"
"}"
"fib(10)");
CHECK(v->IsNumber());
- CHECK_EQ(55, static_cast<int>(v->NumberValue()));
+ CHECK_EQ(55, static_cast<int>(v->NumberValue(context).FromJust()));
}
class JoinableThread {
@@ -187,9 +185,8 @@ class IsolateLockingThreadWithLocalContext : public JoinableThread {
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope(isolate_);
LocalContext local_context(isolate_);
- CHECK_EQ(reinterpret_cast<v8::internal::Isolate*>(isolate_),
- v8::internal::Isolate::Current());
- CalcFibAndCheck();
+ CHECK_EQ(isolate_, v8::Isolate::GetCurrent());
+ CalcFibAndCheck(local_context.local());
}
private:
v8::Isolate* isolate_;
@@ -241,11 +238,11 @@ class IsolateNestedLockingThread : public JoinableThread {
LocalContext local_context(isolate_);
{
v8::Locker another_lock(isolate_);
- CalcFibAndCheck();
+ CalcFibAndCheck(local_context.local());
}
{
v8::Locker another_lock(isolate_);
- CalcFibAndCheck();
+ CalcFibAndCheck(local_context.local());
}
}
private:
@@ -289,7 +286,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread {
IsolateLockingThreadWithLocalContext threadB(isolate2_);
threadB.Start();
- CalcFibAndCheck();
+ CalcFibAndCheck(local_context.local());
threadB.Join();
}
private:
@@ -323,11 +320,10 @@ TEST(SeparateIsolatesLocksNonexclusive) {
class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
public:
explicit LockIsolateAndCalculateFibSharedContextThread(
- v8::Isolate* isolate, v8::Handle<v8::Context> context)
- : JoinableThread("LockIsolateAndCalculateFibThread"),
- isolate_(isolate),
- context_(isolate, context) {
- }
+ v8::Isolate* isolate, v8::Local<v8::Context> context)
+ : JoinableThread("LockIsolateAndCalculateFibThread"),
+ isolate_(isolate),
+ context_(isolate, context) {}
virtual void Run() {
v8::Locker lock(isolate_);
@@ -336,7 +332,7 @@ class LockIsolateAndCalculateFibSharedContextThread : public JoinableThread {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
private:
v8::Isolate* isolate_;
@@ -351,26 +347,31 @@ class LockerUnlockerThread : public JoinableThread {
}
virtual void Run() {
- v8::Locker lock(isolate_);
- v8::Isolate::Scope isolate_scope(isolate_);
- v8::HandleScope handle_scope(isolate_);
- v8::Local<v8::Context> context = v8::Context::New(isolate_);
+ isolate_->DiscardThreadSpecificMetadata(); // No-op
{
- v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
- }
- {
- LockIsolateAndCalculateFibSharedContextThread thread(isolate_, context);
- isolate_->Exit();
- v8::Unlocker unlocker(isolate_);
- thread.Start();
- thread.Join();
- }
- isolate_->Enter();
- {
- v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ v8::Locker lock(isolate_);
+ v8::Isolate::Scope isolate_scope(isolate_);
+ v8::HandleScope handle_scope(isolate_);
+ v8::Local<v8::Context> context = v8::Context::New(isolate_);
+ {
+ v8::Context::Scope context_scope(context);
+ CalcFibAndCheck(context);
+ }
+ {
+ LockIsolateAndCalculateFibSharedContextThread thread(isolate_, context);
+ isolate_->Exit();
+ v8::Unlocker unlocker(isolate_);
+ thread.Start();
+ thread.Join();
+ }
+ isolate_->Enter();
+ {
+ v8::Context::Scope context_scope(context);
+ CalcFibAndCheck(context);
+ }
}
+ isolate_->DiscardThreadSpecificMetadata();
+ isolate_->DiscardThreadSpecificMetadata(); // No-op
}
private:
@@ -411,7 +412,7 @@ class LockTwiceAndUnlockThread : public JoinableThread {
v8::Local<v8::Context> context = v8::Context::New(isolate_);
{
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
{
v8::Locker second_lock(isolate_);
@@ -426,7 +427,7 @@ class LockTwiceAndUnlockThread : public JoinableThread {
isolate_->Enter();
{
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
}
@@ -472,10 +473,10 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
{
v8::Isolate::Scope isolate_scope(isolate1_);
v8::HandleScope handle_scope(isolate1_);
- v8::Handle<v8::Context> context1 = v8::Context::New(isolate1_);
+ v8::Local<v8::Context> context1 = v8::Context::New(isolate1_);
{
v8::Context::Scope context_scope(context1);
- CalcFibAndCheck();
+ CalcFibAndCheck(context1);
}
thread.Reset(new LockIsolateAndCalculateFibSharedContextThread(
isolate1_, context1));
@@ -486,17 +487,17 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
{
v8::Isolate::Scope isolate_scope(isolate2_);
v8::HandleScope handle_scope(isolate2_);
- v8::Handle<v8::Context> context2 = v8::Context::New(isolate2_);
+ v8::Local<v8::Context> context2 = v8::Context::New(isolate2_);
{
v8::Context::Scope context_scope(context2);
- CalcFibAndCheck();
+ CalcFibAndCheck(context2);
}
v8::Unlocker unlock1(isolate1_);
CHECK(!v8::Locker::IsLocked(isolate1_));
CHECK(v8::Locker::IsLocked(isolate2_));
v8::Context::Scope context_scope(context2);
thread->Start();
- CalcFibAndCheck();
+ CalcFibAndCheck(context2);
thread->Join();
}
}
@@ -522,11 +523,10 @@ TEST(LockAndUnlockDifferentIsolates) {
class LockUnlockLockThread : public JoinableThread {
public:
- LockUnlockLockThread(v8::Isolate* isolate, v8::Handle<v8::Context> context)
- : JoinableThread("LockUnlockLockThread"),
- isolate_(isolate),
- context_(isolate, context) {
- }
+ LockUnlockLockThread(v8::Isolate* isolate, v8::Local<v8::Context> context)
+ : JoinableThread("LockUnlockLockThread"),
+ isolate_(isolate),
+ context_(isolate, context) {}
virtual void Run() {
v8::Locker lock1(isolate_);
@@ -538,7 +538,7 @@ class LockUnlockLockThread : public JoinableThread {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
{
v8::Unlocker unlock1(isolate_);
@@ -553,7 +553,7 @@ class LockUnlockLockThread : public JoinableThread {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate_, context_);
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
}
}
@@ -579,7 +579,7 @@ TEST(LockUnlockLockMultithreaded) {
v8::Locker locker_(isolate);
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
for (int i = 0; i < kNThreads; i++) {
threads.Add(new LockUnlockLockThread(
isolate, context));
@@ -591,7 +591,7 @@ TEST(LockUnlockLockMultithreaded) {
class LockUnlockLockDefaultIsolateThread : public JoinableThread {
public:
- explicit LockUnlockLockDefaultIsolateThread(v8::Handle<v8::Context> context)
+ explicit LockUnlockLockDefaultIsolateThread(v8::Local<v8::Context> context)
: JoinableThread("LockUnlockLockDefaultIsolateThread"),
context_(CcTest::isolate(), context) {}
@@ -603,7 +603,7 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(CcTest::isolate(), context_);
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
{
v8::Unlocker unlock1(CcTest::isolate());
@@ -614,7 +614,7 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(CcTest::isolate(), context_);
v8::Context::Scope context_scope(context);
- CalcFibAndCheck();
+ CalcFibAndCheck(context);
}
}
}
@@ -655,11 +655,12 @@ TEST(Regress1433) {
v8::Locker lock(isolate);
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
- v8::Handle<Context> context = v8::Context::New(isolate);
+ v8::Local<Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Handle<String> source = v8::String::NewFromUtf8(isolate, "1+1");
- v8::Handle<Script> script = v8::Script::Compile(source);
- v8::Handle<Value> result = script->Run();
+ v8::Local<String> source = v8_str("1+1");
+ v8::Local<Script> script =
+ v8::Script::Compile(context, source).ToLocalChecked();
+ v8::Local<Value> result = script->Run(context).ToLocalChecked();
v8::String::Utf8Value utf8(result);
}
isolate->Dispose();
@@ -686,11 +687,12 @@ class IsolateGenesisThread : public JoinableThread {
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope isolate_scope(isolate);
- CHECK(!i::Isolate::Current()->has_installed_extensions());
+ CHECK(
+ !reinterpret_cast<i::Isolate*>(isolate)->has_installed_extensions());
v8::ExtensionConfiguration extensions(count_, extension_names_);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate, &extensions);
- CHECK(i::Isolate::Current()->has_installed_extensions());
+ CHECK(reinterpret_cast<i::Isolate*>(isolate)->has_installed_extensions());
}
isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index b9147f82e1..7254ee084f 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -65,7 +65,8 @@ static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
static bool IsAddressWithinFuncCode(v8::Local<v8::Context> context,
const char* func_name,
Address addr) {
- v8::Local<v8::Value> func = context->Global()->Get(v8_str(func_name));
+ v8::Local<v8::Value> func =
+ context->Global()->Get(context, v8_str(func_name)).ToLocalChecked();
CHECK(func->IsFunction());
JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func));
return IsAddressWithinFuncCode(js_func, addr);
@@ -85,15 +86,18 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::StackFrame* calling_frame = frame_iterator.frame();
CHECK(calling_frame->is_java_script());
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
#if defined(V8_HOST_ARCH_32_BIT)
int32_t low_bits = reinterpret_cast<int32_t>(calling_frame->fp());
- args.This()->Set(v8_str("low_bits"), v8_num(low_bits >> 1));
+ args.This()
+ ->Set(context, v8_str("low_bits"), v8_num(low_bits >> 1))
+ .FromJust();
#elif defined(V8_HOST_ARCH_64_BIT)
uint64_t fp = reinterpret_cast<uint64_t>(calling_frame->fp());
int32_t low_bits = static_cast<int32_t>(fp & 0xffffffff);
int32_t high_bits = static_cast<int32_t>(fp >> 32);
- args.This()->Set(v8_str("low_bits"), v8_num(low_bits));
- args.This()->Set(v8_str("high_bits"), v8_num(high_bits));
+ args.This()->Set(context, v8_str("low_bits"), v8_num(low_bits)).FromJust();
+ args.This()->Set(context, v8_str("high_bits"), v8_num(high_bits)).FromJust();
#else
#error Host architecture is neither 32-bit nor 64-bit.
#endif
@@ -107,8 +111,9 @@ void CreateFramePointerGrabberConstructor(v8::Local<v8::Context> context,
Local<v8::FunctionTemplate> constructor_template =
v8::FunctionTemplate::New(context->GetIsolate(), construct_call);
constructor_template->SetClassName(v8_str("FPGrabber"));
- Local<Function> fun = constructor_template->GetFunction();
- context->Global()->Set(v8_str(constructor_name), fun);
+ Local<Function> fun =
+ constructor_template->GetFunction(context).ToLocalChecked();
+ context->Global()->Set(context, v8_str(constructor_name), fun).FromJust();
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 925803cc15..adbd1a5a37 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -85,7 +85,7 @@ class ScopedLoggerInitializer {
i::FLAG_log = saved_log_;
}
- v8::Handle<v8::Context>& env() { return env_; }
+ v8::Local<v8::Context>& env() { return env_; }
v8::Isolate* isolate() { return isolate_; }
@@ -106,7 +106,7 @@ class ScopedLoggerInitializer {
v8::Isolate* isolate_;
v8::Isolate::Scope isolate_scope_;
v8::HandleScope scope_;
- v8::Handle<v8::Context> env_;
+ v8::Local<v8::Context> env_;
Logger* logger_;
DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
@@ -307,18 +307,21 @@ class SimpleExternalString : public v8::String::ExternalStringResource {
TEST(Issue23768) {
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::Context> env = v8::Context::New(CcTest::isolate());
+ v8::Local<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
SimpleExternalString source_ext_str("(function ext() {})();");
v8::Local<v8::String> source =
- v8::String::NewExternal(CcTest::isolate(), &source_ext_str);
+ v8::String::NewExternalTwoByte(CcTest::isolate(), &source_ext_str)
+ .ToLocalChecked();
// Script needs to have a name in order to trigger InitLineEnds execution.
- v8::Handle<v8::String> origin =
- v8::String::NewFromUtf8(CcTest::isolate(), "issue-23768-test");
- v8::Handle<v8::Script> evil_script = CompileWithOrigin(source, origin);
+ v8::Local<v8::String> origin =
+ v8::String::NewFromUtf8(CcTest::isolate(), "issue-23768-test",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::Local<v8::Script> evil_script = CompileWithOrigin(source, origin);
CHECK(!evil_script.IsEmpty());
- CHECK(!evil_script->Run().IsEmpty());
+ CHECK(!evil_script->Run(env).IsEmpty());
i::Handle<i::ExternalTwoByteString> i_source(
i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
// This situation can happen if source was an external string disposed
@@ -346,14 +349,18 @@ TEST(LogCallbacks) {
v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
isolate, v8::FunctionTemplate::New(isolate));
obj->SetClassName(v8_str("Obj"));
- v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
+ v8::Local<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
v8::Local<v8::Signature> signature = v8::Signature::New(isolate, obj);
proto->Set(v8_str("method1"),
v8::FunctionTemplate::New(isolate, ObjMethod1,
- v8::Handle<v8::Value>(), signature),
+ v8::Local<v8::Value>(), signature),
static_cast<v8::PropertyAttribute>(v8::DontDelete));
- initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
+ initialize_logger.env()
+ ->Global()
+ ->Set(initialize_logger.env(), v8_str("Obj"),
+ obj->GetFunction(initialize_logger.env()).ToLocalChecked())
+ .FromJust();
CompileRun("Obj.prototype.method1.toString();");
logger->LogCompiledFunctions();
@@ -401,7 +408,7 @@ TEST(LogAccessorCallbacks) {
v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
isolate, v8::FunctionTemplate::New(isolate));
obj->SetClassName(v8_str("Obj"));
- v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
+ v8::Local<v8::ObjectTemplate> inst = obj->InstanceTemplate();
inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
inst->SetAccessor(v8_str("prop2"), Prop2Getter);
@@ -475,29 +482,37 @@ TEST(EquivalenceOfLoggingAndTraversal) {
i::Vector<const char> log(
i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
CHECK(exists);
- v8::Handle<v8::String> log_str = v8::String::NewFromUtf8(
- isolate, log.start(), v8::String::kNormalString, log.length());
- initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
+ v8::Local<v8::String> log_str =
+ v8::String::NewFromUtf8(isolate, log.start(),
+ v8::NewStringType::kNormal, log.length())
+ .ToLocalChecked();
+ initialize_logger.env()
+ ->Global()
+ ->Set(initialize_logger.env(), v8_str("_log"), log_str)
+ .FromJust();
i::Vector<const char> source = TestSources::GetScriptsSource();
- v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
- isolate, source.start(), v8::String::kNormalString, source.length());
+ v8::Local<v8::String> source_str =
+ v8::String::NewFromUtf8(isolate, source.start(),
+ v8::NewStringType::kNormal, source.length())
+ .ToLocalChecked();
v8::TryCatch try_catch(isolate);
- v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
+ v8::Local<v8::Script> script = CompileWithOrigin(source_str, "");
if (script.IsEmpty()) {
v8::String::Utf8Value exception(try_catch.Exception());
printf("compile: %s\n", *exception);
CHECK(false);
}
- v8::Handle<v8::Value> result = script->Run();
- if (result.IsEmpty()) {
+ v8::Local<v8::Value> result;
+ if (!script->Run(initialize_logger.env()).ToLocal(&result)) {
v8::String::Utf8Value exception(try_catch.Exception());
printf("run: %s\n", *exception);
CHECK(false);
}
// The result either be a "true" literal or problem description.
if (!result->IsTrue()) {
- v8::Local<v8::String> s = result->ToString(isolate);
+ v8::Local<v8::String> s =
+ result->ToString(initialize_logger.env()).ToLocalChecked();
i::ScopedVector<char> data(s->Utf8Length() + 1);
CHECK(data.start());
s->WriteUtf8(data.start());
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index b655fc80f9..24ab60e972 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -61,7 +61,7 @@ static bool all_zeroes(const byte* beg, const byte* end) {
TEST(CopyBytes) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
const int data_size = 1 * KB;
@@ -81,7 +81,8 @@ TEST(CopyBytes) {
byte* r0_;
byte* r1_;
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of R0 and
@@ -112,8 +113,8 @@ TEST(CopyBytes) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
- (void) CALL_GENERATED_CODE(f, reinterpret_cast<int>(src),
- reinterpret_cast<int>(dest), size, 0, 0);
+ (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int>(src),
+ reinterpret_cast<int>(dest), size, 0, 0);
// R0 and R1 should point at the first byte after the copied data.
CHECK_EQ(src + size, r0_);
CHECK_EQ(dest + size, r1_);
@@ -144,7 +145,8 @@ TEST(LoadAndStoreWithRepresentation) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ sub(sp, sp, Operand(1 * kPointerSize));
Label exit;
@@ -221,7 +223,7 @@ TEST(LoadAndStoreWithRepresentation) {
// Call the function from C++.
F5 f = FUNCTION_CAST<F5>(code->entry());
- CHECK(!CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0));
+ CHECK(!CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
}
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-ia32.cc b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
index 3834b18798..829ada3a61 100644
--- a/deps/v8/test/cctest/test-macro-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
@@ -56,7 +56,8 @@ TEST(LoadAndStoreWithRepresentation) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ push(ebx);
__ push(edx);
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 515bac9d3a..696ca010ca 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -62,7 +62,7 @@ static bool all_zeroes(const byte* beg, const byte* end) {
TEST(CopyBytes) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
const int data_size = 1 * KB;
@@ -82,7 +82,8 @@ TEST(CopyBytes) {
byte* a0_;
byte* a1_;
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of a0 and
@@ -113,8 +114,8 @@ TEST(CopyBytes) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
- (void) CALL_GENERATED_CODE(f, reinterpret_cast<int>(src),
- reinterpret_cast<int>(dest), size, 0, 0);
+ (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int>(src),
+ reinterpret_cast<int>(dest), size, 0, 0);
// a0 and a1 should point at the first byte after the copied data.
CHECK_EQ(src + size, a0_);
CHECK_EQ(dest + size, a1_);
@@ -142,11 +143,11 @@ static void TestNaN(const char *code) {
v8::Local<v8::Context> context = CcTest::NewContext(PRINT_EXTENSION);
v8::Context::Scope context_scope(context);
- v8::Local<v8::Script> script = v8::Script::Compile(v8_str(code));
- v8::Local<v8::Object> result = v8::Local<v8::Object>::Cast(script->Run());
- // Have to populate the handle manually, as it's not Cast-able.
- i::Handle<i::JSObject> o =
- v8::Utils::OpenHandle<v8::Object, i::JSObject>(result);
+ v8::Local<v8::Script> script =
+ v8::Script::Compile(context, v8_str(code)).ToLocalChecked();
+ v8::Local<v8::Object> result =
+ v8::Local<v8::Object>::Cast(script->Run(context).ToLocalChecked());
+ i::Handle<i::JSReceiver> o = v8::Utils::OpenHandle(*result);
i::Handle<i::JSArray> array1(reinterpret_cast<i::JSArray*>(*o));
i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
double value = a->get_scalar(0);
@@ -184,7 +185,8 @@ TEST(jump_tables4) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
@@ -251,11 +253,164 @@ TEST(jump_tables4) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
- int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ int res =
+ reinterpret_cast<int>(CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %d\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+TEST(jump_tables5) {
+ if (!IsMipsArchVariant(kMips32r6)) return;
+
+ // Similar to test-assembler-mips jump_tables1, with extra test for emitting a
+ // compact branch instruction before emission of the dd table.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+ Label done;
+
+ __ addiu(sp, sp, -4);
+ __ sw(ra, MemOperand(sp));
+
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 7 + 1);
+ PredictableCodeSizeScope predictable(
+ masm, kNumCases * kPointerSize + ((7 + 1) * Assembler::kInstrSize));
+ Label here;
+
+ __ bal(&here);
+ __ sll(at, a0, 2); // In delay slot.
+ __ bind(&here);
+ __ addu(at, at, ra);
+ __ lw(at, MemOperand(at, 6 * Assembler::kInstrSize));
+ __ jalr(at);
+ __ nop(); // Branch delay slot nop.
+ __ bc(&done);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(v0, (values[i] >> 16) & 0xffff);
+ __ ori(v0, v0, values[i] & 0xffff);
+ __ jr(ra);
+ __ nop();
+ }
+
+ __ bind(&done);
+ __ lw(ra, MemOperand(sp));
+ __ addiu(sp, sp, 4);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int32_t res = reinterpret_cast<int32_t>(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %d\n", i, res);
CHECK_EQ(values[i], res);
}
}
+static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ Lsa(v0, a0, a1, sa);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assembler.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(Lsa) {
+ CcTest::InitializeVM();
+ struct TestCaseLsa {
+ int32_t rt;
+ int32_t rs;
+ uint8_t sa;
+ uint32_t expected_res;
+ };
+
+ struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
+ {0x4, 0x1, 1, 0x6},
+ {0x4, 0x1, 2, 0x8},
+ {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 4, 0x14},
+ {0x4, 0x1, 5, 0x24},
+ {0x0, 0x1, 1, 0x2},
+ {0x0, 0x1, 2, 0x4},
+ {0x0, 0x1, 3, 0x8},
+ {0x0, 0x1, 4, 0x10},
+ {0x0, 0x1, 5, 0x20},
+ {0x4, 0x0, 1, 0x4},
+ {0x4, 0x0, 2, 0x4},
+ {0x4, 0x0, 3, 0x4},
+ {0x4, 0x0, 4, 0x4},
+ {0x4, 0x0, 5, 0x4},
+
+ // Shift overflow.
+ {0x4, INT32_MAX, 1, 0x2},
+ {0x4, INT32_MAX >> 1, 2, 0x0},
+ {0x4, INT32_MAX >> 2, 3, 0xfffffffc},
+ {0x4, INT32_MAX >> 3, 4, 0xfffffff4},
+ {0x4, INT32_MAX >> 4, 5, 0xffffffe4},
+
+ // Signed addition overflow.
+ {INT32_MAX - 1, 0x1, 1, 0x80000000},
+ {INT32_MAX - 3, 0x1, 2, 0x80000000},
+ {INT32_MAX - 7, 0x1, 3, 0x80000000},
+ {INT32_MAX - 15, 0x1, 4, 0x80000000},
+ {INT32_MAX - 31, 0x1, 5, 0x80000000},
+
+ // Addition overflow.
+ {-2, 0x1, 1, 0x0},
+ {-4, 0x1, 2, 0x0},
+ {-8, 0x1, 3, 0x0},
+ {-16, 0x1, 4, 0x0},
+ {-32, 0x1, 5, 0x0}};
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint32_t res = run_lsa(tc[i].rt, tc[i].rs, tc[i].sa);
+ PrintF("0x%x =? 0x%x == lsa(v0, %x, %x, %hhu)\n", tc[i].expected_res, res,
+ tc[i].rt, tc[i].rs, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index fadd45f43b..684b554236 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -63,7 +63,7 @@ static bool all_zeroes(const byte* beg, const byte* end) {
TEST(CopyBytes) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
const int data_size = 1 * KB;
@@ -83,7 +83,8 @@ TEST(CopyBytes) {
byte* a0_;
byte* a1_;
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
// Code to be generated: The stuff in CopyBytes followed by a store of a0 and
@@ -114,9 +115,8 @@ TEST(CopyBytes) {
for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
memset(dest_buffer, 0, data_size);
CHECK(dest + size < dest_buffer + data_size);
- (void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(src),
- reinterpret_cast<int64_t>(dest),
- size, 0, 0);
+ (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(src),
+ reinterpret_cast<int64_t>(dest), size, 0, 0);
// a0 and a1 should point at the first byte after the copied data.
CHECK_EQ(src + size, a0_);
CHECK_EQ(dest + size, a1_);
@@ -138,7 +138,7 @@ TEST(CopyBytes) {
TEST(LoadConstants) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
int64_t refConstants[64];
@@ -149,7 +149,8 @@ TEST(LoadConstants) {
refConstants[i] = ~(mask << i);
}
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
__ mov(a4, a0);
@@ -169,8 +170,8 @@ TEST(LoadConstants) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
- (void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(result),
- 0, 0, 0, 0);
+ (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(result), 0, 0,
+ 0, 0);
// Check results.
for (int i = 0; i < 64; i++) {
CHECK(refConstants[i] == result[i]);
@@ -180,10 +181,11 @@ TEST(LoadConstants) {
TEST(LoadAddress) {
CcTest::InitializeVM();
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
Label to_jump, skip;
__ mov(a4, a0);
@@ -213,7 +215,7 @@ TEST(LoadAddress) {
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
::F f = FUNCTION_CAST< ::F>(code->entry());
- (void) CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0);
+ (void)CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0);
// Check results.
}
@@ -226,7 +228,8 @@ TEST(jump_tables4) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0);
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
const int kNumCases = 512;
@@ -237,9 +240,6 @@ TEST(jump_tables4) {
__ daddiu(sp, sp, -8);
__ sd(ra, MemOperand(sp));
- if ((masm->pc_offset() & 7) == 0) {
- __ nop();
- }
__ mov(v0, zero_reg);
@@ -252,6 +252,7 @@ TEST(jump_tables4) {
__ addiu(v0, v0, 1);
}
+ __ Align(8);
Label done;
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
@@ -296,11 +297,251 @@ TEST(jump_tables4) {
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
- int64_t res =
- reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, i, 0, 0, 0, 0));
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
::printf("f(%d) = %" PRId64 "\n", i, res);
CHECK_EQ(values[i], res);
}
}
+
+TEST(jump_tables5) {
+ if (kArchVariant != kMips64r6) return;
+
+ // Similar to test-assembler-mips jump_tables1, with extra test for emitting a
+ // compact branch instruction before emission of the dd table.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+ Label done;
+
+ __ daddiu(sp, sp, -8);
+ __ sd(ra, MemOperand(sp));
+
+ __ Align(8);
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 7 + 1);
+ PredictableCodeSizeScope predictable(
+ masm, kNumCases * kPointerSize + ((7 + 1) * Assembler::kInstrSize));
+ Label here;
+
+ __ bal(&here);
+ __ dsll(at, a0, 3); // In delay slot.
+ __ bind(&here);
+ __ daddu(at, at, ra);
+ __ ld(at, MemOperand(at, 6 * Assembler::kInstrSize));
+ __ jalr(at);
+ __ nop(); // Branch delay slot nop.
+ __ bc(&done);
+ // A nop instruction must be generated by the forbidden slot guard
+ // (Assembler::dd(Label*)) so the first label goes to an 8 bytes aligned
+ // location.
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lui(v0, (values[i] >> 16) & 0xffff);
+ __ ori(v0, v0, values[i] & 0xffff);
+ __ jr(ra);
+ __ nop();
+ }
+
+ __ bind(&done);
+ __ ld(ra, MemOperand(sp));
+ __ daddiu(sp, sp, 8);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(
+ CALL_GENERATED_CODE(isolate, f, i, 0, 0, 0, 0));
+ ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+
+static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ Lsa(v0, a0, a1, sa);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assembler.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(Lsa) {
+ CcTest::InitializeVM();
+ struct TestCaseLsa {
+ int32_t rt;
+ int32_t rs;
+ uint8_t sa;
+ uint64_t expected_res;
+ };
+
+ struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
+ {0x4, 0x1, 1, 0x6},
+ {0x4, 0x1, 2, 0x8},
+ {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 4, 0x14},
+ {0x4, 0x1, 5, 0x24},
+ {0x0, 0x1, 1, 0x2},
+ {0x0, 0x1, 2, 0x4},
+ {0x0, 0x1, 3, 0x8},
+ {0x0, 0x1, 4, 0x10},
+ {0x0, 0x1, 5, 0x20},
+ {0x4, 0x0, 1, 0x4},
+ {0x4, 0x0, 2, 0x4},
+ {0x4, 0x0, 3, 0x4},
+ {0x4, 0x0, 4, 0x4},
+ {0x4, 0x0, 5, 0x4},
+
+ // Shift overflow.
+ {0x4, INT32_MAX, 1, 0x2},
+ {0x4, INT32_MAX >> 1, 2, 0x0},
+ {0x4, INT32_MAX >> 2, 3, 0xfffffffffffffffc},
+ {0x4, INT32_MAX >> 3, 4, 0xfffffffffffffff4},
+ {0x4, INT32_MAX >> 4, 5, 0xffffffffffffffe4},
+
+ // Signed addition overflow.
+ {INT32_MAX - 1, 0x1, 1, 0xffffffff80000000},
+ {INT32_MAX - 3, 0x1, 2, 0xffffffff80000000},
+ {INT32_MAX - 7, 0x1, 3, 0xffffffff80000000},
+ {INT32_MAX - 15, 0x1, 4, 0xffffffff80000000},
+ {INT32_MAX - 31, 0x1, 5, 0xffffffff80000000},
+
+ // Addition overflow.
+ {-2, 0x1, 1, 0x0},
+ {-4, 0x1, 2, 0x0},
+ {-8, 0x1, 3, 0x0},
+ {-16, 0x1, 4, 0x0},
+ {-32, 0x1, 5, 0x0}};
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_lsa(tc[i].rt, tc[i].rs, tc[i].sa);
+ PrintF("0x%" PRIx64 " =? 0x%" PRIx64 " == Lsa(v0, %x, %x, %hhu)\n",
+ tc[i].expected_res, res, tc[i].rt, tc[i].rs, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+
+static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ Dlsa(v0, a0, a1, sa);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assembler.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ ::F f = FUNCTION_CAST<::F>(code->entry());
+
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, rt, rs, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(Dlsa) {
+ CcTest::InitializeVM();
+ struct TestCaseLsa {
+ int64_t rt;
+ int64_t rs;
+ uint8_t sa;
+ uint64_t expected_res;
+ };
+
+ struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res
+ {0x4, 0x1, 1, 0x6},
+ {0x4, 0x1, 2, 0x8},
+ {0x4, 0x1, 3, 0xc},
+ {0x4, 0x1, 4, 0x14},
+ {0x4, 0x1, 5, 0x24},
+ {0x0, 0x1, 1, 0x2},
+ {0x0, 0x1, 2, 0x4},
+ {0x0, 0x1, 3, 0x8},
+ {0x0, 0x1, 4, 0x10},
+ {0x0, 0x1, 5, 0x20},
+ {0x4, 0x0, 1, 0x4},
+ {0x4, 0x0, 2, 0x4},
+ {0x4, 0x0, 3, 0x4},
+ {0x4, 0x0, 4, 0x4},
+ {0x4, 0x0, 5, 0x4},
+
+ // Shift overflow.
+ {0x4, INT64_MAX, 1, 0x2},
+ {0x4, INT64_MAX >> 1, 2, 0x0},
+ {0x4, INT64_MAX >> 2, 3, 0xfffffffffffffffc},
+ {0x4, INT64_MAX >> 3, 4, 0xfffffffffffffff4},
+ {0x4, INT64_MAX >> 4, 5, 0xffffffffffffffe4},
+
+ // Signed addition overflow.
+ {INT64_MAX - 1, 0x1, 1, 0x8000000000000000},
+ {INT64_MAX - 3, 0x1, 2, 0x8000000000000000},
+ {INT64_MAX - 7, 0x1, 3, 0x8000000000000000},
+ {INT64_MAX - 15, 0x1, 4, 0x8000000000000000},
+ {INT64_MAX - 31, 0x1, 5, 0x8000000000000000},
+
+ // Addition overflow.
+ {-2, 0x1, 1, 0x0},
+ {-4, 0x1, 2, 0x0},
+ {-8, 0x1, 3, 0x0},
+ {-16, 0x1, 4, 0x0},
+ {-32, 0x1, 5, 0x0}};
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_dlsa(tc[i].rt, tc[i].rs, tc[i].sa);
+ PrintF("0x%" PRIx64 " =? 0x%" PRIx64 " == Dlsa(v0, %" PRIx64 ", %" PRIx64
+ ", %hhu)\n",
+ tc[i].expected_res, res, tc[i].rt, tc[i].rs, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 4cc52a11e2..612f9e88a3 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -151,7 +151,8 @@ TEST(SmiMove) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
Label exit;
@@ -195,7 +196,7 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ movl(rax, Immediate(id + 2));
__ j(less_equal, exit);
} else {
- DCHECK_EQ(x, y);
+ CHECK_EQ(x, y);
__ movl(rax, Immediate(id + 3));
__ j(not_equal, exit);
}
@@ -212,7 +213,7 @@ void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
__ movl(rax, Immediate(id + 9));
__ j(greater_equal, exit);
} else {
- DCHECK(y > x);
+ CHECK(y > x);
__ movl(rax, Immediate(id + 10));
__ j(less_equal, exit);
}
@@ -236,7 +237,8 @@ TEST(SmiCompare) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -284,7 +286,8 @@ TEST(Integer32ToSmi) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -382,7 +385,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
int64_t x,
int y) {
int64_t result = x + y;
- DCHECK(Smi::IsValid(result));
+ CHECK(Smi::IsValid(result));
__ movl(rax, Immediate(id));
__ Move(r8, Smi::FromInt(static_cast<int>(result)));
__ movq(rcx, x);
@@ -410,7 +413,8 @@ TEST(Integer64PlusConstantToSmi) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -452,7 +456,8 @@ TEST(SmiCheck) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -671,7 +676,8 @@ TEST(SmiNeg) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -769,7 +775,7 @@ static void SmiAddOverflowTest(MacroAssembler* masm,
int id,
int x) {
// Adds a Smi to x so that the addition overflows.
- DCHECK(x != 0); // Can't overflow by adding a Smi.
+ CHECK(x != 0); // Can't overflow by adding a Smi.
int y_max = (x > 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue - x - 1);
int y_min = (x > 0) ? (Smi::kMaxValue - x + 1) : (Smi::kMinValue + 0);
@@ -882,7 +888,8 @@ TEST(SmiAdd) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -983,7 +990,7 @@ static void SmiSubOverflowTest(MacroAssembler* masm,
int id,
int x) {
// Subtracts a Smi from x so that the subtraction overflows.
- DCHECK(x != -1); // Can't overflow by subtracting a Smi.
+ CHECK(x != -1); // Can't overflow by subtracting a Smi.
int y_max = (x < 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue + 0);
int y_min = (x < 0) ? (Smi::kMaxValue + x + 2) : (Smi::kMinValue + x);
@@ -1098,7 +1105,8 @@ TEST(SmiSub) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1186,7 +1194,8 @@ TEST(SmiMul) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1289,7 +1298,8 @@ TEST(SmiDiv) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1396,7 +1406,8 @@ TEST(SmiMod) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1447,7 +1458,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
for (int i = 0; i < 8; i++) {
__ Move(rcx, Smi::FromInt(x));
SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
- DCHECK(index.reg.is(rcx) || index.reg.is(rdx));
+ CHECK(index.reg.is(rcx) || index.reg.is(rdx));
__ shlq(index.reg, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(x) << i);
__ cmpq(index.reg, r8);
@@ -1455,7 +1466,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
index = masm->SmiToIndex(rcx, rcx, i);
- DCHECK(index.reg.is(rcx));
+ CHECK(index.reg.is(rcx));
__ shlq(rcx, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(x) << i);
__ cmpq(rcx, r8);
@@ -1464,7 +1475,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
__ Move(rcx, Smi::FromInt(x));
index = masm->SmiToNegativeIndex(rdx, rcx, i);
- DCHECK(index.reg.is(rcx) || index.reg.is(rdx));
+ CHECK(index.reg.is(rcx) || index.reg.is(rdx));
__ shlq(index.reg, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(-x) << i);
__ cmpq(index.reg, r8);
@@ -1472,7 +1483,7 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
__ incq(rax);
__ Move(rcx, Smi::FromInt(x));
index = masm->SmiToNegativeIndex(rcx, rcx, i);
- DCHECK(index.reg.is(rcx));
+ CHECK(index.reg.is(rcx));
__ shlq(rcx, Immediate(index.scale));
__ Set(r8, static_cast<intptr_t>(-x) << i);
__ cmpq(rcx, r8);
@@ -1490,7 +1501,8 @@ TEST(SmiIndex) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1556,7 +1568,8 @@ TEST(SmiSelectNonSmi) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1632,7 +1645,8 @@ TEST(SmiAnd) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1710,7 +1724,8 @@ TEST(SmiOr) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1790,7 +1805,8 @@ TEST(SmiXor) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1854,7 +1870,8 @@ TEST(SmiNot) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -1947,7 +1964,8 @@ TEST(SmiShiftLeft) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -2050,7 +2068,8 @@ TEST(SmiShiftLogicalRight) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -2116,7 +2135,8 @@ TEST(SmiShiftArithmeticRight) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -2144,7 +2164,7 @@ TEST(SmiShiftArithmeticRight) {
void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
- DCHECK(x >= 0);
+ CHECK(x >= 0);
int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
int power_count = 8;
__ movl(rax, Immediate(id));
@@ -2177,7 +2197,8 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
EntryCode(masm);
@@ -2217,7 +2238,8 @@ TEST(OperandOffset) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
Label exit;
@@ -2567,7 +2589,8 @@ TEST(LoadAndStoreWithRepresentation) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
EntryCode(masm);
__ subq(rsp, Immediate(1 * kPointerSize));
diff --git a/deps/v8/test/cctest/test-macro-assembler-x87.cc b/deps/v8/test/cctest/test-macro-assembler-x87.cc
index 3cee27add0..ac2a8e3917 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x87.cc
@@ -56,7 +56,8 @@ TEST(LoadAndStoreWithRepresentation) {
CHECK(buffer);
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
__ push(ebx);
__ push(edx);
diff --git a/deps/v8/test/cctest/test-microtask-delivery.cc b/deps/v8/test/cctest/test-microtask-delivery.cc
index 415be3caf2..ecec77fbfd 100644
--- a/deps/v8/test/cctest/test-microtask-delivery.cc
+++ b/deps/v8/test/cctest/test-microtask-delivery.cc
@@ -55,6 +55,7 @@ class HarmonyIsolate {
TEST(MicrotaskDeliverySimple) {
+ i::FLAG_harmony_object_observe = true;
HarmonyIsolate isolate;
v8::HandleScope scope(isolate.GetIsolate());
LocalContext context(isolate.GetIsolate());
@@ -82,28 +83,41 @@ TEST(MicrotaskDeliverySimple) {
"});"
"Object.observe(obj, observer);"
"obj.id = 1;");
- CHECK_EQ(6, CompileRun("ordering.length")->Int32Value());
- CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
- CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
- CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
- CHECK_EQ(4, CompileRun("ordering[3]")->Int32Value());
- CHECK_EQ(5, CompileRun("ordering[4]")->Int32Value());
- CHECK_EQ(6, CompileRun("ordering[5]")->Int32Value());
+ CHECK_EQ(
+ 6, CompileRun("ordering.length")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(1,
+ CompileRun("ordering[0]")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(2,
+ CompileRun("ordering[1]")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(3,
+ CompileRun("ordering[2]")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(4,
+ CompileRun("ordering[3]")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(5,
+ CompileRun("ordering[4]")->Int32Value(context.local()).FromJust());
+ CHECK_EQ(6,
+ CompileRun("ordering[5]")->Int32Value(context.local()).FromJust());
}
TEST(MicrotaskPerIsolateState) {
+ i::FLAG_harmony_object_observe = true;
HarmonyIsolate isolate;
v8::HandleScope scope(isolate.GetIsolate());
LocalContext context1(isolate.GetIsolate());
isolate.GetIsolate()->SetAutorunMicrotasks(false);
CompileRun(
"var obj = { calls: 0 };");
- v8::Handle<v8::Value> obj = CompileRun("obj");
+ v8::Local<v8::Value> obj = CompileRun("obj");
{
LocalContext context2(isolate.GetIsolate());
- context2->Global()->Set(
- v8::String::NewFromUtf8(isolate.GetIsolate(), "obj"), obj);
+ context2->Global()
+ ->Set(context2.local(),
+ v8::String::NewFromUtf8(isolate.GetIsolate(), "obj",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ obj)
+ .FromJust();
CompileRun(
"var resolver = {};"
"new Promise(function(resolve) {"
@@ -117,8 +131,13 @@ TEST(MicrotaskPerIsolateState) {
}
{
LocalContext context3(isolate.GetIsolate());
- context3->Global()->Set(
- v8::String::NewFromUtf8(isolate.GetIsolate(), "obj"), obj);
+ context3->Global()
+ ->Set(context3.local(),
+ v8::String::NewFromUtf8(isolate.GetIsolate(), "obj",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ obj)
+ .FromJust();
CompileRun(
"var foo = { id: 1 };"
"Object.observe(foo, function() {"
@@ -128,9 +147,15 @@ TEST(MicrotaskPerIsolateState) {
}
{
LocalContext context4(isolate.GetIsolate());
- context4->Global()->Set(
- v8::String::NewFromUtf8(isolate.GetIsolate(), "obj"), obj);
+ context4->Global()
+ ->Set(context4.local(),
+ v8::String::NewFromUtf8(isolate.GetIsolate(), "obj",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked(),
+ obj)
+ .FromJust();
isolate.GetIsolate()->RunMicrotasks();
- CHECK_EQ(2, CompileRun("obj.calls")->Int32Value());
+ CHECK_EQ(2,
+ CompileRun("obj.calls")->Int32Value(context4.local()).FromJust());
}
}
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index e0d457f7a6..f0af22e27a 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -42,6 +39,7 @@ inline int32_t ToInt32(v8::Local<v8::Value> value) {
TEST(PerIsolateState) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context1(CcTest::isolate());
@@ -105,6 +103,7 @@ TEST(PerIsolateState) {
TEST(EndOfMicrotaskDelivery) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -118,6 +117,7 @@ TEST(EndOfMicrotaskDelivery) {
TEST(DeliveryOrdering) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -149,6 +149,7 @@ TEST(DeliveryOrdering) {
TEST(DeliveryCallbackThrows) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -177,6 +178,7 @@ TEST(DeliveryCallbackThrows) {
TEST(DeliveryChangesMutationInCallback) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -202,6 +204,7 @@ TEST(DeliveryChangesMutationInCallback) {
TEST(DeliveryOrderingReentrant) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -233,6 +236,7 @@ TEST(DeliveryOrderingReentrant) {
TEST(DeliveryOrderingDeliverChangeRecords) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -257,6 +261,7 @@ TEST(DeliveryOrderingDeliverChangeRecords) {
TEST(ObjectHashTableGrowth) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
// Initializing this context sets up initial hash tables.
LocalContext context(CcTest::isolate());
@@ -352,6 +357,7 @@ static void ExpectRecords(v8::Isolate* isolate, Local<Value> records,
arraysize(expectations))
TEST(APITestBasicMutation) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* v8_isolate = CcTest::isolate();
HandleScope scope(v8_isolate);
LocalContext context(v8_isolate);
@@ -411,6 +417,7 @@ TEST(APITestBasicMutation) {
TEST(HiddenPrototypeObservation) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* v8_isolate = CcTest::isolate();
HandleScope scope(v8_isolate);
LocalContext context(v8_isolate);
@@ -473,6 +480,7 @@ static int NumberOfElements(i::Handle<i::JSWeakMap> map) {
TEST(ObservationWeakMap) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun(
@@ -543,6 +551,7 @@ static int TestObserveSecurity(Local<Context> observer_context,
TEST(ObserverSecurityAAA) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA = Context::New(isolate);
@@ -551,6 +560,7 @@ TEST(ObserverSecurityAAA) {
TEST(ObserverSecurityA1A2A3) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -568,6 +578,7 @@ TEST(ObserverSecurityA1A2A3) {
TEST(ObserverSecurityAAB) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA = Context::New(isolate);
@@ -577,6 +588,7 @@ TEST(ObserverSecurityAAB) {
TEST(ObserverSecurityA1A2B) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
@@ -593,6 +605,7 @@ TEST(ObserverSecurityA1A2B) {
TEST(ObserverSecurityABA) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA = Context::New(isolate);
@@ -602,6 +615,7 @@ TEST(ObserverSecurityABA) {
TEST(ObserverSecurityA1BA2) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA1 = Context::New(isolate);
@@ -617,6 +631,7 @@ TEST(ObserverSecurityA1BA2) {
TEST(ObserverSecurityBAA) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA = Context::New(isolate);
@@ -626,6 +641,7 @@ TEST(ObserverSecurityBAA) {
TEST(ObserverSecurityBA1A2) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA1 = Context::New(isolate);
@@ -641,6 +657,7 @@ TEST(ObserverSecurityBA1A2) {
TEST(ObserverSecurityNotify) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
v8::Local<Context> contextA = Context::New(isolate);
@@ -676,6 +693,7 @@ TEST(ObserverSecurityNotify) {
TEST(HiddenPropertiesLeakage) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun("var obj = {};"
@@ -697,6 +715,7 @@ TEST(HiddenPropertiesLeakage) {
TEST(GetNotifierFromOtherContext) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
LocalContext context(CcTest::isolate());
CompileRun("var obj = {};");
@@ -713,6 +732,7 @@ TEST(GetNotifierFromOtherContext) {
TEST(GetNotifierFromOtherOrigin) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
Local<Value> foo = v8_str("foo");
Local<Value> bar = v8_str("bar");
@@ -733,6 +753,7 @@ TEST(GetNotifierFromOtherOrigin) {
TEST(GetNotifierFromSameOrigin) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
Local<Value> foo = v8_str("foo");
LocalContext context(CcTest::isolate());
@@ -784,6 +805,7 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
TEST(DontLeakContextOnObserve) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
Local<Value> foo = v8_str("foo");
LocalContext context(CcTest::isolate());
@@ -804,11 +826,12 @@ TEST(DontLeakContextOnObserve) {
}
CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(1);
+ CheckSurvivingGlobalObjectsCount(0);
}
TEST(DontLeakContextOnGetNotifier) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
Local<Value> foo = v8_str("foo");
LocalContext context(CcTest::isolate());
@@ -827,11 +850,12 @@ TEST(DontLeakContextOnGetNotifier) {
}
CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(1);
+ CheckSurvivingGlobalObjectsCount(0);
}
TEST(DontLeakContextOnNotifierPerformChange) {
+ i::FLAG_harmony_object_observe = true;
HandleScope scope(CcTest::isolate());
Local<Value> foo = v8_str("foo");
LocalContext context(CcTest::isolate());
@@ -858,7 +882,7 @@ TEST(DontLeakContextOnNotifierPerformChange) {
}
CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(1);
+ CheckSurvivingGlobalObjectsCount(0);
}
@@ -869,6 +893,7 @@ static void ObserverCallback(const FunctionCallbackInfo<Value>& args) {
TEST(ObjectObserveCallsCppFunction) {
+ i::FLAG_harmony_object_observe = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext context(isolate);
@@ -891,6 +916,7 @@ TEST(ObjectObserveCallsCppFunction) {
TEST(ObjectObserveCallsFunctionTemplateInstance) {
+ i::FLAG_harmony_object_observe = true;
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext context(isolate);
@@ -926,6 +952,7 @@ static void AccessorSetter(Local<Name> property, Local<Value> value,
TEST(APIAccessorsShouldNotNotify) {
+ i::FLAG_harmony_object_observe = true;
Isolate* isolate = CcTest::isolate();
HandleScope handle_scope(isolate);
LocalContext context(isolate);
@@ -959,6 +986,7 @@ void MockUseCounterCallback(v8::Isolate* isolate,
TEST(UseCountObjectObserve) {
+ i::FLAG_harmony_object_observe = true;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
LocalContext env;
@@ -986,6 +1014,7 @@ TEST(UseCountObjectObserve) {
TEST(UseCountObjectGetNotifier) {
+ i::FLAG_harmony_object_observe = true;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
LocalContext env;
@@ -1005,6 +1034,7 @@ static bool NamedAccessCheckAlwaysAllow(Local<v8::Context> accessing_context,
TEST(DisallowObserveAccessCheckedObject) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
@@ -1026,6 +1056,7 @@ TEST(DisallowObserveAccessCheckedObject) {
TEST(DisallowGetNotifierAccessCheckedObject) {
+ i::FLAG_harmony_object_observe = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index dc4ddb0af6..7269e2d5b2 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -25,27 +25,24 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "src/v8.h"
-#include "src/ast.h"
-#include "src/ast-numbering.h"
-#include "src/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/ast-value-factory.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/objects.h"
-#include "src/parser.h"
-#include "src/preparser.h"
-#include "src/rewriter.h"
-#include "src/scanner-character-streams.h"
-#include "src/token.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparser.h"
+#include "src/parsing/rewriter.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/token.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
@@ -220,7 +217,7 @@ TEST(UsingCachedData) {
" 42: 'number literal', for: 'keyword as propertyName', "
" f\\u006fr: 'keyword propertyname with escape'};"
"var v = /RegExp Literal/;"
- "var w = /RegExp Literal\\u0020With Escape/gin;"
+ "var w = /RegExp Literal\\u0020With Escape/gi;"
"var y = { get getter() { return 42; }, "
" set setter(v) { this.value = v; }};"
"var f = a => function (b) { return a + b; };"
@@ -706,7 +703,7 @@ TEST(Utf8CharacterStream) {
cursor += unibrow::Utf8::Encode(buffer + cursor, i,
unibrow::Utf16::kNoPreviousCharacter, true);
}
- DCHECK(cursor == kAllUtf8CharsSizeU);
+ CHECK(cursor == kAllUtf8CharsSizeU);
i::Utf8ToUtf16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
kAllUtf8CharsSizeU);
@@ -802,8 +799,8 @@ TEST(StreamScanner) {
i::Token::EOS,
i::Token::ILLEGAL
};
- DCHECK_EQ('{', str2[19]);
- DCHECK_EQ('}', str2[37]);
+ CHECK_EQ('{', str2[19]);
+ CHECK_EQ('}', str2[37]);
TestStreamScanner(&stream2, expectations2, 20, 37);
const char* str3 = "{}}}}";
@@ -1151,12 +1148,23 @@ static void CheckParsesToNumber(const char* source, bool with_dot) {
TEST(ParseNumbers) {
+ CheckParsesToNumber("1.", true);
CheckParsesToNumber("1.34", true);
CheckParsesToNumber("134", false);
CheckParsesToNumber("134e44", false);
CheckParsesToNumber("134.e44", true);
CheckParsesToNumber("134.44e44", true);
CheckParsesToNumber(".44", true);
+
+ CheckParsesToNumber("-1.", true);
+ CheckParsesToNumber("-1.0", true);
+ CheckParsesToNumber("-1.34", true);
+ CheckParsesToNumber("-134", false);
+ CheckParsesToNumber("-134e44", false);
+ CheckParsesToNumber("-134.e44", true);
+ CheckParsesToNumber("-134.44e44", true);
+ CheckParsesToNumber("-.44", true);
+
CheckParsesToNumber("+x", true);
}
@@ -1402,13 +1410,13 @@ TEST(DiscardFunctionBody) {
// See comments in ParseFunctionLiteral in parser.cc.
const char* discard_sources[] = {
"(function f() { function g() { var a; } })();",
+ "(function f() { function g() { { function h() { } } } })();",
/* TODO(conradw): In future it may be possible to apply this optimisation
* to these productions.
"(function f() { 0, function g() { var a; } })();",
"(function f() { 0, { g() { var a; } } })();",
"(function f() { 0, class c { g() { var a; } } })();", */
- NULL
- };
+ NULL};
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -1440,6 +1448,7 @@ TEST(DiscardFunctionBody) {
} else {
// TODO(conradw): This path won't be hit until the other test cases can be
// uncommented.
+ UNREACHABLE();
CHECK_NOT_NULL(inner->body());
CHECK_GE(2, inner->body()->length());
i::Expression* exp = inner->body()->at(1)->AsExpressionStatement()->
@@ -1496,10 +1505,10 @@ enum ParserFlag {
kAllowLazy,
kAllowNatives,
kAllowHarmonyDefaultParameters,
- kAllowHarmonyRestParameters,
kAllowHarmonySloppy,
kAllowHarmonySloppyLet,
kAllowHarmonyDestructuring,
+ kAllowHarmonyDestructuringAssignment,
kAllowHarmonyNewTarget,
kAllowStrongMode,
kNoLegacyConst
@@ -1519,12 +1528,12 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
parser->set_allow_natives(flags.Contains(kAllowNatives));
parser->set_allow_harmony_default_parameters(
flags.Contains(kAllowHarmonyDefaultParameters));
- parser->set_allow_harmony_rest_parameters(
- flags.Contains(kAllowHarmonyRestParameters));
parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
parser->set_allow_harmony_sloppy_let(flags.Contains(kAllowHarmonySloppyLet));
- parser->set_allow_harmony_destructuring(
+ parser->set_allow_harmony_destructuring_bind(
flags.Contains(kAllowHarmonyDestructuring));
+ parser->set_allow_harmony_destructuring_assignment(
+ flags.Contains(kAllowHarmonyDestructuringAssignment));
parser->set_allow_strong_mode(flags.Contains(kAllowStrongMode));
parser->set_allow_legacy_const(!flags.Contains(kNoLegacyConst));
}
@@ -1532,17 +1541,19 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
void TestParserSyncWithFlags(i::Handle<i::String> source,
i::EnumSet<ParserFlag> flags,
- ParserSyncTestResult result) {
+ ParserSyncTestResult result,
+ bool is_module = false) {
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
int preparser_materialized_literals = -1;
int parser_materialized_literals = -2;
+ bool test_preparser = !is_module;
// Preparse the data.
i::CompleteParserRecorder log;
- {
+ if (test_preparser) {
i::Scanner scanner(isolate->unicode_cache());
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
i::Zone zone;
@@ -1556,7 +1567,6 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
&preparser_materialized_literals);
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
-
bool preparse_error = log.HasError();
// Parse the data
@@ -1567,7 +1577,11 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
SetParserFlags(&parser, flags);
- info.set_global();
+ if (is_module) {
+ info.set_module();
+ } else {
+ info.set_global();
+ }
parser.Parse(&info);
function = info.literal();
if (function) {
@@ -1596,7 +1610,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
CHECK(false);
}
- if (!preparse_error) {
+ if (test_preparser && !preparse_error) {
v8::base::OS::Print(
"Parser failed on:\n"
"\t%s\n"
@@ -1607,21 +1621,22 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
CHECK(false);
}
// Check that preparser and parser produce the same error.
- i::Handle<i::String> preparser_message =
- FormatMessage(log.ErrorMessageData());
- if (!i::String::Equals(message_string, preparser_message)) {
- v8::base::OS::Print(
- "Expected parser and preparser to produce the same error on:\n"
- "\t%s\n"
- "However, found the following error messages\n"
- "\tparser: %s\n"
- "\tpreparser: %s\n",
- source->ToCString().get(),
- message_string->ToCString().get(),
- preparser_message->ToCString().get());
- CHECK(false);
+ if (test_preparser) {
+ i::Handle<i::String> preparser_message =
+ FormatMessage(log.ErrorMessageData());
+ if (!i::String::Equals(message_string, preparser_message)) {
+ v8::base::OS::Print(
+ "Expected parser and preparser to produce the same error on:\n"
+ "\t%s\n"
+ "However, found the following error messages\n"
+ "\tparser: %s\n"
+ "\tpreparser: %s\n",
+ source->ToCString().get(), message_string->ToCString().get(),
+ preparser_message->ToCString().get());
+ CHECK(false);
+ }
}
- } else if (preparse_error) {
+ } else if (test_preparser && preparse_error) {
v8::base::OS::Print(
"Preparser failed on:\n"
"\t%s\n"
@@ -1638,7 +1653,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"However, parser and preparser succeeded",
source->ToCString().get());
CHECK(false);
- } else if (preparser_materialized_literals != parser_materialized_literals) {
+ } else if (test_preparser &&
+ preparser_materialized_literals != parser_materialized_literals) {
v8::base::OS::Print(
"Preparser materialized literals (%d) differ from Parser materialized "
"literals (%d) on:\n"
@@ -1651,14 +1667,14 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
}
-void TestParserSync(const char* source,
- const ParserFlag* varying_flags,
+void TestParserSync(const char* source, const ParserFlag* varying_flags,
size_t varying_flags_length,
ParserSyncTestResult result = kSuccessOrError,
const ParserFlag* always_true_flags = NULL,
size_t always_true_flags_length = 0,
const ParserFlag* always_false_flags = NULL,
- size_t always_false_flags_length = 0) {
+ size_t always_false_flags_length = 0,
+ bool is_module = false) {
i::Handle<i::String> str =
CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(source);
for (int bits = 0; bits < (1 << varying_flags_length); bits++) {
@@ -1675,7 +1691,7 @@ void TestParserSync(const char* source,
++flag_index) {
flags.Remove(always_false_flags[flag_index]);
}
- TestParserSyncWithFlags(str, flags, result);
+ TestParserSyncWithFlags(str, flags, result, is_module);
}
}
@@ -1819,12 +1835,11 @@ TEST(StrictOctal) {
void RunParserSyncTest(const char* context_data[][2],
const char* statement_data[],
ParserSyncTestResult result,
- const ParserFlag* flags = NULL,
- int flags_len = 0,
+ const ParserFlag* flags = NULL, int flags_len = 0,
const ParserFlag* always_true_flags = NULL,
int always_true_len = 0,
const ParserFlag* always_false_flags = NULL,
- int always_false_len = 0) {
+ int always_false_len = 0, bool is_module = false) {
v8::HandleScope handles(CcTest::isolate());
v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
@@ -1877,20 +1892,32 @@ void RunParserSyncTest(const char* context_data[][2],
statement_data[j],
context_data[i][1]);
CHECK(length == kProgramSize);
- TestParserSync(program.start(),
- flags,
- flags_len,
- result,
- always_true_flags,
- always_true_len,
- always_false_flags,
- always_false_len);
+ TestParserSync(program.start(), flags, flags_len, result,
+ always_true_flags, always_true_len, always_false_flags,
+ always_false_len, is_module);
}
}
delete[] generated_flags;
}
+void RunModuleParserSyncTest(const char* context_data[][2],
+ const char* statement_data[],
+ ParserSyncTestResult result,
+ const ParserFlag* flags = NULL, int flags_len = 0,
+ const ParserFlag* always_true_flags = NULL,
+ int always_true_len = 0,
+ const ParserFlag* always_false_flags = NULL,
+ int always_false_len = 0) {
+ bool flag = i::FLAG_harmony_modules;
+ i::FLAG_harmony_modules = true;
+ RunParserSyncTest(context_data, statement_data, result, flags, flags_len,
+ always_true_flags, always_true_len, always_false_flags,
+ always_false_len, true);
+ i::FLAG_harmony_modules = flag;
+}
+
+
TEST(ErrorsEvalAndArguments) {
// Tests that both preparsing and parsing produce the right kind of errors for
// using "eval" and "arguments" as identifiers. Without the strict mode, it's
@@ -2203,7 +2230,6 @@ TEST(NoErrorsYieldSloppyGeneratorsEnabled) {
"function foo(yield) { }",
"function foo(bar, yield) { }",
"function * yield() { }",
- "(function * yield() { })",
"yield = 1;",
"var foo = yield = 1;",
"yield * 2;",
@@ -2264,6 +2290,23 @@ TEST(ErrorsYieldStrict) {
}
+TEST(ErrorsYieldSloppy) {
+ const char* context_data[][2] = {
+ { "", "" },
+ { "function not_gen() {", "}" },
+ { "(function not_gen() {", "})" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "(function * yield() { })",
+ NULL
+ };
+
+ RunParserSyncTest(context_data, statement_data, kError);
+}
+
+
TEST(NoErrorsGenerator) {
const char* context_data[][2] = {
{ "function * gen() {", "}" },
@@ -2287,6 +2330,7 @@ TEST(NoErrorsGenerator) {
"yield 3; yield 4;",
"yield * 3; yield * 4;",
"(function (yield) { })",
+ "(function yield() { })",
"yield { yield: 12 }",
"yield /* comment */ { yield: 12 }",
"yield * \n { yield: 12 }",
@@ -2336,9 +2380,8 @@ TEST(ErrorsYieldGenerator) {
"var foo, yield;",
"try { } catch (yield) { }",
"function yield() { }",
- // The name of the NFE is let-bound in the generator, which does not permit
+ // The name of the NFE is bound in the generator, which does not permit
// yield to be an identifier.
- "(function yield() { })",
"(function * yield() { })",
// Yield isn't valid as a formal parameter for generators.
"function * foo(yield) { }",
@@ -2761,7 +2804,6 @@ TEST(NoErrorsRegexpLiteral) {
const char* statement_data[] = {
"/foo/",
"/foo/g",
- "/foo/whatever", // This is an error but not detected by the parser.
NULL
};
@@ -3298,8 +3340,8 @@ TEST(SerializationOfMaybeAssignmentFlag) {
script_scope->Initialize();
i::Scope* s =
i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
- DCHECK(s != script_scope);
- DCHECK(name != NULL);
+ CHECK(s != script_scope);
+ CHECK(name != NULL);
// Get result from h's function context (that is f's context)
i::Variable* var = s->Lookup(name);
@@ -3346,7 +3388,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
script_scope->Initialize();
i::Scope* s =
i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
- DCHECK(s != script_scope);
+ CHECK(s != script_scope);
const i::AstRawString* name_x = avf.GetOneByteString("x");
// Get result from f's function context (that is g's outer context)
@@ -3529,6 +3571,7 @@ TEST(UseAsmUseCount) {
TEST(UseConstLegacyCount) {
+ i::FLAG_legacy_const = true;
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
LocalContext env;
@@ -3643,9 +3686,7 @@ TEST(ErrorsArrowFormalParameters) {
nullptr
};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
- RunParserSyncTest(context_data, assignment_expression_suffix_data, kError,
- NULL, 0, always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, assignment_expression_suffix_data, kError);
}
@@ -3837,7 +3878,6 @@ TEST(NoErrorsArrowFunctions) {
};
static const ParserFlag always_flags[] = {kAllowHarmonyDefaultParameters,
- kAllowHarmonyRestParameters,
kAllowHarmonyDestructuring};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
@@ -3944,7 +3984,6 @@ TEST(ArrowFunctionsYieldParameterNameInGenerator) {
};
static const ParserFlag always_flags[] = { kAllowHarmonyDestructuring,
- kAllowHarmonyRestParameters,
kAllowStrongMode};
RunParserSyncTest(sloppy_function_context_data, arrow_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
@@ -5257,22 +5296,26 @@ TEST(ParseRestParameters) {
"/regexp/, 'str', function(){});"},
{NULL, NULL}};
- const char* data[] = {
- "...args",
- "a, ...args",
- "... args",
- "a, ... args",
- "...\targs",
- "a, ...\targs",
- "...\r\nargs",
- "a, ...\r\nargs",
- "...\rargs",
- "a, ...\rargs",
- "...\t\n\t\t\n args",
- "a, ... \n \n args",
- NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ const char* data[] = {"...args",
+ "a, ...args",
+ "... args",
+ "a, ... args",
+ "...\targs",
+ "a, ...\targs",
+ "...\r\nargs",
+ "a, ...\r\nargs",
+ "...\rargs",
+ "a, ...\rargs",
+ "...\t\n\t\t\n args",
+ "a, ... \n \n args",
+ "...{ length, 0: a, 1: b}",
+ "...{}",
+ "...[a, b]",
+ "...[]",
+ "...[...[a, b, ...c]]",
+ NULL};
+ static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5303,9 +5346,7 @@ TEST(ParseRestParametersErrors) {
"a,\ra, ...args",
"a,\na, ...args",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5320,8 +5361,7 @@ TEST(RestParameterInSetterMethodError) {
{nullptr, nullptr}};
const char* data[] = {"...a", "...arguments", "...eval", nullptr};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters,
- kAllowHarmonySloppy};
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
arraysize(always_flags));
}
@@ -5344,15 +5384,11 @@ TEST(RestParametersEvalArguments) {
"arguments, ...args",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
-
// Fail in strict mode
- RunParserSyncTest(strict_context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, data, kError);
// OK in sloppy mode
- RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, data, kSuccess);
}
@@ -5371,12 +5407,9 @@ TEST(RestParametersDuplicateEvalArguments) {
"arguments, arguments, ...args",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters};
-
// In strict mode, the error is using "eval" or "arguments" as parameter names
// In sloppy mode, the error is that eval / arguments are duplicated
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5718,10 +5751,7 @@ TEST(ModuleParsingInternals) {
i::Scope* outer_scope = module_scope->outer_scope();
CHECK(outer_scope->is_script_scope());
CHECK_NULL(outer_scope->outer_scope());
- CHECK_EQ(1, outer_scope->num_modules());
CHECK(module_scope->is_module_scope());
- CHECK_NOT_NULL(module_scope->module_var());
- CHECK_EQ(i::TEMPORARY, module_scope->module_var()->mode());
i::ModuleDescriptor* descriptor = module_scope->module();
CHECK_NOT_NULL(descriptor);
CHECK_EQ(1, descriptor->Length());
@@ -6213,8 +6243,7 @@ TEST(StrongConstructorDirective) {
"foo() { \"use strong\" } constructor() {}", NULL};
static const ParserFlag always_flags[] = {
- kAllowHarmonyRestParameters, kAllowHarmonySloppy, kAllowHarmonySloppyLet,
- kAllowStrongMode};
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kAllowStrongMode};
RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
arraysize(always_flags));
@@ -6397,6 +6426,7 @@ TEST(ArrowFunctionASIErrors) {
TEST(StrongModeFreeVariablesDeclaredByPreviousScript) {
i::FLAG_strong_mode = true;
+ i::FLAG_legacy_const = true;
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
@@ -6546,7 +6576,7 @@ TEST(StrongModeFreeVariablesNotDeclared) {
TEST(DestructuringPositiveTests) {
- i::FLAG_harmony_destructuring = true;
+ i::FLAG_harmony_destructuring_bind = true;
const char* context_data[][2] = {{"'use strict'; let ", " = {};"},
{"var ", " = {};"},
@@ -6605,7 +6635,7 @@ TEST(DestructuringPositiveTests) {
TEST(DestructuringNegativeTests) {
- i::FLAG_harmony_destructuring = true;
+ i::FLAG_harmony_destructuring_bind = true;
static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
{ // All modes.
@@ -6682,6 +6712,10 @@ TEST(DestructuringNegativeTests) {
"{ x : 'foo' }",
"{ x : /foo/ }",
"{ x : `foo` }",
+ "{ get a() {} }",
+ "{ set a() {} }",
+ "{ method() {} }",
+ "{ *method() {} }",
NULL};
// clang-format on
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
@@ -6775,8 +6809,376 @@ TEST(DestructuringNegativeTests) {
}
+TEST(DestructuringAssignmentPositiveTests) {
+ const char* context_data[][2] = {
+ {"'use strict'; let x, y, z; (", " = {});"},
+ {"var x, y, z; (", " = {});"},
+ {"'use strict'; let x, y, z; for (x in ", " = {});"},
+ {"'use strict'; let x, y, z; for (x of ", " = {});"},
+ {"var x, y, z; for (x in ", " = {});"},
+ {"var x, y, z; for (x of ", " = {});"},
+ {"var x, y, z; for (", " in {});"},
+ {"var x, y, z; for (", " of {});"},
+ {"'use strict'; var x, y, z; for (", " in {});"},
+ {"'use strict'; var x, y, z; for (", " of {});"},
+ {NULL, NULL}};
+
+ const char* mixed_assignments_context_data[][2] = {
+ {"'use strict'; let x, y, z; (", " = z = {});"},
+ {"var x, y, z; (", " = z = {});"},
+ {"'use strict'; let x, y, z; (x = ", " = z = {});"},
+ {"var x, y, z; (x = ", " = z = {});"},
+ {"'use strict'; let x, y, z; for (x in ", " = z = {});"},
+ {"'use strict'; let x, y, z; for (x in x = ", " = z = {});"},
+ {"'use strict'; let x, y, z; for (x of ", " = z = {});"},
+ {"'use strict'; let x, y, z; for (x of x = ", " = z = {});"},
+ {"var x, y, z; for (x in ", " = z = {});"},
+ {"var x, y, z; for (x in x = ", " = z = {});"},
+ {"var x, y, z; for (x of ", " = z = {});"},
+ {"var x, y, z; for (x of x = ", " = z = {});"},
+ {NULL, NULL}};
+
+ // clang-format off
+ const char* data[] = {
+ "x",
+
+ "{ x : y }",
+ "{ x : foo().y }",
+ "{ x : foo()[y] }",
+ "{ x : y.z }",
+ "{ x : y[z] }",
+ "{ x : { y } }",
+ "{ x : { foo: y } }",
+ "{ x : { foo: foo().y } }",
+ "{ x : { foo: foo()[y] } }",
+ "{ x : { foo: y.z } }",
+ "{ x : { foo: y[z] } }",
+ "{ x : [ y ] }",
+ "{ x : [ foo().y ] }",
+ "{ x : [ foo()[y] ] }",
+ "{ x : [ y.z ] }",
+ "{ x : [ y[z] ] }",
+
+ "{ x : y = 10 }",
+ "{ x : foo().y = 10 }",
+ "{ x : foo()[y] = 10 }",
+ "{ x : y.z = 10 }",
+ "{ x : y[z] = 10 }",
+ "{ x : { y = 10 } = {} }",
+ "{ x : { foo: y = 10 } = {} }",
+ "{ x : { foo: foo().y = 10 } = {} }",
+ "{ x : { foo: foo()[y] = 10 } = {} }",
+ "{ x : { foo: y.z = 10 } = {} }",
+ "{ x : { foo: y[z] = 10 } = {} }",
+ "{ x : [ y = 10 ] = {} }",
+ "{ x : [ foo().y = 10 ] = {} }",
+ "{ x : [ foo()[y] = 10 ] = {} }",
+ "{ x : [ y.z = 10 ] = {} }",
+ "{ x : [ y[z] = 10 ] = {} }",
+
+ "[ x ]",
+ "[ foo().x ]",
+ "[ foo()[x] ]",
+ "[ x.y ]",
+ "[ x[y] ]",
+ "[ { x } ]",
+ "[ { x : y } ]",
+ "[ { x : foo().y } ]",
+ "[ { x : foo()[y] } ]",
+ "[ { x : x.y } ]",
+ "[ { x : x[y] } ]",
+ "[ [ x ] ]",
+ "[ [ foo().x ] ]",
+ "[ [ foo()[x] ] ]",
+ "[ [ x.y ] ]",
+ "[ [ x[y] ] ]",
+
+ "[ x = 10 ]",
+ "[ foo().x = 10 ]",
+ "[ foo()[x] = 10 ]",
+ "[ x.y = 10 ]",
+ "[ x[y] = 10 ]",
+ "[ { x = 10 } = {} ]",
+ "[ { x : y = 10 } = {} ]",
+ "[ { x : foo().y = 10 } = {} ]",
+ "[ { x : foo()[y] = 10 } = {} ]",
+ "[ { x : x.y = 10 } = {} ]",
+ "[ { x : x[y] = 10 } = {} ]",
+ "[ [ x = 10 ] = {} ]",
+ "[ [ foo().x = 10 ] = {} ]",
+ "[ [ foo()[x] = 10 ] = {} ]",
+ "[ [ x.y = 10 ] = {} ]",
+ "[ [ x[y] = 10 ] = {} ]",
+ "{ x : y = 1 }",
+ "{ x }",
+ "{ x, y, z }",
+ "{ x = 1, y: z, z: y }",
+ "{x = 42, y = 15}",
+ "[x]",
+ "[x = 1]",
+ "[x,y,z]",
+ "[x, y = 42, z]",
+ "{ x : x, y : y }",
+ "{ x : x = 1, y : y }",
+ "{ x : x, y : y = 42 }",
+ "[]",
+ "{}",
+ "[{x:x, y:y}, [,x,z,]]",
+ "[{x:x = 1, y:y = 2}, [z = 3, z = 4, z = 5]]",
+ "[x,,y]",
+ "[(x),,(y)]",
+ "[(x)]",
+ "{42 : x}",
+ "{42 : x = 42}",
+ "{42e-2 : x}",
+ "{42e-2 : x = 42}",
+ "{'hi' : x}",
+ "{'hi' : x = 42}",
+ "{var: x}",
+ "{var: x = 42}",
+ "{var: (x) = 42}",
+ "{[x] : z}",
+ "{[1+1] : z}",
+ "{[1+1] : (z)}",
+ "{[foo()] : z}",
+ "{[foo()] : (z)}",
+ "{[foo()] : foo().bar}",
+ "{[foo()] : foo()['bar']}",
+ "{[foo()] : this.bar}",
+ "{[foo()] : this['bar']}",
+ "{[foo()] : 'foo'.bar}",
+ "{[foo()] : 'foo'['bar']}",
+ "[...x]",
+ "[x,y,...z]",
+ "[x,,...z]",
+ "{ x: y }",
+ "[x, y]",
+ "[((x, y) => z).x]",
+ "{x: ((y, z) => z).x}",
+ "[((x, y) => z)['x']]",
+ "{x: ((y, z) => z)['x']}",
+
+ "{x: { y = 10 } }",
+ "[(({ x } = { x: 1 }) => x).a]",
+
+ // v8:4662
+ "{ x: (y) }",
+ "{ x: (y) = [] }",
+ "{ x: (foo.bar) }",
+ "{ x: (foo['bar']) }",
+ "[ ...(a) ]",
+ "[ ...(foo['bar']) ]",
+ "[ ...(foo.bar) ]",
+ "[ (y) ]",
+ "[ (foo.bar) ]",
+ "[ (foo['bar']) ]",
+
+ NULL};
+ // clang-format on
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyDestructuringAssignment, kAllowHarmonyDestructuring,
+ kAllowHarmonyDefaultParameters};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+
+ RunParserSyncTest(mixed_assignments_context_data, data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ const char* empty_context_data[][2] = {
+ {"'use strict';", ""}, {"", ""}, {NULL, NULL}};
+
+ // CoverInitializedName ambiguity handling in various contexts
+ const char* ambiguity_data[] = {
+ "var foo = { x = 10 } = {};",
+ "var foo = { q } = { x = 10 } = {};",
+ "var foo; foo = { x = 10 } = {};",
+ "var foo; foo = { q } = { x = 10 } = {};",
+ "var x; ({ x = 10 } = {});",
+ "var q, x; ({ q } = { x = 10 } = {});",
+ "var x; [{ x = 10 } = {}]",
+ "var x; (true ? { x = true } = {} : { x = false } = {})",
+ "var q, x; (q, { x = 10 } = {});",
+ "var { x = 10 } = { x = 20 } = {};",
+ "var { x = 10 } = (o = { x = 20 } = {});",
+ "var x; (({ x = 10 } = { x = 20 } = {}) => x)({})",
+ NULL,
+ };
+ RunParserSyncTest(empty_context_data, ambiguity_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(DestructuringAssignmentNegativeTests) {
+ const char* context_data[][2] = {
+ {"'use strict'; let x, y, z; (", " = {});"},
+ {"var x, y, z; (", " = {});"},
+ {"'use strict'; let x, y, z; for (x in ", " = {});"},
+ {"'use strict'; let x, y, z; for (x of ", " = {});"},
+ {"var x, y, z; for (x in ", " = {});"},
+ {"var x, y, z; for (x of ", " = {});"},
+ {NULL, NULL}};
+
+ // clang-format off
+ const char* data[] = {
+ "{ x : ++y }",
+ "{ x : y * 2 }",
+ "{ ...x }",
+ "{ get x() {} }",
+ "{ set x() {} }",
+ "{ x: y() }",
+ "{ this }",
+ "{ x: this }",
+ "{ x: this = 1 }",
+ "{ super }",
+ "{ x: super }",
+ "{ x: super = 1 }",
+ "{ new.target }",
+ "{ x: new.target }",
+ "{ x: new.target = 1 }",
+ "[x--]",
+ "[--x = 1]",
+ "[x()]",
+ "[this]",
+ "[this = 1]",
+ "[new.target]",
+ "[new.target = 1]",
+ "[super]",
+ "[super = 1]",
+ "[function f() {}]",
+ "[50]",
+ "[(50)]",
+ "[(function() {})]",
+ "[(foo())]",
+ "{ x: 50 }",
+ "{ x: (50) }",
+ "['str']",
+ "{ x: 'str' }",
+ "{ x: ('str') }",
+ "{ x: (foo()) }",
+ "{ x: (function() {}) }",
+ "{ x: y } = 'str'",
+ "[x, y] = 'str'",
+ "[(x,y) => z]",
+ "{x: (y) => z}",
+ "[x, ...y, z]",
+ "[...x,]",
+ "[x, y, ...z = 1]",
+ "[...z = 1]",
+
+ // v8:4657
+ "({ x: x4, x: (x+=1e4) })",
+ "(({ x: x4, x: (x+=1e4) }))",
+ "({ x: x4, x: (x+=1e4) } = {})",
+ "(({ x: x4, x: (x+=1e4) } = {}))",
+ "(({ x: x4, x: (x+=1e4) }) = {})",
+ "({ x: y } = {})",
+ "(({ x: y } = {}))",
+ "(({ x: y }) = {})",
+ "([a])",
+ "(([a]))",
+ "([a] = [])",
+ "(([a] = []))",
+ "(([a]) = [])",
+
+ // v8:4662
+ "{ x: ([y]) }",
+ "{ x: ([y] = []) }",
+ "{ x: ({y}) }",
+ "{ x: ({y} = {}) }",
+ "{ x: (++y) }",
+ "[ (...[a]) ]",
+ "[ ...([a]) ]",
+ "[ ...([a] = [])",
+ "[ ...[ ( [ a ] ) ] ]",
+ "[ ([a]) ]",
+ "[ (...[a]) ]",
+ "[ ([a] = []) ]",
+ "[ (++y) ]",
+ "[ ...(++y) ]",
+
+ "[ x += x ]",
+ "{ foo: x += x }",
+
+ NULL};
+ // clang-format on
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyDestructuringAssignment, kAllowHarmonyDestructuring,
+ kAllowHarmonyDefaultParameters};
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+
+ const char* empty_context_data[][2] = {
+ {"'use strict';", ""}, {"", ""}, {NULL, NULL}};
+
+ // CoverInitializedName ambiguity handling in various contexts
+ const char* ambiguity_data[] = {
+ "var foo = { x = 10 };",
+ "var foo = { q } = { x = 10 };",
+ "var foo; foo = { x = 10 };",
+ "var foo; foo = { q } = { x = 10 };",
+ "var x; ({ x = 10 });",
+ "var q, x; ({ q } = { x = 10 });",
+ "var x; [{ x = 10 }]",
+ "var x; (true ? { x = true } : { x = false })",
+ "var q, x; (q, { x = 10 });",
+ "var { x = 10 } = { x = 20 };",
+ "var { x = 10 } = (o = { x = 20 });",
+ "var x; (({ x = 10 } = { x = 20 }) => x)({})",
+
+ // Not ambiguous, but uses same context data
+ "switch([window %= []] = []) { default: }",
+
+ NULL,
+ };
+ RunParserSyncTest(empty_context_data, ambiguity_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ // Strict mode errors
+ const char* strict_context_data[][2] = {{"'use strict'; (", " = {})"},
+ {"'use strict'; for (", " of {}) {}"},
+ {"'use strict'; for (", " in {}) {}"},
+ {NULL, NULL}};
+ const char* strict_data[] = {"{ eval }",
+ "{ arguments }",
+ "{ foo: eval }",
+ "{ foo: arguments }",
+ "{ eval = 0 }",
+ "{ arguments = 0 }",
+ "{ foo: eval = 0 }",
+ "{ foo: arguments = 0 }",
+ "[ eval ]",
+ "[ arguments ]",
+ "[ eval = 0 ]",
+ "[ arguments = 0 ]",
+
+ // v8:4662
+ "{ x: (eval) }",
+ "{ x: (arguments) }",
+ "{ x: (eval = 0) }",
+ "{ x: (arguments = 0) }",
+ "{ x: (eval) = 0 }",
+ "{ x: (arguments) = 0 }",
+ "[ (eval) ]",
+ "[ (arguments) ]",
+ "[ (eval = 0) ]",
+ "[ (arguments = 0) ]",
+ "[ (eval) = 0 ]",
+ "[ (arguments) = 0 ]",
+ "[ ...(eval) ]",
+ "[ ...(arguments) ]",
+ "[ ...(eval = 0) ]",
+ "[ ...(arguments = 0) ]",
+ "[ ...(eval) = 0 ]",
+ "[ ...(arguments) = 0 ]",
+
+ NULL};
+ RunParserSyncTest(strict_context_data, strict_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
TEST(DestructuringDisallowPatternsInForVarIn) {
- i::FLAG_harmony_destructuring = true;
+ i::FLAG_harmony_destructuring_bind = true;
static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {
{"", ""}, {"function f() {", "}"}, {NULL, NULL}};
@@ -6800,7 +7202,7 @@ TEST(DestructuringDisallowPatternsInForVarIn) {
TEST(DestructuringDuplicateParams) {
- i::FLAG_harmony_destructuring = true;
+ i::FLAG_harmony_destructuring_bind = true;
static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
@@ -6826,7 +7228,7 @@ TEST(DestructuringDuplicateParams) {
TEST(DestructuringDuplicateParamsSloppy) {
- i::FLAG_harmony_destructuring = true;
+ i::FLAG_harmony_destructuring_bind = true;
static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {
{"", ""}, {"function outer() {", "}"}, {nullptr, nullptr}};
@@ -6847,7 +7249,7 @@ TEST(DestructuringDuplicateParamsSloppy) {
TEST(DestructuringDisallowPatternsInSingleParamArrows) {
- i::FLAG_harmony_destructuring = true;
+ i::FLAG_harmony_destructuring_bind = true;
static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
const char* context_data[][2] = {{"'use strict';", ""},
{"function outer() { 'use strict';", "}"},
@@ -6866,32 +7268,6 @@ TEST(DestructuringDisallowPatternsInSingleParamArrows) {
}
-TEST(DestructuringDisallowPatternsInRestParams) {
- i::FLAG_harmony_destructuring = true;
- i::FLAG_harmony_rest_parameters = true;
- static const ParserFlag always_flags[] = {kAllowHarmonyRestParameters,
- kAllowHarmonyDestructuring};
- const char* context_data[][2] = {{"'use strict';", ""},
- {"function outer() { 'use strict';", "}"},
- {"", ""},
- {"function outer() { ", "}"},
- {nullptr, nullptr}};
-
- // clang-format off
- const char* error_data[] = {
- "function(...{}) {}",
- "function(...{x}) {}",
- "function(...[x]) {}",
- "(...{}) => {}",
- "(...{x}) => {}",
- "(...[x]) => {}",
- nullptr};
- // clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
-}
-
-
TEST(DefaultParametersYieldInInitializers) {
// clang-format off
const char* sloppy_function_context_data[][2] = {
@@ -6968,9 +7344,8 @@ TEST(DefaultParametersYieldInInitializers) {
kSuccess, NULL, 0, always_flags, arraysize(always_flags));
RunParserSyncTest(sloppy_arrow_context_data, parameter_data, kSuccess, NULL,
0, always_flags, arraysize(always_flags));
- // TODO(wingo): Will change to kSuccess when destructuring assignment lands.
RunParserSyncTest(sloppy_arrow_context_data, destructuring_assignment_data,
- kError, NULL, 0, always_flags, arraysize(always_flags));
+ kSuccess, NULL, 0, always_flags, arraysize(always_flags));
RunParserSyncTest(strict_function_context_data, parameter_data, kError, NULL,
0, always_flags, arraysize(always_flags));
@@ -7233,7 +7608,7 @@ TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
static const ParserFlag always_flags[] = {
kAllowHarmonyDefaultParameters, kAllowHarmonyDestructuring,
- kAllowHarmonyRestParameters, kAllowHarmonySloppy, kAllowStrongMode};
+ kAllowHarmonySloppy, kAllowStrongMode};
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
}
@@ -7305,3 +7680,160 @@ TEST(LetSloppyOnly) {
RunParserSyncTest(context_data, fail_data, kError, NULL, 0, fail_flags,
arraysize(fail_flags));
}
+
+
+TEST(EscapedKeywords) {
+ // clang-format off
+ const char* sloppy_context_data[][2] = {
+ {"", ""},
+ {NULL, NULL}
+ };
+
+ const char* strict_context_data[][2] = {
+ {"'use strict';", ""},
+ {NULL, NULL}
+ };
+
+ const char* fail_data[] = {
+ "for (var i = 0; i < 100; ++i) { br\\u0065ak; }",
+ "cl\\u0061ss Foo {}",
+ "var x = cl\\u0061ss {}",
+ "\\u0063onst foo = 1;",
+ "while (i < 10) { if (i++ & 1) c\\u006fntinue; this.x++; }",
+ "d\\u0065bugger;",
+ "d\\u0065lete this.a;",
+ "\\u0063o { } while(0)",
+ "if (d\\u006f { true }) {}",
+ "if (false) { this.a = 1; } \\u0065lse { this.b = 1; }",
+ "e\\u0078port var foo;",
+ "try { } catch (e) {} f\\u0069nally { }",
+ "f\\u006fr (var i = 0; i < 10; ++i);",
+ "f\\u0075nction fn() {}",
+ "var f = f\\u0075nction() {}",
+ "\\u0069f (true) { }",
+ "\\u0069mport blah from './foo.js';",
+ "n\\u0065w function f() {}",
+ "(function() { r\\u0065turn; })()",
+ "class C extends function() {} { constructor() { sup\\u0065r() } }",
+ "class C extends function() {} { constructor() { sup\\u0065r.a = 1 } }",
+ "sw\\u0069tch (this.a) {}",
+ "var x = th\\u0069s;",
+ "th\\u0069s.a = 1;",
+ "thr\\u006fw 'boo';",
+ "t\\u0072y { true } catch (e) {}",
+ "var x = typ\\u0065of 'blah'",
+ "v\\u0061r a = true",
+ "var v\\u0061r = true",
+ "(function() { return v\\u006fid 0; })()",
+ "wh\\u0069le (true) { }",
+ "w\\u0069th (this.scope) { }",
+ "(function*() { y\\u0069eld 1; })()",
+
+ "var \\u0065num = 1;",
+ "var { \\u0065num } = {}",
+ "(\\u0065num = 1);",
+
+ // Null / Boolean literals
+ "(x === n\\u0075ll);",
+ "var x = n\\u0075ll;",
+ "var n\\u0075ll = 1;",
+ "var { n\\u0075ll } = { 1 };",
+ "n\\u0075ll = 1;",
+ "(x === tr\\u0075e);",
+ "var x = tr\\u0075e;",
+ "var tr\\u0075e = 1;",
+ "var { tr\\u0075e } = {};",
+ "tr\\u0075e = 1;",
+ "(x === f\\u0061lse);",
+ "var x = f\\u0061lse;",
+ "var f\\u0061lse = 1;",
+ "var { f\\u0061lse } = {};",
+ "f\\u0061lse = 1;",
+
+ // TODO(caitp): consistent error messages for labeled statements and
+ // expressions
+ "switch (this.a) { c\\u0061se 6: break; }",
+ "try { } c\\u0061tch (e) {}",
+ "switch (this.a) { d\\u0065fault: break; }",
+ "class C \\u0065xtends function B() {} {}",
+ "for (var a i\\u006e this) {}",
+ "if ('foo' \\u0069n this) {}",
+ "if (this \\u0069nstanceof Array) {}",
+ "(n\\u0065w function f() {})",
+ "(typ\\u0065of 123)",
+ "(v\\u006fid 0)",
+ "do { ; } wh\\u0069le (true) { }",
+ "(function*() { return (n++, y\\u0069eld 1); })()",
+ "class C { st\\u0061tic bar() {} }",
+
+ "(y\\u0069eld);",
+ "var y\\u0069eld = 1;",
+ "var { y\\u0069eld } = {};",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonyDestructuring};
+ RunParserSyncTest(sloppy_context_data, fail_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, fail_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunModuleParserSyncTest(sloppy_context_data, fail_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ // clang-format off
+ const char* let_data[] = {
+ "var l\\u0065t = 1;",
+ "l\\u0065t = 1;",
+ "(l\\u0065t === 1);",
+ NULL
+ };
+ // clang-format on
+
+ RunParserSyncTest(sloppy_context_data, let_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, let_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ static const ParserFlag sloppy_let_flags[] = {
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kAllowHarmonyDestructuring};
+ RunParserSyncTest(sloppy_context_data, let_data, kError, NULL, 0,
+ sloppy_let_flags, arraysize(sloppy_let_flags));
+
+ // Non-errors in sloppy mode
+ const char* valid_data[] = {"(\\u0069mplements = 1);",
+ "var impl\\u0065ments = 1;",
+ "var { impl\\u0065ments } = {};",
+ "(\\u0069nterface = 1);",
+ "var int\\u0065rface = 1;",
+ "var { int\\u0065rface } = {};",
+ "(p\\u0061ckage = 1);",
+ "var packa\\u0067e = 1;",
+ "var { packa\\u0067e } = {};",
+ "(p\\u0072ivate = 1);",
+ "var p\\u0072ivate;",
+ "var { p\\u0072ivate } = {};",
+ "(prot\\u0065cted);",
+ "var prot\\u0065cted = 1;",
+ "var { prot\\u0065cted } = {};",
+ "(publ\\u0069c);",
+ "var publ\\u0069c = 1;",
+ "var { publ\\u0069c } = {};",
+ NULL};
+ RunParserSyncTest(sloppy_context_data, valid_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, valid_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunModuleParserSyncTest(strict_context_data, valid_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
+TEST(MiscSyntaxErrors) {
+ const char* context_data[][2] = {
+ {"'use strict'", ""}, {"", ""}, {NULL, NULL}};
+ const char* error_data[] = {"for (();;) {}", NULL};
+
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0);
+}
diff --git a/deps/v8/test/cctest/test-platform-linux.cc b/deps/v8/test/cctest/test-platform-linux.cc
index abe43ad744..613638e78a 100644
--- a/deps/v8/test/cctest/test-platform-linux.cc
+++ b/deps/v8/test/cctest/test-platform-linux.cc
@@ -27,9 +27,6 @@
//
// Tests of the TokenLock class from lock.h
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h> // for usleep()
diff --git a/deps/v8/test/cctest/test-platform-win32.cc b/deps/v8/test/cctest/test-platform-win32.cc
index 2d87d92f27..cecde74120 100644
--- a/deps/v8/test/cctest/test-platform-win32.cc
+++ b/deps/v8/test/cctest/test-platform-win32.cc
@@ -27,9 +27,6 @@
//
// Tests of the TokenLock class from lock.h
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 771be1fd09..2645a3dc18 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdint.h>
#include "src/base/build_config.h"
#include "src/base/platform/platform.h"
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 65d1a83bc4..82c0f30bd6 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -27,9 +27,6 @@
//
// Tests of profiles generator and utilities.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "include/v8-profiler.h"
@@ -51,7 +48,8 @@ using i::Vector;
TEST(ProfileNodeFindOrAddChild) {
- ProfileTree tree;
+ CcTest::InitializeVM();
+ ProfileTree tree(CcTest::i_isolate());
ProfileNode* node = tree.root();
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
@@ -75,8 +73,9 @@ TEST(ProfileNodeFindOrAddChild) {
TEST(ProfileNodeFindOrAddChildForSameFunction) {
+ CcTest::InitializeVM();
const char* aaa = "aaa";
- ProfileTree tree;
+ ProfileTree tree(CcTest::i_isolate());
ProfileNode* node = tree.root();
CodeEntry entry1(i::Logger::FUNCTION_TAG, aaa);
ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
@@ -122,10 +121,11 @@ class ProfileTreeTestHelper {
TEST(ProfileTreeAddPathFromEnd) {
+ CcTest::InitializeVM();
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
- ProfileTree tree;
+ ProfileTree tree(CcTest::i_isolate());
ProfileTreeTestHelper helper(&tree);
CHECK(!helper.Walk(&entry1));
CHECK(!helper.Walk(&entry2));
@@ -181,7 +181,8 @@ TEST(ProfileTreeAddPathFromEnd) {
TEST(ProfileTreeCalculateTotalTicks) {
- ProfileTree empty_tree;
+ CcTest::InitializeVM();
+ ProfileTree empty_tree(CcTest::i_isolate());
CHECK_EQ(0u, empty_tree.root()->self_ticks());
empty_tree.root()->IncrementSelfTicks();
CHECK_EQ(1u, empty_tree.root()->self_ticks());
@@ -191,7 +192,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
Vector<CodeEntry*> e1_path_vec(
e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
- ProfileTree single_child_tree;
+ ProfileTree single_child_tree(CcTest::i_isolate());
single_child_tree.AddPathFromEnd(e1_path_vec);
single_child_tree.root()->IncrementSelfTicks();
CHECK_EQ(1u, single_child_tree.root()->self_ticks());
@@ -206,7 +207,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
Vector<CodeEntry*> e2_e1_path_vec(e2_e1_path,
sizeof(e2_e1_path) / sizeof(e2_e1_path[0]));
- ProfileTree flat_tree;
+ ProfileTree flat_tree(CcTest::i_isolate());
ProfileTreeTestHelper flat_helper(&flat_tree);
flat_tree.AddPathFromEnd(e1_path_vec);
flat_tree.AddPathFromEnd(e1_path_vec);
@@ -233,7 +234,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
Vector<CodeEntry*> e3_path_vec(
e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
- ProfileTree wide_tree;
+ ProfileTree wide_tree(CcTest::i_isolate());
ProfileTreeTestHelper wide_helper(&wide_tree);
wide_tree.AddPathFromEnd(e1_path_vec);
wide_tree.AddPathFromEnd(e1_path_vec);
diff --git a/deps/v8/test/cctest/test-random-number-generator.cc b/deps/v8/test/cctest/test-random-number-generator.cc
index f5e05cb396..8a855fe5d5 100644
--- a/deps/v8/test/cctest/test-random-number-generator.cc
+++ b/deps/v8/test/cctest/test-random-number-generator.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -51,3 +48,127 @@ TEST(RandomSeedFlagIsUsed) {
i->Dispose();
}
}
+
+
+// Chi squared for getting m 0s out of n bits.
+double ChiSquared(int m, int n) {
+ double ys_minus_np1 = (m - n / 2.0);
+ double chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n;
+ double ys_minus_np2 = ((n - m) - n / 2.0);
+ double chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n;
+ return chi_squared_1 + chi_squared_2;
+}
+
+
+// Test for correlations between recent bits from the PRNG, or bits that are
+// biased.
+void RandomBitCorrelation(int random_bit) {
+ FLAG_random_seed = 31415926;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ v8::base::RandomNumberGenerator* rng = i_isolate->random_number_generator();
+#ifdef DEBUG
+ const int kHistory = 2;
+ const int kRepeats = 1000;
+#else
+ const int kHistory = 8;
+ const int kRepeats = 10000;
+#endif
+ uint32_t history[kHistory];
+ // The predictor bit is either constant 0 or 1, or one of the bits from the
+ // history.
+ for (int predictor_bit = -2; predictor_bit < 32; predictor_bit++) {
+ // The predicted bit is one of the bits from the PRNG.
+ for (int ago = 0; ago < kHistory; ago++) {
+ // We don't want to check whether each bit predicts itself.
+ if (ago == 0 && predictor_bit == random_bit) continue;
+
+ // Enter the new random value into the history
+ for (int i = ago; i >= 0; i--) {
+ history[i] = bit_cast<uint32_t>(rng->NextInt());
+ }
+
+ // Find out how many of the bits are the same as the prediction bit.
+ int m = 0;
+ for (int i = 0; i < kRepeats; i++) {
+ v8::HandleScope scope(isolate);
+ uint32_t random = bit_cast<uint32_t>(rng->NextInt());
+ for (int j = ago - 1; j >= 0; j--) history[j + 1] = history[j];
+ history[0] = random;
+
+ int predicted;
+ if (predictor_bit >= 0) {
+ predicted = (history[ago] >> predictor_bit) & 1;
+ } else {
+ predicted = predictor_bit == -2 ? 0 : 1;
+ }
+ int bit = (random >> random_bit) & 1;
+ if (bit == predicted) m++;
+ }
+
+ // Chi squared analysis for k = 2 (2, states: same/not-same) and one
+ // degree of freedom (k - 1).
+ double chi_squared = ChiSquared(m, kRepeats);
+ if (chi_squared > 24) {
+ int percent = static_cast<int>(m * 100.0 / kRepeats);
+ if (predictor_bit < 0) {
+ PrintF("Bit %d is %d %d%% of the time\n", random_bit,
+ predictor_bit == -2 ? 0 : 1, percent);
+ } else {
+ PrintF("Bit %d is the same as bit %d %d ago %d%% of the time\n",
+ random_bit, predictor_bit, ago, percent);
+ }
+ }
+
+ // For 1 degree of freedom this corresponds to 1 in a million. We are
+ // running ~8000 tests, so that would be surprising.
+ CHECK(chi_squared <= 24);
+
+ // If the predictor bit is a fixed 0 or 1 then it makes no sense to
+ // repeat the test with a different age.
+ if (predictor_bit < 0) break;
+ }
+ }
+ isolate->Dispose();
+}
+
+
+#define TEST_RANDOM_BIT(BIT) \
+ TEST(RandomBitCorrelations##BIT) { RandomBitCorrelation(BIT); }
+
+TEST_RANDOM_BIT(0)
+TEST_RANDOM_BIT(1)
+TEST_RANDOM_BIT(2)
+TEST_RANDOM_BIT(3)
+TEST_RANDOM_BIT(4)
+TEST_RANDOM_BIT(5)
+TEST_RANDOM_BIT(6)
+TEST_RANDOM_BIT(7)
+TEST_RANDOM_BIT(8)
+TEST_RANDOM_BIT(9)
+TEST_RANDOM_BIT(10)
+TEST_RANDOM_BIT(11)
+TEST_RANDOM_BIT(12)
+TEST_RANDOM_BIT(13)
+TEST_RANDOM_BIT(14)
+TEST_RANDOM_BIT(15)
+TEST_RANDOM_BIT(16)
+TEST_RANDOM_BIT(17)
+TEST_RANDOM_BIT(18)
+TEST_RANDOM_BIT(19)
+TEST_RANDOM_BIT(20)
+TEST_RANDOM_BIT(21)
+TEST_RANDOM_BIT(22)
+TEST_RANDOM_BIT(23)
+TEST_RANDOM_BIT(24)
+TEST_RANDOM_BIT(25)
+TEST_RANDOM_BIT(26)
+TEST_RANDOM_BIT(27)
+TEST_RANDOM_BIT(28)
+TEST_RANDOM_BIT(29)
+TEST_RANDOM_BIT(30)
+TEST_RANDOM_BIT(31)
+
+#undef TEST_RANDOM_BIT
diff --git a/deps/v8/test/cctest/test-receiver-check-hidden-prototype.cc b/deps/v8/test/cctest/test-receiver-check-hidden-prototype.cc
new file mode 100644
index 0000000000..90ed8e7b56
--- /dev/null
+++ b/deps/v8/test/cctest/test-receiver-check-hidden-prototype.cc
@@ -0,0 +1,73 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "include/v8-experimental.h"
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace {
+
+
+static void SlowCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(41);
+}
+
+
+TEST(CompatibleReceiverBuiltin) {
+ // Check that the HandleFastApiCall builtin visits the hidden prototypes
+ // during the compatible receiver check.
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> current_context = isolate->GetCurrentContext();
+
+ v8::Local<v8::FunctionTemplate> constructor_template =
+ v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> prototype_template =
+ v8::FunctionTemplate::New(isolate);
+ prototype_template->SetHiddenPrototype(true);
+
+ v8::Local<v8::ObjectTemplate> proto_instance_template =
+ prototype_template->InstanceTemplate();
+
+ v8::experimental::FastAccessorBuilder* fast_accessor_builder =
+ v8::experimental::FastAccessorBuilder::New(isolate);
+ fast_accessor_builder->ReturnValue(
+ fast_accessor_builder->IntegerConstant(42));
+ v8::Local<v8::FunctionTemplate> accessor_template =
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, SlowCallback, fast_accessor_builder, v8::Local<v8::Value>(),
+ v8::Signature::New(isolate, prototype_template));
+
+ proto_instance_template->SetAccessorProperty(
+ v8_str("bar"), accessor_template, v8::Local<v8::FunctionTemplate>(),
+ v8::ReadOnly);
+
+ v8::Local<v8::Object> object =
+ constructor_template->GetFunction(current_context)
+ .ToLocalChecked()
+ ->NewInstance(current_context)
+ .ToLocalChecked();
+
+ v8::Local<v8::Object> hidden_prototype =
+ prototype_template->GetFunction(current_context)
+ .ToLocalChecked()
+ ->NewInstance(current_context)
+ .ToLocalChecked();
+
+ CHECK(object->SetPrototype(current_context, hidden_prototype).FromJust());
+
+ context->Global()
+ ->Set(current_context, v8_str("object"), object)
+ .FromMaybe(false);
+
+ CHECK_EQ(42, CompileRun("var getter = object.__lookupGetter__('bar');"
+ "getter.call(object)")
+ ->Int32Value(current_context)
+ .FromJust());
+}
+
+} // namespace
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 2e236c20f3..a91058cc24 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -25,21 +25,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <cstdlib>
#include <sstream>
+#include "include/v8.h"
#include "src/v8.h"
-#include "src/ast.h"
+#include "src/ast/ast.h"
#include "src/char-predicates-inl.h"
#include "src/ostreams.h"
-#include "src/parser.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
+#include "src/regexp/regexp-parser.h"
#include "src/splay-tree-inl.h"
#include "src/string-stream.h"
#ifdef V8_INTERPRETED_REGEXP
@@ -102,17 +100,21 @@ static bool CheckParse(const char* input) {
}
-static void CheckParseEq(const char* input, const char* expected) {
+static void CheckParseEq(const char* input, const char* expected,
+ bool unicode = false) {
v8::HandleScope scope(CcTest::isolate());
Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, false, false, &result));
+ CcTest::i_isolate(), &zone, &reader, false, unicode, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
std::ostringstream os;
result.tree->Print(os, &zone);
+ if (strcmp(expected, os.str().c_str()) != 0) {
+ printf("%s | %s\n", expected, os.str().c_str());
+ }
CHECK_EQ(0, strcmp(expected, os.str().c_str()));
}
@@ -159,7 +161,11 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
CHECK_EQ(max, min_max.max_match); \
}
-TEST(Parser) {
+
+void TestRegExpParser(bool lookbehind) {
+ FLAG_harmony_regexp_lookbehind = lookbehind;
+ FLAG_harmony_unicode_regexps = true;
+
CHECK_PARSE_ERROR("?");
CheckParseEq("abc", "'abc'");
@@ -191,6 +197,13 @@ TEST(Parser) {
CheckParseEq("foo|(bar|baz)|quux", "(| 'foo' (^ (| 'bar' 'baz')) 'quux')");
CheckParseEq("foo(?=bar)baz", "(: 'foo' (-> + 'bar') 'baz')");
CheckParseEq("foo(?!bar)baz", "(: 'foo' (-> - 'bar') 'baz')");
+ if (lookbehind) {
+ CheckParseEq("foo(?<=bar)baz", "(: 'foo' (<- + 'bar') 'baz')");
+ CheckParseEq("foo(?<!bar)baz", "(: 'foo' (<- - 'bar') 'baz')");
+ } else {
+ CHECK_PARSE_ERROR("foo(?<=bar)baz");
+ CHECK_PARSE_ERROR("foo(?<!bar)baz");
+ }
CheckParseEq("()", "(^ %)");
CheckParseEq("(?=)", "(-> + %)");
CheckParseEq("[]", "^[\\x00-\\uffff]"); // Doesn't compile on windows
@@ -262,14 +275,22 @@ TEST(Parser) {
CheckParseEq("(a)\\1", "(: (^ 'a') (<- 1))");
CheckParseEq("(a\\1)", "(^ 'a')");
CheckParseEq("(\\1a)", "(^ 'a')");
+ CheckParseEq("(\\2)(\\1)", "(: (^ (<- 2)) (^ (<- 1)))");
CheckParseEq("(?=a)?a", "'a'");
CheckParseEq("(?=a){0,10}a", "'a'");
CheckParseEq("(?=a){1,10}a", "(: (-> + 'a') 'a')");
CheckParseEq("(?=a){9,10}a", "(: (-> + 'a') 'a')");
CheckParseEq("(?!a)?a", "'a'");
- CheckParseEq("\\1(a)", "(^ 'a')");
+ CheckParseEq("\\1(a)", "(: (<- 1) (^ 'a'))");
CheckParseEq("(?!(a))\\1", "(: (-> - (^ 'a')) (<- 1))");
- CheckParseEq("(?!\\1(a\\1)\\1)\\1", "(: (-> - (: (^ 'a') (<- 1))) (<- 1))");
+ CheckParseEq("(?!\\1(a\\1)\\1)\\1",
+ "(: (-> - (: (<- 1) (^ 'a') (<- 1))) (<- 1))");
+ CheckParseEq("\\1\\2(a(?:\\1(b\\1\\2))\\2)\\1",
+ "(: (<- 1) (<- 2) (^ (: 'a' (^ 'b') (<- 2))) (<- 1))");
+ if (lookbehind) {
+ CheckParseEq("\\1\\2(a(?<=\\1(b\\1\\2))\\2)\\1",
+ "(: (<- 1) (<- 2) (^ (: 'a' (<- + (^ 'b')) (<- 2))) (<- 1))");
+ }
CheckParseEq("[\\0]", "[\\x00]");
CheckParseEq("[\\11]", "[\\x09]");
CheckParseEq("[\\11a]", "[\\x09 a]");
@@ -286,6 +307,15 @@ TEST(Parser) {
CheckParseEq("\\u003z", "'u003z'");
CheckParseEq("foo[z]*", "(: 'foo' (# 0 - g [z]))");
+ // Unicode regexps
+ CheckParseEq("\\u{12345}", "'\\ud808\\udf45'", true);
+ CheckParseEq("\\u{12345}\\u{23456}", "(! '\\ud808\\udf45' '\\ud84d\\udc56')",
+ true);
+ CheckParseEq("\\u{12345}|\\u{23456}", "(| '\\ud808\\udf45' '\\ud84d\\udc56')",
+ true);
+ CheckParseEq("\\u{12345}{3}", "(# 3 3 g '\\ud808\\udf45')", true);
+ CheckParseEq("\\u{12345}*", "(# 0 - g '\\ud808\\udf45')", true);
+
CHECK_SIMPLE("", false);
CHECK_SIMPLE("a", true);
CHECK_SIMPLE("a|b", false);
@@ -361,8 +391,8 @@ TEST(Parser) {
CHECK_MIN_MAX("(?:ab)|cde", 2, 3);
CHECK_MIN_MAX("(ab)", 2, 2);
CHECK_MIN_MAX("(ab|cde)", 2, 3);
- CHECK_MIN_MAX("(ab)\\1", 2, 4);
- CHECK_MIN_MAX("(ab|cde)\\1", 2, 6);
+ CHECK_MIN_MAX("(ab)\\1", 2, RegExpTree::kInfinity);
+ CHECK_MIN_MAX("(ab|cde)\\1", 2, RegExpTree::kInfinity);
CHECK_MIN_MAX("(?:ab)?", 0, 2);
CHECK_MIN_MAX("(?:ab)*", 0, RegExpTree::kInfinity);
CHECK_MIN_MAX("(?:ab)+", 2, RegExpTree::kInfinity);
@@ -400,6 +430,16 @@ TEST(Parser) {
}
+TEST(ParserWithLookbehind) {
+ TestRegExpParser(true); // Lookbehind enabled.
+}
+
+
+TEST(ParserWithoutLookbehind) {
+  TestRegExpParser(false);  // Lookbehind disabled.
+}
+
+
TEST(ParserRegression) {
CheckParseEq("[A-Z$-][x]", "(! [A-Z $ -] [x])");
CheckParseEq("a{3,4*}", "(: 'a{3,' (# 0 - g '4') '}')");
@@ -790,7 +830,7 @@ TEST(MacroAssemblerNativeSimple) {
Label fail, backtrack;
m.PushBacktrack(&fail);
- m.CheckNotAtStart(NULL);
+ m.CheckNotAtStart(0, NULL);
m.LoadCurrentCharacter(2, NULL);
m.CheckNotCharacter('o', NULL);
m.LoadCurrentCharacter(1, NULL, false);
@@ -857,7 +897,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
Label fail, backtrack;
m.PushBacktrack(&fail);
- m.CheckNotAtStart(NULL);
+ m.CheckNotAtStart(0, NULL);
m.LoadCurrentCharacter(2, NULL);
m.CheckNotCharacter('o', NULL);
m.LoadCurrentCharacter(1, NULL, false);
@@ -973,12 +1013,12 @@ TEST(MacroAssemblerNativeBackReferenceLATIN1) {
m.AdvanceCurrentPosition(2);
m.WriteCurrentPositionToRegister(1, 0);
Label nomatch;
- m.CheckNotBackReference(0, &nomatch);
+ m.CheckNotBackReference(0, false, &nomatch);
m.Fail();
m.Bind(&nomatch);
m.AdvanceCurrentPosition(2);
Label missing_match;
- m.CheckNotBackReference(0, &missing_match);
+ m.CheckNotBackReference(0, false, &missing_match);
m.WriteCurrentPositionToRegister(2, 0);
m.Succeed();
m.Bind(&missing_match);
@@ -1023,12 +1063,12 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
m.AdvanceCurrentPosition(2);
m.WriteCurrentPositionToRegister(1, 0);
Label nomatch;
- m.CheckNotBackReference(0, &nomatch);
+ m.CheckNotBackReference(0, false, &nomatch);
m.Fail();
m.Bind(&nomatch);
m.AdvanceCurrentPosition(2);
Label missing_match;
- m.CheckNotBackReference(0, &missing_match);
+ m.CheckNotBackReference(0, false, &missing_match);
m.WriteCurrentPositionToRegister(2, 0);
m.Succeed();
m.Bind(&missing_match);
@@ -1073,7 +1113,7 @@ TEST(MacroAssemblernativeAtStart) {
0);
Label not_at_start, newline, fail;
- m.CheckNotAtStart(&not_at_start);
+ m.CheckNotAtStart(0, &not_at_start);
// Check that prevchar = '\n' and current = 'f'.
m.CheckCharacter('\n', &newline);
m.Bind(&fail);
@@ -1138,16 +1178,16 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
m.WriteCurrentPositionToRegister(2, 0);
m.AdvanceCurrentPosition(3);
m.WriteCurrentPositionToRegister(3, 0);
- m.CheckNotBackReferenceIgnoreCase(2, &fail); // Match "AbC".
- m.CheckNotBackReferenceIgnoreCase(2, &fail); // Match "ABC".
+ m.CheckNotBackReferenceIgnoreCase(2, false, &fail); // Match "AbC".
+ m.CheckNotBackReferenceIgnoreCase(2, false, &fail); // Match "ABC".
Label expected_fail;
- m.CheckNotBackReferenceIgnoreCase(2, &expected_fail);
+ m.CheckNotBackReferenceIgnoreCase(2, false, &expected_fail);
m.Bind(&fail);
m.Fail();
m.Bind(&expected_fail);
m.AdvanceCurrentPosition(3); // Skip "xYz"
- m.CheckNotBackReferenceIgnoreCase(2, &succ);
+ m.CheckNotBackReferenceIgnoreCase(2, false, &succ);
m.Fail();
m.Bind(&succ);
@@ -1339,7 +1379,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
m.WriteCurrentPositionToRegister(0, 0);
m.WriteCurrentPositionToRegister(1, 1);
Label done;
- m.CheckNotBackReference(0, &done); // Performs a system-stack push.
+ m.CheckNotBackReference(0, false, &done); // Performs a system-stack push.
m.Bind(&done);
m.PushRegister(large_number, RegExpMacroAssembler::kNoStackLimitCheck);
m.PopRegister(1);
@@ -1388,7 +1428,7 @@ TEST(MacroAssembler) {
m.Fail();
m.Bind(&start);
m.PushBacktrack(&fail);
- m.CheckNotAtStart(NULL);
+ m.CheckNotAtStart(0, NULL);
m.LoadCurrentCharacter(0, NULL);
m.CheckNotCharacter('f', NULL);
m.LoadCurrentCharacter(1, NULL);
@@ -1678,26 +1718,26 @@ TEST(CanonicalizeCharacterSets) {
list->Add(CharacterRange(30, 40), &zone);
list->Add(CharacterRange(50, 60), &zone);
set.Canonicalize();
- DCHECK_EQ(3, list->length());
- DCHECK_EQ(10, list->at(0).from());
- DCHECK_EQ(20, list->at(0).to());
- DCHECK_EQ(30, list->at(1).from());
- DCHECK_EQ(40, list->at(1).to());
- DCHECK_EQ(50, list->at(2).from());
- DCHECK_EQ(60, list->at(2).to());
+ CHECK_EQ(3, list->length());
+ CHECK_EQ(10, list->at(0).from());
+ CHECK_EQ(20, list->at(0).to());
+ CHECK_EQ(30, list->at(1).from());
+ CHECK_EQ(40, list->at(1).to());
+ CHECK_EQ(50, list->at(2).from());
+ CHECK_EQ(60, list->at(2).to());
list->Rewind(0);
list->Add(CharacterRange(10, 20), &zone);
list->Add(CharacterRange(50, 60), &zone);
list->Add(CharacterRange(30, 40), &zone);
set.Canonicalize();
- DCHECK_EQ(3, list->length());
- DCHECK_EQ(10, list->at(0).from());
- DCHECK_EQ(20, list->at(0).to());
- DCHECK_EQ(30, list->at(1).from());
- DCHECK_EQ(40, list->at(1).to());
- DCHECK_EQ(50, list->at(2).from());
- DCHECK_EQ(60, list->at(2).to());
+ CHECK_EQ(3, list->length());
+ CHECK_EQ(10, list->at(0).from());
+ CHECK_EQ(20, list->at(0).to());
+ CHECK_EQ(30, list->at(1).from());
+ CHECK_EQ(40, list->at(1).to());
+ CHECK_EQ(50, list->at(2).from());
+ CHECK_EQ(60, list->at(2).to());
list->Rewind(0);
list->Add(CharacterRange(30, 40), &zone);
@@ -1706,26 +1746,26 @@ TEST(CanonicalizeCharacterSets) {
list->Add(CharacterRange(100, 100), &zone);
list->Add(CharacterRange(1, 1), &zone);
set.Canonicalize();
- DCHECK_EQ(5, list->length());
- DCHECK_EQ(1, list->at(0).from());
- DCHECK_EQ(1, list->at(0).to());
- DCHECK_EQ(10, list->at(1).from());
- DCHECK_EQ(20, list->at(1).to());
- DCHECK_EQ(25, list->at(2).from());
- DCHECK_EQ(25, list->at(2).to());
- DCHECK_EQ(30, list->at(3).from());
- DCHECK_EQ(40, list->at(3).to());
- DCHECK_EQ(100, list->at(4).from());
- DCHECK_EQ(100, list->at(4).to());
+ CHECK_EQ(5, list->length());
+ CHECK_EQ(1, list->at(0).from());
+ CHECK_EQ(1, list->at(0).to());
+ CHECK_EQ(10, list->at(1).from());
+ CHECK_EQ(20, list->at(1).to());
+ CHECK_EQ(25, list->at(2).from());
+ CHECK_EQ(25, list->at(2).to());
+ CHECK_EQ(30, list->at(3).from());
+ CHECK_EQ(40, list->at(3).to());
+ CHECK_EQ(100, list->at(4).from());
+ CHECK_EQ(100, list->at(4).to());
list->Rewind(0);
list->Add(CharacterRange(10, 19), &zone);
list->Add(CharacterRange(21, 30), &zone);
list->Add(CharacterRange(20, 20), &zone);
set.Canonicalize();
- DCHECK_EQ(1, list->length());
- DCHECK_EQ(10, list->at(0).from());
- DCHECK_EQ(30, list->at(0).to());
+ CHECK_EQ(1, list->length());
+ CHECK_EQ(10, list->at(0).from());
+ CHECK_EQ(30, list->at(0).to());
}
@@ -1807,8 +1847,8 @@ TEST(CharacterRangeMerge) {
offset += 9;
}
- DCHECK(CharacterRange::IsCanonical(&l1));
- DCHECK(CharacterRange::IsCanonical(&l2));
+ CHECK(CharacterRange::IsCanonical(&l1));
+ CHECK(CharacterRange::IsCanonical(&l2));
ZoneList<CharacterRange> first_only(4, &zone);
ZoneList<CharacterRange> second_only(4, &zone);
@@ -1819,3 +1859,82 @@ TEST(CharacterRangeMerge) {
TEST(Graph) {
Execute("\\b\\w+\\b", false, true, true);
}
+
+
+namespace {
+
+int* global_use_counts = NULL;
+
+void MockUseCounterCallback(v8::Isolate* isolate,
+ v8::Isolate::UseCounterFeature feature) {
+ ++global_use_counts[feature];
+}
+}
+
+
+// Test that ES2015 RegExp compatibility fixes are in place, that they
+// are not overly broad, and the appropriate UseCounters are incremented
+TEST(UseCountRegExp) {
+ i::FLAG_harmony_regexps = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ // Compat fix: RegExp.prototype.sticky == undefined; UseCounter tracks it
+ v8::Local<v8::Value> resultSticky = CompileRun("RegExp.prototype.sticky");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpPrototypeToString]);
+ CHECK(resultSticky->IsUndefined());
+
+  // re.sticky has appropriate value and doesn't touch UseCounter
+ v8::Local<v8::Value> resultReSticky = CompileRun("/a/.sticky");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpPrototypeToString]);
+ CHECK(resultReSticky->IsFalse());
+
+  // When the getter is called on another object, throw an exception
+ // and don't increment the UseCounter
+ v8::Local<v8::Value> resultStickyError = CompileRun(
+ "var exception;"
+ "try { "
+ " Object.getOwnPropertyDescriptor(RegExp.prototype, 'sticky')"
+ " .get.call(null);"
+ "} catch (e) {"
+ " exception = e;"
+ "}"
+ "exception");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(0, use_counts[v8::Isolate::kRegExpPrototypeToString]);
+ CHECK(resultStickyError->IsObject());
+
+ // RegExp.prototype.toString() returns '/(?:)/' as a compatibility fix;
+ // a UseCounter is incremented to track it.
+ v8::Local<v8::Value> resultToString =
+ CompileRun("RegExp.prototype.toString().length");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
+ CHECK(resultToString->IsInt32());
+ CHECK_EQ(6,
+ resultToString->Int32Value(isolate->GetCurrentContext()).FromJust());
+
+ // .toString() works on normal RegExps
+ v8::Local<v8::Value> resultReToString = CompileRun("/a/.toString().length");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
+ CHECK(resultReToString->IsInt32());
+ CHECK_EQ(
+ 3, resultReToString->Int32Value(isolate->GetCurrentContext()).FromJust());
+
+ // .toString() throws on non-RegExps that aren't RegExp.prototype
+ v8::Local<v8::Value> resultToStringError = CompileRun(
+ "var exception;"
+ "try { RegExp.prototype.toString.call(null) }"
+ "catch (e) { exception = e; }"
+ "exception");
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
+ CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
+ CHECK(resultToStringError->IsObject());
+}
diff --git a/deps/v8/test/cctest/test-reloc-info.cc b/deps/v8/test/cctest/test-reloc-info.cc
index d2a780ba62..4346f0083e 100644
--- a/deps/v8/test/cctest/test-reloc-info.cc
+++ b/deps/v8/test/cctest/test-reloc-info.cc
@@ -25,10 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/assembler.h"
+#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -36,7 +34,7 @@ namespace internal {
static void WriteRinfo(RelocInfoWriter* writer,
byte* pc, RelocInfo::Mode mode, intptr_t data) {
- RelocInfo rinfo(pc, mode, data, NULL);
+ RelocInfo rinfo(CcTest::i_isolate(), pc, mode, data, NULL);
writer->Write(&rinfo);
}
@@ -44,6 +42,7 @@ static void WriteRinfo(RelocInfoWriter* writer,
// Tests that writing both types of positions and then reading either
// or both works as expected.
TEST(Positions) {
+ CcTest::InitializeVM();
const int code_size = 10 * KB;
int relocation_info_size = 10 * KB;
const int buffer_size = code_size + relocation_info_size;
@@ -68,8 +67,9 @@ TEST(Positions) {
writer.Finish();
relocation_info_size = static_cast<int>(buffer_end - writer.pos());
- CodeDesc desc = {buffer.get(), buffer_size, code_size, relocation_info_size,
- 0, NULL};
+ MacroAssembler assm(CcTest::i_isolate(), nullptr, 0, CodeObjectRequired::kNo);
+ CodeDesc desc = {buffer.get(), buffer_size, code_size,
+ relocation_info_size, 0, &assm};
// Read only (non-statement) positions.
{
diff --git a/deps/v8/test/cctest/test-representation.cc b/deps/v8/test/cctest/test-representation.cc
index 43dbc394c4..fc1f531331 100644
--- a/deps/v8/test/cctest/test-representation.cc
+++ b/deps/v8/test/cctest/test-representation.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/cctest.h"
#include "src/property-details.h"
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 3506c4b38b..2cc15f816a 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -4,9 +4,6 @@
//
// Tests the sampling API in include/v8.h
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <map>
#include <string>
#include "include/v8.h"
@@ -94,7 +91,7 @@ class SamplingTestHelper {
explicit SamplingTestHelper(const std::string& test_function)
: sample_is_taken_(false), isolate_(CcTest::isolate()) {
- DCHECK(!instance_);
+ CHECK(!instance_);
instance_ = this;
v8::HandleScope scope(isolate_);
v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate_);
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 644b04a78a..2f29b25fab 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -25,27 +25,25 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <signal.h>
#include <sys/stat.h>
#include "src/v8.h"
+#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/compilation-cache.h"
#include "src/debug/debug.h"
#include "src/heap/spaces.h"
#include "src/objects.h"
-#include "src/parser.h"
+#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
-#include "src/scopeinfo.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/utils-inl.h"
using namespace v8::internal;
@@ -95,7 +93,8 @@ void WritePayload(const Vector<const byte>& payload, const char* file_name) {
static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
SnapshotByteSink sink;
StartupSerializer ser(isolate, &sink);
- ser.Serialize();
+ ser.SerializeStrongReferences();
+ ser.SerializeWeakReferencesAndDeferred();
SnapshotData snapshot_data(ser);
WritePayload(snapshot_data.RawData(), snapshot_file);
return true;
@@ -294,7 +293,7 @@ UNINITIALIZED_TEST(PartialSerialization) {
HandleScope scope(isolate);
env.Reset(v8_isolate, v8::Context::New(v8_isolate));
}
- DCHECK(!env.IsEmpty());
+ CHECK(!env.IsEmpty());
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
@@ -313,7 +312,7 @@ UNINITIALIZED_TEST(PartialSerialization) {
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::String> foo = v8_str("foo");
- DCHECK(!foo.IsEmpty());
+ CHECK(!foo.IsEmpty());
raw_foo = *(v8::Utils::OpenHandle(*foo));
}
@@ -372,17 +371,14 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
HandleScope handle_scope(isolate);
Handle<Object> root;
- Handle<FixedArray> outdated_contexts;
// Intentionally empty handle. The deserializer should not come across
// any references to the global proxy in this test.
Handle<JSGlobalProxy> global_proxy = Handle<JSGlobalProxy>::null();
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- root =
- deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts).ToHandleChecked();
- CHECK_EQ(0, outdated_contexts->length());
+ root = deserializer.DeserializePartial(isolate, global_proxy)
+ .ToHandleChecked();
CHECK(root->IsString());
}
@@ -390,9 +386,8 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- root2 =
- deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts).ToHandleChecked();
+ root2 = deserializer.DeserializePartial(isolate, global_proxy)
+ .ToHandleChecked();
CHECK(root2->IsString());
CHECK(root.is_identical_to(root2));
}
@@ -417,7 +412,7 @@ UNINITIALIZED_TEST(ContextSerialization) {
HandleScope scope(isolate);
env.Reset(v8_isolate, v8::Context::New(v8_isolate));
}
- DCHECK(!env.IsEmpty());
+ CHECK(!env.IsEmpty());
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
@@ -489,27 +484,23 @@ UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
HandleScope handle_scope(isolate);
Handle<Object> root;
- Handle<FixedArray> outdated_contexts;
Handle<JSGlobalProxy> global_proxy =
isolate->factory()->NewUninitializedJSGlobalProxy();
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- root =
- deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts).ToHandleChecked();
+ root = deserializer.DeserializePartial(isolate, global_proxy)
+ .ToHandleChecked();
CHECK(root->IsContext());
CHECK(Handle<Context>::cast(root)->global_proxy() == *global_proxy);
- CHECK_EQ(2, outdated_contexts->length());
}
Handle<Object> root2;
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- root2 =
- deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts).ToHandleChecked();
+ root2 = deserializer.DeserializePartial(isolate, global_proxy)
+ .ToHandleChecked();
CHECK(root2->IsContext());
CHECK(!root.is_identical_to(root2));
}
@@ -532,7 +523,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
HandleScope scope(isolate);
env.Reset(v8_isolate, v8::Context::New(v8_isolate));
}
- DCHECK(!env.IsEmpty());
+ CHECK(!env.IsEmpty());
{
v8::HandleScope handle_scope(v8_isolate);
v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
@@ -543,7 +534,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
" e = function(s) { return eval (s); }"
"})();"
"var o = this;"
- "var r = Math.random() + Math.cos(0);"
+ "var r = Math.sin(0) + Math.cos(0);"
"var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
"var s = parseInt('12345');");
@@ -626,20 +617,13 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
HandleScope handle_scope(isolate);
Handle<Object> root;
- Handle<FixedArray> outdated_contexts;
Handle<JSGlobalProxy> global_proxy =
isolate->factory()->NewUninitializedJSGlobalProxy();
{
SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
Deserializer deserializer(&snapshot_data);
- root =
- deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts).ToHandleChecked();
- if (FLAG_global_var_shortcuts) {
- CHECK_EQ(5, outdated_contexts->length());
- } else {
- CHECK_EQ(3, outdated_contexts->length());
- }
+ root = deserializer.DeserializePartial(isolate, global_proxy)
+ .ToHandleChecked();
CHECK(root->IsContext());
Handle<Context> context = Handle<Context>::cast(root);
CHECK(context->global_proxy() == *global_proxy);
@@ -654,7 +638,7 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
->ToNumber(v8_isolate->GetCurrentContext())
.ToLocalChecked()
->Value();
- CHECK(r >= 1 && r <= 2);
+ CHECK_EQ(1, r);
int f = CompileRun("f()")
->ToNumber(v8_isolate->GetCurrentContext())
.ToLocalChecked()
@@ -1528,7 +1512,7 @@ TEST(SerializeToplevelIsolates) {
->Equals(isolate2->GetCurrentContext(), v8_str("abcdef"))
.FromJust());
}
- DCHECK(toplevel_test_code_event_found);
+ CHECK(toplevel_test_code_event_found);
isolate2->Dispose();
}
diff --git a/deps/v8/test/cctest/test-simd.cc b/deps/v8/test/cctest/test-simd.cc
index 98c6c7f9df..1f412affba 100644
--- a/deps/v8/test/cctest/test-simd.cc
+++ b/deps/v8/test/cctest/test-simd.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/objects.h"
diff --git a/deps/v8/test/cctest/test-slots-buffer.cc b/deps/v8/test/cctest/test-slots-buffer.cc
index c23cfdfc51..07b70f5217 100644
--- a/deps/v8/test/cctest/test-slots-buffer.cc
+++ b/deps/v8/test/cctest/test-slots-buffer.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/heap/slots-buffer.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/utils-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 6e0ee04a9c..e992f33c8b 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -79,7 +79,7 @@ class MyRandomNumberGenerator {
}
bool next(double threshold) {
- DCHECK(threshold >= 0.0 && threshold <= 1.0);
+ CHECK(threshold >= 0.0 && threshold <= 1.0);
if (threshold == 1.0) return true;
if (threshold == 0.0) return false;
uint32_t value = next() % 100000;
@@ -191,9 +191,9 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
buf[j] = rng->next(0x10000);
}
Resource* resource = new Resource(buf, len);
- building_blocks[i] =
- v8::Utils::OpenHandle(
- *v8::String::NewExternal(CcTest::isolate(), resource));
+ building_blocks[i] = v8::Utils::OpenHandle(
+ *v8::String::NewExternalTwoByte(CcTest::isolate(), resource)
+ .ToLocalChecked());
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -205,9 +205,9 @@ static void InitializeBuildingBlocks(Handle<String>* building_blocks,
buf[j] = rng->next(0x80);
}
OneByteResource* resource = new OneByteResource(buf, len);
- building_blocks[i] =
- v8::Utils::OpenHandle(
- *v8::String::NewExternal(CcTest::isolate(), resource));
+ building_blocks[i] = v8::Utils::OpenHandle(
+ *v8::String::NewExternalOneByte(CcTest::isolate(), resource)
+ .ToLocalChecked());
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
@@ -880,8 +880,10 @@ TEST(Utf8Conversion) {
// A simple one-byte string
const char* one_byte_string = "abcdef12345";
int len = v8::String::NewFromUtf8(CcTest::isolate(), one_byte_string,
- v8::String::kNormalString,
- StrLength(one_byte_string))->Utf8Length();
+ v8::NewStringType::kNormal,
+ StrLength(one_byte_string))
+ .ToLocalChecked()
+ ->Utf8Length();
CHECK_EQ(StrLength(one_byte_string), len);
// A mixed one-byte and two-byte string
// U+02E4 -> CB A4
@@ -896,8 +898,10 @@ TEST(Utf8Conversion) {
// The number of bytes expected to be written for each length
const int lengths[12] = {0, 0, 2, 3, 3, 3, 6, 7, 7, 7, 10, 11};
const int char_lengths[12] = {0, 0, 1, 2, 2, 2, 3, 4, 4, 4, 5, 5};
- v8::Handle<v8::String> mixed = v8::String::NewFromTwoByte(
- CcTest::isolate(), mixed_string, v8::String::kNormalString, 5);
+ v8::Local<v8::String> mixed =
+ v8::String::NewFromTwoByte(CcTest::isolate(), mixed_string,
+ v8::NewStringType::kNormal, 5)
+ .ToLocalChecked();
CHECK_EQ(10, mixed->Utf8Length());
// Try encoding the string with all capacities
char buffer[11];
@@ -929,9 +933,9 @@ TEST(ExternalShortStringAdd) {
CHECK_GT(kMaxLength, i::ConsString::kMinLength);
// Allocate two JavaScript arrays for holding short strings.
- v8::Handle<v8::Array> one_byte_external_strings =
+ v8::Local<v8::Array> one_byte_external_strings =
v8::Array::New(CcTest::isolate(), kMaxLength + 1);
- v8::Handle<v8::Array> non_one_byte_external_strings =
+ v8::Local<v8::Array> non_one_byte_external_strings =
v8::Array::New(CcTest::isolate(), kMaxLength + 1);
// Generate short one-byte and two-byte external strings.
@@ -944,10 +948,13 @@ TEST(ExternalShortStringAdd) {
// string data.
OneByteResource* one_byte_resource = new OneByteResource(one_byte, i);
v8::Local<v8::String> one_byte_external_string =
- v8::String::NewExternal(CcTest::isolate(), one_byte_resource);
+ v8::String::NewExternalOneByte(CcTest::isolate(), one_byte_resource)
+ .ToLocalChecked();
- one_byte_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
- one_byte_external_string);
+ one_byte_external_strings->Set(context.local(),
+ v8::Integer::New(CcTest::isolate(), i),
+ one_byte_external_string)
+ .FromJust();
uc16* non_one_byte = NewArray<uc16>(i + 1);
for (int j = 0; j < i; j++) {
non_one_byte[j] = 0x1234;
@@ -956,17 +963,25 @@ TEST(ExternalShortStringAdd) {
// string data.
Resource* resource = new Resource(non_one_byte, i);
v8::Local<v8::String> non_one_byte_external_string =
- v8::String::NewExternal(CcTest::isolate(), resource);
- non_one_byte_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
- non_one_byte_external_string);
+ v8::String::NewExternalTwoByte(CcTest::isolate(), resource)
+ .ToLocalChecked();
+ non_one_byte_external_strings->Set(context.local(),
+ v8::Integer::New(CcTest::isolate(), i),
+ non_one_byte_external_string)
+ .FromJust();
}
// Add the arrays with the short external strings in the global object.
- v8::Handle<v8::Object> global = context->Global();
- global->Set(v8_str("external_one_byte"), one_byte_external_strings);
- global->Set(v8_str("external_non_one_byte"), non_one_byte_external_strings);
- global->Set(v8_str("max_length"),
- v8::Integer::New(CcTest::isolate(), kMaxLength));
+ v8::Local<v8::Object> global = context->Global();
+ global->Set(context.local(), v8_str("external_one_byte"),
+ one_byte_external_strings)
+ .FromJust();
+ global->Set(context.local(), v8_str("external_non_one_byte"),
+ non_one_byte_external_strings)
+ .FromJust();
+ global->Set(context.local(), v8_str("max_length"),
+ v8::Integer::New(CcTest::isolate(), kMaxLength))
+ .FromJust();
// Add short external one-byte and two-byte strings checking the result.
static const char* source =
@@ -1012,7 +1027,7 @@ TEST(ExternalShortStringAdd) {
" return 0;"
"};"
"test()";
- CHECK_EQ(0, CompileRun(source)->Int32Value());
+ CHECK_EQ(0, CompileRun(source)->Int32Value(context.local()).FromJust());
}
@@ -1021,14 +1036,19 @@ TEST(JSONStringifySliceMadeExternal) {
// Create a sliced string from a one-byte string. The latter is turned
// into a two-byte external string. Check that JSON.stringify works.
v8::HandleScope handle_scope(CcTest::isolate());
- v8::Handle<v8::String> underlying =
+ v8::Local<v8::String> underlying =
CompileRun(
"var underlying = 'abcdefghijklmnopqrstuvwxyz';"
- "underlying")->ToString(CcTest::isolate());
- v8::Handle<v8::String> slice = CompileRun(
- "var slice = '';"
- "slice = underlying.slice(1);"
- "slice")->ToString(CcTest::isolate());
+ "underlying")
+ ->ToString(CcTest::isolate()->GetCurrentContext())
+ .ToLocalChecked();
+ v8::Local<v8::String> slice =
+ CompileRun(
+ "var slice = '';"
+ "slice = underlying.slice(1);"
+ "slice")
+ ->ToString(CcTest::isolate()->GetCurrentContext())
+ .ToLocalChecked();
CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString());
CHECK(v8::Utils::OpenHandle(*underlying)->IsSeqOneByteString());
@@ -1079,16 +1099,23 @@ TEST(CachedHashOverflow) {
};
const char* line;
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
for (int i = 0; (line = lines[i]); i++) {
printf("%s\n", line);
- v8::Local<v8::Value> result = v8::Script::Compile(
- v8::String::NewFromUtf8(CcTest::isolate(), line))->Run();
+ v8::Local<v8::Value> result =
+ v8::Script::Compile(context,
+ v8::String::NewFromUtf8(CcTest::isolate(), line,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocalChecked();
CHECK_EQ(results[i]->IsUndefined(), result->IsUndefined());
CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
if (result->IsNumber()) {
int32_t value = 0;
CHECK(results[i]->ToInt32(&value));
- CHECK_EQ(value, result->ToInt32(CcTest::isolate())->Value());
+ CHECK_EQ(value, result->ToInt32(context).ToLocalChecked()->Value());
}
}
}
@@ -1320,7 +1347,8 @@ TEST(CountBreakIterator) {
" return iterator.next();"
"})();");
CHECK(result->IsNumber());
- int uses = result->ToInt32(CcTest::isolate())->Value() == 0 ? 0 : 1;
+ int uses =
+ result->ToInt32(context.local()).ToLocalChecked()->Value() == 0 ? 0 : 1;
CHECK_EQ(uses, use_counts[v8::Isolate::kBreakIterator]);
// Make sure GC cleans up the break iterator, so we don't get a memory leak
// reported by ASAN.
@@ -1341,7 +1369,7 @@ TEST(StringReplaceAtomTwoByteResult) {
CHECK(string->IsSeqTwoByteString());
v8::Local<v8::String> expected = v8_str("one_byte\x80only\x80string\x80");
- CHECK(expected->Equals(result));
+ CHECK(expected->Equals(context.local(), result).FromJust());
}
@@ -1477,6 +1505,6 @@ TEST(FormatMessage) {
MessageTemplate::FormatMessage(MessageTemplate::kPropertyNotFunction,
arg0, arg1, arg2).ToHandleChecked();
Handle<String> expected = isolate->factory()->NewStringFromAsciiChecked(
- "Property 'arg0' of object arg1 is not a function");
+ "'arg0' returned for property 'arg1' of object 'arg2' is not a function");
CHECK(String::Equals(result, expected));
}
diff --git a/deps/v8/test/cctest/test-strtod.cc b/deps/v8/test/cctest/test-strtod.cc
index ccefe321aa..7c1118603e 100644
--- a/deps/v8/test/cctest/test-strtod.cc
+++ b/deps/v8/test/cctest/test-strtod.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index ff4a672390..1024a27edf 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -30,9 +30,6 @@
// of ConsStrings. These operations may not be very fast, but they
// should be possible without getting errors due to too deep recursion.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/objects.h"
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index b39deef144..05a3c3339e 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 71dd49f50b..a9058a523a 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
new file mode 100644
index 0000000000..a889e088f6
--- /dev/null
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -0,0 +1,258 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/v8.h"
+
+#include "src/list.h"
+#include "src/list-inl.h"
+#include "test/cctest/cctest.h"
+
+using v8::IdleTask;
+using v8::Task;
+using v8::Isolate;
+
+#include "src/tracing/trace-event.h"
+
+#define GET_TRACE_OBJECTS_LIST platform.GetMockTraceObjects()
+
+#define GET_TRACE_OBJECT(Index) GET_TRACE_OBJECTS_LIST->at(Index)
+
+
+struct MockTraceObject {
+ char phase;
+ std::string name;
+ uint64_t id;
+ uint64_t bind_id;
+ int num_args;
+ unsigned int flags;
+ MockTraceObject(char phase, std::string name, uint64_t id, uint64_t bind_id,
+ int num_args, int flags)
+ : phase(phase),
+ name(name),
+ id(id),
+ bind_id(bind_id),
+ num_args(num_args),
+ flags(flags) {}
+};
+
+typedef v8::internal::List<MockTraceObject*> MockTraceObjectList;
+
+class MockTracingPlatform : public v8::Platform {
+ public:
+ explicit MockTracingPlatform(v8::Platform* platform) {}
+ virtual ~MockTracingPlatform() {
+ for (int i = 0; i < trace_object_list_.length(); ++i) {
+ delete trace_object_list_[i];
+ }
+ trace_object_list_.Clear();
+ }
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {}
+
+ void CallOnForegroundThread(Isolate* isolate, Task* task) override {}
+
+ void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) override {}
+
+ double MonotonicallyIncreasingTime() override { return 0.0; }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {}
+
+ bool IdleTasksEnabled(Isolate* isolate) override { return false; }
+
+ bool PendingIdleTask() { return false; }
+
+ void PerformIdleTask(double idle_time_in_seconds) {}
+
+ bool PendingDelayedTask() { return false; }
+
+ void PerformDelayedTask() {}
+
+ uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
+ const char* name, uint64_t id, uint64_t bind_id,
+ int num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values,
+ unsigned int flags) override {
+ MockTraceObject* to = new MockTraceObject(phase, std::string(name), id,
+ bind_id, num_args, flags);
+ trace_object_list_.Add(to);
+ return 0;
+ }
+
+ void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle) override {}
+
+ const uint8_t* GetCategoryGroupEnabled(const char* name) override {
+ if (strcmp(name, "v8-cat")) {
+ static uint8_t no = 0;
+ return &no;
+ } else {
+ static uint8_t yes = 0x7;
+ return &yes;
+ }
+ }
+
+ const char* GetCategoryGroupName(
+ const uint8_t* category_enabled_flag) override {
+ static const char dummy[] = "dummy";
+ return dummy;
+ }
+
+ MockTraceObjectList* GetMockTraceObjects() { return &trace_object_list_; }
+
+ private:
+ MockTraceObjectList trace_object_list_;
+};
+
+
+TEST(TraceEventDisabledCategory) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ // Disabled category, will not add events.
+ TRACE_EVENT_BEGIN0("cat", "e1");
+ TRACE_EVENT_END0("cat", "e1");
+ CHECK_EQ(0, GET_TRACE_OBJECTS_LIST->length());
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(TraceEventNoArgs) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ // Enabled category will add 2 events.
+ TRACE_EVENT_BEGIN0("v8-cat", "e1");
+ TRACE_EVENT_END0("v8-cat", "e1");
+
+ CHECK_EQ(2, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ('B', GET_TRACE_OBJECT(0)->phase);
+ CHECK_EQ("e1", GET_TRACE_OBJECT(0)->name);
+ CHECK_EQ(0, GET_TRACE_OBJECT(0)->num_args);
+
+ CHECK_EQ('E', GET_TRACE_OBJECT(1)->phase);
+ CHECK_EQ("e1", GET_TRACE_OBJECT(1)->name);
+ CHECK_EQ(0, GET_TRACE_OBJECT(1)->num_args);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(TraceEventWithOneArg) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ TRACE_EVENT_BEGIN1("v8-cat", "e1", "arg1", 42);
+ TRACE_EVENT_END1("v8-cat", "e1", "arg1", 42);
+ TRACE_EVENT_BEGIN1("v8-cat", "e2", "arg1", "abc");
+ TRACE_EVENT_END1("v8-cat", "e2", "arg1", "abc");
+
+ CHECK_EQ(4, GET_TRACE_OBJECTS_LIST->length());
+
+ CHECK_EQ(1, GET_TRACE_OBJECT(0)->num_args);
+ CHECK_EQ(1, GET_TRACE_OBJECT(1)->num_args);
+ CHECK_EQ(1, GET_TRACE_OBJECT(2)->num_args);
+ CHECK_EQ(1, GET_TRACE_OBJECT(3)->num_args);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(TraceEventWithTwoArgs) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ TRACE_EVENT_BEGIN2("v8-cat", "e1", "arg1", 42, "arg2", "abc");
+ TRACE_EVENT_END2("v8-cat", "e1", "arg1", 42, "arg2", "abc");
+ TRACE_EVENT_BEGIN2("v8-cat", "e2", "arg1", "abc", "arg2", 43);
+ TRACE_EVENT_END2("v8-cat", "e2", "arg1", "abc", "arg2", 43);
+
+ CHECK_EQ(4, GET_TRACE_OBJECTS_LIST->length());
+
+ CHECK_EQ(2, GET_TRACE_OBJECT(0)->num_args);
+ CHECK_EQ(2, GET_TRACE_OBJECT(1)->num_args);
+ CHECK_EQ(2, GET_TRACE_OBJECT(2)->num_args);
+ CHECK_EQ(2, GET_TRACE_OBJECT(3)->num_args);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(ScopedTraceEvent) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ { TRACE_EVENT0("v8-cat", "e"); }
+
+ CHECK_EQ(1, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ(0, GET_TRACE_OBJECT(0)->num_args);
+
+ { TRACE_EVENT1("v8-cat", "e1", "arg1", "abc"); }
+
+ CHECK_EQ(2, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ(1, GET_TRACE_OBJECT(1)->num_args);
+
+ { TRACE_EVENT2("v8-cat", "e1", "arg1", "abc", "arg2", 42); }
+
+ CHECK_EQ(3, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ(2, GET_TRACE_OBJECT(2)->num_args);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(TestEventWithFlow) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ static uint64_t bind_id = 21;
+ {
+ TRACE_EVENT_WITH_FLOW0("v8-cat", "f1", bind_id, TRACE_EVENT_FLAG_FLOW_OUT);
+ }
+ {
+ TRACE_EVENT_WITH_FLOW0(
+ "v8-cat", "f2", bind_id,
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT);
+ }
+ { TRACE_EVENT_WITH_FLOW0("v8-cat", "f3", bind_id, TRACE_EVENT_FLAG_FLOW_IN); }
+
+ CHECK_EQ(3, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ(bind_id, GET_TRACE_OBJECT(0)->bind_id);
+ CHECK_EQ(TRACE_EVENT_FLAG_FLOW_OUT, GET_TRACE_OBJECT(0)->flags);
+ CHECK_EQ(bind_id, GET_TRACE_OBJECT(1)->bind_id);
+ CHECK_EQ(TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+ GET_TRACE_OBJECT(1)->flags);
+ CHECK_EQ(bind_id, GET_TRACE_OBJECT(2)->bind_id);
+ CHECK_EQ(TRACE_EVENT_FLAG_FLOW_IN, GET_TRACE_OBJECT(2)->flags);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(TestEventWithId) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockTracingPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+
+ static uint64_t event_id = 21;
+ TRACE_EVENT_ASYNC_BEGIN0("v8-cat", "a1", event_id);
+ TRACE_EVENT_ASYNC_END0("v8-cat", "a1", event_id);
+
+ CHECK_EQ(2, GET_TRACE_OBJECTS_LIST->length());
+ CHECK_EQ(TRACE_EVENT_PHASE_ASYNC_BEGIN, GET_TRACE_OBJECT(0)->phase);
+ CHECK_EQ(event_id, GET_TRACE_OBJECT(0)->id);
+ CHECK_EQ(TRACE_EVENT_PHASE_ASYNC_END, GET_TRACE_OBJECT(1)->phase);
+ CHECK_EQ(event_id, GET_TRACE_OBJECT(1)->id);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 60a0706b04..8834f9ade1 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include <utility>
@@ -87,7 +84,9 @@ TEST(TransitionArray_SimpleFieldTransitions) {
(key == *name2 && target == *map2));
}
- DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#ifdef DEBUG
+ CHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#endif
}
@@ -136,7 +135,9 @@ TEST(TransitionArray_FullFieldTransitions) {
(key == *name2 && target == *map2));
}
- DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#ifdef DEBUG
+ CHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#endif
}
@@ -183,7 +184,9 @@ TEST(TransitionArray_DifferentFieldNames) {
}
}
- DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#ifdef DEBUG
+ CHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#endif
}
@@ -224,7 +227,9 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
CHECK_EQ(*name, TransitionArray::GetKey(map0->raw_transitions(), i));
}
- DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#ifdef DEBUG
+ CHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#endif
}
@@ -302,5 +307,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
}
}
- DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#ifdef DEBUG
+ CHECK(TransitionArray::IsSortedNoDuplicates(*map0));
+#endif
}
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index f55c560a28..348eb05a3a 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index dda36de46f..4549654501 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <vector>
#include "src/crankshaft/hydrogen-types.h"
diff --git a/deps/v8/test/cctest/test-typing-reset.cc b/deps/v8/test/cctest/test-typing-reset.cc
index 9102e1fca0..4e9413ac3a 100644
--- a/deps/v8/test/cctest/test-typing-reset.cc
+++ b/deps/v8/test/cctest/test-typing-reset.cc
@@ -2,18 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
-#include "src/ast.h"
-#include "src/ast-expression-visitor.h"
-#include "src/parser.h"
-#include "src/rewriter.h"
-#include "src/scopes.h"
+#include "src/ast/ast.h"
+#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/scopes.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
#include "src/typing-reset.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/test-unbound-queue.cc b/deps/v8/test/cctest/test-unbound-queue.cc
index a19b783392..48e344aaa6 100644
--- a/deps/v8/test/cctest/test-unbound-queue.cc
+++ b/deps/v8/test/cctest/test-unbound-queue.cc
@@ -27,9 +27,6 @@
//
// Tests of the unbound queue.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 3dd56ee09e..3906d848de 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this define after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include <utility>
@@ -18,7 +15,7 @@
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap-tester.h"
+#include "test/cctest/heap/utils-inl.h"
using namespace v8::base;
using namespace v8::internal;
@@ -54,10 +51,12 @@ static Handle<String> MakeName(const char* str, int suffix) {
Handle<JSObject> GetObject(const char* name) {
- return v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
- CcTest::global()
- ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str(name))
- .ToLocalChecked()));
+ return Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ CcTest::global()
+ ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
+ v8_str(name))
+ .ToLocalChecked())));
}
@@ -66,7 +65,7 @@ static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
return obj->RawFastDoublePropertyAt(field_index);
} else {
Object* value = obj->RawFastPropertyAt(field_index);
- DCHECK(value->IsMutableHeapNumber());
+ CHECK(value->IsMutableHeapNumber());
return HeapNumber::cast(value)->value();
}
}
@@ -742,22 +741,41 @@ TEST(LayoutDescriptorAppendAllDoubles) {
static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
Isolate* isolate, int inobject_properties,
Handle<DescriptorArray> descriptors, int number_of_descriptors) {
- Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<Map> initial_map = Map::Create(isolate, inobject_properties);
Handle<LayoutDescriptor> full_layout_descriptor = LayoutDescriptor::New(
- map, descriptors, descriptors->number_of_descriptors());
+ initial_map, descriptors, descriptors->number_of_descriptors());
int nof = 0;
bool switched_to_slow_mode = false;
- for (int i = 0; i < number_of_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ // This method calls LayoutDescriptor::AppendIfFastOrUseFull() internally
+ // and does all the required map-descriptors related book keeping.
+ Handle<Map> last_map = Map::AddMissingTransitionsForTesting(
+ initial_map, descriptors, full_layout_descriptor);
- // This method calls LayoutDescriptor::AppendIfFastOrUseFull() internally
- // and does all the required map-descriptors related book keeping.
- map = Map::CopyInstallDescriptorsForTesting(map, i, descriptors,
- full_layout_descriptor);
+ // Follow back pointers to construct a sequence of maps from |map|
+ // to |last_map|.
+ int descriptors_length = descriptors->number_of_descriptors();
+ std::vector<Handle<Map>> maps(descriptors_length);
+ {
+ CHECK(last_map->is_stable());
+ Map* map = *last_map;
+ for (int i = 0; i < descriptors_length; i++) {
+ maps[descriptors_length - 1 - i] = handle(map, isolate);
+ Object* maybe_map = map->GetBackPointer();
+ CHECK(maybe_map->IsMap());
+ map = Map::cast(maybe_map);
+ CHECK(!map->is_stable());
+ }
+ CHECK_EQ(1, maps[0]->NumberOfOwnDescriptors());
+ }
+ Handle<Map> map;
+ // Now check layout descriptors of all intermediate maps.
+ for (int i = 0; i < number_of_descriptors; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ map = maps[i];
LayoutDescriptor* layout_desc = map->layout_descriptor();
if (layout_desc->IsSlowLayout()) {
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 56990edfd3..207c2450a0 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
index 50db316537..de0976825d 100644
--- a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
+++ b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index f125cc09bf..bd1ff998b9 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/arm64/utils-arm64.h"
@@ -98,7 +95,7 @@ bool EqualFP64(double expected, const RegisterDump*, double result) {
bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
- DCHECK(reg.Is32Bits());
+ CHECK(reg.Is32Bits());
// Retrieve the corresponding X register so we can check that the upper part
// was properly cleared.
int64_t result_x = core->xreg(reg.code());
@@ -115,7 +112,7 @@ bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
bool Equal64(uint64_t expected,
const RegisterDump* core,
const Register& reg) {
- DCHECK(reg.Is64Bits());
+ CHECK(reg.Is64Bits());
uint64_t result = core->xreg(reg.code());
return Equal64(expected, core, result);
}
@@ -124,7 +121,7 @@ bool Equal64(uint64_t expected,
bool EqualFP32(float expected,
const RegisterDump* core,
const FPRegister& fpreg) {
- DCHECK(fpreg.Is32Bits());
+ CHECK(fpreg.Is32Bits());
// Retrieve the corresponding D register so we can check that the upper part
// was properly cleared.
uint64_t result_64 = core->dreg_bits(fpreg.code());
@@ -141,7 +138,7 @@ bool EqualFP32(float expected,
bool EqualFP64(double expected,
const RegisterDump* core,
const FPRegister& fpreg) {
- DCHECK(fpreg.Is64Bits());
+ CHECK(fpreg.Is64Bits());
return EqualFP64(expected, core, core->dreg(fpreg.code()));
}
@@ -149,7 +146,7 @@ bool EqualFP64(double expected,
bool Equal64(const Register& reg0,
const RegisterDump* core,
const Register& reg1) {
- DCHECK(reg0.Is64Bits() && reg1.Is64Bits());
+ CHECK(reg0.Is64Bits() && reg1.Is64Bits());
int64_t expected = core->xreg(reg0.code());
int64_t result = core->xreg(reg1.code());
return Equal64(expected, core, result);
@@ -177,8 +174,8 @@ static char FlagV(uint32_t flags) {
bool EqualNzcv(uint32_t expected, uint32_t result) {
- DCHECK((expected & ~NZCVFlag) == 0);
- DCHECK((result & ~NZCVFlag) == 0);
+ CHECK((expected & ~NZCVFlag) == 0);
+ CHECK((result & ~NZCVFlag) == 0);
if (result != expected) {
printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
@@ -234,7 +231,7 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
}
}
// Check that we got enough registers.
- DCHECK(CountSetBits(list, kNumberOfRegisters) == reg_count);
+ CHECK(CountSetBits(list, kNumberOfRegisters) == reg_count);
return list;
}
@@ -261,7 +258,7 @@ RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
}
}
// Check that we got enough registers.
- DCHECK(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
+ CHECK(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
return list;
}
@@ -273,7 +270,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
if (reg_list & (1UL << i)) {
Register xn = Register::Create(i, kXRegSizeInBits);
// We should never write into csp here.
- DCHECK(!xn.Is(csp));
+ CHECK(!xn.Is(csp));
if (!xn.IsZero()) {
if (!first.IsValid()) {
// This is the first register we've hit, so construct the literal.
@@ -323,7 +320,7 @@ void Clobber(MacroAssembler* masm, CPURegList reg_list) {
void RegisterDump::Dump(MacroAssembler* masm) {
- DCHECK(__ StackPointer().Is(csp));
+ CHECK(__ StackPointer().Is(csp));
// Ensure that we don't unintentionally clobber any registers.
RegList old_tmp_list = masm->TmpList()->list();
@@ -399,7 +396,7 @@ void RegisterDump::Dump(MacroAssembler* masm) {
// easily restore them.
Register dump2_base = x10;
Register dump2 = x11;
- DCHECK(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
+ CHECK(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
// Don't lose the dump_ address.
__ Mov(dump2_base, dump_base);
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index a091bf3932..3ecae23d4b 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -59,7 +59,7 @@ class RegisterDump {
if (code == kSPRegInternalCode) {
return wspreg();
}
- DCHECK(RegAliasesMatch(code));
+ CHECK(RegAliasesMatch(code));
return dump_.w_[code];
}
@@ -67,13 +67,13 @@ class RegisterDump {
if (code == kSPRegInternalCode) {
return spreg();
}
- DCHECK(RegAliasesMatch(code));
+ CHECK(RegAliasesMatch(code));
return dump_.x_[code];
}
// FPRegister accessors.
inline uint32_t sreg_bits(unsigned code) const {
- DCHECK(FPRegAliasesMatch(code));
+ CHECK(FPRegAliasesMatch(code));
return dump_.s_[code];
}
@@ -82,7 +82,7 @@ class RegisterDump {
}
inline uint64_t dreg_bits(unsigned code) const {
- DCHECK(FPRegAliasesMatch(code));
+ CHECK(FPRegAliasesMatch(code));
return dump_.d_[code];
}
@@ -92,19 +92,19 @@ class RegisterDump {
// Stack pointer accessors.
inline int64_t spreg() const {
- DCHECK(SPRegAliasesMatch());
+ CHECK(SPRegAliasesMatch());
return dump_.sp_;
}
inline int32_t wspreg() const {
- DCHECK(SPRegAliasesMatch());
+ CHECK(SPRegAliasesMatch());
return static_cast<int32_t>(dump_.wsp_);
}
// Flags accessors.
inline uint32_t flags_nzcv() const {
- DCHECK(IsComplete());
- DCHECK((dump_.flags_ & ~Flags_mask) == 0);
+ CHECK(IsComplete());
+ CHECK((dump_.flags_ & ~Flags_mask) == 0);
return dump_.flags_ & Flags_mask;
}
@@ -120,21 +120,21 @@ class RegisterDump {
// w<code>. A failure of this test most likely represents a failure in the
// ::Dump method, or a failure in the simulator.
bool RegAliasesMatch(unsigned code) const {
- DCHECK(IsComplete());
- DCHECK(code < kNumberOfRegisters);
+ CHECK(IsComplete());
+ CHECK(code < kNumberOfRegisters);
return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
}
// As RegAliasesMatch, but for the stack pointer.
bool SPRegAliasesMatch() const {
- DCHECK(IsComplete());
+ CHECK(IsComplete());
return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
}
// As RegAliasesMatch, but for floating-point registers.
bool FPRegAliasesMatch(unsigned code) const {
- DCHECK(IsComplete());
- DCHECK(code < kNumberOfFPRegisters);
+ CHECK(IsComplete());
+ CHECK(code < kNumberOfFPRegisters);
return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
}
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 979c6f251b..5045b7e591 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after it is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <stdlib.h>
#include <vector>
diff --git a/deps/v8/test/cctest/test-version.cc b/deps/v8/test/cctest/test-version.cc
index 77727a03c5..50fca16871 100644
--- a/deps/v8/test/cctest/test-version.cc
+++ b/deps/v8/test/cctest/test-version.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "src/v8.h"
#include "src/version.h"
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index a877a9eb91..2630110c59 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -25,15 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <utility>
#include "src/v8.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/utils-inl.h"
using namespace v8::internal;
@@ -59,7 +57,7 @@ static void WeakPointerCallback(
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
- DCHECK_EQ(1234, p->second);
+ CHECK_EQ(1234, p->second);
NumberOfWeakCalls++;
p->first->Reset();
}
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 411c92168d..6998e0f749 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -25,15 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include <utility>
#include "src/v8.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/utils-inl.h"
using namespace v8::internal;
@@ -62,7 +60,7 @@ static void WeakPointerCallback(
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
- DCHECK_EQ(1234, p->second);
+ CHECK_EQ(1234, p->second);
NumberOfWeakCalls++;
p->first->Reset();
}
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index bd93450a97..d28ef7d004 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -44,6 +44,10 @@ class CcTestSuite(testsuite.TestSuite):
build_dir = "out"
self.serdes_dir = os.path.normpath(
os.path.join(root, "..", "..", build_dir, ".serdes"))
+
+ def SetupWorkingDirectory(self):
+ # This is only called once per machine, while init above is called once per
+ # process.
if os.path.exists(self.serdes_dir):
shutil.rmtree(self.serdes_dir, True)
os.makedirs(self.serdes_dir)
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index 5e558cfd05..ea2b2cee3d 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(mythria): Remove this after this flag is turned on globally
-#define V8_IMMINENT_DEPRECATION_WARNINGS
-
#include "test/cctest/trace-extension.h"
#include "src/profiler/sampler.h"
diff --git a/deps/v8/test/cctest/wasm/OWNERS b/deps/v8/test/cctest/wasm/OWNERS
new file mode 100644
index 0000000000..c2abc8a6ad
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/OWNERS
@@ -0,0 +1,3 @@
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
new file mode 100644
index 0000000000..6fcde645cb
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -0,0 +1,141 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+#define BUILD(r, ...) \
+ do { \
+ byte code[] = {__VA_ARGS__}; \
+ r.Build(code, code + arraysize(code)); \
+ } while (false)
+
+
+static uint32_t AddJsFunction(TestingModule* module, FunctionSig* sig,
+ const char* source) {
+ Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ module->AddFunction(sig, Handle<Code>::null());
+ uint32_t index = static_cast<uint32_t>(module->module->functions->size() - 1);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Handle<Code> code = CompileWasmToJSWrapper(isolate, module, jsfunc, index);
+ module->function_code->at(index) = code;
+ return index;
+}
+
+
+static Handle<JSFunction> WrapCode(ModuleEnv* module, uint32_t index) {
+ Isolate* isolate = module->module->shared_isolate;
+ // Wrap the code so it can be called as a JS function.
+ Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
+ Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
+ Handle<Code> code = module->function_code->at(index);
+ WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
+ return compiler::CompileJSToWasmWrapper(isolate, module, name, code,
+ module_object, index);
+}
+
+
+static void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
+ double b) {
+ Isolate* isolate = jsfunc->GetIsolate();
+ Handle<Object> buffer[] = {isolate->factory()->NewNumber(a),
+ isolate->factory()->NewNumber(b)};
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, jsfunc, global, 2, buffer);
+
+ CHECK(!retval.is_null());
+ Handle<Object> result = retval.ToHandleChecked();
+ if (result->IsSmi()) {
+ CHECK_EQ(expected, Smi::cast(*result)->value());
+ } else {
+ CHECK(result->IsHeapNumber());
+ CHECK_EQ(expected, HeapNumber::cast(*result)->value());
+ }
+}
+
+
+TEST(Run_Int32Sub_jswrapped) {
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.i_ii());
+ BUILD(t, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+
+ EXPECT_CALL(33, jsfunc, 44, 11);
+ EXPECT_CALL(-8723487, jsfunc, -8000000, 723487);
+}
+
+
+TEST(Run_Float32Div_jswrapped) {
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.f_ff());
+ BUILD(t, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+
+ EXPECT_CALL(92, jsfunc, 46, 0.5);
+ EXPECT_CALL(64, jsfunc, -16, -0.25);
+}
+
+
+TEST(Run_Float64Add_jswrapped) {
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.d_dd());
+ BUILD(t, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+
+ EXPECT_CALL(3, jsfunc, 2, 1);
+ EXPECT_CALL(-5.5, jsfunc, -5.25, -0.25);
+}
+
+
+TEST(Run_I32Popcount_jswrapped) {
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.i_i());
+ BUILD(t, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+
+ EXPECT_CALL(2, jsfunc, 9, 0);
+ EXPECT_CALL(3, jsfunc, 11, 0);
+ EXPECT_CALL(6, jsfunc, 0x3F, 0);
+
+ USE(AddJsFunction);
+}
+
+
+#if !V8_TARGET_ARCH_ARM64
+// TODO(titzer): fix wasm->JS calls on arm64 (wrapper issues)
+
+TEST(Run_CallJS_Add_jswrapped) {
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.i_i(), &module);
+ uint32_t js_index =
+ AddJsFunction(&module, sigs.i_i(), "(function(a) { return a + 99; })");
+ BUILD(t, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
+
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+
+ EXPECT_CALL(101, jsfunc, 2, -8);
+ EXPECT_CALL(199, jsfunc, 100, -1);
+ EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
+}
+
+#endif
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
new file mode 100644
index 0000000000..3b7bae1dda
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -0,0 +1,199 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+
+namespace {
+void TestModule(WasmModuleIndex* module, int32_t expected_result) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ int32_t result =
+ CompileAndRunWasmModule(isolate, module->Begin(), module->End());
+ CHECK_EQ(expected_result, result);
+}
+} // namespace
+
+
+// A raw test that skips the WasmModuleBuilder.
+TEST(Run_WasmModule_CallAdd_rev) {
+ static const byte data[] = {
+ // sig#0 ------------------------------------------
+ kDeclSignatures, 2, 0, kLocalI32, // void -> int
+ 2, kLocalI32, kLocalI32, kLocalI32, // int,int -> int
+ // func#0 (main) ----------------------------------
+ kDeclFunctions, 2, kDeclFunctionExport, 0, 0, // sig index
+ 6, 0, // body size
+ kExprCallFunction, 1, // --
+ kExprI8Const, 77, // --
+ kExprI8Const, 22, // --
+ // func#1 -----------------------------------------
+ 0, // no name, not exported
+ 1, 0, // sig index
+ 5, 0, // body size
+ kExprI32Add, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ };
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ int32_t result =
+ CompileAndRunWasmModule(isolate, data, data + arraysize(data));
+ CHECK_EQ(99, result);
+}
+
+
+TEST(Run_WasmModule_Return114) {
+ static const int32_t kReturnValue = 114;
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* f = builder->FunctionAt(f_index);
+ f->ReturnType(kAstI32);
+ f->Exported(1);
+ byte code[] = {WASM_I8(kReturnValue)};
+ f->EmitCode(code, sizeof(code));
+ WasmModuleWriter* writer = builder->Build(&zone);
+ TestModule(writer->WriteTo(&zone), kReturnValue);
+}
+
+
+TEST(Run_WasmModule_CallAdd) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f1_index = builder->AddFunction();
+ WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
+ f->ReturnType(kAstI32);
+ uint16_t param1 = f->AddParam(kAstI32);
+ uint16_t param2 = f->AddParam(kAstI32);
+ byte code1[] = {WASM_I32_ADD(WASM_GET_LOCAL(param1), WASM_GET_LOCAL(param2))};
+ uint32_t local_indices1[] = {2, 4};
+ f->EmitCode(code1, sizeof(code1), local_indices1, sizeof(local_indices1) / 4);
+ uint16_t f2_index = builder->AddFunction();
+ f = builder->FunctionAt(f2_index);
+ f->ReturnType(kAstI32);
+ f->Exported(1);
+ byte code2[] = {WASM_CALL_FUNCTION(f1_index, WASM_I8(77), WASM_I8(22))};
+ f->EmitCode(code2, sizeof(code2));
+ WasmModuleWriter* writer = builder->Build(&zone);
+ TestModule(writer->WriteTo(&zone), 99);
+}
+
+
+TEST(Run_WasmModule_ReadLoadedDataSegment) {
+ static const byte kDataSegmentDest0 = 12;
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* f = builder->FunctionAt(f_index);
+ f->ReturnType(kAstI32);
+ f->Exported(1);
+ byte code[] = {
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kDataSegmentDest0))};
+ f->EmitCode(code, sizeof(code));
+ byte data[] = {0xaa, 0xbb, 0xcc, 0xdd};
+ builder->AddDataSegment(new (&zone) WasmDataSegmentEncoder(
+ &zone, data, sizeof(data), kDataSegmentDest0));
+ WasmModuleWriter* writer = builder->Build(&zone);
+ TestModule(writer->WriteTo(&zone), 0xddccbbaa);
+}
+
+
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_WITH_ASAN 1
+#endif
+#endif
+
+
+#if !defined(V8_WITH_ASAN)
+// TODO(bradnelson): Figure out why this crashes under asan.
+TEST(Run_WasmModule_CheckMemoryIsZero) {
+ static const int kCheckSize = 16 * 1024;
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* f = builder->FunctionAt(f_index);
+ f->ReturnType(kAstI32);
+ uint16_t localIndex = f->AddLocal(kAstI32);
+ f->Exported(1);
+ byte code[] = {WASM_BLOCK(
+ 2,
+ WASM_WHILE(
+ WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32(kCheckSize)),
+ WASM_IF_ELSE(
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(localIndex)),
+ WASM_BRV(2, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
+ WASM_I8(11))};
+ uint32_t local_indices[] = {7, 19, 25, 28};
+ f->EmitCode(code, sizeof(code), local_indices, sizeof(local_indices) / 4);
+ WasmModuleWriter* writer = builder->Build(&zone);
+ TestModule(writer->WriteTo(&zone), 11);
+}
+#endif
+
+
+#if !defined(V8_WITH_ASAN)
+// TODO(bradnelson): Figure out why this crashes under asan.
+TEST(Run_WasmModule_CallMain_recursive) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* f = builder->FunctionAt(f_index);
+ f->ReturnType(kAstI32);
+ uint16_t localIndex = f->AddLocal(kAstI32);
+ f->Exported(1);
+ byte code[] = {WASM_BLOCK(
+ 2, WASM_SET_LOCAL(localIndex,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_IF_ELSE(WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I8(5)),
+ WASM_BLOCK(2, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_INC_LOCAL(localIndex)),
+ WASM_BRV(1, WASM_CALL_FUNCTION0(0))),
+ WASM_BRV(0, WASM_I8(55))))};
+ uint32_t local_indices[] = {3, 11, 21, 24};
+ f->EmitCode(code, sizeof(code), local_indices, sizeof(local_indices) / 4);
+ WasmModuleWriter* writer = builder->Build(&zone);
+ TestModule(writer->WriteTo(&zone), 55);
+}
+#endif
+
+
+#if !defined(V8_WITH_ASAN)
+// TODO(bradnelson): Figure out why this crashes under asan.
+TEST(Run_WasmModule_Global) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint32_t global1 = builder->AddGlobal(MachineType::Int32(), 0);
+ uint32_t global2 = builder->AddGlobal(MachineType::Int32(), 0);
+ uint16_t f1_index = builder->AddFunction();
+ WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
+ f->ReturnType(kAstI32);
+ byte code1[] = {
+ WASM_I32_ADD(WASM_LOAD_GLOBAL(global1), WASM_LOAD_GLOBAL(global2))};
+ f->EmitCode(code1, sizeof(code1));
+ uint16_t f2_index = builder->AddFunction();
+ f = builder->FunctionAt(f2_index);
+ f->ReturnType(kAstI32);
+ f->Exported(1);
+ byte code2[] = {WASM_STORE_GLOBAL(global1, WASM_I32(56)),
+ WASM_STORE_GLOBAL(global2, WASM_I32(41)),
+ WASM_RETURN(WASM_CALL_FUNCTION0(f1_index))};
+ f->EmitCode(code2, sizeof(code2));
+ WasmModuleWriter* writer = builder->Build(&zone);
+ TestModule(writer->WriteTo(&zone), 97);
+}
+#endif
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
new file mode 100644
index 0000000000..445c3f0aed
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -0,0 +1,3254 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+#define BUILD(r, ...) \
+ do { \
+ byte code[] = {__VA_ARGS__}; \
+ r.Build(code, code + arraysize(code)); \
+ } while (false)
+
+
+TEST(Run_WasmInt8Const) {
+ WasmRunner<int8_t> r;
+ const byte kExpectedValue = 121;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I8(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+
+TEST(Run_WasmInt8Const_fallthru1) {
+ WasmRunner<int8_t> r;
+ const byte kExpectedValue = 122;
+ // kExpectedValue
+ BUILD(r, WASM_I8(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+
+TEST(Run_WasmInt8Const_fallthru2) {
+ WasmRunner<int8_t> r;
+ const byte kExpectedValue = 123;
+ // -99 kExpectedValue
+ BUILD(r, WASM_I8(-99), WASM_I8(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+
+TEST(Run_WasmInt8Const_all) {
+ for (int value = -128; value <= 127; value++) {
+ WasmRunner<int8_t> r;
+ // return(value)
+ BUILD(r, WASM_I8(value));
+ int8_t result = r.Call();
+ CHECK_EQ(value, result);
+ }
+}
+
+
+TEST(Run_WasmInt32Const) {
+ WasmRunner<int32_t> r;
+ const int32_t kExpectedValue = 0x11223344;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I32(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+
+TEST(Run_WasmInt32Const_many) {
+ FOR_INT32_INPUTS(i) {
+ WasmRunner<int32_t> r;
+ const int32_t kExpectedValue = *i;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I32(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+ }
+}
+
+
+TEST(Run_WasmMemorySize) {
+ WasmRunner<int32_t> r;
+ TestingModule module;
+ module.AddMemory(1024);
+ r.env()->module = &module;
+ BUILD(r, kExprMemorySize);
+ CHECK_EQ(1024, r.Call());
+}
+
+
+#if WASM_64
+TEST(Run_WasmInt64Const) {
+ WasmRunner<int64_t> r;
+ const int64_t kExpectedValue = 0x1122334455667788LL;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I64(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+
+TEST(Run_WasmInt64Const_many) {
+ int cntr = 0;
+ FOR_INT32_INPUTS(i) {
+ WasmRunner<int64_t> r;
+ const int64_t kExpectedValue = (static_cast<int64_t>(*i) << 32) | cntr;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I64(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+ cntr++;
+ }
+}
+#endif
+
+
+TEST(Run_WasmInt32Param0) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // return(local[0])
+ BUILD(r, WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+
+TEST(Run_WasmInt32Param0_fallthru) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // local[0]
+ BUILD(r, WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+
+TEST(Run_WasmInt32Param1) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ // local[1]
+ BUILD(r, WASM_GET_LOCAL(1));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(-111, *i)); }
+}
+
+
+TEST(Run_WasmInt32Add) {
+ WasmRunner<int32_t> r;
+ // 11 + 44
+ BUILD(r, WASM_I32_ADD(WASM_I8(11), WASM_I8(44)));
+ CHECK_EQ(55, r.Call());
+}
+
+
+TEST(Run_WasmInt32Add_P) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // p0 + 13
+ BUILD(r, WASM_I32_ADD(WASM_I8(13), WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
+}
+
+
+TEST(Run_WasmInt32Add_P_fallthru) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // p0 + 13
+ BUILD(r, WASM_I32_ADD(WASM_I8(13), WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
+}
+
+
+TEST(Run_WasmInt32Add_P2) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ // p0 + p1
+ BUILD(r, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
+ static_cast<uint32_t>(*j));
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+// TODO(titzer): Fix for nosee4 and re-enable.
+#if 0
+
+TEST(Run_WasmFloat32Add) {
+ WasmRunner<int32_t> r;
+ // int(11.5f + 44.5f)
+ BUILD(r,
+ WASM_I32_SCONVERT_F32(WASM_F32_ADD(WASM_F32(11.5f), WASM_F32(44.5f))));
+ CHECK_EQ(56, r.Call());
+}
+
+
+TEST(Run_WasmFloat64Add) {
+ WasmRunner<int32_t> r;
+ // return int(13.5d + 43.5d)
+ BUILD(r, WASM_I32_SCONVERT_F64(WASM_F64_ADD(WASM_F64(13.5), WASM_F64(43.5))));
+ CHECK_EQ(57, r.Call());
+}
+
+#endif
+
+
+void TestInt32Binop(WasmOpcode opcode, int32_t expected, int32_t a, int32_t b) {
+ {
+ WasmRunner<int32_t> r;
+ // K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_I32(a), WASM_I32(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ // a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+TEST(Run_WasmInt32Binops) {
+ TestInt32Binop(kExprI32Add, 88888888, 33333333, 55555555);
+ TestInt32Binop(kExprI32Sub, -1111111, 7777777, 8888888);
+ TestInt32Binop(kExprI32Mul, 65130756, 88734, 734);
+ TestInt32Binop(kExprI32DivS, -66, -4777344, 72384);
+ TestInt32Binop(kExprI32DivU, 805306368, 0xF0000000, 5);
+ TestInt32Binop(kExprI32RemS, -3, -3003, 1000);
+ TestInt32Binop(kExprI32RemU, 4, 4004, 1000);
+ TestInt32Binop(kExprI32And, 0xEE, 0xFFEE, 0xFF0000FF);
+ TestInt32Binop(kExprI32Ior, 0xF0FF00FF, 0xF0F000EE, 0x000F0011);
+ TestInt32Binop(kExprI32Xor, 0xABCDEF01, 0xABCDEFFF, 0xFE);
+ TestInt32Binop(kExprI32Shl, 0xA0000000, 0xA, 28);
+ TestInt32Binop(kExprI32ShrU, 0x07000010, 0x70000100, 4);
+ TestInt32Binop(kExprI32ShrS, 0xFF000000, 0x80000000, 7);
+ TestInt32Binop(kExprI32Eq, 1, -99, -99);
+ TestInt32Binop(kExprI32Ne, 0, -97, -97);
+
+ TestInt32Binop(kExprI32LtS, 1, -4, 4);
+ TestInt32Binop(kExprI32LeS, 0, -2, -3);
+ TestInt32Binop(kExprI32LtU, 1, 0, -6);
+ TestInt32Binop(kExprI32LeU, 1, 98978, 0xF0000000);
+
+ TestInt32Binop(kExprI32GtS, 1, 4, -4);
+ TestInt32Binop(kExprI32GeS, 0, -3, -2);
+ TestInt32Binop(kExprI32GtU, 1, -6, 0);
+ TestInt32Binop(kExprI32GeU, 1, 0xF0000000, 98978);
+}
+
+
+void TestInt32Unop(WasmOpcode opcode, int32_t expected, int32_t a) {
+ {
+ WasmRunner<int32_t> r;
+ // return op K
+ BUILD(r, WASM_UNOP(opcode, WASM_I32(a)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // return op a
+ BUILD(r, WASM_UNOP(opcode, WASM_GET_LOCAL(0)));
+ CHECK_EQ(expected, r.Call(a));
+ }
+}
+
+
+TEST(Run_WasmInt32Clz) {
+ TestInt32Unop(kExprI32Clz, 0, 0x80001000);
+ TestInt32Unop(kExprI32Clz, 1, 0x40000500);
+ TestInt32Unop(kExprI32Clz, 2, 0x20000300);
+ TestInt32Unop(kExprI32Clz, 3, 0x10000003);
+ TestInt32Unop(kExprI32Clz, 4, 0x08050000);
+ TestInt32Unop(kExprI32Clz, 5, 0x04006000);
+ TestInt32Unop(kExprI32Clz, 6, 0x02000000);
+ TestInt32Unop(kExprI32Clz, 7, 0x010000a0);
+ TestInt32Unop(kExprI32Clz, 8, 0x00800c00);
+ TestInt32Unop(kExprI32Clz, 9, 0x00400000);
+ TestInt32Unop(kExprI32Clz, 10, 0x0020000d);
+ TestInt32Unop(kExprI32Clz, 11, 0x00100f00);
+ TestInt32Unop(kExprI32Clz, 12, 0x00080000);
+ TestInt32Unop(kExprI32Clz, 13, 0x00041000);
+ TestInt32Unop(kExprI32Clz, 14, 0x00020020);
+ TestInt32Unop(kExprI32Clz, 15, 0x00010300);
+ TestInt32Unop(kExprI32Clz, 16, 0x00008040);
+ TestInt32Unop(kExprI32Clz, 17, 0x00004005);
+ TestInt32Unop(kExprI32Clz, 18, 0x00002050);
+ TestInt32Unop(kExprI32Clz, 19, 0x00001700);
+ TestInt32Unop(kExprI32Clz, 20, 0x00000870);
+ TestInt32Unop(kExprI32Clz, 21, 0x00000405);
+ TestInt32Unop(kExprI32Clz, 22, 0x00000203);
+ TestInt32Unop(kExprI32Clz, 23, 0x00000101);
+ TestInt32Unop(kExprI32Clz, 24, 0x00000089);
+ TestInt32Unop(kExprI32Clz, 25, 0x00000041);
+ TestInt32Unop(kExprI32Clz, 26, 0x00000022);
+ TestInt32Unop(kExprI32Clz, 27, 0x00000013);
+ TestInt32Unop(kExprI32Clz, 28, 0x00000008);
+ TestInt32Unop(kExprI32Clz, 29, 0x00000004);
+ TestInt32Unop(kExprI32Clz, 30, 0x00000002);
+ TestInt32Unop(kExprI32Clz, 31, 0x00000001);
+ TestInt32Unop(kExprI32Clz, 32, 0x00000000);
+}
+
+
+TEST(Run_WasmInt32Ctz) {
+ TestInt32Unop(kExprI32Ctz, 32, 0x00000000);
+ TestInt32Unop(kExprI32Ctz, 31, 0x80000000);
+ TestInt32Unop(kExprI32Ctz, 30, 0x40000000);
+ TestInt32Unop(kExprI32Ctz, 29, 0x20000000);
+ TestInt32Unop(kExprI32Ctz, 28, 0x10000000);
+ TestInt32Unop(kExprI32Ctz, 27, 0xa8000000);
+ TestInt32Unop(kExprI32Ctz, 26, 0xf4000000);
+ TestInt32Unop(kExprI32Ctz, 25, 0x62000000);
+ TestInt32Unop(kExprI32Ctz, 24, 0x91000000);
+ TestInt32Unop(kExprI32Ctz, 23, 0xcd800000);
+ TestInt32Unop(kExprI32Ctz, 22, 0x09400000);
+ TestInt32Unop(kExprI32Ctz, 21, 0xaf200000);
+ TestInt32Unop(kExprI32Ctz, 20, 0xac100000);
+ TestInt32Unop(kExprI32Ctz, 19, 0xe0b80000);
+ TestInt32Unop(kExprI32Ctz, 18, 0x9ce40000);
+ TestInt32Unop(kExprI32Ctz, 17, 0xc7920000);
+ TestInt32Unop(kExprI32Ctz, 16, 0xb8f10000);
+ TestInt32Unop(kExprI32Ctz, 15, 0x3b9f8000);
+ TestInt32Unop(kExprI32Ctz, 14, 0xdb4c4000);
+ TestInt32Unop(kExprI32Ctz, 13, 0xe9a32000);
+ TestInt32Unop(kExprI32Ctz, 12, 0xfca61000);
+ TestInt32Unop(kExprI32Ctz, 11, 0x6c8a7800);
+ TestInt32Unop(kExprI32Ctz, 10, 0x8ce5a400);
+ TestInt32Unop(kExprI32Ctz, 9, 0xcb7d0200);
+ TestInt32Unop(kExprI32Ctz, 8, 0xcb4dc100);
+ TestInt32Unop(kExprI32Ctz, 7, 0xdfbec580);
+ TestInt32Unop(kExprI32Ctz, 6, 0x27a9db40);
+ TestInt32Unop(kExprI32Ctz, 5, 0xde3bcb20);
+ TestInt32Unop(kExprI32Ctz, 4, 0xd7e8a610);
+ TestInt32Unop(kExprI32Ctz, 3, 0x9afdbc88);
+ TestInt32Unop(kExprI32Ctz, 2, 0x9afdbc84);
+ TestInt32Unop(kExprI32Ctz, 1, 0x9afdbc82);
+ TestInt32Unop(kExprI32Ctz, 0, 0x9afdbc81);
+}
+
+
+TEST(Run_WasmInt32Popcnt) {
+ TestInt32Unop(kExprI32Popcnt, 32, 0xffffffff);
+ TestInt32Unop(kExprI32Popcnt, 0, 0x00000000);
+ TestInt32Unop(kExprI32Popcnt, 1, 0x00008000);
+ TestInt32Unop(kExprI32Popcnt, 13, 0x12345678);
+ TestInt32Unop(kExprI32Popcnt, 19, 0xfedcba09);
+}
+
+
+#if WASM_64
+void TestInt64Binop(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
+ if (!WasmOpcodes::IsSupported(opcode)) return;
+ {
+ WasmRunner<int64_t> r;
+ // return K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_I64(a), WASM_I64(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ // return a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+void TestInt64Cmp(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
+ if (!WasmOpcodes::IsSupported(opcode)) return;
+ {
+ WasmRunner<int32_t> r;
+ // return K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_I64(a), WASM_I64(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ // return a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+TEST(Run_WasmInt64Binops) {
+ // TODO(titzer): real 64-bit numbers
+ TestInt64Binop(kExprI64Add, 8888888888888LL, 3333333333333LL,
+ 5555555555555LL);
+ TestInt64Binop(kExprI64Sub, -111111111111LL, 777777777777LL, 888888888888LL);
+ TestInt64Binop(kExprI64Mul, 65130756, 88734, 734);
+ TestInt64Binop(kExprI64DivS, -66, -4777344, 72384);
+ TestInt64Binop(kExprI64DivU, 805306368, 0xF0000000, 5);
+ TestInt64Binop(kExprI64RemS, -3, -3003, 1000);
+ TestInt64Binop(kExprI64RemU, 4, 4004, 1000);
+ TestInt64Binop(kExprI64And, 0xEE, 0xFFEE, 0xFF0000FF);
+ TestInt64Binop(kExprI64Ior, 0xF0FF00FF, 0xF0F000EE, 0x000F0011);
+ TestInt64Binop(kExprI64Xor, 0xABCDEF01, 0xABCDEFFF, 0xFE);
+ TestInt64Binop(kExprI64Shl, 0xA0000000, 0xA, 28);
+ TestInt64Binop(kExprI64ShrU, 0x0700001000123456LL, 0x7000010001234567LL, 4);
+ TestInt64Binop(kExprI64ShrS, 0xFF00000000000000LL, 0x8000000000000000LL, 7);
+ TestInt64Cmp(kExprI64Eq, 1, -9999, -9999);
+ TestInt64Cmp(kExprI64Ne, 1, -9199, -9999);
+ TestInt64Cmp(kExprI64LtS, 1, -4, 4);
+ TestInt64Cmp(kExprI64LeS, 0, -2, -3);
+ TestInt64Cmp(kExprI64LtU, 1, 0, -6);
+ TestInt64Cmp(kExprI64LeU, 1, 98978, 0xF0000000);
+}
+
+
+TEST(Run_WasmInt64Clz) {
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{0, 0x8000100000000000}, {1, 0x4000050000000000},
+ {2, 0x2000030000000000}, {3, 0x1000000300000000},
+ {4, 0x0805000000000000}, {5, 0x0400600000000000},
+ {6, 0x0200000000000000}, {7, 0x010000a000000000},
+ {8, 0x00800c0000000000}, {9, 0x0040000000000000},
+ {10, 0x0020000d00000000}, {11, 0x00100f0000000000},
+ {12, 0x0008000000000000}, {13, 0x0004100000000000},
+ {14, 0x0002002000000000}, {15, 0x0001030000000000},
+ {16, 0x0000804000000000}, {17, 0x0000400500000000},
+ {18, 0x0000205000000000}, {19, 0x0000170000000000},
+ {20, 0x0000087000000000}, {21, 0x0000040500000000},
+ {22, 0x0000020300000000}, {23, 0x0000010100000000},
+ {24, 0x0000008900000000}, {25, 0x0000004100000000},
+ {26, 0x0000002200000000}, {27, 0x0000001300000000},
+ {28, 0x0000000800000000}, {29, 0x0000000400000000},
+ {30, 0x0000000200000000}, {31, 0x0000000100000000},
+ {32, 0x0000000080001000}, {33, 0x0000000040000500},
+ {34, 0x0000000020000300}, {35, 0x0000000010000003},
+ {36, 0x0000000008050000}, {37, 0x0000000004006000},
+ {38, 0x0000000002000000}, {39, 0x00000000010000a0},
+ {40, 0x0000000000800c00}, {41, 0x0000000000400000},
+ {42, 0x000000000020000d}, {43, 0x0000000000100f00},
+ {44, 0x0000000000080000}, {45, 0x0000000000041000},
+ {46, 0x0000000000020020}, {47, 0x0000000000010300},
+ {48, 0x0000000000008040}, {49, 0x0000000000004005},
+ {50, 0x0000000000002050}, {51, 0x0000000000001700},
+ {52, 0x0000000000000870}, {53, 0x0000000000000405},
+ {54, 0x0000000000000203}, {55, 0x0000000000000101},
+ {56, 0x0000000000000089}, {57, 0x0000000000000041},
+ {58, 0x0000000000000022}, {59, 0x0000000000000013},
+ {60, 0x0000000000000008}, {61, 0x0000000000000004},
+ {62, 0x0000000000000002}, {63, 0x0000000000000001},
+ {64, 0x0000000000000000}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_CLZ(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+
+TEST(Run_WasmInt64Ctz) {
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{64, 0x0000000000000000}, {63, 0x8000000000000000},
+ {62, 0x4000000000000000}, {61, 0x2000000000000000},
+ {60, 0x1000000000000000}, {59, 0xa800000000000000},
+ {58, 0xf400000000000000}, {57, 0x6200000000000000},
+ {56, 0x9100000000000000}, {55, 0xcd80000000000000},
+ {54, 0x0940000000000000}, {53, 0xaf20000000000000},
+ {52, 0xac10000000000000}, {51, 0xe0b8000000000000},
+ {50, 0x9ce4000000000000}, {49, 0xc792000000000000},
+ {48, 0xb8f1000000000000}, {47, 0x3b9f800000000000},
+ {46, 0xdb4c400000000000}, {45, 0xe9a3200000000000},
+ {44, 0xfca6100000000000}, {43, 0x6c8a780000000000},
+ {42, 0x8ce5a40000000000}, {41, 0xcb7d020000000000},
+ {40, 0xcb4dc10000000000}, {39, 0xdfbec58000000000},
+ {38, 0x27a9db4000000000}, {37, 0xde3bcb2000000000},
+ {36, 0xd7e8a61000000000}, {35, 0x9afdbc8800000000},
+ {34, 0x9afdbc8400000000}, {33, 0x9afdbc8200000000},
+ {32, 0x9afdbc8100000000}, {31, 0x0000000080000000},
+ {30, 0x0000000040000000}, {29, 0x0000000020000000},
+ {28, 0x0000000010000000}, {27, 0x00000000a8000000},
+ {26, 0x00000000f4000000}, {25, 0x0000000062000000},
+ {24, 0x0000000091000000}, {23, 0x00000000cd800000},
+ {22, 0x0000000009400000}, {21, 0x00000000af200000},
+ {20, 0x00000000ac100000}, {19, 0x00000000e0b80000},
+ {18, 0x000000009ce40000}, {17, 0x00000000c7920000},
+ {16, 0x00000000b8f10000}, {15, 0x000000003b9f8000},
+ {14, 0x00000000db4c4000}, {13, 0x00000000e9a32000},
+ {12, 0x00000000fca61000}, {11, 0x000000006c8a7800},
+ {10, 0x000000008ce5a400}, {9, 0x00000000cb7d0200},
+ {8, 0x00000000cb4dc100}, {7, 0x00000000dfbec580},
+ {6, 0x0000000027a9db40}, {5, 0x00000000de3bcb20},
+ {4, 0x00000000d7e8a610}, {3, 0x000000009afdbc88},
+ {2, 0x000000009afdbc84}, {1, 0x000000009afdbc82},
+ {0, 0x000000009afdbc81}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_CTZ(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+
+TEST(Run_WasmInt64Popcnt) {
+ struct {
+ int64_t expected;
+ uint64_t input;
+ } values[] = {{64, 0xffffffffffffffff},
+ {0, 0x0000000000000000},
+ {2, 0x0000080000008000},
+ {26, 0x1123456782345678},
+ {38, 0xffedcba09edcba09}};
+
+ WasmRunner<int64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
+ for (size_t i = 0; i < arraysize(values); i++) {
+ CHECK_EQ(values[i].expected, r.Call(values[i].input));
+ }
+}
+
+
+#endif
+
+TEST(Run_WASM_Int32DivS_trap) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_TRAP(r.Call(100, 0));
+ CHECK_TRAP(r.Call(-1001, 0));
+ CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), -1));
+ CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+}
+
+
+TEST(Run_WASM_Int32RemS_trap) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(33, r.Call(133, 100));
+ CHECK_EQ(0, r.Call(std::numeric_limits<int32_t>::min(), -1));
+ CHECK_TRAP(r.Call(100, 0));
+ CHECK_TRAP(r.Call(-1001, 0));
+ CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+}
+
+
+TEST(Run_WASM_Int32DivU_trap) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_EQ(0, r.Call(std::numeric_limits<int32_t>::min(), -1));
+ CHECK_TRAP(r.Call(100, 0));
+ CHECK_TRAP(r.Call(-1001, 0));
+ CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+}
+
+
+TEST(Run_WASM_Int32RemU_trap) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ BUILD(r, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(17, r.Call(217, 100));
+ CHECK_TRAP(r.Call(100, 0));
+ CHECK_TRAP(r.Call(-1001, 0));
+ CHECK_TRAP(r.Call(std::numeric_limits<int32_t>::min(), 0));
+ CHECK_EQ(std::numeric_limits<int32_t>::min(),
+ r.Call(std::numeric_limits<int32_t>::min(), -1));
+}
+
+
+TEST(Run_WASM_Int32DivS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; denom++) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I8(denom)));
+ for (int32_t val = -7; val < 8; val++) {
+ if (denom == 0) {
+ CHECK_TRAP(r.Call(val));
+ } else {
+ CHECK_EQ(val / denom, r.Call(val));
+ }
+ }
+ }
+}
+
+
+TEST(Run_WASM_Int32DivU_byzero_const) {
+ for (uint32_t denom = 0xfffffffe; denom < 8; denom++) {
+ WasmRunner<uint32_t> r(MachineType::Uint32());
+ BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32(denom)));
+
+ for (uint32_t val = 0xfffffff0; val < 8; val++) {
+ if (denom == 0) {
+ CHECK_TRAP(r.Call(val));
+ } else {
+ CHECK_EQ(val / denom, r.Call(val));
+ }
+ }
+ }
+}
+
+
+TEST(Run_WASM_Int32DivS_trap_effect) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ TestingModule module;
+ module.AddMemoryElems<int32_t>(8);
+ r.env()->module = &module;
+
+ BUILD(r,
+ WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ WASM_I32_DIVS(WASM_STORE_MEM(MachineType::Int8(),
+ WASM_ZERO, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(1)),
+ WASM_I32_DIVS(WASM_STORE_MEM(MachineType::Int8(),
+ WASM_ZERO, WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(1))));
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_TRAP(r.Call(8, 0));
+ CHECK_TRAP(r.Call(4, 0));
+ CHECK_TRAP(r.Call(0, 0));
+}
+
+
+#if WASM_64
+#define as64(x) static_cast<int64_t>(x)
+TEST(Run_WASM_Int64DivS_trap) {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(0, r.Call(as64(0), as64(100)));
+ CHECK_TRAP64(r.Call(as64(100), as64(0)));
+ CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
+}
+
+
+TEST(Run_WASM_Int64RemS_trap) {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(33, r.Call(as64(133), as64(100)));
+ CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
+ CHECK_TRAP64(r.Call(as64(100), as64(0)));
+ CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
+}
+
+
+TEST(Run_WASM_Int64DivU_trap) {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(0, r.Call(as64(0), as64(100)));
+ CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
+ CHECK_TRAP64(r.Call(as64(100), as64(0)));
+ CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
+}
+
+
+TEST(Run_WASM_Int64RemU_trap) {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(17, r.Call(as64(217), as64(100)));
+ CHECK_TRAP64(r.Call(as64(100), as64(0)));
+ CHECK_TRAP64(r.Call(as64(-1001), as64(0)));
+ CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), as64(0)));
+ CHECK_EQ(std::numeric_limits<int64_t>::min(),
+ r.Call(std::numeric_limits<int64_t>::min(), as64(-1)));
+}
+
+
+TEST(Run_WASM_Int64DivS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; denom++) {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64(denom)));
+ for (int64_t val = -7; val < 8; val++) {
+ if (denom == 0) {
+ CHECK_TRAP64(r.Call(val));
+ } else {
+ CHECK_EQ(val / denom, r.Call(val));
+ }
+ }
+ }
+}
+
+
+TEST(Run_WASM_Int64DivU_byzero_const) {
+ for (uint64_t denom = 0xfffffffffffffffe; denom < 8; denom++) {
+ WasmRunner<uint64_t> r(MachineType::Uint64());
+ BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64(denom)));
+
+ for (uint64_t val = 0xfffffffffffffff0; val < 8; val++) {
+ if (denom == 0) {
+ CHECK_TRAP64(r.Call(val));
+ } else {
+ CHECK_EQ(val / denom, r.Call(val));
+ }
+ }
+ }
+}
+#endif
+
+
+void TestFloat32Binop(WasmOpcode opcode, int32_t expected, float a, float b) {
+ {
+ WasmRunner<int32_t> r;
+ // return K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Float32(), MachineType::Float32());
+ // return a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+void TestFloat32BinopWithConvert(WasmOpcode opcode, int32_t expected, float a,
+ float b) {
+ {
+ WasmRunner<int32_t> r;
+ // return int(K op K)
+ BUILD(r,
+ WASM_I32_SCONVERT_F32(WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b))));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Float32(), MachineType::Float32());
+ // return int(a op b)
+ BUILD(r, WASM_I32_SCONVERT_F32(
+ WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+void TestFloat32UnopWithConvert(WasmOpcode opcode, int32_t expected, float a) {
+ {
+ WasmRunner<int32_t> r;
+ // return int(op(K))
+ BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_F32(a))));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Float32());
+ // return int(op(a))
+ BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_GET_LOCAL(0))));
+ CHECK_EQ(expected, r.Call(a));
+ }
+}
+
+
+void TestFloat64Binop(WasmOpcode opcode, int32_t expected, double a, double b) {
+ {
+ WasmRunner<int32_t> r;
+ // return K op K
+ BUILD(r, WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b)));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Float64(), MachineType::Float64());
+ // return a op b
+ BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+void TestFloat64BinopWithConvert(WasmOpcode opcode, int32_t expected, double a,
+ double b) {
+ {
+ WasmRunner<int32_t> r;
+ // return int(K op K)
+ BUILD(r,
+ WASM_I32_SCONVERT_F64(WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b))));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Float64(), MachineType::Float64());
+ BUILD(r, WASM_I32_SCONVERT_F64(
+ WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+}
+
+
+void TestFloat64UnopWithConvert(WasmOpcode opcode, int32_t expected, double a) {
+ {
+ WasmRunner<int32_t> r;
+ // return int(op(K))
+ BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_F64(a))));
+ CHECK_EQ(expected, r.Call());
+ }
+ {
+ WasmRunner<int32_t> r(MachineType::Float64());
+ // return int(op(a))
+ BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_GET_LOCAL(0))));
+ CHECK_EQ(expected, r.Call(a));
+ }
+}
+
+
+  // TODO(titzer): Fix for nosse4 and re-enable.
+#if 0
+
+TEST(Run_WasmFloat32Binops) {
+ TestFloat32Binop(kExprF32Eq, 1, 8.125f, 8.125f);
+ TestFloat32Binop(kExprF32Ne, 1, 8.125f, 8.127f);
+ TestFloat32Binop(kExprF32Lt, 1, -9.5f, -9.0f);
+ TestFloat32Binop(kExprF32Le, 1, -1111.0f, -1111.0f);
+ TestFloat32Binop(kExprF32Gt, 1, -9.0f, -9.5f);
+ TestFloat32Binop(kExprF32Ge, 1, -1111.0f, -1111.0f);
+
+ TestFloat32BinopWithConvert(kExprF32Add, 10, 3.5f, 6.5f);
+ TestFloat32BinopWithConvert(kExprF32Sub, 2, 44.5f, 42.5f);
+ TestFloat32BinopWithConvert(kExprF32Mul, -66, -132.1f, 0.5f);
+ TestFloat32BinopWithConvert(kExprF32Div, 11, 22.1f, 2.0f);
+}
+
+
+TEST(Run_WasmFloat32Unops) {
+ TestFloat32UnopWithConvert(kExprF32Abs, 8, 8.125f);
+ TestFloat32UnopWithConvert(kExprF32Abs, 9, -9.125f);
+ TestFloat32UnopWithConvert(kExprF32Neg, -213, 213.125f);
+ TestFloat32UnopWithConvert(kExprF32Sqrt, 12, 144.4f);
+}
+
+
+TEST(Run_WasmFloat64Binops) {
+ TestFloat64Binop(kExprF64Eq, 1, 16.25, 16.25);
+ TestFloat64Binop(kExprF64Ne, 1, 16.25, 16.15);
+ TestFloat64Binop(kExprF64Lt, 1, -32.4, 11.7);
+ TestFloat64Binop(kExprF64Le, 1, -88.9, -88.9);
+ TestFloat64Binop(kExprF64Gt, 1, 11.7, -32.4);
+ TestFloat64Binop(kExprF64Ge, 1, -88.9, -88.9);
+
+ TestFloat64BinopWithConvert(kExprF64Add, 100, 43.5, 56.5);
+ TestFloat64BinopWithConvert(kExprF64Sub, 200, 12200.1, 12000.1);
+ TestFloat64BinopWithConvert(kExprF64Mul, -33, 134, -0.25);
+ TestFloat64BinopWithConvert(kExprF64Div, -1111, -2222.3, 2);
+}
+
+
+TEST(Run_WasmFloat64Unops) {
+ TestFloat64UnopWithConvert(kExprF64Abs, 108, 108.125);
+ TestFloat64UnopWithConvert(kExprF64Abs, 209, -209.125);
+ TestFloat64UnopWithConvert(kExprF64Neg, -209, 209.125);
+ TestFloat64UnopWithConvert(kExprF64Sqrt, 13, 169.4);
+}
+
+#endif
+
+
+TEST(Run_WasmFloat32Neg) {
+ WasmRunner<float> r(MachineType::Float32());
+ BUILD(r, WASM_F32_NEG(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ CHECK_EQ(0x80000000,
+ bit_cast<uint32_t>(*i) ^ bit_cast<uint32_t>(r.Call(*i)));
+ }
+}
+
+
+TEST(Run_WasmFloat64Neg) {
+ WasmRunner<double> r(MachineType::Float64());
+ BUILD(r, WASM_F64_NEG(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ CHECK_EQ(0x8000000000000000,
+ bit_cast<uint64_t>(*i) ^ bit_cast<uint64_t>(r.Call(*i)));
+ }
+}
+
+
+TEST(Run_Wasm_IfElse_P) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // if (p0) return 11; else return 22;
+ BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_I8(11), // --
+ WASM_I8(22))); // --
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_IfElse_Unreachable1) {
+ WasmRunner<int32_t> r;
+  // if (0) unreachable; else return 27;
+ BUILD(r, WASM_IF_ELSE(WASM_ZERO, // --
+ WASM_UNREACHABLE, // --
+ WASM_I8(27))); // --
+ CHECK_EQ(27, r.Call());
+}
+
+
+TEST(Run_Wasm_Return12) {
+ WasmRunner<int32_t> r;
+
+ BUILD(r, WASM_RETURN(WASM_I8(12)));
+ CHECK_EQ(12, r.Call());
+}
+
+
+TEST(Run_Wasm_Return17) {
+ WasmRunner<int32_t> r;
+
+ BUILD(r, WASM_BLOCK(1, WASM_RETURN(WASM_I8(17))));
+ CHECK_EQ(17, r.Call());
+}
+
+
+TEST(Run_Wasm_Return_I32) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+
+ BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+
+#if WASM_64
+TEST(Run_Wasm_Return_I64) {
+ WasmRunner<int64_t> r(MachineType::Int64());
+
+ BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+#endif
+
+
+TEST(Run_Wasm_Return_F32) {
+ WasmRunner<float> r(MachineType::Float32());
+
+ BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ float expect = *i;
+ float result = r.Call(expect);
+ if (std::isnan(expect)) {
+ CHECK(std::isnan(result));
+ } else {
+ CHECK_EQ(expect, result);
+ }
+ }
+}
+
+
+TEST(Run_Wasm_Return_F64) {
+ WasmRunner<double> r(MachineType::Float64());
+
+ BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ double expect = *i;
+ double result = r.Call(expect);
+ if (std::isnan(expect)) {
+ CHECK(std::isnan(result));
+ } else {
+ CHECK_EQ(expect, result);
+ }
+ }
+}
+
+
+TEST(Run_Wasm_Select) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // return select(a, 11, 22);
+ BUILD(r, WASM_SELECT(WASM_GET_LOCAL(0), WASM_I8(11), WASM_I8(22)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_Select_strict1) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // select(a, a = 11, 22); return a
+ BUILD(r,
+ WASM_BLOCK(2, WASM_SELECT(WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_I8(11)), WASM_I8(22)),
+ WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(11, r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_Select_strict2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // select(a, 11, a = 22); return a;
+ BUILD(r, WASM_BLOCK(2, WASM_SELECT(WASM_GET_LOCAL(0), WASM_I8(11),
+ WASM_SET_LOCAL(0, WASM_I8(22))),
+ WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(22, r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_BrIf_strict) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_BLOCK(1, WASM_BRV_IF(0, WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_I8(99)))),
+ WASM_GET_LOCAL(0)));
+
+ FOR_INT32_INPUTS(i) { CHECK_EQ(99, r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_TableSwitch1) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(93))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_TableSwitch_br) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_TABLESWITCH_OP(1, 2, WASM_CASE_BR(0), WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(91))),
+ WASM_I8(99));
+ CHECK_EQ(99, r.Call(0));
+ CHECK_EQ(91, r.Call(1));
+ CHECK_EQ(91, r.Call(2));
+ CHECK_EQ(91, r.Call(3));
+}
+
+
+TEST(Run_Wasm_TableSwitch_br2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_BLOCK(2, WASM_TABLESWITCH_OP(
+ 1, 4, WASM_CASE_BR(0), WASM_CASE_BR(1),
+ WASM_CASE_BR(2), WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0),
+ WASM_RETURN(WASM_I8(85))),
+ WASM_RETURN(WASM_I8(86))),
+ WASM_RETURN(WASM_I8(87))),
+ WASM_I8(88));
+ CHECK_EQ(86, r.Call(0));
+ CHECK_EQ(87, r.Call(1));
+ CHECK_EQ(88, r.Call(2));
+ CHECK_EQ(85, r.Call(3));
+ CHECK_EQ(85, r.Call(4));
+ CHECK_EQ(85, r.Call(5));
+}
+
+
+TEST(Run_Wasm_TableSwitch2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(91)),
+ WASM_RETURN(WASM_I8(92))));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i == 0 ? 91 : 92;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_TableSwitch2b) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(1), WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(81)),
+ WASM_RETURN(WASM_I8(82))));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i == 0 ? 82 : 81;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_TableSwitch4) {
+ for (int i = 0; i < 4; i++) {
+ const uint16_t br = 0x8000u;
+ uint16_t c = 0;
+ uint16_t cases[] = {i == 0 ? br : c++, i == 1 ? br : c++, i == 2 ? br : c++,
+ i == 3 ? br : c++};
+ byte code[] = {
+ WASM_BLOCK(1, WASM_TABLESWITCH_OP(
+ 3, 4, WASM_CASE(cases[0]), WASM_CASE(cases[1]),
+ WASM_CASE(cases[2]), WASM_CASE(cases[3])),
+ WASM_TABLESWITCH_BODY(
+ WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(71)),
+ WASM_RETURN(WASM_I8(72)), WASM_RETURN(WASM_I8(73)))),
+ WASM_RETURN(WASM_I8(74))};
+
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.Build(code, code + arraysize(code));
+
+ FOR_INT32_INPUTS(i) {
+ int index = (*i < 0 || *i > 3) ? 3 : *i;
+ int32_t expected = 71 + cases[index];
+ if (expected >= 0x8000) expected = 74;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_TableSwitch4b) {
+ for (int a = 0; a < 2; a++) {
+ for (int b = 0; b < 2; b++) {
+ for (int c = 0; c < 2; c++) {
+ for (int d = 0; d < 2; d++) {
+ if (a + b + c + d == 0) continue;
+ if (a + b + c + d == 4) continue;
+
+ byte code[] = {
+ WASM_TABLESWITCH_OP(2, 4, WASM_CASE(a), WASM_CASE(b),
+ WASM_CASE(c), WASM_CASE(d)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_RETURN(WASM_I8(61)),
+ WASM_RETURN(WASM_I8(62)))};
+
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.Build(code, code + arraysize(code));
+
+ CHECK_EQ(61 + a, r.Call(0));
+ CHECK_EQ(61 + b, r.Call(1));
+ CHECK_EQ(61 + c, r.Call(2));
+ CHECK_EQ(61 + d, r.Call(3));
+ CHECK_EQ(61 + d, r.Call(4));
+ }
+ }
+ }
+ }
+}
+
+
+TEST(Run_Wasm_TableSwitch4_fallthru) {
+ byte code[] = {
+ WASM_TABLESWITCH_OP(4, 4, WASM_CASE(0), WASM_CASE(1), WASM_CASE(2),
+ WASM_CASE(3)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_INC_LOCAL_BY(1, 1),
+ WASM_INC_LOCAL_BY(1, 2), WASM_INC_LOCAL_BY(1, 4),
+ WASM_INC_LOCAL_BY(1, 8)),
+ WASM_GET_LOCAL(1)};
+
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ r.Build(code, code + arraysize(code));
+
+ CHECK_EQ(15, r.Call(0, 0));
+ CHECK_EQ(14, r.Call(1, 0));
+ CHECK_EQ(12, r.Call(2, 0));
+ CHECK_EQ(8, r.Call(3, 0));
+ CHECK_EQ(8, r.Call(4, 0));
+
+ CHECK_EQ(115, r.Call(0, 100));
+ CHECK_EQ(114, r.Call(1, 100));
+ CHECK_EQ(112, r.Call(2, 100));
+ CHECK_EQ(108, r.Call(3, 100));
+ CHECK_EQ(108, r.Call(4, 100));
+}
+
+
+TEST(Run_Wasm_TableSwitch4_fallthru_br) {
+ byte code[] = {
+ WASM_TABLESWITCH_OP(4, 4, WASM_CASE(0), WASM_CASE(1), WASM_CASE(2),
+ WASM_CASE(3)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_INC_LOCAL_BY(1, 1),
+ WASM_BRV(0, WASM_INC_LOCAL_BY(1, 2)),
+ WASM_INC_LOCAL_BY(1, 4),
+ WASM_BRV(0, WASM_INC_LOCAL_BY(1, 8))),
+ WASM_GET_LOCAL(1)};
+
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ r.Build(code, code + arraysize(code));
+
+ CHECK_EQ(3, r.Call(0, 0));
+ CHECK_EQ(2, r.Call(1, 0));
+ CHECK_EQ(12, r.Call(2, 0));
+ CHECK_EQ(8, r.Call(3, 0));
+ CHECK_EQ(8, r.Call(4, 0));
+
+ CHECK_EQ(203, r.Call(0, 200));
+ CHECK_EQ(202, r.Call(1, 200));
+ CHECK_EQ(212, r.Call(2, 200));
+ CHECK_EQ(208, r.Call(3, 200));
+ CHECK_EQ(208, r.Call(4, 200));
+}
+
+
+TEST(Run_Wasm_F32ReinterpretI32) {
+ WasmRunner<int32_t> r;
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_I32_REINTERPRET_F32(
+ WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)));
+
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i;
+ memory[0] = expected;
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+
+TEST(Run_Wasm_I32ReinterpretF32) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
+ WASM_F32_REINTERPRET_I32(WASM_GET_LOCAL(0))),
+ WASM_I8(107)));
+
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i;
+ CHECK_EQ(107, r.Call(expected));
+ CHECK_EQ(expected, memory[0]);
+ }
+}
+
+
+TEST(Run_Wasm_ReturnStore) {
+ WasmRunner<int32_t> r;
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
+
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i;
+ memory[0] = expected;
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+
+TEST(Run_Wasm_VoidReturn1) {
+ WasmRunner<void> r;
+ BUILD(r, kExprNop);
+ r.Call();
+}
+
+
+TEST(Run_Wasm_VoidReturn2) {
+ WasmRunner<void> r;
+ BUILD(r, WASM_RETURN0);
+ r.Call();
+}
+
+
+TEST(Run_Wasm_Block_If_P) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // { if (p0) return 51; return 52; }
+ BUILD(r, WASM_BLOCK(2, // --
+ WASM_IF(WASM_GET_LOCAL(0), // --
+ WASM_BRV(0, WASM_I8(51))), // --
+ WASM_I8(52))); // --
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 51 : 52;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_Block_BrIf_P) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(51)),
+ WASM_I8(52)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 51 : 52;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_Block_IfElse_P_assign) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // { if (p0) p0 = 71; else p0 = 72; return p0; }
+ BUILD(r, WASM_BLOCK(2, // --
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_SET_LOCAL(0, WASM_I8(71)), // --
+ WASM_SET_LOCAL(0, WASM_I8(72))), // --
+ WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 71 : 72;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_Block_IfElse_P_return) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // if (p0) return 81; else return 82;
+ BUILD(r, // --
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_RETURN(WASM_I8(81)), // --
+ WASM_RETURN(WASM_I8(82)))); // --
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 81 : 82;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_Block_If_P_assign) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // { if (p0) p0 = 61; p0; }
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(61))),
+ WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 61 : *i;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_DanglingAssign) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+  // { return 99; p0 = 0; }
+ BUILD(r,
+ WASM_BLOCK(2, WASM_RETURN(WASM_I8(99)), WASM_SET_LOCAL(0, WASM_ZERO)));
+ CHECK_EQ(99, r.Call(1));
+}
+
+
+TEST(Run_Wasm_ExprIf_P) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // p0 ? 11 : 22;
+ BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_I8(11), // --
+ WASM_I8(22))); // --
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_ExprIf_P_fallthru) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // p0 ? 11 : 22;
+ BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
+ WASM_I8(11), // --
+ WASM_I8(22))); // --
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+
+TEST(Run_Wasm_CountDown) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(
+ 2, WASM_LOOP(
+ 1, WASM_IF(WASM_GET_LOCAL(0),
+ WASM_BRV(0, WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0),
+ WASM_I8(1)))))),
+ WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(1));
+ CHECK_EQ(0, r.Call(10));
+ CHECK_EQ(0, r.Call(100));
+}
+
+
+TEST(Run_Wasm_CountDown_fallthru) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(
+ 2, WASM_LOOP(3, WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)), WASM_BREAK(0)),
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
+ WASM_CONTINUE(0)),
+ WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(1));
+ CHECK_EQ(0, r.Call(10));
+ CHECK_EQ(0, r.Call(100));
+}
+
+
+TEST(Run_Wasm_WhileCountDown) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_WHILE(WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
+ WASM_I8(1)))),
+ WASM_GET_LOCAL(0)));
+ CHECK_EQ(0, r.Call(1));
+ CHECK_EQ(0, r.Call(10));
+ CHECK_EQ(0, r.Call(100));
+}
+
+
+TEST(Run_Wasm_Loop_if_break1) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(0)),
+ WASM_SET_LOCAL(0, WASM_I8(99))),
+ WASM_GET_LOCAL(0)));
+ CHECK_EQ(99, r.Call(0));
+ CHECK_EQ(3, r.Call(3));
+ CHECK_EQ(10000, r.Call(10000));
+ CHECK_EQ(-29, r.Call(-29));
+}
+
+
+TEST(Run_Wasm_Loop_if_break2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_LOOP(2, WASM_BR_IF(1, WASM_GET_LOCAL(0)),
+ WASM_SET_LOCAL(0, WASM_I8(99))),
+ WASM_GET_LOCAL(0)));
+ CHECK_EQ(99, r.Call(0));
+ CHECK_EQ(3, r.Call(3));
+ CHECK_EQ(10000, r.Call(10000));
+ CHECK_EQ(-29, r.Call(-29));
+}
+
+
+TEST(Run_Wasm_Loop_if_break_fallthru) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
+ WASM_SET_LOCAL(0, WASM_I8(93)))),
+ WASM_GET_LOCAL(0));
+ CHECK_EQ(93, r.Call(0));
+ CHECK_EQ(3, r.Call(3));
+ CHECK_EQ(10001, r.Call(10001));
+ CHECK_EQ(-22, r.Call(-22));
+}
+
+
+// Loads an i32 from linear-memory index 0 and checks the loaded value
+// tracks writes made directly through the backing store.
+TEST(Run_Wasm_LoadMemI32) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ module.RandomizeMemory(1111);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(0)));
+
+ memory[0] = 99999999;
+ CHECK_EQ(99999999, r.Call(0));
+
+ memory[0] = 88888888;
+ CHECK_EQ(88888888, r.Call(0));
+
+ memory[0] = 77777777;
+ CHECK_EQ(77777777, r.Call(0));
+}
+
+
+// Out-of-bounds i32 loads (including offsets with the high bit set)
+// must trap; an in-bounds load at 0 must succeed.
+TEST(Run_Wasm_LoadMemI32_oob) {
+ WasmRunner<int32_t> r(MachineType::Uint32());
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ module.RandomizeMemory(1111);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
+
+ memory[0] = 88888888;
+ CHECK_EQ(88888888, r.Call(0u));
+ for (uint32_t offset = 29; offset < 40; offset++) {
+ CHECK_TRAP(r.Call(offset));
+ }
+
+ for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
+ CHECK_TRAP(r.Call(offset));
+ }
+}
+
+
+// In asm.js mode (module.asm_js = true) an out-of-bounds load returns
+// 0 instead of trapping.
+TEST(Run_Wasm_LoadMemI32_oob_asm) {
+ WasmRunner<int32_t> r(MachineType::Uint32());
+ TestingModule module;
+ module.asm_js = true;
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ module.RandomizeMemory(1112);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
+
+ memory[0] = 999999;
+ CHECK_EQ(999999, r.Call(0u));
+ // TODO(titzer): offset 29-31 should also be OOB.
+ for (uint32_t offset = 32; offset < 40; offset++) {
+ CHECK_EQ(0, r.Call(offset));
+ }
+
+ for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
+ CHECK_EQ(0, r.Call(offset));
+ }
+}
+
+
+// For every machine type, loads with a static offset of 8 must trap as
+// soon as offset + index crosses the end of the 32-byte memory.
+TEST(Run_Wasm_LoadMem_offset_oob) {
+ TestingModule module;
+ module.AddMemoryElems<int32_t>(8);
+
+ static const MachineType machineTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+ MachineType::Float64()};
+
+ for (size_t m = 0; m < arraysize(machineTypes); m++) {
+ module.RandomizeMemory(1116 + static_cast<int>(m));
+ WasmRunner<int32_t> r(MachineType::Uint32());
+ r.env()->module = &module;
+ // Largest index such that offset 8 + index + MemSize stays in bounds.
+ uint32_t boundary = 24 - WasmOpcodes::MemSize(machineTypes[m]);
+
+ BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], 8, WASM_GET_LOCAL(0)),
+ WASM_ZERO);
+
+ CHECK_EQ(0, r.Call(boundary)); // in bounds.
+
+ for (uint32_t offset = boundary + 1; offset < boundary + 19; offset++) {
+ CHECK_TRAP(r.Call(offset)); // out of bounds.
+ }
+ }
+}
+
+
+// An i32 load with static offset 4 reads element (index/4 + 1) of the
+// backing store.
+TEST(Run_Wasm_LoadMemI32_offset) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(4);
+ module.RandomizeMemory(1111);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0)));
+
+ memory[0] = 66666666;
+ memory[1] = 77777777;
+ memory[2] = 88888888;
+ memory[3] = 99999999;
+ CHECK_EQ(77777777, r.Call(0));
+ CHECK_EQ(88888888, r.Call(4));
+ CHECK_EQ(99999999, r.Call(8));
+
+ memory[0] = 11111111;
+ memory[1] = 22222222;
+ memory[2] = 33333333;
+ memory[3] = 44444444;
+ CHECK_EQ(22222222, r.Call(0));
+ CHECK_EQ(33333333, r.Call(4));
+ CHECK_EQ(44444444, r.Call(8));
+}
+
+
+// TODO(titzer): Fix for mips and re-enable.
+#if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+// Exhaustively checks constant offset/index combinations around the end
+// of a 12-byte memory: in-bounds loads return the stored value, all
+// others trap.
+TEST(Run_Wasm_LoadMemI32_const_oob) {
+ TestingModule module;
+ const int kMemSize = 12;
+ module.AddMemoryElems<byte>(kMemSize);
+
+ for (int offset = 0; offset < kMemSize + 5; offset++) {
+ for (int index = 0; index < kMemSize + 5; index++) {
+ WasmRunner<int32_t> r;
+ r.env()->module = &module;
+ module.RandomizeMemory();
+
+ BUILD(r,
+ WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset, WASM_I8(index)));
+
+ if ((offset + index) <= (kMemSize - sizeof(int32_t))) {
+ CHECK_EQ(module.raw_val_at<int32_t>(offset + index), r.Call());
+ } else {
+ CHECK_TRAP(r.Call());
+ }
+ }
+ }
+}
+
+#endif
+
+
+// Stores kWritten through a static offset of 4 and checks that exactly
+// one memory cell changed per call.
+TEST(Run_Wasm_StoreMemI32_offset) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ const int32_t kWritten = 0xaabbccdd;
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(4);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0),
+ WASM_I32(kWritten)));
+
+ // Run i up to 2 so the i == 2 expectation on memory[3] below is
+ // actually exercised; with the previous bound (i < 2) it was dead.
+ // The store at offset 4 + 8 = 12 is still within the 16-byte memory.
+ for (int i = 0; i < 3; i++) {
+ module.RandomizeMemory(1111);
+ memory[0] = 66666666;
+ memory[1] = 77777777;
+ memory[2] = 88888888;
+ memory[3] = 99999999;
+ CHECK_EQ(kWritten, r.Call(i * 4));
+ CHECK_EQ(66666666, memory[0]);
+ CHECK_EQ(i == 0 ? kWritten : 77777777, memory[1]);
+ CHECK_EQ(i == 1 ? kWritten : 88888888, memory[2]);
+ CHECK_EQ(i == 2 ? kWritten : 99999999, memory[3]);
+ }
+}
+
+
+// For every storable machine type, a store with static offset 8 must
+// succeed at the last in-bounds index and trap for everything beyond.
+TEST(Run_Wasm_StoreMem_offset_oob) {
+ TestingModule module;
+ byte* memory = module.AddMemoryElems<byte>(32);
+
+// 64-bit types are only storable when WASM_64 is available.
+#if WASM_64
+ static const MachineType machineTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+ MachineType::Float64()};
+#else
+ static const MachineType machineTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Float32(), MachineType::Float64()};
+#endif
+
+ for (size_t m = 0; m < arraysize(machineTypes); m++) {
+ module.RandomizeMemory(1119 + static_cast<int>(m));
+ WasmRunner<int32_t> r(MachineType::Uint32());
+ r.env()->module = &module;
+
+ BUILD(r, WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_GET_LOCAL(0),
+ WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
+ WASM_ZERO);
+
+ byte memsize = WasmOpcodes::MemSize(machineTypes[m]);
+ uint32_t boundary = 24 - memsize;
+ CHECK_EQ(0, r.Call(boundary)); // in bounds.
+ // The value loaded from address 0 must have been copied to the store
+ // target at 8 + boundary.
+ CHECK_EQ(0, memcmp(&memory[0], &memory[8 + boundary], memsize));
+
+ for (uint32_t offset = boundary + 1; offset < boundary + 19; offset++) {
+ CHECK_TRAP(r.Call(offset)); // out of bounds.
+ }
+ }
+}
+
+
+#if WASM_64
+// Loads an f64 from memory and reinterprets its bits as i64; the raw
+// bit pattern written through the int64_t* view must round-trip.
+TEST(Run_Wasm_F64ReinterpretI64) {
+ WasmRunner<int64_t> r;
+ TestingModule module;
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_I64_REINTERPRET_F64(
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
+
+ FOR_INT32_INPUTS(i) {
+ int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
+ memory[0] = expected;
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+
+// Reinterprets an i64 parameter as f64, stores it to memory, and checks
+// the bit pattern survives through the int64_t* view.
+TEST(Run_Wasm_I64ReinterpretF64) {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ TestingModule module;
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
+ WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0)));
+
+ FOR_INT32_INPUTS(i) {
+ int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
+ CHECK_EQ(expected, r.Call(expected));
+ CHECK_EQ(expected, memory[0]);
+ }
+}
+
+
+// 64-bit analogue of Run_Wasm_LoadMemI32: loads an i64 from index 0.
+TEST(Run_Wasm_LoadMemI64) {
+ WasmRunner<int64_t> r;
+ TestingModule module;
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ module.RandomizeMemory(1111);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_I8(0)));
+
+ memory[0] = 0xaabbccdd00112233LL;
+ CHECK_EQ(0xaabbccdd00112233LL, r.Call());
+
+ memory[0] = 0x33aabbccdd001122LL;
+ CHECK_EQ(0x33aabbccdd001122LL, r.Call());
+
+ memory[0] = 77777777;
+ CHECK_EQ(77777777, r.Call());
+}
+#endif
+
+
+// Loads i32s at parameterized byte indices; every element of the
+// randomized memory must be readable at index i * 4.
+TEST(Run_Wasm_LoadMemI32_P) {
+ const int kNumElems = 8;
+ WasmRunner<int32_t> r(MachineType::Int32());
+ TestingModule module;
+ int32_t* memory = module.AddMemoryElems<int32_t>(kNumElems);
+ module.RandomizeMemory(2222);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
+
+ for (int i = 0; i < kNumElems; i++) {
+ CHECK_EQ(memory[i], r.Call(i * 4));
+ }
+}
+
+
+// Sums memory elements [1..kNumElems-1] with a wasm while-loop walking
+// the index down by 4 bytes per iteration; compares against a C++ sum.
+TEST(Run_Wasm_MemI32_Sum) {
+ WasmRunner<uint32_t> r(MachineType::Int32());
+ const int kNumElems = 20;
+ const byte kSum = r.AllocateLocal(kAstI32);
+ TestingModule module;
+ uint32_t* memory = module.AddMemoryElems<uint32_t>(kNumElems);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ 2, WASM_SET_LOCAL(
+ kSum, WASM_I32_ADD(
+ WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Int32(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
+ WASM_GET_LOCAL(1)));
+
+ // Run 3 trials over differently randomized memory.
+ for (int i = 0; i < 3; i++) {
+ module.RandomizeMemory(i * 33);
+ uint32_t expected = 0;
+ for (size_t j = kNumElems - 1; j > 0; j--) {
+ expected += memory[j];
+ }
+ uint32_t result = r.Call(static_cast<int>(4 * (kNumElems - 1)));
+ CHECK_EQ(expected, result);
+ }
+}
+
+
+// Hand-assembled (raw opcode) loop that scans blanked memory from the
+// top down and returns non-zero if any word is non-zero.
+TEST(Run_Wasm_CheckMachIntsZero) {
+ WasmRunner<uint32_t> r(MachineType::Int32());
+ const int kNumElems = 55;
+ TestingModule module;
+ module.AddMemoryElems<uint32_t>(kNumElems);
+ r.env()->module = &module;
+
+ BUILD(r, kExprBlock, 2, kExprLoop, 1, kExprIf, kExprGetLocal, 0, kExprBr, 0,
+ kExprIfElse, kExprI32LoadMem, 0, kExprGetLocal, 0, kExprBr, 2,
+ kExprI8Const, 255, kExprSetLocal, 0, kExprI32Sub, kExprGetLocal, 0,
+ kExprI8Const, 4, kExprI8Const, 0);
+
+ module.BlankMemory();
+ CHECK_EQ(0, r.Call((kNumElems - 1) * 4));
+}
+
+
+// Sums f32 memory elements [1..4] in a wasm loop, stores the total back
+// to element 0, and checks the exact float result (71256.0f).
+TEST(Run_Wasm_MemF32_Sum) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ const byte kSum = r.AllocateLocal(kAstF32);
+ const int kSize = 5;
+ TestingModule module;
+ module.AddMemoryElems<float>(kSize);
+ float* buffer = module.raw_mem_start<float>();
+ buffer[0] = -99.25;
+ buffer[1] = -888.25;
+ buffer[2] = -77.25;
+ buffer[3] = 66666.25;
+ buffer[4] = 5555.25;
+ r.env()->module = &module;
+
+ BUILD(r, WASM_BLOCK(
+ 3, WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ 2, WASM_SET_LOCAL(
+ kSum, WASM_F32_ADD(
+ WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Float32(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
+ WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
+ WASM_GET_LOCAL(kSum)),
+ WASM_GET_LOCAL(0)));
+
+ CHECK_EQ(0, r.Call(4 * (kSize - 1)));
+ CHECK_NE(-99.25, buffer[0]);
+ CHECK_EQ(71256.0f, buffer[0]);
+}
+
+
+#if WASM_64
+// 64-bit analogue of Run_Wasm_MemI32_Sum: sums i64 elements walking the
+// byte index down by 8 per iteration.
+TEST(Run_Wasm_MemI64_Sum) {
+ WasmRunner<uint64_t> r(MachineType::Int32());
+ const int kNumElems = 20;
+ const byte kSum = r.AllocateLocal(kAstI64);
+ TestingModule module;
+ uint64_t* memory = module.AddMemoryElems<uint64_t>(kNumElems);
+ r.env()->module = &module;
+
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ 2, WASM_SET_LOCAL(
+ kSum, WASM_I64_ADD(
+ WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Int64(),
+ WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
+ WASM_GET_LOCAL(1)));
+
+ // Run 3 trials over differently randomized memory.
+ for (int i = 0; i < 3; i++) {
+ module.RandomizeMemory(i * 33);
+ uint64_t expected = 0;
+ for (size_t j = kNumElems - 1; j > 0; j--) {
+ expected += memory[j];
+ }
+ uint64_t result = r.Call(8 * (kNumElems - 1));
+ CHECK_EQ(expected, result);
+ }
+}
+#endif
+
+
+// Builds and runs a wasm function that folds {binop} over {buffer}
+// (walking from the last element down to index 0), stores the
+// accumulator back to memory location 0, and returns it.
+// T is the element type; astType/memType must describe the same type.
+template <typename T>
+T GenerateAndRunFold(WasmOpcode binop, T* buffer, size_t size,
+ LocalType astType, MachineType memType) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ const byte kAccum = r.AllocateLocal(astType);
+ TestingModule module;
+ module.AddMemoryElems<T>(size);
+ for (size_t i = 0; i < size; i++) {
+ module.raw_mem_start<T>()[i] = buffer[i];
+ }
+ r.env()->module = &module;
+
+ BUILD(
+ r,
+ WASM_BLOCK(
+ 4, WASM_SET_LOCAL(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ 2, WASM_SET_LOCAL(
+ kAccum,
+ WASM_BINOP(binop, WASM_GET_LOCAL(kAccum),
+ WASM_LOAD_MEM(memType, WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(sizeof(T)))))),
+ WASM_STORE_MEM(memType, WASM_ZERO, WASM_GET_LOCAL(kAccum)),
+ WASM_GET_LOCAL(0)));
+ r.Call(static_cast<int>(sizeof(T) * (size - 1)));
+ // Read the result back with the template element type; the previous
+ // raw_mem_at<double>(0) read the wrong bytes for any T != double.
+ return module.raw_mem_at<T>(0);
+}
+
+
+// Folds f64 multiplication over {1, 2, 2, 2, 2, 2} -> 2^5 = 32.
+TEST(Run_Wasm_MemF64_Mul) {
+ const size_t kSize = 6;
+ double buffer[kSize] = {1, 2, 2, 2, 2, 2};
+ double result = GenerateAndRunFold<double>(kExprF64Mul, buffer, kSize,
+ kAstF64, MachineType::Float64());
+ CHECK_EQ(32, result);
+}
+
+
+// Graph-building/compilation must not hang or crash on an infinite loop.
+TEST(Build_Wasm_Infinite_Loop) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ // Only build the graph and compile, don't run.
+ BUILD(r, WASM_INFINITE_LOOP);
+}
+
+
+// Like Build_Wasm_Infinite_Loop, but the loop body carries a memory
+// load so the effect chain inside the loop is exercised.
+TEST(Build_Wasm_Infinite_Loop_effect) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ TestingModule module;
+ module.AddMemoryElems<int8_t>(16);
+ r.env()->module = &module;
+
+ // Only build the graph and compile, don't run.
+ BUILD(r, WASM_LOOP(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
+}
+
+
+// Code after an unconditional break is unreachable; the break value (9)
+// must be returned for every input.
+TEST(Run_Wasm_Unreachable0a) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(2, WASM_BRV(0, WASM_I8(9)), WASM_RETURN(WASM_GET_LOCAL(0))));
+ CHECK_EQ(9, r.Call(0));
+ CHECK_EQ(9, r.Call(1));
+}
+
+
+// An explicit unreachable after an unconditional break must never fire.
+TEST(Run_Wasm_Unreachable0b) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_BRV(0, WASM_I8(7)), WASM_UNREACHABLE));
+ CHECK_EQ(7, r.Call(0));
+ CHECK_EQ(7, r.Call(1));
+}
+
+
+// Compilation of a body that is just `unreachable` must succeed.
+TEST(Build_Wasm_Unreachable1) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_UNREACHABLE);
+}
+
+
+// Two consecutive unreachables still compile.
+TEST(Build_Wasm_Unreachable2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE);
+}
+
+
+// Three consecutive unreachables still compile.
+TEST(Build_Wasm_Unreachable3) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE, WASM_UNREACHABLE);
+}
+
+
+// An `if` following unreachable code must still build a valid graph.
+TEST(Build_Wasm_UnreachableIf1) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_UNREACHABLE, WASM_IF(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+}
+
+
+// An `if/else` with an unreachable arm, after unreachable code, builds.
+TEST(Build_Wasm_UnreachableIf2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_UNREACHABLE,
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
+}
+
+
+// The memory load after the unconditional break is unreachable and must
+// not execute (the module has no memory attached, so it would trap).
+TEST(Run_Wasm_Unreachable_Load) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_BRV(0, WASM_GET_LOCAL(0)),
+ WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
+ CHECK_EQ(11, r.Call(11));
+ CHECK_EQ(21, r.Call(21));
+}
+
+
+// An infinite loop guarded by a false condition must be skipped.
+TEST(Run_Wasm_Infinite_Loop_not_taken1) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP),
+ WASM_I8(45)));
+ // Run the code, but don't go into the infinite loop.
+ CHECK_EQ(45, r.Call(0));
+}
+
+
+// The true arm of the if/else breaks out with 45; the infinite-loop
+// else arm must not be entered.
+TEST(Run_Wasm_Infinite_Loop_not_taken2) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(1, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(45)),
+ WASM_INFINITE_LOOP)));
+ // Run the code, but don't go into the infinite loop.
+ CHECK_EQ(45, r.Call(1));
+}
+
+
+// Same as above, expressed with a conditional break-with-value.
+TEST(Run_Wasm_Infinite_Loop_not_taken2_brif) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(45)),
+ WASM_INFINITE_LOOP));
+ // Run the code, but don't go into the infinite loop.
+ CHECK_EQ(45, r.Call(1));
+}
+
+
+// Decodes a one- or two-parameter simple expression opcode and checks
+// the decoder can build a TurboFan graph for it. Unsupported opcodes
+// on this platform are skipped.
+static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
+ if (!WasmOpcodes::IsSupported(opcode)) return;
+
+ Zone zone;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ // Enable all optional operators.
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(&zone, MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps);
+ Graph graph(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ FunctionEnv env;
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ init_env(&env, sig);
+
+ if (sig->parameter_count() == 1) {
+ byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
+ TestBuildingGraph(&zone, &jsgraph, &env, code, code + arraysize(code));
+ } else {
+ // Simple expressions have at most two parameters.
+ CHECK_EQ(2, sig->parameter_count());
+ byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
+ 1};
+ TestBuildingGraph(&zone, &jsgraph, &env, code, code + arraysize(code));
+ }
+}
+
+
+// Expands TestBuildGraphForSimpleExpression over every simple opcode.
+TEST(Build_Wasm_SimpleExprs) {
+// Test that the decoder can build a graph for all supported simple expressions.
+#define GRAPH_BUILD_TEST(name, opcode, sig) \
+ TestBuildGraphForSimpleExpression(kExpr##name);
+
+ FOREACH_SIMPLE_OPCODE(GRAPH_BUILD_TEST);
+
+#undef GRAPH_BUILD_TEST
+}
+
+
+// Loading Int8 must sign-extend to i32 (memory[0] = -1 stays -1).
+TEST(Run_Wasm_Int32LoadInt8_signext) {
+ TestingModule module;
+ const int kNumElems = 16;
+ int8_t* memory = module.AddMemoryElems<int8_t>(kNumElems);
+ module.RandomizeMemory();
+ memory[0] = -1;
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0)));
+
+ for (size_t i = 0; i < kNumElems; i++) {
+ CHECK_EQ(memory[i], r.Call(static_cast<int>(i)));
+ }
+}
+
+
+// Loading Uint8 must zero-extend to i32 (memory[0] = 255 stays 255).
+TEST(Run_Wasm_Int32LoadInt8_zeroext) {
+ TestingModule module;
+ const int kNumElems = 16;
+ byte* memory = module.AddMemory(kNumElems);
+ module.RandomizeMemory(77);
+ memory[0] = 255;
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ BUILD(r, WASM_LOAD_MEM(MachineType::Uint8(), WASM_GET_LOCAL(0)));
+
+ for (size_t i = 0; i < kNumElems; i++) {
+ CHECK_EQ(memory[i], r.Call(static_cast<int>(i)));
+ }
+}
+
+
+// Loading Int16 must sign-extend: expected value is built from the two
+// little-endian bytes with the high byte treated as signed.
+TEST(Run_Wasm_Int32LoadInt16_signext) {
+ TestingModule module;
+ const int kNumBytes = 16;
+ byte* memory = module.AddMemory(kNumBytes);
+ module.RandomizeMemory(888);
+ memory[1] = 200;
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ BUILD(r, WASM_LOAD_MEM(MachineType::Int16(), WASM_GET_LOCAL(0)));
+
+ for (size_t i = 0; i < kNumBytes; i += 2) {
+ int32_t expected = memory[i] | (static_cast<int8_t>(memory[i + 1]) << 8);
+ CHECK_EQ(expected, r.Call(static_cast<int>(i)));
+ }
+}
+
+
+// Loading Uint16 must zero-extend: both bytes combine unsigned.
+TEST(Run_Wasm_Int32LoadInt16_zeroext) {
+ TestingModule module;
+ const int kNumBytes = 16;
+ byte* memory = module.AddMemory(kNumBytes);
+ module.RandomizeMemory(9999);
+ memory[1] = 204;
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ BUILD(r, WASM_LOAD_MEM(MachineType::Uint16(), WASM_GET_LOCAL(0)));
+
+ for (size_t i = 0; i < kNumBytes; i += 2) {
+ int32_t expected = memory[i] | (memory[i + 1] << 8);
+ CHECK_EQ(expected, r.Call(static_cast<int>(i)));
+ }
+}
+
+
+// A wasm function that accumulates its parameter into an i32 global;
+// the global's C++ view must observe each update.
+TEST(Run_WasmInt32Global) {
+ TestingModule module;
+ int32_t* global = module.AddGlobal<int32_t>(MachineType::Int32());
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ // global = global + p0
+ BUILD(r, WASM_STORE_GLOBAL(
+ 0, WASM_I32_ADD(WASM_LOAD_GLOBAL(0), WASM_GET_LOCAL(0))));
+
+ *global = 116;
+ for (int i = 9; i < 444444; i += 111111) {
+ int32_t expected = *global + i;
+ r.Call(i);
+ CHECK_EQ(expected, *global);
+ }
+}
+
+
+// Three i32 globals; updating global {g} must leave the other two
+// untouched (no aliasing between global slots).
+TEST(Run_WasmInt32Globals_DontAlias) {
+ const int kNumGlobals = 3;
+ TestingModule module;
+ int32_t* globals[] = {module.AddGlobal<int32_t>(MachineType::Int32()),
+ module.AddGlobal<int32_t>(MachineType::Int32()),
+ module.AddGlobal<int32_t>(MachineType::Int32())};
+
+ for (int g = 0; g < kNumGlobals; g++) {
+ // global = global + p0
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ BUILD(r, WASM_STORE_GLOBAL(
+ g, WASM_I32_ADD(WASM_LOAD_GLOBAL(g), WASM_GET_LOCAL(0))));
+
+ // Check that reading/writing global number {g} doesn't alter the others.
+ *globals[g] = 116 * g;
+ int32_t before[kNumGlobals];
+ for (int i = 9; i < 444444; i += 111113) {
+ int32_t sum = *globals[g] + i;
+ for (int j = 0; j < kNumGlobals; j++) before[j] = *globals[j];
+ r.Call(i);
+ for (int j = 0; j < kNumGlobals; j++) {
+ int32_t expected = j == g ? sum : before[j];
+ CHECK_EQ(expected, *globals[j]);
+ }
+ }
+ }
+}
+
+
+#if WASM_64
+// Accumulates a sign-extended i32 parameter into an i64 global,
+// starting from -1 to exercise carries across the 32-bit boundary.
+TEST(Run_WasmInt64Global) {
+ TestingModule module;
+ int64_t* global = module.AddGlobal<int64_t>(MachineType::Int64());
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ // global = global + p0
+ BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
+ 0, WASM_I64_ADD(
+ WASM_LOAD_GLOBAL(0),
+ WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO));
+
+ *global = 0xFFFFFFFFFFFFFFFFLL;
+ for (int i = 9; i < 444444; i += 111111) {
+ int64_t expected = *global + i;
+ r.Call(i);
+ CHECK_EQ(expected, *global);
+ }
+}
+#endif
+
+
+// Accumulates an i32 parameter (converted to f32) into an f32 global.
+TEST(Run_WasmFloat32Global) {
+ TestingModule module;
+ float* global = module.AddGlobal<float>(MachineType::Float32());
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ // global = global + p0
+ BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
+ 0, WASM_F32_ADD(
+ WASM_LOAD_GLOBAL(0),
+ WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO));
+
+ *global = 1.25;
+ for (int i = 9; i < 4444; i += 1111) {
+ // volatile forces a single-precision comparison value.
+ volatile float expected = *global + i;
+ r.Call(i);
+ CHECK_EQ(expected, *global);
+ }
+}
+
+
+// Accumulates an i32 parameter (converted to f64) into an f64 global.
+TEST(Run_WasmFloat64Global) {
+ TestingModule module;
+ double* global = module.AddGlobal<double>(MachineType::Float64());
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+ // global = global + p0
+ BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
+ 0, WASM_F64_ADD(
+ WASM_LOAD_GLOBAL(0),
+ WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
+ WASM_ZERO));
+
+ *global = 1.25;
+ for (int i = 9; i < 4444; i += 1111) {
+ // volatile avoids excess-precision surprises in the expected value.
+ volatile double expected = *global + i;
+ r.Call(i);
+ CHECK_EQ(expected, *global);
+ }
+}
+
+
+// Loads every machine type from the same 8 memory bytes and stores each
+// into a global of the matching width; checks extension/reinterpretation
+// of the little-endian byte pattern 0xaa 0xcc 0x55 0xee 0x33 0x22 0x11 0x99.
+TEST(Run_WasmMixedGlobals) {
+ TestingModule module;
+ int32_t* unused = module.AddGlobal<int32_t>(MachineType::Int32());
+ byte* memory = module.AddMemory(32);
+
+ int8_t* var_int8 = module.AddGlobal<int8_t>(MachineType::Int8());
+ uint8_t* var_uint8 = module.AddGlobal<uint8_t>(MachineType::Uint8());
+ int16_t* var_int16 = module.AddGlobal<int16_t>(MachineType::Int16());
+ uint16_t* var_uint16 = module.AddGlobal<uint16_t>(MachineType::Uint16());
+ int32_t* var_int32 = module.AddGlobal<int32_t>(MachineType::Int32());
+ uint32_t* var_uint32 = module.AddGlobal<uint32_t>(MachineType::Uint32());
+ float* var_float = module.AddGlobal<float>(MachineType::Float32());
+ double* var_double = module.AddGlobal<double>(MachineType::Float64());
+
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->module = &module;
+
+ BUILD(
+ r,
+ WASM_BLOCK(
+ 9,
+ WASM_STORE_GLOBAL(1, WASM_LOAD_MEM(MachineType::Int8(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(2, WASM_LOAD_MEM(MachineType::Uint8(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(3, WASM_LOAD_MEM(MachineType::Int16(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(4, WASM_LOAD_MEM(MachineType::Uint16(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(5, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(6, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(7,
+ WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
+ WASM_STORE_GLOBAL(8,
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
+ WASM_ZERO));
+
+ memory[0] = 0xaa;
+ memory[1] = 0xcc;
+ memory[2] = 0x55;
+ memory[3] = 0xee;
+ memory[4] = 0x33;
+ memory[5] = 0x22;
+ memory[6] = 0x11;
+ memory[7] = 0x99;
+ r.Call(1);
+
+ CHECK(static_cast<int8_t>(0xaa) == *var_int8);
+ CHECK(static_cast<uint8_t>(0xaa) == *var_uint8);
+ CHECK(static_cast<int16_t>(0xccaa) == *var_int16);
+ CHECK(static_cast<uint16_t>(0xccaa) == *var_uint16);
+ CHECK(static_cast<int32_t>(0xee55ccaa) == *var_int32);
+ CHECK(static_cast<uint32_t>(0xee55ccaa) == *var_uint32);
+ CHECK(bit_cast<float>(0xee55ccaa) == *var_float);
+ CHECK(bit_cast<double>(0x99112233ee55ccaaULL) == *var_double);
+
+ // Global 0 only exists to offset the indices of the others.
+ USE(unused);
+}
+
+
+#if WASM_64
+// Test the WasmRunner with an Int64 return value and different numbers of
+// Int64 parameters.
+// Exercises the WasmRunner harness itself with an i64 return and 0-4
+// i64 parameters, so later i64 tests can trust the call plumbing.
+TEST(Run_TestI64WasmRunner) {
+ {
+ FOR_INT64_INPUTS(i) {
+ WasmRunner<int64_t> r;
+ BUILD(r, WASM_I64(*i));
+ CHECK_EQ(*i, r.Call());
+ }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64());
+ BUILD(r, WASM_GET_LOCAL(0));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ(*i + *j, r.Call(*i, *j)); }
+ }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0),
+ WASM_I64_ADD(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(*i + *j + *j, r.Call(*i, *j, *j));
+ CHECK_EQ(*j + *i + *j, r.Call(*j, *i, *j));
+ CHECK_EQ(*j + *j + *i, r.Call(*j, *j, *i));
+ }
+ }
+ }
+ {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0),
+ WASM_I64_ADD(WASM_GET_LOCAL(1),
+ WASM_I64_ADD(WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(3)))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(*i + *j + *j + *j, r.Call(*i, *j, *j, *j));
+ CHECK_EQ(*j + *i + *j + *j, r.Call(*j, *i, *j, *j));
+ CHECK_EQ(*j + *j + *i + *j, r.Call(*j, *j, *i, *j));
+ CHECK_EQ(*j + *j + *j + *i, r.Call(*j, *j, *j, *i));
+ }
+ }
+ }
+}
+#endif
+
+
+// A caller invoking a zero-argument callee that returns a constant.
+TEST(Run_WasmCallEmpty) {
+ const int32_t kExpected = -414444;
+ // Build the target function.
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.i_v());
+ BUILD(t, WASM_I32(kExpected));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the calling function.
+ WasmRunner<int32_t> r;
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION0(index));
+
+ int32_t result = r.Call();
+ CHECK_EQ(kExpected, result);
+}
+
+
+// TODO(titzer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// 19 f32 arguments force some parameters onto the stack; the callee
+// returns parameter 17 to verify stack-slot passing.
+TEST(Run_WasmCallF32StackParameter) {
+ // Build the target function.
+ LocalType param_types[20];
+ for (int i = 0; i < 20; i++) param_types[i] = kAstF32;
+ FunctionSig sig(1, 19, param_types);
+ TestingModule module;
+ WasmFunctionCompiler t(&sig);
+ BUILD(t, WASM_GET_LOCAL(17));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the calling function.
+ WasmRunner<float> r;
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION(
+ index, WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
+ WASM_F32(8.0f), WASM_F32(16.0f), WASM_F32(32.0f),
+ WASM_F32(64.0f), WASM_F32(128.0f), WASM_F32(256.0f),
+ WASM_F32(1.5f), WASM_F32(2.5f), WASM_F32(4.5f), WASM_F32(8.5f),
+ WASM_F32(16.5f), WASM_F32(32.5f), WASM_F32(64.5f),
+ WASM_F32(128.5f), WASM_F32(256.5f), WASM_F32(512.5f)));
+
+ float result = r.Call();
+ CHECK_EQ(256.5f, result);
+}
+
+
+// 19 f64 arguments force some parameters onto the stack; the callee
+// returns parameter 17 to verify stack-slot passing.
+TEST(Run_WasmCallF64StackParameter) {
+ // Build the target function.
+ LocalType param_types[20];
+ for (int i = 0; i < 20; i++) param_types[i] = kAstF64;
+ FunctionSig sig(1, 19, param_types);
+ TestingModule module;
+ WasmFunctionCompiler t(&sig);
+ BUILD(t, WASM_GET_LOCAL(17));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the calling function.
+ WasmRunner<double> r;
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_F64(1.0), WASM_F64(2.0),
+ WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
+ WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
+ WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
+ WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5),
+ WASM_F64(32.5), WASM_F64(64.5), WASM_F64(128.5),
+ WASM_F64(256.5), WASM_F64(512.5)));
+
+ // Use double here: the previous `float result` silently truncated the
+ // f64 return value (256.5 is exactly representable, hiding the bug).
+ double result = r.Call();
+ CHECK_EQ(256.5, result);
+}
+
+#endif
+
+
+// A void callee that writes kExpected to memory; the caller then loads
+// the value back, proving the call's side effect happened.
+TEST(Run_WasmCallVoid) {
+ const byte kMemOffset = 8;
+ const int32_t kElemNum = kMemOffset / sizeof(int32_t);
+ const int32_t kExpected = -414444;
+ // Build the target function.
+ TestSignatures sigs;
+ TestingModule module;
+ module.AddMemory(16);
+ module.RandomizeMemory();
+ WasmFunctionCompiler t(sigs.v_v());
+ t.env.module = &module;
+ BUILD(t, WASM_STORE_MEM(MachineType::Int32(), WASM_I8(kMemOffset),
+ WASM_I32(kExpected)));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the calling function.
+ WasmRunner<int32_t> r;
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION0(index),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kMemOffset)));
+
+ int32_t result = r.Call();
+ CHECK_EQ(kExpected, result);
+ CHECK_EQ(kExpected, module.raw_mem_start<int32_t>()[kElemNum]);
+}
+
+
+// Calls an i32 add function; expected value is computed in unsigned
+// arithmetic to avoid C++ signed-overflow UB.
+TEST(Run_WasmCall_Int32Add) {
+ // Build the target function.
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.i_ii());
+ BUILD(t, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the caller function.
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
+ static_cast<uint32_t>(*j));
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+#if WASM_64
+// Calls an i64 subtract function with operands synthesized from pairs
+// of i32 inputs; expected value computed in unsigned arithmetic.
+TEST(Run_WasmCall_Int64Sub) {
+ // Build the target function.
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.l_ll());
+ BUILD(t, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the caller function.
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int64_t a = static_cast<int64_t>(*i) << 32 |
+ (static_cast<int64_t>(*j) | 0xFFFFFFFF);
+ int64_t b = static_cast<int64_t>(*j) << 32 |
+ (static_cast<int64_t>(*i) | 0xFFFFFFFF);
+
+ int64_t expected = static_cast<int64_t>(static_cast<uint64_t>(a) -
+ static_cast<uint64_t>(b));
+ CHECK_EQ(expected, r.Call(a, b));
+ }
+ }
+}
+#endif
+
+
+// Calls an f32 subtract function over all float inputs; results are
+// compared with CheckFloatEq rather than exact equality.
+TEST(Run_WasmCall_Float32Sub) {
+ TestSignatures sigs;
+ WasmFunctionCompiler t(sigs.f_ff());
+
+ // Build the target function.
+ TestingModule module;
+ BUILD(t, WASM_F32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ uint32_t index = t.CompileAndAdd(&module);
+
+ // Build the caller function.
+ WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+ r.env()->module = &module;
+ BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) {
+ volatile float expected = *i - *j;
+ CheckFloatEq(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+// f64 subtraction via memory: subtracts memory[1] from memory[0] and
+// stores the result back; NaN results are checked via x != x.
+TEST(Run_WasmCall_Float64Sub) {
+ WasmRunner<int32_t> r;
+ TestingModule module;
+ double* memory = module.AddMemoryElems<double>(16);
+ r.env()->module = &module;
+
+ // TODO(titzer): convert to a binop test.
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_STORE_MEM(
+ MachineType::Float64(), WASM_ZERO,
+ WASM_F64_SUB(
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_I8(8)))),
+ WASM_I8(107)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) {
+ memory[0] = *i;
+ memory[1] = *j;
+ double expected = *i - *j;
+ CHECK_EQ(107, r.Call());
+ if (expected != expected) {
+ // NaN expected: the stored result must also be NaN.
+ CHECK(memory[0] != memory[0]);
+ } else {
+ CHECK_EQ(expected, memory[0]);
+ }
+ }
+ }
+}
+
+// Appends the given byte literals to the std::vector<byte> {vec}.
+#define ADD_CODE(vec, ...) \
+ do { \
+ byte __buf[] = {__VA_ARGS__}; \
+ for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
+ } while (false)
+
+
+// Builds a callee with a mixed-type parameter list starting at {start},
+// which returns its {which}-th parameter; the caller loads each argument
+// from memory, stores the selected result back to memory offset 0, and
+// the test compares the stored bytes against the source element.
+static void Run_WasmMixedCall_N(int start) {
+ const int kExpected = 6333;
+ const int kElemSize = 8;
+ TestSignatures sigs;
+
+// 64-bit parameter types are only usable when WASM_64 is available.
+#if WASM_64
+ static MachineType mixed[] = {
+ MachineType::Int32(), MachineType::Float32(), MachineType::Int64(),
+ MachineType::Float64(), MachineType::Float32(), MachineType::Int64(),
+ MachineType::Int32(), MachineType::Float64(), MachineType::Float32(),
+ MachineType::Float64(), MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int32(), MachineType::Int32()};
+#else
+ static MachineType mixed[] = {
+ MachineType::Int32(), MachineType::Float32(), MachineType::Float64(),
+ MachineType::Float32(), MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float32(), MachineType::Float64(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32()};
+#endif
+
+ int num_params = static_cast<int>(arraysize(mixed)) - start;
+ for (int which = 0; which < num_params; which++) {
+ Zone zone;
+ TestingModule module;
+ module.AddMemory(1024);
+ MachineType* memtypes = &mixed[start];
+ MachineType result = memtypes[which];
+
+ // =========================================================================
+ // Build the selector function.
+ // =========================================================================
+ uint32_t index;
+ FunctionSig::Builder b(&zone, 1, num_params);
+ b.AddReturn(WasmOpcodes::LocalTypeFor(result));
+ for (int i = 0; i < num_params; i++) {
+ b.AddParam(WasmOpcodes::LocalTypeFor(memtypes[i]));
+ }
+ WasmFunctionCompiler t(b.Build());
+ t.env.module = &module;
+ BUILD(t, WASM_GET_LOCAL(which));
+ index = t.CompileAndAdd(&module);
+
+ // =========================================================================
+ // Build the calling function.
+ // =========================================================================
+ WasmRunner<int32_t> r;
+ r.env()->module = &module;
+
+ {
+ std::vector<byte> code;
+ // Store opcode for the result type, then address 0, then the call.
+ ADD_CODE(code,
+ static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
+ WasmOpcodes::LoadStoreAccessOf(false));
+ ADD_CODE(code, WASM_ZERO);
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
+
+ // Load each argument from its own kElemSize-aligned memory slot.
+ for (int i = 0; i < num_params; i++) {
+ int offset = (i + 1) * kElemSize;
+ ADD_CODE(code, WASM_LOAD_MEM(memtypes[i], WASM_I8(offset)));
+ }
+
+ ADD_CODE(code, WASM_I32(kExpected));
+ size_t end = code.size();
+ code.push_back(0);
+ r.Build(&code[0], &code[end]);
+ }
+
+ // Run the code.
+ for (int t = 0; t < 10; t++) {
+ module.RandomizeMemory();
+ CHECK_EQ(kExpected, r.Call());
+
+ // The selected argument's bytes must have been copied to offset 0.
+ int size = WasmOpcodes::MemSize(result);
+ for (int i = 0; i < size; i++) {
+ int base = (which + 1) * kElemSize;
+ byte expected = module.raw_mem_at<byte>(base + i);
+ byte result = module.raw_mem_at<byte>(i);
+ CHECK_EQ(expected, result);
+ }
+ }
+ }
+}
+
+
+TEST(Run_WasmMixedCall_0) { Run_WasmMixedCall_N(0); }
+TEST(Run_WasmMixedCall_1) { Run_WasmMixedCall_N(1); }
+TEST(Run_WasmMixedCall_2) { Run_WasmMixedCall_N(2); }
+TEST(Run_WasmMixedCall_3) { Run_WasmMixedCall_N(3); }
+
+
+TEST(Run_Wasm_CountDown_expr) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_LOOP(
+ 3, WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)),
+ WASM_BREAKV(0, WASM_GET_LOCAL(0))),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
+ WASM_CONTINUE(0)));
+ CHECK_EQ(0, r.Call(1));
+ CHECK_EQ(0, r.Call(10));
+ CHECK_EQ(0, r.Call(100));
+}
+
+
+TEST(Run_Wasm_ExprBlock2a) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))),
+ WASM_I8(1)));
+ CHECK_EQ(1, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+}
+
+
+TEST(Run_Wasm_ExprBlock2b) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))),
+ WASM_I8(2)));
+ CHECK_EQ(2, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+}
+
+
+TEST(Run_Wasm_ExprBlock2c) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(1)),
+ WASM_I8(1)));
+ CHECK_EQ(1, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+}
+
+
+TEST(Run_Wasm_ExprBlock2d) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(1)),
+ WASM_I8(2)));
+ CHECK_EQ(2, r.Call(0));
+ CHECK_EQ(1, r.Call(1));
+}
+
+
+TEST(Run_Wasm_ExprBlock_ManualSwitch) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(6, WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
+ WASM_BRV(0, WASM_I8(11))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2)),
+ WASM_BRV(0, WASM_I8(12))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3)),
+ WASM_BRV(0, WASM_I8(13))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4)),
+ WASM_BRV(0, WASM_I8(14))),
+ WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5)),
+ WASM_BRV(0, WASM_I8(15))),
+ WASM_I8(99)));
+ CHECK_EQ(99, r.Call(0));
+ CHECK_EQ(11, r.Call(1));
+ CHECK_EQ(12, r.Call(2));
+ CHECK_EQ(13, r.Call(3));
+ CHECK_EQ(14, r.Call(4));
+ CHECK_EQ(15, r.Call(5));
+ CHECK_EQ(99, r.Call(6));
+}
+
+
+TEST(Run_Wasm_ExprBlock_ManualSwitch_brif) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(6, WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
+ WASM_I8(11)),
+ WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2)),
+ WASM_I8(12)),
+ WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3)),
+ WASM_I8(13)),
+ WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4)),
+ WASM_I8(14)),
+ WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5)),
+ WASM_I8(15)),
+ WASM_I8(99)));
+ CHECK_EQ(99, r.Call(0));
+ CHECK_EQ(11, r.Call(1));
+ CHECK_EQ(12, r.Call(2));
+ CHECK_EQ(13, r.Call(3));
+ CHECK_EQ(14, r.Call(4));
+ CHECK_EQ(15, r.Call(5));
+ CHECK_EQ(99, r.Call(6));
+}
+
+
+TEST(Run_Wasm_nested_ifs) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+
+ BUILD(r, WASM_IF_ELSE(
+ WASM_GET_LOCAL(0),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
+
+
+ CHECK_EQ(11, r.Call(1, 1));
+ CHECK_EQ(12, r.Call(1, 0));
+ CHECK_EQ(13, r.Call(0, 1));
+ CHECK_EQ(14, r.Call(0, 0));
+}
+
+
+TEST(Run_Wasm_ExprBlock_if) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+
+ BUILD(r,
+ WASM_BLOCK(1, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(11)),
+ WASM_BRV(0, WASM_I8(14)))));
+
+ CHECK_EQ(11, r.Call(1));
+ CHECK_EQ(14, r.Call(0));
+}
+
+
+TEST(Run_Wasm_ExprBlock_nested_ifs) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+
+ BUILD(r, WASM_BLOCK(
+ 1, WASM_IF_ELSE(
+ WASM_GET_LOCAL(0),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(11)),
+ WASM_BRV(0, WASM_I8(12))),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(13)),
+ WASM_BRV(0, WASM_I8(14))))));
+
+
+ CHECK_EQ(11, r.Call(1, 1));
+ CHECK_EQ(12, r.Call(1, 0));
+ CHECK_EQ(13, r.Call(0, 1));
+ CHECK_EQ(14, r.Call(0, 0));
+}
+
+
+TEST(Run_Wasm_ExprLoop_nested_ifs) {
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+
+ BUILD(r, WASM_LOOP(
+ 1, WASM_IF_ELSE(
+ WASM_GET_LOCAL(0),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(11)),
+ WASM_BRV(1, WASM_I8(12))),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(13)),
+ WASM_BRV(1, WASM_I8(14))))));
+
+
+ CHECK_EQ(11, r.Call(1, 1));
+ CHECK_EQ(12, r.Call(1, 0));
+ CHECK_EQ(13, r.Call(0, 1));
+ CHECK_EQ(14, r.Call(0, 0));
+}
+
+
+#if WASM_64
+TEST(Run_Wasm_LoadStoreI64_sx) {
+ byte loads[] = {kExprI64LoadMem8S, kExprI64LoadMem16S, kExprI64LoadMem32S,
+ kExprI64LoadMem};
+
+ for (size_t m = 0; m < arraysize(loads); m++) {
+ WasmRunner<int64_t> r;
+ TestingModule module;
+ byte* memory = module.AddMemoryElems<byte>(16);
+ r.env()->module = &module;
+
+ byte code[] = {kExprI64StoreMem, 0, kExprI8Const, 8,
+ loads[m], 0, kExprI8Const, 0};
+
+ r.Build(code, code + arraysize(code));
+
+ // Try a bunch of different negative values.
+ for (int i = -1; i >= -128; i -= 11) {
+ int size = 1 << m;
+ module.BlankMemory();
+ memory[size - 1] = static_cast<byte>(i); // set the high order byte.
+
+ int64_t expected = static_cast<int64_t>(i) << ((size - 1) * 8);
+
+ CHECK_EQ(expected, r.Call());
+ CHECK_EQ(static_cast<byte>(i), memory[8 + size - 1]);
+ for (int j = size; j < 8; j++) {
+ CHECK_EQ(255, memory[8 + j]);
+ }
+ }
+ }
+}
+
+
+#endif
+
+
+TEST(Run_Wasm_SimpleCallIndirect) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+
+ WasmRunner<int32_t> r(MachineType::Int32());
+ TestSignatures sigs;
+ TestingModule module;
+ r.env()->module = &module;
+ WasmFunctionCompiler t1(sigs.i_ii());
+ BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t1.CompileAndAdd(&module);
+
+ WasmFunctionCompiler t2(sigs.i_ii());
+ BUILD(t2, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t2.CompileAndAdd(&module);
+
+ // Signature table.
+ module.AddSignature(sigs.f_ff());
+ module.AddSignature(sigs.i_ii());
+ module.AddSignature(sigs.d_dd());
+
+ // Function table.
+ int table_size = 2;
+ module.module->function_table = new std::vector<uint16_t>;
+ module.module->function_table->push_back(0);
+ module.module->function_table->push_back(1);
+
+ // Function table.
+ Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
+ fixed->set(0, Smi::FromInt(1));
+ fixed->set(1, Smi::FromInt(1));
+ fixed->set(2, *module.function_code->at(0));
+ fixed->set(3, *module.function_code->at(1));
+ module.function_table = fixed;
+
+  // Build the caller function.
+ BUILD(r, WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
+
+ CHECK_EQ(88, r.Call(0));
+ CHECK_EQ(44, r.Call(1));
+ CHECK_TRAP(r.Call(2));
+}
+
+
+TEST(Run_Wasm_MultipleCallIndirect) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+
+ WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ TestSignatures sigs;
+ TestingModule module;
+ r.env()->module = &module;
+ WasmFunctionCompiler t1(sigs.i_ii());
+ BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t1.CompileAndAdd(&module);
+
+ WasmFunctionCompiler t2(sigs.i_ii());
+ BUILD(t2, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t2.CompileAndAdd(&module);
+
+ // Signature table.
+ module.AddSignature(sigs.f_ff());
+ module.AddSignature(sigs.i_ii());
+ module.AddSignature(sigs.d_dd());
+
+ // Function table.
+ int table_size = 2;
+ module.module->function_table = new std::vector<uint16_t>;
+ module.module->function_table->push_back(0);
+ module.module->function_table->push_back(1);
+
+ // Function table.
+ Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
+ fixed->set(0, Smi::FromInt(1));
+ fixed->set(1, Smi::FromInt(1));
+ fixed->set(2, *module.function_code->at(0));
+ fixed->set(3, *module.function_code->at(1));
+ module.function_table = fixed;
+
+  // Build the caller function.
+ BUILD(r,
+ WASM_I32_ADD(WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(2)),
+ WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(0))));
+
+ CHECK_EQ(5, r.Call(0, 1, 2));
+ CHECK_EQ(19, r.Call(0, 1, 9));
+ CHECK_EQ(1, r.Call(1, 0, 2));
+ CHECK_EQ(1, r.Call(1, 0, 9));
+
+ CHECK_TRAP(r.Call(0, 2, 1));
+ CHECK_TRAP(r.Call(1, 2, 0));
+ CHECK_TRAP(r.Call(2, 0, 1));
+ CHECK_TRAP(r.Call(2, 1, 0));
+}
+
+
+// TODO(titzer): Fix for nosee4 and re-enable.
+#if 0
+
+TEST(Run_Wasm_F32Floor) {
+ WasmRunner<float> r(MachineType::Float32());
+ BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(floor(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F32Ceil) {
+ WasmRunner<float> r(MachineType::Float32());
+ BUILD(r, WASM_F32_CEIL(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceil(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F32Trunc) {
+ WasmRunner<float> r(MachineType::Float32());
+ BUILD(r, WASM_F32_TRUNC(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(trunc(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F32NearestInt) {
+ WasmRunner<float> r(MachineType::Float32());
+ BUILD(r, WASM_F32_NEARESTINT(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(nearbyint(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F64Floor) {
+ WasmRunner<double> r(MachineType::Float64());
+ BUILD(r, WASM_F64_FLOOR(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(floor(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F64Ceil) {
+ WasmRunner<double> r(MachineType::Float64());
+ BUILD(r, WASM_F64_CEIL(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(ceil(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F64Trunc) {
+ WasmRunner<double> r(MachineType::Float64());
+ BUILD(r, WASM_F64_TRUNC(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(trunc(*i), r.Call(*i)); }
+}
+
+
+TEST(Run_Wasm_F64NearestInt) {
+ WasmRunner<double> r(MachineType::Float64());
+ BUILD(r, WASM_F64_NEARESTINT(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(nearbyint(*i), r.Call(*i)); }
+}
+
+#endif
+
+
+TEST(Run_Wasm_F32Min) {
+ WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+ BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) {
+ float expected;
+ if (*i < *j) {
+ expected = *i;
+ } else if (*j < *i) {
+ expected = *j;
+ } else if (*i != *i) {
+ // If *i or *j is NaN, then the result is NaN.
+ expected = *i;
+ } else {
+ expected = *j;
+ }
+
+ CheckFloatEq(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_F64Min) {
+ WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
+ BUILD(r, WASM_F64_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) {
+ double expected;
+ if (*i < *j) {
+ expected = *i;
+ } else if (*j < *i) {
+ expected = *j;
+ } else if (*i != *i) {
+ // If *i or *j is NaN, then the result is NaN.
+ expected = *i;
+ } else {
+ expected = *j;
+ }
+
+ CheckDoubleEq(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_F32Max) {
+ WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+ BUILD(r, WASM_F32_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) {
+ float expected;
+ if (*i > *j) {
+ expected = *i;
+ } else if (*j > *i) {
+ expected = *j;
+ } else if (*i != *i) {
+ // If *i or *j is NaN, then the result is NaN.
+ expected = *i;
+ } else {
+ expected = *j;
+ }
+
+ CheckFloatEq(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_F64Max) {
+ WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
+ BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) {
+ double expected;
+ if (*i > *j) {
+ expected = *i;
+ } else if (*j > *i) {
+ expected = *j;
+ } else if (*i != *i) {
+ // If *i or *j is NaN, then the result is NaN.
+ expected = *i;
+ } else {
+ expected = *j;
+ }
+
+ CheckDoubleEq(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+
+#if WASM_64
+TEST(Run_Wasm_F32SConvertI64) {
+ WasmRunner<float> r(MachineType::Int64());
+ BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<float>(*i), r.Call(*i)); }
+}
+
+
+#if !defined(_WIN64)
+// TODO(ahaas): Fix this failure.
+TEST(Run_Wasm_F32UConvertI64) {
+ WasmRunner<float> r(MachineType::Uint64());
+ BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(static_cast<float>(*i), r.Call(*i)); }
+}
+#endif
+
+
+TEST(Run_Wasm_F64SConvertI64) {
+ WasmRunner<double> r(MachineType::Int64());
+ BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
+ FOR_INT64_INPUTS(i) { CHECK_EQ(static_cast<double>(*i), r.Call(*i)); }
+}
+
+
+#if !defined(_WIN64)
+// TODO(ahaas): Fix this failure.
+TEST(Run_Wasm_F64UConvertI64) {
+ WasmRunner<double> r(MachineType::Uint64());
+ BUILD(r, WASM_F64_UCONVERT_I64(WASM_GET_LOCAL(0)));
+ FOR_UINT64_INPUTS(i) { CHECK_EQ(static_cast<double>(*i), r.Call(*i)); }
+}
+#endif
+
+
+TEST(Run_Wasm_I64SConvertF32) {
+ WasmRunner<int64_t> r(MachineType::Float32());
+ BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(INT64_MAX) &&
+ *i >= static_cast<float>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_I64SConvertF64) {
+ WasmRunner<int64_t> r(MachineType::Float64());
+ BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<double>(INT64_MAX) &&
+ *i >= static_cast<double>(INT64_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_I64UConvertF32) {
+ WasmRunner<uint64_t> r(MachineType::Float32());
+ BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_I64UConvertF64) {
+ WasmRunner<uint64_t> r(MachineType::Float64());
+ BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<float>(UINT64_MAX) && *i > -1) {
+ CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP64(r.Call(*i));
+ }
+ }
+}
+#endif
+
+
+// TODO(titzer): Fix and re-enable.
+#if 0
+TEST(Run_Wasm_I32SConvertF32) {
+ WasmRunner<int32_t> r(MachineType::Float32());
+ BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(INT32_MAX) &&
+ *i >= static_cast<float>(INT32_MIN)) {
+ CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP32(r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_I32SConvertF64) {
+ WasmRunner<int32_t> r(MachineType::Float64());
+ BUILD(r, WASM_I32_SCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<double>(INT32_MAX) &&
+ *i >= static_cast<double>(INT32_MIN)) {
+ CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP32(r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_I32UConvertF32) {
+ WasmRunner<uint32_t> r(MachineType::Float32());
+ BUILD(r, WASM_I32_UCONVERT_F32(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i < static_cast<float>(UINT32_MAX) && *i > -1) {
+ CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP32(r.Call(*i));
+ }
+ }
+}
+
+
+TEST(Run_Wasm_I32UConvertF64) {
+ WasmRunner<uint32_t> r(MachineType::Float64());
+ BUILD(r, WASM_I32_UCONVERT_F64(WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ if (*i < static_cast<float>(UINT32_MAX) && *i > -1) {
+ CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
+ } else {
+ CHECK_TRAP32(r.Call(*i));
+ }
+ }
+}
+#endif
+
+
+TEST(Run_Wasm_F64CopySign) {
+ WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
+ BUILD(r, WASM_F64_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) { CheckDoubleEq(copysign(*i, *j), r.Call(*i, *j)); }
+ }
+}
+
+
+// TODO(titzer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST(Run_Wasm_F32CopySign) {
+ WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+ BUILD(r, WASM_F32_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) { CheckFloatEq(copysign(*i, *j), r.Call(*i, *j)); }
+ }
+}
+
+#endif
diff --git a/deps/v8/test/cctest/wasm/test-signatures.h b/deps/v8/test/cctest/wasm/test-signatures.h
new file mode 100644
index 0000000000..30ea605386
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-signatures.h
@@ -0,0 +1,111 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEST_SIGNATURES_H
+#define TEST_SIGNATURES_H
+
+#include "src/signature.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+typedef Signature<LocalType> FunctionSig;
+
+// A helper class with many useful signatures in order to simplify tests.
+class TestSignatures {
+ public:
+ TestSignatures()
+ : sig_i_v(1, 0, kIntTypes4),
+ sig_i_i(1, 1, kIntTypes4),
+ sig_i_ii(1, 2, kIntTypes4),
+ sig_i_iii(1, 3, kIntTypes4),
+ sig_i_f(1, 1, kIntFloatTypes4),
+ sig_i_ff(1, 2, kIntFloatTypes4),
+ sig_i_d(1, 1, kIntDoubleTypes4),
+ sig_i_dd(1, 2, kIntDoubleTypes4),
+ sig_l_v(1, 0, kLongTypes4),
+ sig_l_l(1, 1, kLongTypes4),
+ sig_l_ll(1, 2, kLongTypes4),
+ sig_i_ll(1, 2, kIntLongTypes4),
+ sig_f_ff(1, 2, kFloatTypes4),
+ sig_d_dd(1, 2, kDoubleTypes4),
+ sig_v_v(0, 0, kIntTypes4),
+ sig_v_i(0, 1, kIntTypes4),
+ sig_v_ii(0, 2, kIntTypes4),
+ sig_v_iii(0, 3, kIntTypes4) {
+ // I used C++ and you won't believe what happened next....
+ for (int i = 0; i < 4; i++) kIntTypes4[i] = kAstI32;
+ for (int i = 0; i < 4; i++) kLongTypes4[i] = kAstI64;
+ for (int i = 0; i < 4; i++) kFloatTypes4[i] = kAstF32;
+ for (int i = 0; i < 4; i++) kDoubleTypes4[i] = kAstF64;
+ for (int i = 0; i < 4; i++) kIntLongTypes4[i] = kAstI64;
+ for (int i = 0; i < 4; i++) kIntFloatTypes4[i] = kAstF32;
+ for (int i = 0; i < 4; i++) kIntDoubleTypes4[i] = kAstF64;
+ kIntLongTypes4[0] = kAstI32;
+ kIntFloatTypes4[0] = kAstI32;
+ kIntDoubleTypes4[0] = kAstI32;
+ }
+
+ FunctionSig* i_v() { return &sig_i_v; }
+ FunctionSig* i_i() { return &sig_i_i; }
+ FunctionSig* i_ii() { return &sig_i_ii; }
+ FunctionSig* i_iii() { return &sig_i_iii; }
+
+ FunctionSig* i_f() { return &sig_i_f; }
+ FunctionSig* i_ff() { return &sig_i_ff; }
+ FunctionSig* i_d() { return &sig_i_d; }
+ FunctionSig* i_dd() { return &sig_i_dd; }
+
+ FunctionSig* l_v() { return &sig_l_v; }
+ FunctionSig* l_l() { return &sig_l_l; }
+ FunctionSig* l_ll() { return &sig_l_ll; }
+ FunctionSig* i_ll() { return &sig_i_ll; }
+
+ FunctionSig* f_ff() { return &sig_f_ff; }
+ FunctionSig* d_dd() { return &sig_d_dd; }
+
+ FunctionSig* v_v() { return &sig_v_v; }
+ FunctionSig* v_i() { return &sig_v_i; }
+ FunctionSig* v_ii() { return &sig_v_ii; }
+ FunctionSig* v_iii() { return &sig_v_iii; }
+
+ private:
+ LocalType kIntTypes4[4];
+ LocalType kLongTypes4[4];
+ LocalType kFloatTypes4[4];
+ LocalType kDoubleTypes4[4];
+ LocalType kIntLongTypes4[4];
+ LocalType kIntFloatTypes4[4];
+ LocalType kIntDoubleTypes4[4];
+
+ FunctionSig sig_i_v;
+ FunctionSig sig_i_i;
+ FunctionSig sig_i_ii;
+ FunctionSig sig_i_iii;
+
+ FunctionSig sig_i_f;
+ FunctionSig sig_i_ff;
+ FunctionSig sig_i_d;
+ FunctionSig sig_i_dd;
+
+ FunctionSig sig_l_v;
+ FunctionSig sig_l_l;
+ FunctionSig sig_l_ll;
+ FunctionSig sig_i_ll;
+
+ FunctionSig sig_f_ff;
+ FunctionSig sig_d_dd;
+
+ FunctionSig sig_v_v;
+ FunctionSig sig_v_i;
+ FunctionSig sig_v_ii;
+ FunctionSig sig_v_iii;
+};
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // TEST_SIGNATURES_H
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
new file mode 100644
index 0000000000..cc23b46b73
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -0,0 +1,391 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_RUN_UTILS_H
+#define WASM_RUN_UTILS_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/utils/random-number-generator.h"
+
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/wasm-compiler.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+// TODO(titzer): pull WASM_64 up to a common header.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
+// TODO(titzer): check traps more robustly in tests.
+// Currently, in tests, we just return 0xdeadbeef from the function in which
+// the trap occurs if the runtime context is not available to throw a JavaScript
+// exception.
+#define CHECK_TRAP32(x) \
+ CHECK_EQ(0xdeadbeef, (bit_cast<uint32_t>(x)) & 0xFFFFFFFF)
+#define CHECK_TRAP64(x) \
+ CHECK_EQ(0xdeadbeefdeadbeef, (bit_cast<uint64_t>(x)) & 0xFFFFFFFFFFFFFFFF)
+#define CHECK_TRAP(x) CHECK_TRAP32(x)
+
+namespace {
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+inline void init_env(FunctionEnv* env, FunctionSig* sig) {
+ env->module = nullptr;
+ env->sig = sig;
+ env->local_int32_count = 0;
+ env->local_int64_count = 0;
+ env->local_float32_count = 0;
+ env->local_float64_count = 0;
+ env->SumLocals();
+}
+
+const uint32_t kMaxGlobalsSize = 128;
+
+// A helper for module environments that adds the ability to allocate memory
+// and global variables.
+class TestingModule : public ModuleEnv {
+ public:
+ TestingModule() : mem_size(0), global_offset(0) {
+ globals_area = 0;
+ mem_start = 0;
+ mem_end = 0;
+ module = nullptr;
+ linker = nullptr;
+ function_code = nullptr;
+ asm_js = false;
+ memset(global_data, 0, sizeof(global_data));
+ }
+
+ ~TestingModule() {
+ if (mem_start) {
+ free(raw_mem_start<byte>());
+ }
+ if (function_code) delete function_code;
+ if (module) delete module;
+ }
+
+ byte* AddMemory(size_t size) {
+ CHECK_EQ(0, mem_start);
+ CHECK_EQ(0, mem_size);
+ mem_start = reinterpret_cast<uintptr_t>(malloc(size));
+ CHECK(mem_start);
+ byte* raw = raw_mem_start<byte>();
+ memset(raw, 0, size);
+ mem_end = mem_start + size;
+ mem_size = size;
+ return raw_mem_start<byte>();
+ }
+
+ template <typename T>
+ T* AddMemoryElems(size_t count) {
+ AddMemory(count * sizeof(T));
+ return raw_mem_start<T>();
+ }
+
+ template <typename T>
+ T* AddGlobal(MachineType mem_type) {
+ WasmGlobal* global = AddGlobal(mem_type);
+ return reinterpret_cast<T*>(globals_area + global->offset);
+ }
+
+ byte AddSignature(FunctionSig* sig) {
+ AllocModule();
+ if (!module->signatures) {
+ module->signatures = new std::vector<FunctionSig*>();
+ }
+ module->signatures->push_back(sig);
+ size_t size = module->signatures->size();
+ CHECK(size < 127);
+ return static_cast<byte>(size - 1);
+ }
+
+ template <typename T>
+ T* raw_mem_start() {
+ DCHECK(mem_start);
+ return reinterpret_cast<T*>(mem_start);
+ }
+
+ template <typename T>
+ T* raw_mem_end() {
+ DCHECK(mem_end);
+ return reinterpret_cast<T*>(mem_end);
+ }
+
+ template <typename T>
+ T raw_mem_at(int i) {
+ DCHECK(mem_start);
+ return reinterpret_cast<T*>(mem_start)[i];
+ }
+
+ template <typename T>
+ T raw_val_at(int i) {
+ T val;
+ memcpy(&val, reinterpret_cast<void*>(mem_start + i), sizeof(T));
+ return val;
+ }
+
+ // Zero-initialize the memory.
+ void BlankMemory() {
+ byte* raw = raw_mem_start<byte>();
+ memset(raw, 0, mem_size);
+ }
+
+  // Pseudo-randomly initialize the memory.
+ void RandomizeMemory(unsigned int seed = 88) {
+ byte* raw = raw_mem_start<byte>();
+ byte* end = raw_mem_end<byte>();
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(seed);
+ rng.NextBytes(raw, end - raw);
+ }
+
+ WasmFunction* AddFunction(FunctionSig* sig, Handle<Code> code) {
+ AllocModule();
+ if (module->functions == nullptr) {
+ module->functions = new std::vector<WasmFunction>();
+ function_code = new std::vector<Handle<Code>>();
+ }
+ module->functions->push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
+ function_code->push_back(code);
+ return &module->functions->back();
+ }
+
+ private:
+ size_t mem_size;
+ uint32_t global_offset;
+ byte global_data[kMaxGlobalsSize];
+
+ WasmGlobal* AddGlobal(MachineType mem_type) {
+ AllocModule();
+ if (globals_area == 0) {
+ globals_area = reinterpret_cast<uintptr_t>(global_data);
+ module->globals = new std::vector<WasmGlobal>();
+ }
+ byte size = WasmOpcodes::MemSize(mem_type);
+ global_offset = (global_offset + size - 1) & ~(size - 1); // align
+ module->globals->push_back({0, mem_type, global_offset, false});
+ global_offset += size;
+ // limit number of globals.
+ CHECK_LT(global_offset, kMaxGlobalsSize);
+ return &module->globals->back();
+ }
+ void AllocModule() {
+ if (module == nullptr) {
+ module = new WasmModule();
+ module->shared_isolate = CcTest::InitIsolateOnce();
+ module->globals = nullptr;
+ module->functions = nullptr;
+ module->data_segments = nullptr;
+ }
+ }
+};
+
+
+inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, FunctionEnv* env,
+ const byte* start, const byte* end) {
+ compiler::WasmGraphBuilder builder(zone, jsgraph, env->sig);
+ TreeResult result = BuildTFGraph(&builder, env, start, end);
+ if (result.failed()) {
+ ptrdiff_t pc = result.error_pc - result.start;
+ ptrdiff_t pt = result.error_pt - result.start;
+ std::ostringstream str;
+ str << "Verification failed: " << result.error_code << " pc = +" << pc;
+ if (result.error_pt) str << ", pt = +" << pt;
+ str << ", msg = " << result.error_msg.get();
+ FATAL(str.str().c_str());
+ }
+ if (FLAG_trace_turbo_graph) {
+ OFStream os(stdout);
+ os << AsRPO(*jsgraph->graph());
+ }
+}
+
+
+// A helper for compiling functions that are only internally callable WASM code.
+class WasmFunctionCompiler : public HandleAndZoneScope,
+ private GraphAndBuilders {
+ public:
+ explicit WasmFunctionCompiler(FunctionSig* sig, ModuleEnv* module = nullptr)
+ : GraphAndBuilders(main_zone()),
+ jsgraph(this->isolate(), this->graph(), this->common(), nullptr,
+ nullptr, this->machine()),
+ descriptor_(nullptr) {
+ init_env(&env, sig);
+ env.module = module;
+ }
+
+ JSGraph jsgraph;
+ FunctionEnv env;
+ // The call descriptor is initialized when the function is compiled.
+ CallDescriptor* descriptor_;
+
+ Isolate* isolate() { return main_isolate(); }
+ Graph* graph() const { return main_graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ CommonOperatorBuilder* common() { return &main_common_; }
+ MachineOperatorBuilder* machine() { return &main_machine_; }
+ CallDescriptor* descriptor() { return descriptor_; }
+
+ void Build(const byte* start, const byte* end) {
+ TestBuildingGraph(main_zone(), &jsgraph, &env, start, end);
+ }
+
+ byte AllocateLocal(LocalType type) {
+ int result = static_cast<int>(env.total_locals);
+ env.AddLocals(type, 1);
+ byte b = static_cast<byte>(result);
+ CHECK_EQ(result, b);
+ return b;
+ }
+
+ Handle<Code> Compile(ModuleEnv* module) {
+ descriptor_ = module->GetWasmCallDescriptor(this->zone(), env.sig);
+ CompilationInfo info("wasm compile", this->isolate(), this->zone());
+ Handle<Code> result =
+ Pipeline::GenerateCodeForTesting(&info, descriptor_, this->graph());
+#ifdef ENABLE_DISASSEMBLER
+ if (!result.is_null() && FLAG_print_opt_code) {
+ OFStream os(stdout);
+ result->Disassemble("wasm code", os);
+ }
+#endif
+
+ return result;
+ }
+
+ uint32_t CompileAndAdd(TestingModule* module) {
+ uint32_t index = 0;
+ if (module->module && module->module->functions) {
+ index = static_cast<uint32_t>(module->module->functions->size());
+ }
+ module->AddFunction(env.sig, Compile(module));
+ return index;
+ }
+};
+
+
+// A helper class to build graphs from Wasm bytecode, generate machine
+// code, and run that code.
+template <typename ReturnType>
+class WasmRunner {
+ public:
+ WasmRunner(MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None())
+ : signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
+ GetParameterCount(p0, p1, p2, p3), storage_),
+ compiler_(&signature_),
+ call_wrapper_(p0, p1, p2, p3),
+ compilation_done_(false) {
+ int index = 0;
+ MachineType ret = MachineTypeForC<ReturnType>();
+ if (ret != MachineType::None()) {
+ storage_[index++] = WasmOpcodes::LocalTypeFor(ret);
+ }
+ if (p0 != MachineType::None())
+ storage_[index++] = WasmOpcodes::LocalTypeFor(p0);
+ if (p1 != MachineType::None())
+ storage_[index++] = WasmOpcodes::LocalTypeFor(p1);
+ if (p2 != MachineType::None())
+ storage_[index++] = WasmOpcodes::LocalTypeFor(p2);
+ if (p3 != MachineType::None())
+ storage_[index++] = WasmOpcodes::LocalTypeFor(p3);
+ }
+
+
+ FunctionEnv* env() { return &compiler_.env; }
+
+
+ // Builds a graph from the given Wasm code, and generates the machine
+ // code and call wrapper for that graph. This method must not be called
+ // more than once.
+ void Build(const byte* start, const byte* end) {
+ DCHECK(!compilation_done_);
+ compilation_done_ = true;
+ // Build the TF graph.
+ compiler_.Build(start, end);
+ // Generate code.
+ Handle<Code> code = compiler_.Compile(env()->module);
+
+ // Construct the call wrapper.
+ Node* inputs[5];
+ int input_count = 0;
+ inputs[input_count++] = call_wrapper_.HeapConstant(code);
+ for (size_t i = 0; i < signature_.parameter_count(); i++) {
+ inputs[input_count++] = call_wrapper_.Parameter(i);
+ }
+
+ call_wrapper_.Return(call_wrapper_.AddNode(
+ call_wrapper_.common()->Call(compiler_.descriptor()), input_count,
+ inputs));
+ }
+
+ ReturnType Call() { return call_wrapper_.Call(); }
+
+ template <typename P0>
+ ReturnType Call(P0 p0) {
+ return call_wrapper_.Call(p0);
+ }
+
+ template <typename P0, typename P1>
+ ReturnType Call(P0 p0, P1 p1) {
+ return call_wrapper_.Call(p0, p1);
+ }
+
+ template <typename P0, typename P1, typename P2>
+ ReturnType Call(P0 p0, P1 p1, P2 p2) {
+ return call_wrapper_.Call(p0, p1, p2);
+ }
+
+ template <typename P0, typename P1, typename P2, typename P3>
+ ReturnType Call(P0 p0, P1 p1, P2 p2, P3 p3) {
+ return call_wrapper_.Call(p0, p1, p2, p3);
+ }
+
+ byte AllocateLocal(LocalType type) {
+ int result = static_cast<int>(env()->total_locals);
+ env()->AddLocals(type, 1);
+ byte b = static_cast<byte>(result);
+ CHECK_EQ(result, b);
+ return b;
+ }
+
+ private:
+ LocalType storage_[5];
+ FunctionSig signature_;
+ WasmFunctionCompiler compiler_;
+ BufferedRawMachineAssemblerTester<ReturnType> call_wrapper_;
+ bool compilation_done_;
+
+ static size_t GetParameterCount(MachineType p0, MachineType p1,
+ MachineType p2, MachineType p3) {
+ if (p0 == MachineType::None()) return 0;
+ if (p1 == MachineType::None()) return 1;
+ if (p2 == MachineType::None()) return 2;
+ if (p3 == MachineType::None()) return 3;
+ return 4;
+ }
+};
+
+} // namespace
+
+#endif
diff --git a/deps/v8/test/ignition.gyp b/deps/v8/test/ignition.gyp
new file mode 100644
index 0000000000..6aebec9e19
--- /dev/null
+++ b/deps/v8/test/ignition.gyp
@@ -0,0 +1,27 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'ignition_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest/cctest.gyp:cctest_run',
+ 'mjsunit/mjsunit.gyp:mjsunit_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'ignition.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/ignition.isolate b/deps/v8/test/ignition.isolate
new file mode 100644
index 0000000000..9604a694b2
--- /dev/null
+++ b/deps/v8/test/ignition.isolate
@@ -0,0 +1,9 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'includes': [
+ 'cctest/cctest.isolate',
+ 'mjsunit/mjsunit.isolate',
+ ],
+}
diff --git a/deps/v8/test/intl/date-format/format-test.js b/deps/v8/test/intl/date-format/format-test.js
index 9817c97ed9..f2a1448e8a 100644
--- a/deps/v8/test/intl/date-format/format-test.js
+++ b/deps/v8/test/intl/date-format/format-test.js
@@ -44,3 +44,7 @@ invalidValues.forEach(function(value) {
assertTrue(error !== undefined);
assertEquals('RangeError', error.name);
});
+
+// https://code.google.com/p/chromium/issues/detail?id=537382
+
+assertEquals('11/11/1500', dtf.format(new Date(1500,10,11,12,0,0)));
diff --git a/deps/v8/test/intl/date-format/resolved-options.js b/deps/v8/test/intl/date-format/resolved-options.js
index 707eb07a98..374960cdb6 100644
--- a/deps/v8/test/intl/date-format/resolved-options.js
+++ b/deps/v8/test/intl/date-format/resolved-options.js
@@ -38,7 +38,8 @@ assertEquals('latn', resolved.numberingSystem);
assertTrue(resolved.hasOwnProperty('calendar'));
assertEquals('gregory', resolved.calendar);
assertTrue(resolved.hasOwnProperty('timeZone'));
-assertEquals(getDefaultTimeZone(), resolved.timeZone);
+// TODO(littledan): getDefaultTimeZone() is not available from JavaScript
+// assertEquals(getDefaultTimeZone(), resolved.timeZone);
// These are in by default.
assertTrue(resolved.hasOwnProperty('year'));
assertEquals('numeric', resolved.year);
diff --git a/deps/v8/test/intl/date-format/timezone.js b/deps/v8/test/intl/date-format/timezone.js
index 03e25f0574..af363711c7 100644
--- a/deps/v8/test/intl/date-format/timezone.js
+++ b/deps/v8/test/intl/date-format/timezone.js
@@ -27,8 +27,9 @@
// Tests time zone support.
-var df = Intl.DateTimeFormat();
-assertEquals(getDefaultTimeZone(), df.resolvedOptions().timeZone);
+// TODO(littledan): getDefaultTimeZone() is not available from JavaScript
+// var df = Intl.DateTimeFormat();
+// assertEquals(getDefaultTimeZone(), df.resolvedOptions().timeZone);
df = Intl.DateTimeFormat(undefined, {timeZone: 'UtC'});
assertEquals('UTC', df.resolvedOptions().timeZone);
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index f20b164148..e89008517b 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -27,11 +27,6 @@
[
[ALWAYS, {
- # The following tests use getDefaultTimeZone().
- 'date-format/resolved-options': [FAIL],
- 'date-format/timezone': [FAIL],
- 'general/v8Intl-exists': [FAIL],
-
# TODO(jochen): The following test is flaky.
'overrides/caching': [PASS, FAIL],
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index bee8ef4ce9..31049d143f 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -13,7 +13,6 @@
"path": ["RestParameters"],
"main": "run.js",
"resources": ["rest.js"],
- "flags": ["--harmony-rest-parameters"],
"run_count": 5,
"units": "score",
"results_regexp": "^%s\\-RestParameters\\(Score\\): (.+)$",
diff --git a/deps/v8/test/message/arrow-bare-rest-param.js b/deps/v8/test/message/arrow-bare-rest-param.js
index bd5761f4e8..b826ec20b7 100644
--- a/deps/v8/test/message/arrow-bare-rest-param.js
+++ b/deps/v8/test/message/arrow-bare-rest-param.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
...x => 10
diff --git a/deps/v8/test/message/arrow-missing.js b/deps/v8/test/message/arrow-missing.js
index c78bef9cd6..b9f9acd05c 100644
--- a/deps/v8/test/message/arrow-missing.js
+++ b/deps/v8/test/message/arrow-missing.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
function foo() { return(); }
diff --git a/deps/v8/test/message/arrow-param-after-rest-2.js b/deps/v8/test/message/arrow-param-after-rest-2.js
index c01cec8ad4..617c8726d7 100644
--- a/deps/v8/test/message/arrow-param-after-rest-2.js
+++ b/deps/v8/test/message/arrow-param-after-rest-2.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
(w, ...x, y) => 10
diff --git a/deps/v8/test/message/arrow-param-after-rest.js b/deps/v8/test/message/arrow-param-after-rest.js
index 3284606a4e..9192bc6c0c 100644
--- a/deps/v8/test/message/arrow-param-after-rest.js
+++ b/deps/v8/test/message/arrow-param-after-rest.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
(...x, y) => 10
diff --git a/deps/v8/test/message/arrow-two-rest-params.js b/deps/v8/test/message/arrow-two-rest-params.js
index 44eb47e5f6..222f10ab4f 100644
--- a/deps/v8/test/message/arrow-two-rest-params.js
+++ b/deps/v8/test/message/arrow-two-rest-params.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
(w, ...x, ...y) => 10
diff --git a/deps/v8/test/message/default-parameter-tdz-arrow.js b/deps/v8/test/message/default-parameter-tdz-arrow.js
new file mode 100644
index 0000000000..cad091f8ac
--- /dev/null
+++ b/deps/v8/test/message/default-parameter-tdz-arrow.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-default-parameters
+
+((a=-a) => { })();
diff --git a/deps/v8/test/message/default-parameter-tdz-arrow.out b/deps/v8/test/message/default-parameter-tdz-arrow.out
new file mode 100644
index 0000000000..7d5f894ef5
--- /dev/null
+++ b/deps/v8/test/message/default-parameter-tdz-arrow.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: ReferenceError: a is not defined
+((a=-a) => { })();
+ ^
+ReferenceError: a is not defined
+ at *%(basename)s:7:6
+ at *%(basename)s:7:16
diff --git a/deps/v8/test/message/default-parameter-tdz.js b/deps/v8/test/message/default-parameter-tdz.js
new file mode 100644
index 0000000000..ff2a400e09
--- /dev/null
+++ b/deps/v8/test/message/default-parameter-tdz.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-default-parameters
+
+(function(a=+a) { })();
diff --git a/deps/v8/test/message/default-parameter-tdz.out b/deps/v8/test/message/default-parameter-tdz.out
new file mode 100644
index 0000000000..8a6d56abae
--- /dev/null
+++ b/deps/v8/test/message/default-parameter-tdz.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: ReferenceError: a is not defined
+(function(a=+a) { })();
+ ^
+ReferenceError: a is not defined
+ at *%(basename)s:7:14
+ at *%(basename)s:7:21
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array.js b/deps/v8/test/message/destructuring-decl-no-init-array.js
index 7a4fb00f59..7c73d3b670 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-array.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
var [ a, b, c ];
diff --git a/deps/v8/test/message/destructuring-decl-no-init-array2.js b/deps/v8/test/message/destructuring-decl-no-init-array2.js
index 2e0ded342f..a82afa46b0 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-array2.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-array2.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
for (var [ a ]; a; ) {}
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj.js b/deps/v8/test/message/destructuring-decl-no-init-obj.js
index f73e0b7f62..23424aa8bb 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-obj.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
var { a, b, c };
diff --git a/deps/v8/test/message/destructuring-decl-no-init-obj2.js b/deps/v8/test/message/destructuring-decl-no-init-obj2.js
index 928434acf5..6c76137b75 100644
--- a/deps/v8/test/message/destructuring-decl-no-init-obj2.js
+++ b/deps/v8/test/message/destructuring-decl-no-init-obj2.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
for (var { a, b, c }; a && b && c; ) {}
diff --git a/deps/v8/test/message/destructuring-modify-const.js b/deps/v8/test/message/destructuring-modify-const.js
index cabd924b37..88bda351d8 100644
--- a/deps/v8/test/message/destructuring-modify-const.js
+++ b/deps/v8/test/message/destructuring-modify-const.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
'use strict';
const { x : x, y : y } = { x : 1, y : 2 };
diff --git a/deps/v8/test/message/for-in-loop-initializers-destructuring.js b/deps/v8/test/message/for-in-loop-initializers-destructuring.js
new file mode 100644
index 0000000000..eab8b81cf3
--- /dev/null
+++ b/deps/v8/test/message/for-in-loop-initializers-destructuring.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-destructuring-bind
+
+function f() {
+ for (var [x, y] = {} in {});
+}
diff --git a/deps/v8/test/message/for-in-loop-initializers-destructuring.out b/deps/v8/test/message/for-in-loop-initializers-destructuring.out
new file mode 100644
index 0000000000..9dbda2c639
--- /dev/null
+++ b/deps/v8/test/message/for-in-loop-initializers-destructuring.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: for-in loop variable declaration may not have an initializer.
+ for (var [x, y] = {} in {});
+ ^^^^^^
+SyntaxError: for-in loop variable declaration may not have an initializer.
diff --git a/deps/v8/test/message/formal-parameters-bad-rest.js b/deps/v8/test/message/formal-parameters-bad-rest.js
index c67e1de93f..3e5860ec07 100644
--- a/deps/v8/test/message/formal-parameters-bad-rest.js
+++ b/deps/v8/test/message/formal-parameters-bad-rest.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
function foo(...b, a) { return a }
diff --git a/deps/v8/test/message/invalid-spread-2.js b/deps/v8/test/message/invalid-spread-2.js
index 60635fe38c..14dfd728a0 100644
--- a/deps/v8/test/message/invalid-spread-2.js
+++ b/deps/v8/test/message/invalid-spread-2.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
(x, ...y, z)
diff --git a/deps/v8/test/message/invalid-spread.js b/deps/v8/test/message/invalid-spread.js
index bf051fe692..cc42874431 100644
--- a/deps/v8/test/message/invalid-spread.js
+++ b/deps/v8/test/message/invalid-spread.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
(x, ...y)
diff --git a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js b/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
index 1df487a578..c7a35cd4a6 100644
--- a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring-bind
let [let];
diff --git a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js b/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
index e4027a0b32..d2b7c905d0 100644
--- a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring-bind
let {let};
diff --git a/deps/v8/test/message/nf-yield-in-generator.js b/deps/v8/test/message/nf-yield-in-generator.js
new file mode 100644
index 0000000000..ecdaf33242
--- /dev/null
+++ b/deps/v8/test/message/nf-yield-in-generator.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* gen() {
+ function yield() {}
+}
diff --git a/deps/v8/test/message/nf-yield-in-generator.out b/deps/v8/test/message/nf-yield-in-generator.out
new file mode 100644
index 0000000000..91986dfffc
--- /dev/null
+++ b/deps/v8/test/message/nf-yield-in-generator.out
@@ -0,0 +1,4 @@
+*%(basename)s:6: SyntaxError: Unexpected identifier
+ function yield() {}
+ ^^^^^
+SyntaxError: Unexpected identifier
diff --git a/deps/v8/test/message/nf-yield-strict-in-generator.js b/deps/v8/test/message/nf-yield-strict-in-generator.js
new file mode 100644
index 0000000000..7e87881bed
--- /dev/null
+++ b/deps/v8/test/message/nf-yield-strict-in-generator.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* gen() {
+ "use strict";
+ function yield() {}
+}
diff --git a/deps/v8/test/message/nf-yield-strict-in-generator.out b/deps/v8/test/message/nf-yield-strict-in-generator.out
new file mode 100644
index 0000000000..7f2a7596b3
--- /dev/null
+++ b/deps/v8/test/message/nf-yield-strict-in-generator.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Unexpected strict mode reserved word
+ function yield() {}
+ ^^^^^
+SyntaxError: Unexpected strict mode reserved word
diff --git a/deps/v8/test/message/nf-yield-strict.js b/deps/v8/test/message/nf-yield-strict.js
new file mode 100644
index 0000000000..d959a2164e
--- /dev/null
+++ b/deps/v8/test/message/nf-yield-strict.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function yield() { "use strict"; }
diff --git a/deps/v8/test/message/nf-yield-strict.out b/deps/v8/test/message/nf-yield-strict.out
new file mode 100644
index 0000000000..38c0bade08
--- /dev/null
+++ b/deps/v8/test/message/nf-yield-strict.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Unexpected strict mode reserved word
+function yield() { "use strict"; }
+ ^^^^^
+SyntaxError: Unexpected strict mode reserved word
diff --git a/deps/v8/test/message/nfe-yield-generator.js b/deps/v8/test/message/nfe-yield-generator.js
new file mode 100644
index 0000000000..4e193fe881
--- /dev/null
+++ b/deps/v8/test/message/nfe-yield-generator.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function* yield() {})
diff --git a/deps/v8/test/message/nfe-yield-generator.out b/deps/v8/test/message/nfe-yield-generator.out
new file mode 100644
index 0000000000..75cc9a559b
--- /dev/null
+++ b/deps/v8/test/message/nfe-yield-generator.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Unexpected identifier
+(function* yield() {})
+ ^^^^^
+SyntaxError: Unexpected identifier
diff --git a/deps/v8/test/message/nfe-yield-strict.js b/deps/v8/test/message/nfe-yield-strict.js
new file mode 100644
index 0000000000..11021083e1
--- /dev/null
+++ b/deps/v8/test/message/nfe-yield-strict.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function yield() { "use strict"; })
diff --git a/deps/v8/test/message/nfe-yield-strict.out b/deps/v8/test/message/nfe-yield-strict.out
new file mode 100644
index 0000000000..2d6e97417b
--- /dev/null
+++ b/deps/v8/test/message/nfe-yield-strict.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Unexpected strict mode reserved word
+(function yield() { "use strict"; })
+ ^^^^^
+SyntaxError: Unexpected strict mode reserved word
diff --git a/deps/v8/test/message/no-legacy-const-2.js b/deps/v8/test/message/no-legacy-const-2.js
index 29aeba5e1f..24e3f85639 100644
--- a/deps/v8/test/message/no-legacy-const-2.js
+++ b/deps/v8/test/message/no-legacy-const-2.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --no-legacy-const
+// Flags: --no-legacy-const --no-harmony-sloppy --no-harmony-sloppy-let
+// Flags: --no-harmony-sloppy-function
const = 42;
diff --git a/deps/v8/test/message/no-legacy-const-2.out b/deps/v8/test/message/no-legacy-const-2.out
index 55c855ee4f..5385250aaf 100644
--- a/deps/v8/test/message/no-legacy-const-2.out
+++ b/deps/v8/test/message/no-legacy-const-2.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token const
+*%(basename)s:8: SyntaxError: Unexpected token const
const = 42;
^^^^^
diff --git a/deps/v8/test/message/no-legacy-const-3.js b/deps/v8/test/message/no-legacy-const-3.js
index 6981571e62..4f6e9a4bbb 100644
--- a/deps/v8/test/message/no-legacy-const-3.js
+++ b/deps/v8/test/message/no-legacy-const-3.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --no-legacy-const
+// Flags: --no-legacy-const --no-harmony-sloppy --no-harmony-sloppy-let
+// Flags: --no-harmony-sloppy-function
const
diff --git a/deps/v8/test/message/no-legacy-const-3.out b/deps/v8/test/message/no-legacy-const-3.out
index 046e9f7023..7539bbcd1d 100644
--- a/deps/v8/test/message/no-legacy-const-3.out
+++ b/deps/v8/test/message/no-legacy-const-3.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token const
+*%(basename)s:8: SyntaxError: Unexpected token const
const
^^^^^
diff --git a/deps/v8/test/message/no-legacy-const.js b/deps/v8/test/message/no-legacy-const.js
index ecad2181b8..d9a716b3a8 100644
--- a/deps/v8/test/message/no-legacy-const.js
+++ b/deps/v8/test/message/no-legacy-const.js
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --no-legacy-const
+// Flags: --no-legacy-const --no-harmony-sloppy --no-harmony-sloppy-let
+// Flags: --no-harmony-sloppy-function
const x = 42;
diff --git a/deps/v8/test/message/no-legacy-const.out b/deps/v8/test/message/no-legacy-const.out
index b28dd10b77..33bb038836 100644
--- a/deps/v8/test/message/no-legacy-const.out
+++ b/deps/v8/test/message/no-legacy-const.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token const
+*%(basename)s:8: SyntaxError: Unexpected token const
const x = 42;
^^^^^
diff --git a/deps/v8/test/message/paren_in_arg_string.out b/deps/v8/test/message/paren_in_arg_string.out
index 0ed59bab1e..57adf58bcd 100644
--- a/deps/v8/test/message/paren_in_arg_string.out
+++ b/deps/v8/test/message/paren_in_arg_string.out
@@ -2,5 +2,4 @@
var paren_in_arg_string_bad = new Function(')', 'return;');
^
SyntaxError: Function arg string contains parenthesis
- at Function (native)
at *%(basename)s:29:31
diff --git a/deps/v8/test/message/rest-param-class-setter-strict.js b/deps/v8/test/message/rest-param-class-setter-strict.js
index 2d478a6644..84e9f8172b 100644
--- a/deps/v8/test/message/rest-param-class-setter-strict.js
+++ b/deps/v8/test/message/rest-param-class-setter-strict.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
'use strict';
var _bad = "setting this should fail!";
diff --git a/deps/v8/test/message/rest-param-object-setter-sloppy.js b/deps/v8/test/message/rest-param-object-setter-sloppy.js
index 08c03298dc..00006545f4 100644
--- a/deps/v8/test/message/rest-param-object-setter-sloppy.js
+++ b/deps/v8/test/message/rest-param-object-setter-sloppy.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
var _bad = "this should fail!";
({
diff --git a/deps/v8/test/message/rest-param-object-setter-strict.js b/deps/v8/test/message/rest-param-object-setter-strict.js
index e3a8f604e6..fe46fd688c 100644
--- a/deps/v8/test/message/rest-param-object-setter-strict.js
+++ b/deps/v8/test/message/rest-param-object-setter-strict.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-rest-parameters
+//
'use strict';
var _bad = "this should fail!";
diff --git a/deps/v8/test/message/single-function-literal.out b/deps/v8/test/message/single-function-literal.out
deleted file mode 100644
index a6a54b61a5..0000000000
--- a/deps/v8/test/message/single-function-literal.out
+++ /dev/null
@@ -1,5 +0,0 @@
-undefined:1: SyntaxError: Single function literal required
-(function() { return 5; })();
- ^
-SyntaxError: Single function literal required
- at *%(basename)s:32:16 \ No newline at end of file
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index bc73510c09..7c53041016 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -59,6 +59,10 @@ class MessageTestSuite(testsuite.TestSuite):
tests.append(test)
return tests
+ def CreateVariantGenerator(self, variants):
+ return super(MessageTestSuite, self).CreateVariantGenerator(
+ variants + ["preparser"])
+
def GetFlagsForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
result = []
diff --git a/deps/v8/test/message/try-catch-lexical-conflict.js b/deps/v8/test/message/try-catch-lexical-conflict.js
index efb1ec7e01..a5db29898a 100644
--- a/deps/v8/test/message/try-catch-lexical-conflict.js
+++ b/deps/v8/test/message/try-catch-lexical-conflict.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
"use strict";
try {
diff --git a/deps/v8/test/message/try-catch-variable-conflict.js b/deps/v8/test/message/try-catch-variable-conflict.js
index 9b0749b28c..6cf04fa207 100644
--- a/deps/v8/test/message/try-catch-variable-conflict.js
+++ b/deps/v8/test/message/try-catch-variable-conflict.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
try {
} catch ({x}) {
diff --git a/deps/v8/test/mjsunit/apply.js b/deps/v8/test/mjsunit/apply.js
index abbc9a11b4..fdd032dab3 100644
--- a/deps/v8/test/mjsunit/apply.js
+++ b/deps/v8/test/mjsunit/apply.js
@@ -114,7 +114,7 @@ function al() {
return arguments.length + arguments[arguments.length - 1];
}
-for (var j = 1; j < 0x40000000; j <<= 1) {
+for (var j = 1; j < 0x4000000; j <<= 1) {
try {
var a = %NormalizeElements([]);
a.length = j;
@@ -122,7 +122,7 @@ for (var j = 1; j < 0x40000000; j <<= 1) {
assertEquals(42 + j, al.apply(345, a));
} catch (e) {
assertTrue(e.toString().indexOf("Maximum call stack size exceeded") != -1);
- for (; j < 0x40000000; j <<= 1) {
+ for (; j < 0x4000000; j <<= 1) {
var caught = false;
try {
a = %NormalizeElements([]);
diff --git a/deps/v8/test/mjsunit/array-constructor.js b/deps/v8/test/mjsunit/array-constructor.js
index bf5d3d611a..c9707b9654 100644
--- a/deps/v8/test/mjsunit/array-constructor.js
+++ b/deps/v8/test/mjsunit/array-constructor.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
var loop_count = 5
@@ -117,3 +116,23 @@ for (var i = 0; i < loop_count; i++) {
assertThrows('new Array(3.14)');
assertThrows('Array(2.72)');
+
+// Make sure that throws occur in the context of the Array function.
+var b = Realm.create();
+var bArray = Realm.eval(b, "Array");
+var bError = Realm.eval(b, "RangeError");
+
+function verifier(array, error) {
+ try {
+ new array(3.14);
+ } catch(e) {
+ return e.__proto__ === error.__proto__;
+ }
+ assertTrue(false); // should never get here.
+}
+
+
+assertTrue(verifier(Array, RangeError()));
+assertTrue(verifier(bArray, bError()));
+assertFalse(verifier(Array, bError()));
+assertFalse(verifier(bArray, RangeError()));
diff --git a/deps/v8/test/mjsunit/array-isarray.js b/deps/v8/test/mjsunit/array-isarray.js
new file mode 100644
index 0000000000..a21b1e1e7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-isarray.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+
+assertTrue(Array.isArray([]));
+assertFalse(Array.isArray({}));
+assertFalse(Array.isArray(null));
+
+assertTrue(Array.isArray(new Proxy([], {})));
+assertFalse(Array.isArray(new Proxy({}, {})));
+
+assertTrue(Array.isArray(new Proxy(new Proxy([], {}), {})));
+assertFalse(Array.isArray(new Proxy(new Proxy({}, {}), {})));
diff --git a/deps/v8/test/mjsunit/asm/atomics-add.js b/deps/v8/test/mjsunit/asm/atomics-add.js
index 77dd4d8a03..9a07ecf2b1 100644
--- a/deps/v8/test/mjsunit/asm/atomics-add.js
+++ b/deps/v8/test/mjsunit/asm/atomics-add.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var add = stdlib.Atomics.add;
var fround = stdlib.Math.fround;
@@ -61,9 +61,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -71,10 +68,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f) {
+function testElementType(taConstr, f, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
assertEquals(0, f(0, 10), name);
assertEquals(10, ta[0]);
@@ -85,9 +82,21 @@ function testElementType(taConstr, f) {
assertEquals(0, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.addi8);
-testElementType(Int16Array, m.addi16);
-testElementType(Int32Array, m.addi32);
-testElementType(Uint8Array, m.addu8);
-testElementType(Uint16Array, m.addu16);
-testElementType(Uint32Array, m.addu32);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.addi8, offset);
+ testElementType(Int16Array, m.addi16, offset);
+ testElementType(Int32Array, m.addi32, offset);
+ testElementType(Uint8Array, m.addu8, offset);
+ testElementType(Uint16Array, m.addu16, offset);
+ testElementType(Uint32Array, m.addu32, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-and.js b/deps/v8/test/mjsunit/asm/atomics-and.js
index 5660f508b0..2e7de75c2e 100644
--- a/deps/v8/test/mjsunit/asm/atomics-and.js
+++ b/deps/v8/test/mjsunit/asm/atomics-and.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var and = stdlib.Atomics.and;
var fround = stdlib.Math.fround;
@@ -61,9 +61,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -71,10 +68,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f) {
+function testElementType(taConstr, f, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
ta[0] = 0x7f;
assertEquals(0x7f, f(0, 0xf), name);
@@ -86,9 +83,21 @@ function testElementType(taConstr, f) {
assertEquals(0, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.andi8);
-testElementType(Int16Array, m.andi16);
-testElementType(Int32Array, m.andi32);
-testElementType(Uint8Array, m.andu8);
-testElementType(Uint16Array, m.andu16);
-testElementType(Uint32Array, m.andu32);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.andi8, offset);
+ testElementType(Int16Array, m.andi16, offset);
+ testElementType(Int32Array, m.andi32, offset);
+ testElementType(Uint8Array, m.andu8, offset);
+ testElementType(Uint16Array, m.andu16, offset);
+ testElementType(Uint32Array, m.andu32, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-compareexchange.js b/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
index edcd7f908c..84d38504b3 100644
--- a/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
+++ b/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var compareExchange = stdlib.Atomics.compareExchange;
var fround = stdlib.Math.fround;
@@ -67,9 +67,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -77,10 +74,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f, oobValue) {
+function testElementType(taConstr, f, oobValue, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
assertEquals(0, ta[0]);
assertEquals(0, f(0, 0, 50), name);
@@ -93,9 +90,21 @@ function testElementType(taConstr, f, oobValue) {
assertEquals(oobValue, f(ta.length, 0, 0), name);
}
-testElementType(Int8Array, m.compareExchangei8, 0);
-testElementType(Int16Array, m.compareExchangei16, 0);
-testElementType(Int32Array, m.compareExchangei32, 0);
-testElementType(Uint8Array, m.compareExchangeu8, 0);
-testElementType(Uint16Array, m.compareExchangeu16, 0);
-testElementType(Uint32Array, m.compareExchangeu32, 0);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.compareExchangei8, 0, offset);
+ testElementType(Int16Array, m.compareExchangei16, 0, offset);
+ testElementType(Int32Array, m.compareExchangei32, 0, offset);
+ testElementType(Uint8Array, m.compareExchangeu8, 0, offset);
+ testElementType(Uint16Array, m.compareExchangeu16, 0, offset);
+ testElementType(Uint32Array, m.compareExchangeu32, 0, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-exchange.js b/deps/v8/test/mjsunit/asm/atomics-exchange.js
index ed2b0fa21b..6de7b4b954 100644
--- a/deps/v8/test/mjsunit/asm/atomics-exchange.js
+++ b/deps/v8/test/mjsunit/asm/atomics-exchange.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var exchange = stdlib.Atomics.exchange;
var fround = stdlib.Math.fround;
@@ -71,10 +71,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f) {
+function testElementType(taConstr, f, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
ta[0] = 0x7f;
assertEquals(0x7f, f(0, 0xf), name);
@@ -84,9 +84,21 @@ function testElementType(taConstr, f) {
assertEquals(0, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.exchangei8);
-testElementType(Int16Array, m.exchangei16);
-testElementType(Int32Array, m.exchangei32);
-testElementType(Uint8Array, m.exchangeu8);
-testElementType(Uint16Array, m.exchangeu16);
-testElementType(Uint32Array, m.exchangeu32);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.exchangei8, offset);
+ testElementType(Int16Array, m.exchangei16, offset);
+ testElementType(Int32Array, m.exchangei32, offset);
+ testElementType(Uint8Array, m.exchangeu8, offset);
+ testElementType(Uint16Array, m.exchangeu16, offset);
+ testElementType(Uint32Array, m.exchangeu32, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-load.js b/deps/v8/test/mjsunit/asm/atomics-load.js
index 4234d22c4e..3e1d19f3a6 100644
--- a/deps/v8/test/mjsunit/asm/atomics-load.js
+++ b/deps/v8/test/mjsunit/asm/atomics-load.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var load = stdlib.Atomics.load;
var fround = stdlib.Math.fround;
@@ -55,9 +55,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -65,10 +62,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f, oobValue) {
+function testElementType(taConstr, f, oobValue, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
ta[0] = 10;
assertEquals(10, f(0), name);
@@ -78,9 +75,21 @@ function testElementType(taConstr, f, oobValue) {
assertEquals(oobValue, f(ta.length), name);
}
-testElementType(Int8Array, m.loadi8, 0);
-testElementType(Int16Array, m.loadi16, 0);
-testElementType(Int32Array, m.loadi32, 0);
-testElementType(Uint8Array, m.loadu8, 0);
-testElementType(Uint16Array, m.loadu16, 0);
-testElementType(Uint32Array, m.loadu32, 0);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.loadi8, 0, offset);
+ testElementType(Int16Array, m.loadi16, 0, offset);
+ testElementType(Int32Array, m.loadi32, 0, offset);
+ testElementType(Uint8Array, m.loadu8, 0, offset);
+ testElementType(Uint16Array, m.loadu16, 0, offset);
+ testElementType(Uint32Array, m.loadu32, 0, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-or.js b/deps/v8/test/mjsunit/asm/atomics-or.js
index 7ea29156e8..7431e35cf3 100644
--- a/deps/v8/test/mjsunit/asm/atomics-or.js
+++ b/deps/v8/test/mjsunit/asm/atomics-or.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var or = stdlib.Atomics.or;
var fround = stdlib.Math.fround;
@@ -71,10 +71,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f) {
+function testElementType(taConstr, f, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
assertEquals(0, f(0, 0xf), name);
assertEquals(0xf, ta[0]);
@@ -85,9 +85,21 @@ function testElementType(taConstr, f) {
assertEquals(0, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.ori8);
-testElementType(Int16Array, m.ori16);
-testElementType(Int32Array, m.ori32);
-testElementType(Uint8Array, m.oru8);
-testElementType(Uint16Array, m.oru16);
-testElementType(Uint32Array, m.oru32);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.ori8, offset);
+ testElementType(Int16Array, m.ori16, offset);
+ testElementType(Int32Array, m.ori32, offset);
+ testElementType(Uint8Array, m.oru8, offset);
+ testElementType(Uint16Array, m.oru16, offset);
+ testElementType(Uint32Array, m.oru32, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-store.js b/deps/v8/test/mjsunit/asm/atomics-store.js
index bd4ab6a267..dab83af8a6 100644
--- a/deps/v8/test/mjsunit/asm/atomics-store.js
+++ b/deps/v8/test/mjsunit/asm/atomics-store.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var store = stdlib.Atomics.store;
var fround = stdlib.Math.fround;
@@ -61,9 +61,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -71,10 +68,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f, oobValue) {
+function testElementType(taConstr, f, oobValue, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
assertEquals(10, f(0, 10), name);
assertEquals(10, ta[0]);
@@ -83,9 +80,21 @@ function testElementType(taConstr, f, oobValue) {
assertEquals(oobValue, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.storei8, 0);
-testElementType(Int16Array, m.storei16, 0);
-testElementType(Int32Array, m.storei32, 0);
-testElementType(Uint8Array, m.storeu8, 0);
-testElementType(Uint16Array, m.storeu16, 0);
-testElementType(Uint32Array, m.storeu32, 0);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.storei8, 0, offset);
+ testElementType(Int16Array, m.storei16, 0, offset);
+ testElementType(Int32Array, m.storei32, 0, offset);
+ testElementType(Uint8Array, m.storeu8, 0, offset);
+ testElementType(Uint16Array, m.storeu16, 0, offset);
+ testElementType(Uint32Array, m.storeu32, 0, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-sub.js b/deps/v8/test/mjsunit/asm/atomics-sub.js
index d737811790..2ad97e479b 100644
--- a/deps/v8/test/mjsunit/asm/atomics-sub.js
+++ b/deps/v8/test/mjsunit/asm/atomics-sub.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var sub = stdlib.Atomics.sub;
var fround = stdlib.Math.fround;
@@ -61,9 +61,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -71,10 +68,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f) {
+function testElementType(taConstr, f, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
ta[0] = 30;
assertEquals(30, f(0, 10), name);
@@ -86,9 +83,21 @@ function testElementType(taConstr, f) {
assertEquals(0, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.subi8);
-testElementType(Int16Array, m.subi16);
-testElementType(Int32Array, m.subi32);
-testElementType(Uint8Array, m.subu8);
-testElementType(Uint16Array, m.subu16);
-testElementType(Uint32Array, m.subu32);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.subi8, offset);
+ testElementType(Int16Array, m.subi16, offset);
+ testElementType(Int32Array, m.subi32, offset);
+ testElementType(Uint8Array, m.subu8, offset);
+ testElementType(Uint16Array, m.subu16, offset);
+ testElementType(Uint32Array, m.subu32, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/atomics-xor.js b/deps/v8/test/mjsunit/asm/atomics-xor.js
index 66052b3940..990f8427a0 100644
--- a/deps/v8/test/mjsunit/asm/atomics-xor.js
+++ b/deps/v8/test/mjsunit/asm/atomics-xor.js
@@ -4,14 +4,14 @@
// Flags: --harmony-sharedarraybuffer
-function Module(stdlib, foreign, heap) {
+function Module(stdlib, foreign, heap, offset) {
"use asm";
- var MEM8 = new stdlib.Int8Array(heap);
- var MEM16 = new stdlib.Int16Array(heap);
- var MEM32 = new stdlib.Int32Array(heap);
- var MEMU8 = new stdlib.Uint8Array(heap);
- var MEMU16 = new stdlib.Uint16Array(heap);
- var MEMU32 = new stdlib.Uint32Array(heap);
+ var MEM8 = new stdlib.Int8Array(heap, offset);
+ var MEM16 = new stdlib.Int16Array(heap, offset);
+ var MEM32 = new stdlib.Int32Array(heap, offset);
+ var MEMU8 = new stdlib.Uint8Array(heap, offset);
+ var MEMU16 = new stdlib.Uint16Array(heap, offset);
+ var MEMU32 = new stdlib.Uint32Array(heap, offset);
var xor = stdlib.Atomics.xor;
var fround = stdlib.Math.fround;
@@ -61,9 +61,6 @@ function Module(stdlib, foreign, heap) {
};
}
-var sab = new SharedArrayBuffer(16);
-var m = Module(this, {}, sab);
-
function clearArray() {
var ui8 = new Uint8Array(sab);
for (var i = 0; i < sab.byteLength; ++i) {
@@ -71,10 +68,10 @@ function clearArray() {
}
}
-function testElementType(taConstr, f) {
+function testElementType(taConstr, f, offset) {
clearArray();
- var ta = new taConstr(sab);
+ var ta = new taConstr(sab, offset);
var name = Object.prototype.toString.call(ta);
assertEquals(0, f(0, 0xf), name);
assertEquals(0xf, ta[0]);
@@ -85,9 +82,21 @@ function testElementType(taConstr, f) {
assertEquals(0, f(ta.length, 0), name);
}
-testElementType(Int8Array, m.xori8);
-testElementType(Int16Array, m.xori16);
-testElementType(Int32Array, m.xori32);
-testElementType(Uint8Array, m.xoru8);
-testElementType(Uint16Array, m.xoru16);
-testElementType(Uint32Array, m.xoru32);
+function testElement(m, offset) {
+ testElementType(Int8Array, m.xori8, offset);
+ testElementType(Int16Array, m.xori16, offset);
+ testElementType(Int32Array, m.xori32, offset);
+ testElementType(Uint8Array, m.xoru8, offset);
+ testElementType(Uint16Array, m.xoru16, offset);
+ testElementType(Uint32Array, m.xoru32, offset);
+}
+
+var offset = 0;
+var sab = new SharedArrayBuffer(16);
+var m1 = Module(this, {}, sab, offset);
+testElement(m1, offset);
+
+offset = 32;
+sab = new SharedArrayBuffer(64);
+var m2 = Module(this, {}, sab, offset);
+testElement(m2, offset);
diff --git a/deps/v8/test/mjsunit/asm/infinite-loops-taken.js b/deps/v8/test/mjsunit/asm/infinite-loops-taken.js
index d136c62469..8a3f91a67a 100644
--- a/deps/v8/test/mjsunit/asm/infinite-loops-taken.js
+++ b/deps/v8/test/mjsunit/asm/infinite-loops-taken.js
@@ -35,7 +35,7 @@ function Module() {
}
var m = Module();
-assertThrows(function() { m.w0(counter(5)) }, error);
-assertThrows(function() { m.w1(counter(5)) }, error);
-assertThrows(function() { m.w2(counter(5)) }, error);
+assertThrowsEquals(function() { m.w0(counter(5)) }, error);
+assertThrowsEquals(function() { m.w1(counter(5)) }, error);
+assertThrowsEquals(function() { m.w2(counter(5)) }, error);
assertEquals(111, m.w3(counter(5)));
diff --git a/deps/v8/test/mjsunit/bugs/bug-4577.js b/deps/v8/test/mjsunit/bugs/bug-4577.js
new file mode 100644
index 0000000000..de2f843965
--- /dev/null
+++ b/deps/v8/test/mjsunit/bugs/bug-4577.js
@@ -0,0 +1,13 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(...arguments) {
+ return Array.isArray(arguments);
+}
+assertTrue(f());
+
+function g({arguments}) {
+ return arguments === 42;
+}
+assertTrue(g({arguments: 42}));
diff --git a/deps/v8/test/mjsunit/callsite.js b/deps/v8/test/mjsunit/callsite.js
new file mode 100644
index 0000000000..a4d9455b32
--- /dev/null
+++ b/deps/v8/test/mjsunit/callsite.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = (e,s) => s;
+var constructor = Error().stack[0].constructor;
+
+// Second argument needs to be a function.
+assertThrows(()=>constructor({}, {}, 1, false), TypeError);
+
+var receiver = {};
+function f() {}
+
+var site = constructor.call(null, receiver, f, {valueOf() { return 0 }}, false);
+assertEquals(receiver, site.getThis());
+assertEquals(1, site.getLineNumber());
+assertEquals(1, site.getColumnNumber());
diff --git a/deps/v8/test/mjsunit/regress/regress-351315.js b/deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js
index e2580fc34b..1df04bbad8 100644
--- a/deps/v8/test/mjsunit/regress/regress-351315.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-during-eval-lookup.js
@@ -1,4 +1,4 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,23 +27,21 @@
// Flags: --allow-natives-syntax
-function f_13(x, y, z) { }
-
-v_5 = f_13.bind({}, -7);
-
-function f_0(z) {
- return %NewObjectFromBound(v_5);
+function g() {
+ return 100;
}
-function f_8(z2, y2) {
- var v_0 = { f1 : 0.5, f2 : 0.25 };
- return f_0(v_0);
+function getter() {
+ // Test that we can deopt during the CallRuntimeForPair call to LoadLookupSlot
+ %DeoptimizeFunction(f);
+ return g;
}
-function f_12(f, args) {
- f.apply(this, args);
- %OptimizeFunctionOnNextCall(f);
- f.apply(this, args);
+Object.defineProperty(this, "eval", {get: getter });
+
+function f() {
+ return eval("200");
}
-f_12(f_8, [6, 4]);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(100, f());
diff --git a/deps/v8/test/message/single-function-literal.js b/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
index 96d3bd663a..b8c66448dc 100644
--- a/deps/v8/test/message/single-function-literal.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,8 +25,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-var single_function_good = "(function() { return 5; })";
-%CompileString(single_function_good, true);
-var single_function_bad = "(function() { return 5; })();";
-%CompileString(single_function_bad, true);
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(a) {
+ "use strict";
+ return arguments.length;
+}
+
+function g() {
+ return f(1,2,3);
+}
+
+assertEquals(3, g());
+assertEquals(3, g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(3, g());
diff --git a/deps/v8/test/mjsunit/regress/regress-1945.js b/deps/v8/test/mjsunit/compiler/escape-analysis-10.js
index bffc775fc4..c53cf4d989 100644
--- a/deps/v8/test/mjsunit/regress/regress-1945.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-10.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,10 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-
-var _d = new Date();
-_d.setHours(0,0,0,0);
-_d.setHours(0,0,0,0);
-%OptimizeFunctionOnNextCall(_d.setHours);
-_d.setHours(0,0,0,0);
+// Flags: --allow-natives-syntax --turbo-escape
+(function() {
+ "use strict";
+ function f() {
+ for (let i = 0; i < 5; ++i) {
+ function g() { return i }
+ }
+ }
+ f();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
new file mode 100644
index 0000000000..d116e9a364
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
@@ -0,0 +1,45 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(a) {
+ "use strict";
+ if (arguments === a)
+ return 1;
+ return arguments.length;
+}
+
+function g(a) {
+ return f(a,1,2,3);
+}
+
+assertEquals(4, g());
+assertEquals(4, g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(4, g());
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-3.js b/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
new file mode 100644
index 0000000000..d1ebc9b1f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
@@ -0,0 +1,44 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(a) {
+ "use strict";
+ return arguments.length;
+}
+
+function g() {
+ "use strict";
+ return arguments[f(1,2)];
+}
+
+assertEquals(6, g(4,5,6));
+assertEquals(6, g(4,5,6));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(6, g(4,5,6));
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-4.js b/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
new file mode 100644
index 0000000000..d9fdccc143
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(a) {
+ "use strict";
+ return arguments.length;
+}
+
+function h() {
+ "use strict";
+ return arguments;
+}
+
+function g() {
+ return "" + f(1,2,3) + " " + h(4,5,6);
+}
+
+assertEquals("3 [object Arguments]", g());
+assertEquals("3 [object Arguments]", g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals("3 [object Arguments]", g());
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-5.js b/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
new file mode 100644
index 0000000000..cfaf81dbc3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(h) {
+ "use strict";
+ h(arguments);
+ return arguments.length;
+}
+
+function g(h) {
+ return f(h,1,2,3);
+}
+
+function h(x) {
+ assertEquals("[object Arguments]", ""+x)
+}
+
+assertEquals(4, g(h));
+assertEquals(4, g(h));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(4, g(h));
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-6.js b/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
new file mode 100644
index 0000000000..6143cfbc1f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(a) {
+ "use strict";
+ return arguments;
+}
+
+function g() {
+ "use strict";
+ var x = f(1,2,3);
+ while (x.length < 4) {
+ x = f(4,5,6,7,8);
+ }
+ return x.length;
+}
+
+assertEquals(5, g());
+assertEquals(5, g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(5, g());
diff --git a/deps/v8/test/mjsunit/compiler/stubs/floor-stub.js b/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
index 0a76d307ba..16bc71c017 100644
--- a/deps/v8/test/mjsunit/compiler/stubs/floor-stub.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
@@ -25,37 +25,28 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --noalways-opt --turbo-filter=*
-
-var stubs = %GetCodeStubExportsObject();
+// Flags: --allow-natives-syntax --turbo-escape
+//
-const kExtraTypeFeedbackMinusZeroSentinel = 1;
-const kFirstJSFunctionTypeFeedbackIndex = 5;
-const kFirstSlotExtraTypeFeedbackIndex = 5;
+function f() {
+ this.x=0;
+}
-(function() {
- var stub1 = stubs.MathFloorStub("MathFloorStub", 1);
- var tempForTypeVector = function(d) {
- return Math.round(d);
+function g(a) {
+ "use strict";
+ var o = new f();
+ if (a) {
+ o.x = 5;
+ } else {
+ o.x = 7;
}
- tempForTypeVector(5);
- var tv = %GetTypeFeedbackVector(tempForTypeVector);
- var floorFunc1 = function(v, first) {
- if (first) return;
- return stub1(stub1, kFirstSlotExtraTypeFeedbackIndex - 1, tv, undefined, v);
- };
- %OptimizeFunctionOnNextCall(stub1);
- floorFunc1(5, true);
- %FixedArraySet(tv, kFirstSlotExtraTypeFeedbackIndex - 1, stub1);
- assertTrue(kExtraTypeFeedbackMinusZeroSentinel !==
- %FixedArrayGet(tv, kFirstSlotExtraTypeFeedbackIndex));
- assertEquals(5.0, floorFunc1(5.5));
- assertTrue(kExtraTypeFeedbackMinusZeroSentinel !==
- %FixedArrayGet(tv, kFirstSlotExtraTypeFeedbackIndex));
- // Executing floor such that it returns -0 should set the proper sentinel in
- // the feedback vector.
- assertEquals(-Infinity, 1/floorFunc1(-0));
- assertEquals(kExtraTypeFeedbackMinusZeroSentinel,
- %FixedArrayGet(tv, kFirstSlotExtraTypeFeedbackIndex));
- %ClearFunctionTypeFeedback(floorFunc1);
-})();
+
+ return o.x;
+}
+
+assertEquals(5, g(true));
+assertEquals(7, g(false));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(5, g(true));
+assertEquals(7, g(false));
+assertEquals(7, g());
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-8.js b/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
new file mode 100644
index 0000000000..bc5b1d963e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f(a) {
+ this.x=a;
+ this.y=1;
+}
+
+function g() {
+ "use strict";
+ var o = new f(2);
+ while (o.y < 4) {
+ o.x = 5;
+ o.y = 5;
+ }
+ return o.x;
+}
+
+assertEquals(5, g());
+assertEquals(5, g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(5, g());
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-9.js b/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
new file mode 100644
index 0000000000..a19786b360
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
@@ -0,0 +1,52 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+//
+
+function f() {
+ return arguments;
+}
+
+function g(a) {
+ "use strict";
+ var o = f(1,2);
+ if (a) {
+ o[0] = 5;
+ } else {
+ o[0] = 7;
+ }
+
+ return o[0];
+}
+
+assertEquals(7, g());
+assertEquals(7, g());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(5, g(true));
+assertEquals(7, g(false));
+assertEquals(7, g());
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js
new file mode 100644
index 0000000000..7337264b85
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-1.js
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+// Test deoptimization with captured objects in local variables.
+(function testDeoptLocal() {
+ "use strict";
+ function constructor1(a) {
+ return arguments;
+ }
+ function func(a) {
+ var o1 = constructor1(1,2,3);
+ if (a) { %DeoptimizeNow(); }
+ assertEquals(1, o1[0]);
+ assertEquals(2, o1[1]);
+ assertEquals(3, o1[2]);
+ }
+ func(false);
+ func(false);
+ %OptimizeFunctionOnNextCall(func);
+ func(true);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js
new file mode 100644
index 0000000000..306f3e7410
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-2.js
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+// Test deoptimization with captured objects in local variables.
+(function testDeoptLocal() {
+ "use strict";
+ function constructor1(a) {
+ return arguments;
+ }
+ function func() {
+ var o1 = constructor1(1,2,3);
+ var o2 = constructor1(4,o1);
+ %DeoptimizeNow();
+ assertEquals(1, o1[0]);
+ assertEquals(2, o1[1]);
+ assertEquals(3, o1[2]);
+ assertEquals(4, o2[0]);
+ assertEquals(o1, o2[1]);
+ }
+ func();
+ func();
+ %OptimizeFunctionOnNextCall(func);
+ func();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js
new file mode 100644
index 0000000000..9999e53178
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-3.js
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+// Test deoptimization with captured objects in local variables.
+(function testDeoptLocal() {
+ "use strict";
+ function constructor1(a) {
+ return arguments;
+ }
+ function func() {
+ var o1 = constructor1(1,2,3);
+ var o2 = constructor1(4,o1);
+ o1[0] = o1;
+ %DeoptimizeNow();
+ assertEquals(o1, o1[0]);
+ assertEquals(2, o1[1]);
+ assertEquals(3, o1[2]);
+ assertEquals(4, o2[0]);
+ assertEquals(o1, o2[1]);
+ }
+ func();
+ func();
+ %OptimizeFunctionOnNextCall(func);
+ func();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js
new file mode 100644
index 0000000000..c80765706c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-4.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+// Test deoptimization with captured objects in local variables.
+(function testDeoptLocal() {
+ "use strict";
+ function constructor1() {
+ this.x=1;
+ this.y=2;
+ this.z=3;
+ }
+ function constructor2(x) {
+ this.a=x;
+ this.b=4;
+ }
+ function func() {
+ var o1 = new constructor1();
+ var o2 = new constructor2(o1);
+ o1.x = o1;
+ %DeoptimizeNow();
+ assertEquals(o1, o1.x);
+ assertEquals(2, o1.y);
+ assertEquals(3, o1.z);
+ assertEquals(o1, o2.a);
+ assertEquals(4, o2.b);
+ }
+ func();
+ func();
+ %OptimizeFunctionOnNextCall(func);
+ func();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js
new file mode 100644
index 0000000000..e70f0b1221
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-deopt-5.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+function f() {
+ var x = new Array(2);
+ x[0] = 23.1234;
+ x[1] = 25.1234;
+ %DeoptimizeNow();
+ return x[0];
+}
+
+assertEquals(f(), 23.1234);
+assertEquals(f(), 23.1234);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(f(), 23.1234);
diff --git a/deps/v8/test/mjsunit/compiler/mul-div-52bit.js b/deps/v8/test/mjsunit/compiler/mul-div-52bit.js
new file mode 100644
index 0000000000..46a5d05a9f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/mul-div-52bit.js
@@ -0,0 +1,86 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function mul(a, b) {
+ const l = a & 0x3ffffff;
+ const h = b & 0x3ffffff;
+
+ return (l * h) >>> 0;
+}
+
+function mulAndDiv(a, b) {
+ const l = a & 0x3ffffff;
+ const h = b & 0x3ffffff;
+ const m = l * h;
+
+ const rl = m & 0x3ffffff;
+ const rh = (m / 0x4000000) >>> 0;
+
+ return rl | rh;
+}
+
+function overflowMul(a, b) {
+ const l = a | 0;
+ const h = b | 0;
+
+ return (l * h) >>> 0;
+}
+
+function overflowDiv(a, b) {
+ const l = a & 0x3ffffff;
+ const h = b & 0x3ffffff;
+ const m = l * h;
+
+ return (m / 0x10) >>> 0;
+}
+
+function nonPowerOfTwoDiv(a, b) {
+ const l = a & 0x3ffffff;
+ const h = b & 0x3ffffff;
+ const m = l * h;
+
+ return (m / 0x4000001) >>> 0;
+}
+
+function test(fn, a, b, sets) {
+ const expected = fn(a, b);
+ fn(1, 2);
+ fn(0, 0);
+ %OptimizeFunctionOnNextCall(fn);
+ const actual = fn(a, b);
+
+ assertEquals(expected, actual);
+
+ sets.forEach(function(set, i) {
+ assertEquals(set.expected, fn(set.a, set.b), fn.name + ', set #' + i);
+ });
+}
+
+test(mul, 0x3ffffff, 0x3ffffff, [
+ { a: 0, b: 0, expected: 0 },
+ { a: 0xdead, b: 0xbeef, expected: 0xa6144983 },
+ { a: 0x1aa1dea, b: 0x2badead, expected: 0x35eb2322 }
+]);
+test(mulAndDiv, 0x3ffffff, 0x3ffffff, [
+ { a: 0, b: 0, expected: 0 },
+ { a: 0xdead, b: 0xbeef, expected: 0x21449ab },
+ { a: 0x1aa1dea, b: 0x2badead, expected: 0x1ebf32f }
+]);
+test(overflowMul, 0x4ffffff, 0x4ffffff, [
+ { a: 0, b: 0, expected: 0 },
+ { a: 0xdead, b: 0xbeef, expected: 0xa6144983 },
+ { a: 0x1aa1dea, b: 0x2badead, expected: 0x35eb2322 }
+]);
+test(overflowDiv, 0x3ffffff, 0x3ffffff, [
+ { a: 0, b: 0, expected: 0 },
+ { a: 0xdead, b: 0xbeef, expected: 0xa614498 },
+ { a: 0x1aa1dea, b: 0x2badead, expected: 0x835eb232 }
+]);
+test(nonPowerOfTwoDiv, 0x3ffffff, 0x3ffffff, [
+ { a: 0, b: 0, expected: 0 },
+ { a: 0xdead, b: 0xbeef, expected: 0x29 },
+ { a: 0x1aa1dea, b: 0x2badead, expected: 0x122d20d }
+]);
diff --git a/deps/v8/test/mjsunit/compiler/regress-572409.js b/deps/v8/test/mjsunit/compiler/regress-572409.js
new file mode 100644
index 0000000000..126b622625
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-572409.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = function() {};
+function f() {
+ var lit = { __proto__: o };
+ o instanceof RegExp;
+}
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-96989.js b/deps/v8/test/mjsunit/compiler/regress-96989.js
index aedeb24318..85beaed595 100644
--- a/deps/v8/test/mjsunit/compiler/regress-96989.js
+++ b/deps/v8/test/mjsunit/compiler/regress-96989.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --legacy-const
// Test correct handling of uninitialized const.
diff --git a/deps/v8/test/mjsunit/compiler/regress-const.js b/deps/v8/test/mjsunit/compiler/regress-const.js
index aa55d0fd3a..89b559c3e0 100644
--- a/deps/v8/test/mjsunit/compiler/regress-const.js
+++ b/deps/v8/test/mjsunit/compiler/regress-const.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --legacy-const
// Test const initialization and assignments.
function f() {
diff --git a/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js b/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js
new file mode 100644
index 0000000000..834da290e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-f64-w32-change.js
@@ -0,0 +1,23 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var f = (function () {
+ "use asm";
+ var f64use = 0;
+ function f(x, b) {
+ x = x|0;
+ b = b >>> 0;
+ var f64 = x ? -1 : b;
+ f64use = f64 + 0.5;
+ var w32 = x ? 1 : f64;
+ return (w32 + 1)|0;
+ }
+
+ return f;
+})();
+
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0, f(0, -1));
diff --git a/deps/v8/test/mjsunit/const-declaration.js b/deps/v8/test/mjsunit/const-declaration.js
index e7bb678eb6..42d03d547d 100644
--- a/deps/v8/test/mjsunit/const-declaration.js
+++ b/deps/v8/test/mjsunit/const-declaration.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Test handling of const variables in various settings.
(function () {
diff --git a/deps/v8/test/mjsunit/const-eval-init.js b/deps/v8/test/mjsunit/const-eval-init.js
index 50e3a8d0be..a7e1fef6e7 100644
--- a/deps/v8/test/mjsunit/const-eval-init.js
+++ b/deps/v8/test/mjsunit/const-eval-init.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --legacy-const
// Test the handling of initialization of deleted const variables.
// This only makes sense in local scopes since the declaration and
diff --git a/deps/v8/test/mjsunit/const-redecl.js b/deps/v8/test/mjsunit/const-redecl.js
index f311f0de66..ba7293026b 100644
--- a/deps/v8/test/mjsunit/const-redecl.js
+++ b/deps/v8/test/mjsunit/const-redecl.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Test for const semantics.
diff --git a/deps/v8/test/mjsunit/const.js b/deps/v8/test/mjsunit/const.js
index adb0b7ae3b..f00932f7b2 100644
--- a/deps/v8/test/mjsunit/const.js
+++ b/deps/v8/test/mjsunit/const.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Test const properties and pre/postfix operation.
function f() {
const x = 1;
diff --git a/deps/v8/test/mjsunit/constant-fold-control-instructions.js b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
index a6f5540cfd..eb4994591d 100644
--- a/deps/v8/test/mjsunit/constant-fold-control-instructions.js
+++ b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
@@ -27,8 +27,8 @@ function test() {
assertTrue(%_IsFunction(function() {}));
assertFalse(%_IsFunction(null));
- assertTrue(%_IsSpecObject(new Date()));
- assertFalse(%_IsSpecObject(1));
+ assertTrue(%_IsJSReceiver(new Date()));
+ assertFalse(%_IsJSReceiver(1));
assertTrue(%_IsMinusZero(-0.0));
assertFalse(%_IsMinusZero(1));
diff --git a/deps/v8/test/mjsunit/constant-folding.js b/deps/v8/test/mjsunit/constant-folding.js
index 4deb43cd3a..148928aaaf 100644
--- a/deps/v8/test/mjsunit/constant-folding.js
+++ b/deps/v8/test/mjsunit/constant-folding.js
@@ -29,6 +29,8 @@
// The code generator now handles compile-time constants specially.
// Test the code generated when operands are known at compile time
+// Flags: --legacy-const
+
// Test count operations involving constants
function test_count() {
var x = "foo";
diff --git a/deps/v8/test/mjsunit/cross-realm-global-prototype.js b/deps/v8/test/mjsunit/cross-realm-global-prototype.js
new file mode 100644
index 0000000000..46e5a3a37f
--- /dev/null
+++ b/deps/v8/test/mjsunit/cross-realm-global-prototype.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+// Make sure we don't accidentally leak information about other objects.
+
+var realm = Realm.create();
+var test = Realm.eval(realm,
+ "() => { return Realm.global(0) instanceof Object }");
+
+assertFalse(test());
+
+// Set the prototype of the current global object to the global object of the
+// other realm.
+__proto__ = Realm.eval(realm, "this");
+assertFalse(test());
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+assertEquals(false, test());
diff --git a/deps/v8/test/mjsunit/d8-os.js b/deps/v8/test/mjsunit/d8-os.js
index 29d31032e7..c2d8ec59bc 100644
--- a/deps/v8/test/mjsunit/d8-os.js
+++ b/deps/v8/test/mjsunit/d8-os.js
@@ -73,7 +73,7 @@ if (this.os && os.system) {
// Check that they are there.
os.system('ls', [TEST_DIR + '/dir/foo']);
// Check that we can detect when something is not there.
- assertThrows("os.system('ls', [TEST_DIR + '/dir/bar']);", "dir not there");
+ assertThrows("os.system('ls', [TEST_DIR + '/dir/bar']);");
// Check that mkdirp makes intermediate directories.
os.mkdirp(TEST_DIR + "/dir2/foo");
os.system("ls", [TEST_DIR + "/dir2/foo"]);
@@ -86,16 +86,16 @@ if (this.os && os.system) {
// Check that we get an error if the name is taken by a file.
os.system("sh", ["-c", "echo foo > " + TEST_DIR + "/file1"]);
os.system("ls", [TEST_DIR + "/file1"]);
- assertThrows("os.mkdirp(TEST_DIR + '/file1');", "mkdir over file1");
- assertThrows("os.mkdirp(TEST_DIR + '/file1/foo');", "mkdir over file2");
- assertThrows("os.mkdirp(TEST_DIR + '/file1/');", "mkdir over file3");
- assertThrows("os.mkdirp(TEST_DIR + '/file1/foo/');", "mkdir over file4");
+ assertThrows("os.mkdirp(TEST_DIR + '/file1');");
+ assertThrows("os.mkdirp(TEST_DIR + '/file1/foo');");
+ assertThrows("os.mkdirp(TEST_DIR + '/file1/');");
+ assertThrows("os.mkdirp(TEST_DIR + '/file1/foo/');");
// Create a dir we cannot read.
os.mkdirp(TEST_DIR + "/dir4", 0);
// This test fails if you are root since root can read any dir.
- assertThrows("os.chdir(TEST_DIR + '/dir4');", "chdir dir4 I");
+ assertThrows("os.chdir(TEST_DIR + '/dir4');");
os.rmdir(TEST_DIR + "/dir4");
- assertThrows("os.chdir(TEST_DIR + '/dir4');", "chdir dir4 II");
+ assertThrows("os.chdir(TEST_DIR + '/dir4');");
// Set umask. This changes the umask for the whole process and is
// the reason why the test cannot be run multi-threaded.
@@ -103,9 +103,9 @@ if (this.os && os.system) {
// Create a dir we cannot read.
os.mkdirp(TEST_DIR + "/dir5");
// This test fails if you are root since root can read any dir.
- assertThrows("os.chdir(TEST_DIR + '/dir5');", "cd dir5 I");
+ assertThrows("os.chdir(TEST_DIR + '/dir5');");
os.rmdir(TEST_DIR + "/dir5");
- assertThrows("os.chdir(TEST_DIR + '/dir5');", "chdir dir5 II");
+ assertThrows("os.chdir(TEST_DIR + '/dir5');");
os.umask(old_umask);
os.mkdirp(TEST_DIR + "/hest/fisk/../fisk/ged");
@@ -129,10 +129,10 @@ if (this.os && os.system) {
have_echo = false;
}
if (have_sleep) {
- assertThrows("os.system('sleep', ['2000'], 20);", "sleep 1");
+ assertThrows("os.system('sleep', ['2000'], 20);");
// Check we time out with total time.
- assertThrows("os.system('sleep', ['2000'], -1, 20);", "sleep 2");
+ assertThrows("os.system('sleep', ['2000'], -1, 20);");
// Check that -1 means no timeout.
os.system('sleep', ['1'], -1, -1);
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index 1c637f7fe3..244a04202c 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -146,16 +146,6 @@ l.setUTCMilliseconds();
l.setUTCMilliseconds(2);
assertTrue(isNaN(l.getUTCMilliseconds()));
-// Test that toLocaleTimeString only returns the time portion of the
-// date without the timezone information.
-function testToLocaleTimeString() {
- var d = new Date();
- var s = d.toLocaleTimeString("en-GB");
- assertEquals(8, s.length);
-}
-
-testToLocaleTimeString();
-
// Test that -0 is treated correctly in MakeDay.
var d = new Date();
assertDoesNotThrow("d.setDate(-0)");
@@ -328,18 +318,11 @@ assertThrows('Date.prototype.setHours.call("", 1, 2, 3, 4);', TypeError);
assertThrows('Date.prototype.getDate.call("");', TypeError);
assertThrows('Date.prototype.getUTCDate.call("");', TypeError);
-var date = new Date();
-date.getTime();
-date.getTime();
-%OptimizeFunctionOnNextCall(Date.prototype.getTime);
-assertThrows(function() { Date.prototype.getTime.call(""); }, TypeError);
-assertUnoptimized(Date.prototype.getTime);
+assertThrows(function() { Date.prototype.getTime.call(0) }, TypeError);
+assertThrows(function() { Date.prototype.getTime.call("") }, TypeError);
-date.getYear();
-date.getYear();
-%OptimizeFunctionOnNextCall(Date.prototype.getYear);
-assertThrows(function() { Date.prototype.getYear.call(""); }, TypeError);
-assertUnoptimized(Date.prototype.getYear);
+assertThrows(function() { Date.prototype.getYear.call(0) }, TypeError);
+assertThrows(function() { Date.prototype.getYear.call("") }, TypeError);
(function TestDatePrototypeOrdinaryObject() {
assertEquals(Object.prototype, Date.prototype.__proto__);
@@ -353,7 +336,7 @@ delete Date.prototype.getUTCHours;
delete Date.prototype.getUTCMinutes;
delete Date.prototype.getUTCSeconds;
delete Date.prototype.getUTCMilliseconds;
-date.toISOString();
+(new Date()).toISOString();
(function TestDeleteToString() {
assertTrue(delete Date.prototype.toString);
diff --git a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js b/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
index f0613b2926..b7a8dff1ba 100644
--- a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
+++ b/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
@@ -29,7 +29,7 @@ function listener(event, exec_state, event_data, data) {
++break_count;
if (break_count !== expected_breaks) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
print("Next step prepared");
}
}
diff --git a/deps/v8/test/mjsunit/debug-break-native.js b/deps/v8/test/mjsunit/debug-break-native.js
index 11d7274929..3e6fff42b5 100644
--- a/deps/v8/test/mjsunit/debug-break-native.js
+++ b/deps/v8/test/mjsunit/debug-break-native.js
@@ -10,7 +10,7 @@ var exception = null;
function breakListener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
// Assert that the break happens at an intended location.
assertTrue(exec_state.frame(0).sourceLineText().indexOf("// break") > 0);
} catch (e) {
diff --git a/deps/v8/test/mjsunit/debug-constructor.js b/deps/v8/test/mjsunit/debug-constructor.js
index 6d4e7f3a8f..a4d50311e9 100644
--- a/deps/v8/test/mjsunit/debug-constructor.js
+++ b/deps/v8/test/mjsunit/debug-constructor.js
@@ -35,7 +35,7 @@ function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break)
{
call_graph += exec_state.frame().func().name();
- exec_state.prepareStep();
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
};
diff --git a/deps/v8/test/mjsunit/debug-continue.js b/deps/v8/test/mjsunit/debug-continue.js
index a501aa925c..55bbb29b1d 100644
--- a/deps/v8/test/mjsunit/debug-continue.js
+++ b/deps/v8/test/mjsunit/debug-continue.js
@@ -74,13 +74,14 @@ function listener(event, exec_state, event_data, data) {
// Test some illegal continue requests.
testArguments(exec_state, '{"stepaction":"maybe"}', false);
- testArguments(exec_state, '{"stepcount":-1}', false);
// Test some legal continue requests.
testArguments(exec_state, '{"stepaction":"in"}', true);
- testArguments(exec_state, '{"stepaction":"min"}', true);
testArguments(exec_state, '{"stepaction":"next"}', true);
testArguments(exec_state, '{"stepaction":"out"}', true);
+
+ // Step count argument is ignored.
+ testArguments(exec_state, '{"stepcount":-1}', true);
testArguments(exec_state, '{"stepcount":1}', true);
testArguments(exec_state, '{"stepcount":10}', true);
testArguments(exec_state, '{"stepcount":"10"}', true);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-closure.js b/deps/v8/test/mjsunit/debug-evaluate-closure.js
index 778defd0ab..541dec9d6d 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-closure.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-closure.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --debug-eval-readonly-locals
Debug = debug.Debug;
var listened = false;
@@ -34,17 +35,18 @@ function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
assertEquals("goo", exec_state.frame(0).evaluate("goo").value());
- exec_state.frame(0).evaluate("goo = 'goo foo'");
+ exec_state.frame(0).evaluate("goo = 'goo foo'"); // no effect
assertEquals("bar return", exec_state.frame(0).evaluate("bar()").value());
assertEquals("inner bar", exec_state.frame(0).evaluate("inner").value());
assertEquals("outer bar", exec_state.frame(0).evaluate("outer").value());
+
assertEquals("baz inner", exec_state.frame(0).evaluate("baz").value());
assertEquals("baz outer", exec_state.frame(1).evaluate("baz").value());
exec_state.frame(0).evaluate("w = 'w foo'");
- exec_state.frame(0).evaluate("inner = 'inner foo'");
- exec_state.frame(0).evaluate("outer = 'outer foo'");
- exec_state.frame(0).evaluate("baz = 'baz inner foo'");
- exec_state.frame(1).evaluate("baz = 'baz outer foo'");
+ exec_state.frame(0).evaluate("inner = 'inner foo'"); // no effect
+ exec_state.frame(0).evaluate("outer = 'outer foo'"); // has effect
+ exec_state.frame(0).evaluate("baz = 'baz inner foo'"); // no effect
+ exec_state.frame(1).evaluate("baz = 'baz outer foo'"); // has effect
listened = true;
} catch (e) {
print(e);
@@ -66,7 +68,7 @@ function foo() {
with (withv) {
var bar = function bar() {
- assertEquals("goo foo", goo);
+ assertEquals("goo", goo);
inner = "inner bar";
outer = "outer bar";
v = "v bar";
@@ -78,8 +80,8 @@ function foo() {
debugger;
}
- assertEquals("inner foo", inner);
- assertEquals("baz inner foo", baz);
+ assertEquals("inner bar", inner);
+ assertEquals("baz inner", baz);
assertEquals("w foo", withw.w);
assertEquals("v bar", withv.v);
}
diff --git a/deps/v8/test/mjsunit/debug-evaluate-const.js b/deps/v8/test/mjsunit/debug-evaluate-const.js
index 7fad483cd5..6ffddbb59d 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-const.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-const.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals --legacy-const
Debug = debug.Debug
@@ -50,10 +50,8 @@ function f() {
debugger; // Break point.
- assertEquals(30, var0);
- // TODO(yangguo): debug evaluate should not be able to alter
- // stack-allocated const values
- // assertEquals(0, const0);
+ assertEquals(undefined, var0);
+ assertEquals(0, const0);
assertEquals(undefined, const1);
assertEquals(undefined, const2);
var var0 = 20;
@@ -66,7 +64,7 @@ function f() {
g();
- assertEquals(31, var1);
+ assertEquals(21, var1);
assertEquals(3, const3);
}
diff --git a/deps/v8/test/mjsunit/debug-evaluate-declaration.js b/deps/v8/test/mjsunit/debug-evaluate-declaration.js
new file mode 100644
index 0000000000..c64498e097
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-declaration.js
@@ -0,0 +1,44 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test that debug-evaluate only resolves variables that are used by
+// the function inside which we debug-evaluate. This is to avoid
+// incorrect variable resolution when a context-allocated variable is
+// shadowed by a stack-allocated variable.
+
+"use strict";
+
+var Debug = debug.Debug
+
+var exception = null;
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ exec_state.frame(0).evaluate("var x = 2");
+ exec_state.frame(0).evaluate("'use strict'; let y = 3");
+ exec_state.frame(0).evaluate("var z = 4");
+ exec_state.frame(0).evaluate("function bar() { return 5; }");
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+var z = 1;
+
+(function() {
+ debugger;
+})();
+
+assertEquals(2, x); // declaration
+assertThrows(() => y, ReferenceError); // let-declaration does not stick
+assertEquals(4, z); // re-declaration
+assertEquals(5, bar()); // function declaration
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js b/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
index 77bdfc6294..6d65861fc7 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals
Debug = debug.Debug
var exception = null;
@@ -15,8 +15,8 @@ function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break) {
var frameMirror = exec_state.frame(0);
- f = frameMirror.evaluate('f = function() { i = 5; }, f(), f').value();
- print(f);
+ var i = frameMirror.evaluate('f = function() { i = 5; }, f(), i').value();
+ assertEquals(5, i);
}
} catch(e) {
exception = e;
@@ -35,7 +35,7 @@ Debug.setListener(listener);
} catch (e) {
assertEquals(0, i);
debugger;
- assertEquals(5, i);
+ assertEquals(0, i);
}
}());
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals.js b/deps/v8/test/mjsunit/debug-evaluate-locals.js
index ba3e92d124..642e0c0682 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-locals.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
@@ -119,13 +119,15 @@ function listener(event, exec_state, event_data, data) {
assertEquals(2, exec_state.frame(0).evaluate('b').value());
assertEquals(5, exec_state.frame(0).evaluate('eval').value());
assertEquals(3, exec_state.frame(1).evaluate('a').value());
- assertEquals(4, exec_state.frame(1).evaluate('b').value());
+ // Reference error because g does not reference b.
+ assertThrows(() => exec_state.frame(1).evaluate('b'), ReferenceError);
assertEquals("function",
typeof exec_state.frame(1).evaluate('eval').value());
assertEquals(5, exec_state.frame(2).evaluate('a').value());
assertEquals(6, exec_state.frame(2).evaluate('b').value());
assertEquals("function",
typeof exec_state.frame(2).evaluate('eval').value());
+ // Assignments to local variables only have temporary effect.
assertEquals("foo",
exec_state.frame(0).evaluate('a = "foo"').value());
assertEquals("bar",
@@ -144,7 +146,7 @@ Debug.setListener(listener);
var f_result = f();
-assertEquals('foobar', f_result);
+assertEquals(4, f_result);
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener")
diff --git a/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js b/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js
new file mode 100644
index 0000000000..07d6ccbe6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals
+
+Debug = debug.Debug
+
+var exception = null;
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ exec_state.frame(0).evaluate("bar()");
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+(function() {
+ "use strict";
+ try {
+ throw 1;
+ } catch (e) {
+ let a = 1;
+ function bar() {
+ a = 2;
+ e = 2;
+ }
+ debugger;
+ assertEquals(2, a);
+ assertEquals(2, e);
+ }
+})();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-shadowed-context.js b/deps/v8/test/mjsunit/debug-evaluate-shadowed-context.js
new file mode 100644
index 0000000000..6847a93f66
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-shadowed-context.js
@@ -0,0 +1,83 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --no-analyze-environment-liveness
+
+// Test that debug-evaluate only resolves variables that are used by
+// the function inside which we debug-evaluate. This is to avoid
+// incorrect variable resolution when a context-allocated variable is
+// shadowed by a stack-allocated variable.
+
+Debug = debug.Debug
+
+var exception = null;
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ for (var i = 0; i < exec_state.frameCount() - 1; i++) {
+ var frame = exec_state.frame(i);
+ var value;
+ try {
+ value = frame.evaluate("x").value();
+ } catch (e) {
+ value = e.name;
+ }
+ print(frame.sourceLineText());
+ var expected = frame.sourceLineText().match(/\/\/ (.*$)/)[1];
+ assertEquals(String(expected), String(value));
+ }
+ assertEquals("[object global]",
+ String(exec_state.frame(0).evaluate("this").value()));
+ assertEquals("y", exec_state.frame(0).evaluate("y").value());
+ assertEquals("a", exec_state.frame(0).evaluate("a").value());
+ exec_state.frame(0).evaluate("a = 'A'");
+ assertThrows(() => exec_state.frame(0).evaluate("z"), ReferenceError);
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+var a = "a";
+(function() {
+ var x = 1; // context allocate x
+ (() => x);
+ var y = "y";
+ var z = "z";
+ (function() {
+ var x = 2; // stack allocate shadowing x
+ (function() {
+ y; // access y
+ debugger; // ReferenceError
+ })(); // 2
+ })(); // 1
+ return y;
+})();
+
+assertEquals("A", a);
+a = "a";
+
+(function() {
+ var x = 1; // context allocate x
+ (() => x);
+ var y = "y";
+ var z = "z";
+ (function() {
+ var x = 2; // stack allocate shadowing x
+ (() => {
+ y;
+ a;
+ this; // context allocate receiver
+ debugger; // ReferenceError
+ })(); // 2
+ })(); // 1
+ return y;
+})();
+
+assertEquals("A", a);
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-liveedit-stepin.js b/deps/v8/test/mjsunit/debug-liveedit-stepin.js
new file mode 100644
index 0000000000..601a66f93d
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-stepin.js
@@ -0,0 +1,81 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+function BestEditor() {
+ var best_editor = "Emacs";
+ return best_editor;
+}
+
+var exception = null;
+var results = [];
+var log = []
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var source_line = event_data.sourceLineText();
+ log.push(source_line);
+ if (source_line.indexOf("return") >= 0) {
+ switch (results.length) {
+ case 0:
+ break;
+ case 1:
+ Replace(BestEditor, "Emacs", "Eclipse");
+ break;
+ case 2:
+ Replace(BestEditor, "Eclipse", "Vim");
+ break;
+ default:
+ assertUnreachable();
+ }
+ }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function Replace(fun, original, patch) {
+ var script = Debug.findScript(fun);
+ if (fun.toString().indexOf(original) < 0) return;
+ var patch_pos = script.source.indexOf(original);
+ var change_log = [];
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos, original.length, patch, change_log);
+}
+
+Debug.setListener(listener);
+
+debugger;
+results.push(BestEditor());
+results.push(BestEditor());
+results.push(BestEditor());
+Debug.setListener(null);
+
+assertNull(exception);
+assertEquals(["Emacs", "Eclipse", "Vim"], results);
+print(JSON.stringify(log, 1));
+assertEquals([
+ "debugger;",
+ "results.push(BestEditor());",
+ " var best_editor = \"Emacs\";",
+ " return best_editor;","}",
+ "results.push(BestEditor());",
+ "results.push(BestEditor());",
+ " var best_editor = \"Emacs\";",
+ " return best_editor;",
+ " var best_editor = \"Eclipse\";",
+ " return best_editor;","}",
+ "results.push(BestEditor());",
+ "results.push(BestEditor());",
+ " var best_editor = \"Eclipse\";",
+ " return best_editor;",
+ " var best_editor = \"Vim\";",
+ " return best_editor;",
+ "}","results.push(BestEditor());",
+ "Debug.setListener(null);"
+], log);
diff --git a/deps/v8/test/mjsunit/debug-return-value.js b/deps/v8/test/mjsunit/debug-return-value.js
index 3ea106c40f..7c9f94eda1 100644
--- a/deps/v8/test/mjsunit/debug-return-value.js
+++ b/deps/v8/test/mjsunit/debug-return-value.js
@@ -98,7 +98,7 @@ function listener(event, exec_state, event_data, data) {
default:
fail("Unexpected");
}
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
} else {
// Position at the end of the function.
assertEquals(debugger_source_position + 50,
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 276fd55e90..8874960208 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -85,8 +85,8 @@ assertEquals('native math.js', math_script.name);
assertEquals(Debug.ScriptType.Native, math_script.type);
// Test a builtins delay loaded script.
-var date_delay_script = Debug.findScript('native date.js');
-assertEquals('native date.js', date_delay_script.name);
+var date_delay_script = Debug.findScript('native json.js');
+assertEquals('native json.js', date_delay_script.name);
assertEquals(Debug.ScriptType.Native, date_delay_script.type);
// Test a debugger script.
diff --git a/deps/v8/test/mjsunit/debug-sourceinfo.js b/deps/v8/test/mjsunit/debug-sourceinfo.js
index 1dbe1b7a0a..cb41107c60 100644
--- a/deps/v8/test/mjsunit/debug-sourceinfo.js
+++ b/deps/v8/test/mjsunit/debug-sourceinfo.js
@@ -63,9 +63,9 @@ var comment_lines = 28;
// This is the last position in the entire file (note: this equals
// file size of <debug-sourceinfo.js> - 1, since starting at 0).
-var last_position = 11337;
+var last_position = 11519;
// This is the last line of entire file (note: starting at 0).
-var last_line = 265;
+var last_line = 269;
// This is the last column of last line (note: starting at 0 and +1, due
// to trailing <LF>).
var last_column = 1;
@@ -244,18 +244,22 @@ assertEquals(70 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
assertEquals(6 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
for (i = 1; i <= num_lines_d; i++) {
- assertEquals(6 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
+ assertEquals(6 + (i * line_length_d) + start_d,
+ Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
}
assertEquals(158 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
// Make sure invalid inputs work properly.
assertEquals(0, script.locationFromPosition(-1).line);
-assertEquals(null, script.locationFromPosition(last_position + 1));
+assertEquals(null, script.locationFromPosition(last_position + 2));
// Test last position.
assertEquals(last_position, script.locationFromPosition(last_position).position);
assertEquals(last_line, script.locationFromPosition(last_position).line);
assertEquals(last_column, script.locationFromPosition(last_position).column);
+assertEquals(last_line + 1,
+ script.locationFromPosition(last_position + 1).line);
+assertEquals(0, script.locationFromPosition(last_position + 1).column);
// Test that script.sourceLine(line) works.
var location;
diff --git a/deps/v8/test/mjsunit/debug-step-4-in-frame.js b/deps/v8/test/mjsunit/debug-step-4.js
index 93884303ca..3992f622a6 100644
--- a/deps/v8/test/mjsunit/debug-step-4-in-frame.js
+++ b/deps/v8/test/mjsunit/debug-step-4.js
@@ -53,9 +53,7 @@ function h() {
}
}
-function TestCase(frame_index, step_count, expected_final_state) {
- print("Test case, parameters " + frame_index + "/" + step_count);
-
+function TestCase(expected_final_state) {
var listener_exception = null;
var state_snapshot;
var listener_state;
@@ -68,12 +66,7 @@ function TestCase(frame_index, step_count, expected_final_state) {
if (event == Debug.DebugEvent.Break) {
if (listener_state == 0) {
Debug.clearBreakPoint(bp);
- var context_frame;
- if (frame_index !== undefined) {
- context_frame = exec_state.frame(frame_index);
- }
- exec_state.prepareStep(Debug.StepAction.StepNext,
- step_count, context_frame);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
listener_state = 1;
} else if (listener_state == 1) {
state_snapshot = String(state);
@@ -107,26 +100,4 @@ function TestCase(frame_index, step_count, expected_final_state) {
// Warm-up -- make sure all is compiled and ready for breakpoint.
h();
-
-// Stepping in the default (top) frame.
-TestCase(undefined, 0, "0,0,-1");
-TestCase(undefined, 1, "0,0,-1");
-TestCase(undefined, 2, "0,0,0");
-TestCase(undefined, 5, "0,0,1");
-TestCase(undefined, 8, "0,0,3");
-
-// Stepping in the frame #0 (should be exactly the same as above).
-TestCase(0, 0, "0,0,-1");
-TestCase(0, 1, "0,0,-1");
-TestCase(0, 2, "0,0,0");
-TestCase(0, 5, "0,0,1");
-TestCase(0, 8, "0,0,3");
-
-// Stepping in the frame #1.
-TestCase(1, 0, "0,0,3");
-TestCase(1, 3, "0,1,3");
-TestCase(1, 7, "0,3,3");
-
-// Stepping in the frame #2.
-TestCase(2, 3, "1,3,3");
-TestCase(2, 7, "3,3,3");
+TestCase("0,0,-1");
diff --git a/deps/v8/test/mjsunit/debug-step-end-of-script.js b/deps/v8/test/mjsunit/debug-step-end-of-script.js
new file mode 100644
index 0000000000..ded58d1dad
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-step-end-of-script.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var expected = ["debugger;", "", "debugger;"];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(expected.shift(), exec_state.frame(0).sourceLineText());
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+debugger;
diff --git a/deps/v8/test/mjsunit/debug-step-into-json.js b/deps/v8/test/mjsunit/debug-step-into-json.js
new file mode 100644
index 0000000000..d4ba7097c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-step-into-json.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ assertTrue(
+ event_data.sourceLineText().indexOf(`Break ${break_count++}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function toJsonCallback() {
+ return "x"; // Break 2.
+} // Break 3.
+var o = {};
+o.toJSON = toJsonCallback;
+
+Debug.setListener(listener);
+debugger; // Break 0.
+var result = JSON.stringify(o); // Break 1.
+Debug.setListener(null); // Break 4.
+
+assertEquals('"x"', result);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-step-into-valueof.js b/deps/v8/test/mjsunit/debug-step-into-valueof.js
new file mode 100644
index 0000000000..b1d9cf1454
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-step-into-valueof.js
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ assertTrue(event_data.sourceLineText().indexOf(`Break ${break_count++}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function valueOfCallback() {
+ return 2; // Break 2.
+} // Break 3.
+var o = {};
+o.valueOf = valueOfCallback;
+
+Debug.setListener(listener);
+debugger; // Break 0.
+var result = 1 + o; // Break 1.
+Debug.setListener(null); // Break 4.
+
+assertEquals(3, result);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-step-turbofan.js b/deps/v8/test/mjsunit/debug-step-turbofan.js
index 1710942e9a..6c1fceff31 100644
--- a/deps/v8/test/mjsunit/debug-step-turbofan.js
+++ b/deps/v8/test/mjsunit/debug-step-turbofan.js
@@ -33,7 +33,7 @@ var break_count = 0;
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
- exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
print(exec_state.frame(0).sourceLineText());
var match = exec_state.frame(0).sourceLineText().match(/Break (\d)/);
assertNotNull(match);
diff --git a/deps/v8/test/mjsunit/debug-step.js b/deps/v8/test/mjsunit/debug-step.js
index 45f077f967..bfbea16380 100644
--- a/deps/v8/test/mjsunit/debug-step.js
+++ b/deps/v8/test/mjsunit/debug-step.js
@@ -37,10 +37,10 @@ var bp1, bp2;
function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break) {
- if (state == 0) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1000);
- state = 1;
- } else if (state == 1) {
+ if (step_count > 0) {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ step_count--;
+ } else {
result = exec_state.frame().evaluate("i").value();
// Clear the break point on line 2 if set.
if (bp2) {
@@ -65,19 +65,8 @@ function f() {
bp1 = Debug.setBreakPoint(f, 1);
// Check that performing 1000 steps will make i 499.
-state = 0;
+var step_count = 1000;
result = -1;
f();
assertEquals(332, result);
-
-// Check that performing 1000 steps with a break point on the statement in the
-// for loop (line 2) will only make i 0 as a real break point breaks even when
-// multiple steps have been requested.
-state = 0;
-result = -1;
-bp2 = Debug.setBreakPoint(f, 3);
-f();
-assertEquals(0, result);
-
-// Get rid of the debug event listener.
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-stepframe-clearing.js b/deps/v8/test/mjsunit/debug-stepframe-clearing.js
index c440e78dd2..dec46fd7d6 100644
--- a/deps/v8/test/mjsunit/debug-stepframe-clearing.js
+++ b/deps/v8/test/mjsunit/debug-stepframe-clearing.js
@@ -48,7 +48,7 @@ function listener(event, exec_state, event_data, data) {
if (break_count >= 0 && break_count < 2) {
// 0, 1: Keep stepping through frames.
assertEquals(break_count, match_value);
- exec_state.prepareStep(Debug.StepAction.StepFrame, 1);
+ exec_state.prepareStep(Debug.StepAction.StepFrame);
} else if (break_count === 2) {
// 2: let the code run to a breakpoint we set. The load should
// go monomorphic.
@@ -58,7 +58,7 @@ function listener(event, exec_state, event_data, data) {
// call still have the ability to break like before?
assertEquals(break_count, match_value);
Debug.clearBreakPoint(bp_f1_line7);
- exec_state.prepareStep(Debug.StepAction.StepFrame, 1);
+ exec_state.prepareStep(Debug.StepAction.StepFrame);
} else {
assertEquals(4, break_count);
assertEquals(2, match_value);
diff --git a/deps/v8/test/mjsunit/debug-stepframe.js b/deps/v8/test/mjsunit/debug-stepframe.js
index f7983c010e..8d9f959e78 100644
--- a/deps/v8/test/mjsunit/debug-stepframe.js
+++ b/deps/v8/test/mjsunit/debug-stepframe.js
@@ -82,7 +82,6 @@ Object.defineProperty(o, "set", { set : set });
Debug = debug.Debug;
var break_count = 0
var exception = null;
-var step_size;
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
@@ -92,22 +91,20 @@ function listener(event, exec_state, event_data, data) {
var match = line.match(/\/\/ Break (\d+)$/);
assertEquals(2, match.length);
assertEquals(break_count, parseInt(match[1]));
- break_count += step_size;
- exec_state.prepareStep(Debug.StepAction.StepFrame, step_size);
+ break_count ++;
+ exec_state.prepareStep(Debug.StepAction.StepFrame);
} catch (e) {
print(e + e.stack);
exception = e;
}
}
-for (step_size = 1; step_size < 6; step_size++) {
- print("step size = " + step_size);
- break_count = 0;
- Debug.setListener(listener);
- debugger; // Break 0
- f0();
- Debug.setListener(null); // Break 16
- assertTrue(break_count > 14);
-}
+
+break_count = 0;
+Debug.setListener(listener);
+debugger; // Break 0
+f0();
+Debug.setListener(null); // Break 16
+assertTrue(break_count > 14);
assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor-ic.js b/deps/v8/test/mjsunit/debug-stepin-accessor-ic.js
index 5f40dcb250..66c0580fd6 100644
--- a/deps/v8/test/mjsunit/debug-stepin-accessor-ic.js
+++ b/deps/v8/test/mjsunit/debug-stepin-accessor-ic.js
@@ -31,7 +31,7 @@ function listener(event, exec_state, event_data, data) {
try {
var source_line = exec_state.frame(0).sourceLineText();
assertTrue(source_line.indexOf("// Break") > 0);
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
break_count++;
} catch (e) {
exception = e;
diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor.js b/deps/v8/test/mjsunit/debug-stepin-accessor.js
index 70acd5ef6b..daf86a3652 100644
--- a/deps/v8/test/mjsunit/debug-stepin-accessor.js
+++ b/deps/v8/test/mjsunit/debug-stepin-accessor.js
@@ -41,14 +41,14 @@ var expected_function_name = null;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- state = 2;
- } else if (state == 2) {
+ if (state == 3) {
assertEquals(expected_source_line_text,
event_data.sourceLineText());
assertEquals(expected_function_name, event_data.func().name());
- state = 3;
+ state = 4;
+ } else {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ state++;
}
}
} catch(e) {
@@ -241,7 +241,7 @@ for (var n in this) {
state = 1;
this[n]();
assertNull(exception);
- assertEquals(3, state);
+ assertEquals(4, state);
}
// Get rid of the debug event listener.
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js b/deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js
new file mode 100644
index 0000000000..7e281ab220
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ assertTrue(
+ event_data.sourceLineText().indexOf(`Break ${break_count++}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function replaceCallback(a) {
+ return "x"; // Break 2.
+} // Break 3.
+
+var re = /x/g;
+// Optimize the inner helper function for string replace.
+for (var i = 0; i < 10000; i++) "x".replace(re, replaceCallback);
+
+Debug.setListener(listener);
+debugger; // Break 0.
+var result = "x".replace(re, replaceCallback); // Break 1.
+Debug.setListener(null); // Break 4.
+
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin-callback.js b/deps/v8/test/mjsunit/debug-stepin-builtin-callback.js
index 223159d4f5..4fde6e440f 100644
--- a/deps/v8/test/mjsunit/debug-stepin-builtin-callback.js
+++ b/deps/v8/test/mjsunit/debug-stepin-builtin-callback.js
@@ -31,127 +31,114 @@
Debug = debug.Debug
-var exception = false;
+var exception = null;
function array_listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (breaks == 0) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- breaks = 1;
- } else if (breaks <= 3) {
- breaks++;
- // Check whether we break at the expected line.
- print(event_data.sourceLineText());
- assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
- exec_state.prepareStep(Debug.StepAction.StepIn, 3);
- }
+ print(event_data.sourceLineText(), breaks);
+ assertTrue(event_data.sourceLineText().indexOf(`B${breaks++}`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
} catch (e) {
- exception = true;
+ print(e);
+ quit();
+ exception = e;
}
};
function cb_false(num) {
- print("element " + num); // Expected to step to this point.
- return false;
-}
+ print("element " + num); // B2 B5 B8
+ return false; // B3 B6 B9
+} // B4 B7 B10
function cb_true(num) {
- print("element " + num); // Expected to step to this point.
- return true;
-}
+ print("element " + num); // B2 B5 B8
+ return true; // B3 B6 B9
+} // B4 B7 B10
function cb_reduce(a, b) {
- print("elements " + a + " and " + b); // Expected to step to this point.
- return a + b;
-}
+ print("elements " + a + " and " + b); // B2 B5
+ return a + b; // B3 B6
+} // B4 B7
-var a = [1, 2, 3, 4];
-
-Debug.setListener(array_listener);
+var a = [1, 2, 3];
var breaks = 0;
-debugger;
-a.forEach(cb_true);
-assertFalse(exception);
-assertEquals(4, breaks);
+Debug.setListener(array_listener);
+debugger; // B0
+a.forEach(cb_true); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
breaks = 0;
-debugger;
-a.some(cb_false);
-assertFalse(exception);
-assertEquals(4, breaks);
+Debug.setListener(array_listener);
+debugger; // B0
+a.some(cb_false); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
breaks = 0;
-debugger;
-a.every(cb_true);
-assertEquals(4, breaks);
-assertFalse(exception);
+Debug.setListener(array_listener);
+debugger; // B0
+a.every(cb_true); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
breaks = 0;
-debugger;
-a.map(cb_true);
-assertFalse(exception);
-assertEquals(4, breaks);
+Debug.setListener(array_listener);
+debugger; // B0
+a.map(cb_true); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
breaks = 0;
-debugger;
-a.filter(cb_true);
-assertFalse(exception);
-assertEquals(4, breaks);
+Debug.setListener(array_listener);
+debugger; // B0
+a.filter(cb_true); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
breaks = 0;
-debugger;
-a.reduce(cb_reduce);
-assertFalse(exception);
-assertEquals(4, breaks);
+Debug.setListener(array_listener);
+debugger; // B0
+a.reduce(cb_reduce); // B1
+Debug.setListener(null); // B8
+assertNull(exception);
+assertEquals(9, breaks);
breaks = 0;
-debugger;
-a.reduceRight(cb_reduce);
-assertFalse(exception);
-assertEquals(4, breaks);
-
-Debug.setListener(null);
+Debug.setListener(array_listener);
+debugger; // B0
+a.reduceRight(cb_reduce); // B1
+Debug.setListener(null); // B8
+assertNull(exception);
+assertEquals(9, breaks);
// Test two levels of builtin callbacks:
// Array.forEach calls a callback function, which by itself uses
// Array.forEach with another callback function.
-function second_level_listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (breaks == 0) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 3);
- breaks = 1;
- } else if (breaks <= 16) {
- breaks++;
- // Check whether we break at the expected line.
- assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
- // Step two steps further every four breaks to skip the
- // forEach call in the first level of recurision.
- var step = (breaks % 4 == 1) ? 6 : 3;
- exec_state.prepareStep(Debug.StepAction.StepIn, step);
- }
- }
- } catch (e) {
- exception = true;
- }
-};
+function cb_true_2(num) {
+ print("element " + num); // B3 B6 B9 B15 B18 B21 B27 B30 B33
+ return true; // B4 B7 B10 B16 B19 B22 B28 B31 B34
+} // B5 B8 B11 B17 B20 B23 B29 B32 B35
function cb_foreach(num) {
- a.forEach(cb_true);
- print("back to the first level of recursion.");
-}
-
-Debug.setListener(second_level_listener);
+ a.forEach(cb_true_2); // B2 B14 B20 B26
+ print("back."); // B12 B18 B24 B36
+} // B13 B19 B25 B37
breaks = 0;
-debugger;
-a.forEach(cb_foreach);
-assertFalse(exception);
-assertEquals(17, breaks);
-
-Debug.setListener(null);
+Debug.setListener(array_listener);
+debugger; // B0
+a.forEach(cb_foreach); // B1
+Debug.setListener(null); // B38
+assertNull(exception);
+assertEquals(39, breaks);
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin.js b/deps/v8/test/mjsunit/debug-stepin-builtin.js
index d9c6061104..f61098045c 100644
--- a/deps/v8/test/mjsunit/debug-stepin-builtin.js
+++ b/deps/v8/test/mjsunit/debug-stepin-builtin.js
@@ -41,14 +41,14 @@ var expected_function_name = null;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- state = 2;
- } else if (state == 2) {
+ if (state == 3) {
assertEquals(expected_function_name, event_data.func().name());
assertEquals(expected_source_line_text,
event_data.sourceLineText());
- state = 3;
+ state = 4;
+ } else {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ state++;
}
}
} catch(e) {
@@ -72,7 +72,7 @@ function testStepInArraySlice() {
state = 1;
testStepInArraySlice();
assertNull(exception);
-assertEquals(3, state);
+assertEquals(4, state);
// Get rid of the debug event listener.
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js b/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
index 053b8bfe8a..b3e385bfb5 100644
--- a/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
+++ b/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
@@ -42,11 +42,11 @@ var step_in_count = 2;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (state == 0) {
+ if (state < step_in_count) {
// Step into f().
- exec_state.prepareStep(Debug.StepAction.StepIn, step_in_count);
- state = 2;
- } else if (state == 2) {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ state++;
+ } else {
assertEquals(expected_source_line_text,
event_data.sourceLineText());
assertEquals(expected_function_name, event_data.func().name());
diff --git a/deps/v8/test/mjsunit/debug-stepin-construct-call.js b/deps/v8/test/mjsunit/debug-stepin-construct-call.js
index 5e2145591f..7dbf7b1d28 100644
--- a/deps/v8/test/mjsunit/debug-stepin-construct-call.js
+++ b/deps/v8/test/mjsunit/debug-stepin-construct-call.js
@@ -12,7 +12,7 @@ function listener(event, exec_state, event_data, data) {
try {
var source_line = exec_state.frame(0).sourceLineText();
print(source_line);
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
break_count++;
} catch (e) {
exception = e;
diff --git a/deps/v8/test/mjsunit/debug-stepin-foreach.js b/deps/v8/test/mjsunit/debug-stepin-foreach.js
index c2702f794a..69ce3efab7 100644
--- a/deps/v8/test/mjsunit/debug-stepin-foreach.js
+++ b/deps/v8/test/mjsunit/debug-stepin-foreach.js
@@ -27,7 +27,7 @@ function listener(event, exec_state, event_data, data) {
"Expected: // Break " + break_count + ".");
++break_count;
if (break_count !== expected_breaks) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
} catch(e) {
diff --git a/deps/v8/test/mjsunit/debug-stepin-function-call.js b/deps/v8/test/mjsunit/debug-stepin-function-call.js
index eaeebcedb2..8af7aad19d 100644
--- a/deps/v8/test/mjsunit/debug-stepin-function-call.js
+++ b/deps/v8/test/mjsunit/debug-stepin-function-call.js
@@ -38,11 +38,11 @@ var state = 0;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (state == 0) {
+ if (state < 2) {
// Step into f2.call:
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- state = 2;
- } else if (state == 2) {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ state++;
+ } else {
assertEquals('g', event_data.func().name());
assertEquals(' return t + 1; // expected line',
event_data.sourceLineText());
diff --git a/deps/v8/test/mjsunit/debug-stepin-property-function-call.js b/deps/v8/test/mjsunit/debug-stepin-property-function-call.js
index 081fb24fb7..dff83c7e14 100644
--- a/deps/v8/test/mjsunit/debug-stepin-property-function-call.js
+++ b/deps/v8/test/mjsunit/debug-stepin-property-function-call.js
@@ -38,13 +38,13 @@ var state = 1;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 3);
- state = 2;
- } else if (state == 2) {
+ if (state < 4) {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ state++;
+ } else {
assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0,
"source line: \"" + event_data.sourceLineText() + "\"");
- state = 3;
+ state = 5;
}
}
} catch(e) {
@@ -143,7 +143,7 @@ for (var n in this) {
this[n]();
++functionsCalled;
assertNull(exception, n);
- assertEquals(3, state, n);
+ assertEquals(5, state, n);
assertEquals(functionsCalled, count, n);
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-recursive-function.js b/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
index 3741f26a8d..9082294b78 100644
--- a/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
+++ b/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
@@ -30,7 +30,6 @@
Debug = debug.Debug
var exception = null;
-var step_out_count = 1;
// Simple debug event handler which counts the number of breaks hit and steps.
var break_point_hit_count = 0;
@@ -40,7 +39,7 @@ function listener(event, exec_state, event_data, data) {
break_point_hit_count++;
// Continue stepping until returned to bottom frame.
if (exec_state.frameCount() > 1) {
- exec_state.prepareStep(Debug.StepAction.StepOut, step_out_count);
+ exec_state.prepareStep(Debug.StepAction.StepOut);
}
}
@@ -80,27 +79,23 @@ function fact(x) {
BeginTest('Test 1');
shouldBreak = function(x) { return x == 3; };
-step_out_count = 1;
fact(3);
EndTest(2);
BeginTest('Test 2');
shouldBreak = function(x) { return x == 2; };
-step_out_count = 1;
fact(3);
EndTest(3);
BeginTest('Test 3');
shouldBreak = function(x) { return x == 1; };
-step_out_count = 2;
fact(3);
-EndTest(2);
+EndTest(4);
BeginTest('Test 4');
shouldBreak = function(x) { return x == 1 || x == 3; };
-step_out_count = 2;
fact(3);
-EndTest(3);
+EndTest(5);
// Get rid of the debug event listener.
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part1.js b/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
index cce88b73dc..27a91d4d02 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part2.js b/deps/v8/test/mjsunit/debug-stepout-scope-part2.js
index ba05317979..5b0b17b9e4 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part2.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part2.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part3.js b/deps/v8/test/mjsunit/debug-stepout-scope-part3.js
index c120640605..2584914681 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part3.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part3.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part4.js b/deps/v8/test/mjsunit/debug-stepout-scope-part4.js
index a5743fe566..765bfe246b 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part4.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part4.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part5.js b/deps/v8/test/mjsunit/debug-stepout-scope-part5.js
index cabacbaa8e..e819e293ce 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part5.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part5.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part6.js b/deps/v8/test/mjsunit/debug-stepout-scope-part6.js
index f222fbd4fb..fb009e6f7f 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part6.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part6.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part7.js b/deps/v8/test/mjsunit/debug-stepout-scope-part7.js
index eba115d26f..969fb7f9ed 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part7.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part7.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part8.js b/deps/v8/test/mjsunit/debug-stepout-scope-part8.js
index c0a8a0034a..790caca7c3 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part8.js
+++ b/deps/v8/test/mjsunit/debug-stepout-scope-part8.js
@@ -44,9 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
// Do steps until we reach the global scope again.
- if (true) {
- exec_state.prepareStep(Debug.StepAction.StepInMin, 1);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
diff --git a/deps/v8/test/mjsunit/debug-stepout-to-builtin.js b/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
index 772fb4b5e8..2e5e7a25f8 100644
--- a/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
+++ b/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
@@ -42,9 +42,9 @@ function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
if (state == 1) {
- exec_state.prepareStep(Debug.StepAction.StepOut, 2);
- state = 2;
- } else if (state == 2) {
+ exec_state.prepareStep(Debug.StepAction.StepOut);
+ state++;
+ } else {
assertEquals(expected_function_name, event_data.func().name());
assertEquals(expected_source_line_text,
event_data.sourceLineText());
diff --git a/deps/v8/test/mjsunit/declare-locally.js b/deps/v8/test/mjsunit/declare-locally.js
index dca357b060..45d30e0947 100644
--- a/deps/v8/test/mjsunit/declare-locally.js
+++ b/deps/v8/test/mjsunit/declare-locally.js
@@ -33,7 +33,7 @@
// This exercises the code in runtime.cc in
// DeclareGlobal...Locally().
-// Flags: --es52-globals
+// Flags: --legacy-const
this.__proto__.foo = 42;
this.__proto__.bar = 87;
diff --git a/deps/v8/test/mjsunit/error-constructors.js b/deps/v8/test/mjsunit/error-constructors.js
index 1ada39de55..5c2aac5b87 100644
--- a/deps/v8/test/mjsunit/error-constructors.js
+++ b/deps/v8/test/mjsunit/error-constructors.js
@@ -69,9 +69,11 @@ try {
assertTrue(e.hasOwnProperty('stack'));
// Check that intercepting property access from toString is prevented for
-// compiler errors. This is not specified, but allowing interception
-// through a getter can leak error objects from different
-// script tags in the same context in a browser setting.
+// compiler errors. This is not specified, but allowing interception through a
+// getter can leak error objects from different script tags in the same context
+// in a browser setting. Use Realm.eval as a proxy for loading scripts. We
+// ignore the exception thrown from it since that would not be catchable from
+// user-land code.
var errors = [SyntaxError, ReferenceError, TypeError, RangeError, URIError];
var error_triggers = ["syntax error",
"var error = reference",
@@ -79,39 +81,12 @@ var error_triggers = ["syntax error",
"String.fromCodePoint(0xFFFFFF)",
"decodeURI('%F')"];
for (var i in errors) {
- var name = errors[i].name;
-
// Monkey-patch prototype.
- var props = ["name", "message", "stack"];
- for (var j in props) {
- errors[i].prototype.__defineGetter__(props[j], fail);
+ for (var prop of ["name", "message", "stack"]) {
+ errors[i].prototype.__defineGetter__(prop, fail);
}
// String conversion should not invoke monkey-patched getters on prototype.
- var error;
- try {
- eval(error_triggers[i]);
- } catch (e) {
- error = e;
- }
- assertTrue(error.toString().startsWith(name));
-
- // Deleting message on the error (exposing the getter) is fine.
- delete error.message;
- assertEquals(name, error.toString());
-
- // Custom properties shadowing the name are fine.
- var myerror = { name: "myerror", message: "mymessage"};
- myerror.__proto__ = error;
- assertEquals("myerror: mymessage", myerror.toString());
-
- // Custom getters in actual objects are welcome.
- error.__defineGetter__("name", function() { return "mine"; });
- assertEquals("mine", error.toString());
-
- // Custom properties shadowing the name are fine.
- var myerror2 = { message: "mymessage"};
- myerror2.__proto__ = error;
- assertEquals("mine: mymessage", myerror2.toString());
+ assertThrows(()=>Realm.eval(0, error_triggers[i]));
}
// Monkey-patching non-internal errors should still be observable.
@@ -128,7 +103,7 @@ for (var i in errors) {
Error.prototype.toString = Object.prototype.toString;
-assertEquals("[object Error]", Error.prototype.toString());
+assertEquals("[object Object]", Error.prototype.toString());
assertEquals(Object.prototype, Error.prototype.__proto__);
var e = new Error("foo");
assertEquals("[object Error]", e.toString());
diff --git a/deps/v8/test/mjsunit/error-tostring.js b/deps/v8/test/mjsunit/error-tostring.js
index 8a8a969085..e4fc6af9db 100644
--- a/deps/v8/test/mjsunit/error-tostring.js
+++ b/deps/v8/test/mjsunit/error-tostring.js
@@ -41,7 +41,7 @@ e.message = e;
e.stack = "Does not occur in output";
e.arguments = "Does not occur in output";
e.type = "Does not occur in output";
-assertEquals('', e.toString());
+assertThrows(()=>e.toString(), RangeError);
e = new Error();
e.name = [ e ];
diff --git a/deps/v8/test/mjsunit/es6/array-tostring.js b/deps/v8/test/mjsunit/es6/array-tostring.js
index 8a9198ca16..397fde4ab1 100644
--- a/deps/v8/test/mjsunit/es6/array-tostring.js
+++ b/deps/v8/test/mjsunit/es6/array-tostring.js
@@ -41,7 +41,7 @@ function testToStringTag(className) {
Object.defineProperty(obj, Symbol.toStringTag, {
get: function() { throw className; }
});
- assertThrows(function() {
+ assertThrowsEquals(function() {
Array.prototype.toString.call(obj);
}, className);
diff --git a/deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js b/deps/v8/test/mjsunit/es6/arrow-rest-params-lazy-parsing.js
index 478603e746..05e92b6ed6 100644
--- a/deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js
+++ b/deps/v8/test/mjsunit/es6/arrow-rest-params-lazy-parsing.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
// Flags: --min-preparse-length=0
(function testRestIndex() {
diff --git a/deps/v8/test/mjsunit/harmony/arrow-rest-params.js b/deps/v8/test/mjsunit/es6/arrow-rest-params.js
index 3cb7adf92c..dc25584fe0 100644
--- a/deps/v8/test/mjsunit/harmony/arrow-rest-params.js
+++ b/deps/v8/test/mjsunit/es6/arrow-rest-params.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
-
(function testRestIndex() {
assertEquals(5, ((...args) => args.length)(1,2,3,4,5));
assertEquals(4, ((a, ...args) => args.length)(1,2,3,4,5));
diff --git a/deps/v8/test/mjsunit/es6/block-early-errors.js b/deps/v8/test/mjsunit/es6/block-early-errors.js
index bf24942bb1..4af6521f64 100644
--- a/deps/v8/test/mjsunit/es6/block-early-errors.js
+++ b/deps/v8/test/mjsunit/es6/block-early-errors.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --no-harmony-sloppy-let
+
function CheckException(e) {
var string = e.toString();
assertInstanceof(e, SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/block-eval-var-over-legacy-const.js b/deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js
index 0eb8456433..be1687b5e9 100644
--- a/deps/v8/test/mjsunit/harmony/block-eval-var-over-legacy-const.js
+++ b/deps/v8/test/mjsunit/es6/block-eval-var-over-legacy-const.js
@@ -3,64 +3,41 @@
// found in the LICENSE file.
// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
+// Flags: --legacy-const
-// Var-let conflict in a function throws, even if the var is in an eval
-
-let caught = false;
+// Legacy-const-let conflict in a function throws, even if the legacy const
+// is in an eval
// Throws at the top level of a function
-try {
- (function() {
- let x = 1;
- eval('const x = 2');
- })()
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ let x = 1;
+ eval('const x = 2');
+}, TypeError);
// If the eval is in its own block scope, throws
-caught = false;
-try {
- (function() {
- let y = 1;
- { eval('const y = 2'); }
- })()
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ let y = 1;
+ { eval('const y = 2'); }
+}, TypeError);
// If the let is in its own block scope, with the eval, throws
-caught = false
-try {
- (function() {
- {
- let x = 1;
- eval('const x = 2');
- }
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ {
+ let x = 1;
+ eval('const x = 2');
+ }
+}, TypeError);
// Legal if the let is no longer visible
-caught = false
-try {
- (function() {
- {
- let x = 1;
- }
- eval('const x = 2');
- })();
-} catch (e) {
- caught = true;
-}
-assertFalse(caught);
+assertDoesNotThrow(function() {
+ {
+ let x = 1;
+ }
+ eval('const x = 2');
+});
// In global scope
-caught = false;
+let caught = false;
try {
let z = 1;
eval('const z = 2');
@@ -81,7 +58,7 @@ try {
}
assertFalse(caught);
-// var across with doesn't conflict
+// legacy const across with doesn't conflict
caught = false;
try {
(function() {
@@ -94,7 +71,7 @@ try {
}
assertFalse(caught);
-// var can still conflict with let across a with
+// legacy const can still conflict with let across a with
caught = false;
try {
(function() {
diff --git a/deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
index a4c5aeb211..20ca10719b 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring-bind
+// Flags: --legacy-const
// let is usable as a variable with var or legacy const, not let or ES6 const
diff --git a/deps/v8/test/mjsunit/es6/block-non-strict-errors.js b/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
index 50d5f22cf1..db7f558905 100644
--- a/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
+++ b/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --no-harmony-sloppy-let --no-harmony-sloppy-function
+// Flags: --no-harmony-sloppy
+
function CheckError(source) {
var exception = null;
try {
diff --git a/deps/v8/test/mjsunit/harmony/block-scope-class.js b/deps/v8/test/mjsunit/es6/block-scope-class.js
index 351feaa90e..351feaa90e 100644
--- a/deps/v8/test/mjsunit/harmony/block-scope-class.js
+++ b/deps/v8/test/mjsunit/es6/block-scope-class.js
diff --git a/deps/v8/test/mjsunit/es6/built-in-accessor-names.js b/deps/v8/test/mjsunit/es6/built-in-accessor-names.js
index d902ae6700..c5f8cec3c0 100644
--- a/deps/v8/test/mjsunit/es6/built-in-accessor-names.js
+++ b/deps/v8/test/mjsunit/es6/built-in-accessor-names.js
@@ -23,25 +23,13 @@ assertGetterName('get size', Set.prototype, 'size');
assertGetterName('get size', Map.prototype, 'size');
-let typedArrays = [
- Uint8Array,
- Int8Array,
- Uint16Array,
- Int16Array,
- Uint32Array,
- Int32Array,
- Float32Array,
- Float64Array,
- Uint8ClampedArray
-];
-
-for (let f of typedArrays) {
- assertGetterName('get buffer', f.prototype, 'buffer');
- assertGetterName('get byteOffset', f.prototype, 'byteOffset');
- assertGetterName('get byteLength', f.prototype, 'byteLength');
- assertGetterName('get length', f.prototype, 'length');
- assertGetterName('get [Symbol.toStringTag]', f.prototype, Symbol.toStringTag);
-}
+let TypedArray = Uint8Array.__proto__;
+
+assertGetterName('get buffer', TypedArray.prototype, 'buffer');
+assertGetterName('get byteOffset', TypedArray.prototype, 'byteOffset');
+assertGetterName('get byteLength', TypedArray.prototype, 'byteLength');
+assertGetterName('get length', TypedArray.prototype, 'length');
+assertGetterName('get [Symbol.toStringTag]', TypedArray.prototype, Symbol.toStringTag);
assertGetterName('get buffer', DataView.prototype, 'buffer');
diff --git a/deps/v8/test/mjsunit/harmony/classes-derived-return-type.js b/deps/v8/test/mjsunit/es6/classes-derived-return-type.js
index 8283bcb227..8283bcb227 100644
--- a/deps/v8/test/mjsunit/harmony/classes-derived-return-type.js
+++ b/deps/v8/test/mjsunit/es6/classes-derived-return-type.js
diff --git a/deps/v8/test/mjsunit/es6/classes-proxy.js b/deps/v8/test/mjsunit/es6/classes-proxy.js
new file mode 100644
index 0000000000..09d12c28dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-proxy.js
@@ -0,0 +1,73 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-proxies --harmony-reflect
+
+function CreateConstructableProxy(handler) {
+ return new Proxy(function(){}, handler);
+}
+
+(function() {
+ var prototype = { x: 1 };
+ var log = [];
+
+ var proxy = CreateConstructableProxy({
+ get(k) {
+ log.push("get trap");
+ return prototype;
+ }});
+
+ var o = Reflect.construct(Number, [100], proxy);
+ assertEquals(["get trap"], log);
+ assertTrue(Object.getPrototypeOf(o) === prototype);
+ assertEquals(100, Number.prototype.valueOf.call(o));
+})();
+
+(function() {
+ var prototype = { x: 1 };
+ var log = [];
+
+ var proxy = CreateConstructableProxy({
+ get(k) {
+ log.push("get trap");
+ return 10;
+ }});
+
+ var o = Reflect.construct(Number, [100], proxy);
+ assertEquals(["get trap"], log);
+ assertTrue(Object.getPrototypeOf(o) === Number.prototype);
+ assertEquals(100, Number.prototype.valueOf.call(o));
+})();
+
+(function() {
+ var prototype = { x: 1 };
+ var log = [];
+
+ var proxy = CreateConstructableProxy({
+ get(k) {
+ log.push("get trap");
+ return prototype;
+ }});
+
+ var o = Reflect.construct(Function, ["return 1000"], proxy);
+ assertEquals(["get trap"], log);
+ assertTrue(Object.getPrototypeOf(o) === prototype);
+ assertEquals(1000, o());
+})();
+
+(function() {
+ var prototype = { x: 1 };
+ var log = [];
+
+ var proxy = CreateConstructableProxy({
+ get(k) {
+ log.push("get trap");
+ return prototype;
+ }});
+
+ var o = Reflect.construct(Array, [1, 2, 3], proxy);
+ assertEquals(["get trap"], log);
+ assertTrue(Object.getPrototypeOf(o) === prototype);
+ assertEquals([1, 2, 3], o);
+})();
diff --git a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
index 74dd489cb8..313aad1d8f 100644
--- a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
+++ b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --harmony-reflect --harmony-regexp-subclass
+// Flags: --expose-gc --strong-mode
"use strict";
@@ -20,10 +21,11 @@ function checkPrototypeChain(object, constructors) {
(function() {
class A extends Object {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -34,6 +36,7 @@ function checkPrototypeChain(object, constructors) {
checkPrototypeChain(s, [A, Object]);
assertEquals(42, s.a);
assertEquals(4.2, s.d);
+ assertEquals(153, s.o.foo);
var s1 = new A("bar");
assertTrue(%HaveSameMap(s, s1));
@@ -46,6 +49,7 @@ function checkPrototypeChain(object, constructors) {
checkPrototypeChain(s, [A, Object]);
assertEquals(42, n.a);
assertEquals(4.2, n.d);
+ assertEquals(153, n.o.foo);
var n1 = new A(312);
assertTrue(%HaveSameMap(n, n1));
@@ -59,46 +63,99 @@ function checkPrototypeChain(object, constructors) {
checkPrototypeChain(s, [A, Object]);
assertEquals(42, b.a);
assertEquals(4.2, b.d);
+ assertEquals(153, b.o.foo);
var b1 = new A(true);
assertTrue(%HaveSameMap(b, b1));
assertTrue(%HaveSameMap(b, s));
+
+ gc();
})();
(function() {
class A extends Function {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
+ // Strong functions are not extensible, so don't add fields.
+ if (args[args.length - 1].indexOf("use strong") >= 0) {
+ assertThrows(()=>{ this.a = 10; }, TypeError);
+ return;
+ }
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
+ }
+ }
+ var sloppy_func = new A("");
+ var strict_func = new A("'use strict';");
+ assertNull(sloppy_func.caller);
+ assertThrows("strict_f.caller");
+ assertNull(Object.getOwnPropertyDescriptor(sloppy_func, "caller").value);
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(strict_func, "caller"));
+
+ function CheckFunction(func, is_strong) {
+ assertEquals("function", typeof func);
+ assertTrue(func instanceof Object);
+ assertTrue(func instanceof Function);
+ assertTrue(func instanceof A);
+ checkPrototypeChain(func, [A, Function, Object]);
+ if (!is_strong) {
+ assertEquals(42, func.a);
+ assertEquals(4.2, func.d);
+ assertEquals(153, func.o.foo);
+ assertTrue(undefined !== func.prototype);
+ func.prototype.bar = "func.bar";
+ var obj = new func();
+ assertTrue(obj instanceof Object);
+ assertTrue(obj instanceof func);
+ assertEquals("object", typeof obj);
+ assertEquals(113, obj.foo);
+ assertEquals("func.bar", obj.bar);
+ delete func.prototype.bar;
}
}
- var o = new A("this.foo = 153;");
- assertTrue(o instanceof Object);
- assertTrue(o instanceof Function);
- assertTrue(o instanceof A);
- assertEquals("function", typeof o);
- checkPrototypeChain(o, [A, Function, Object]);
- assertEquals(42, o.a);
- assertEquals(4.2, o.d);
- var oo = new o();
- assertEquals(153, oo.foo);
+ var source = "this.foo = 113;";
- var o1 = new A("return 312;");
- assertTrue(%HaveSameMap(o, o1));
+ // Sloppy function
+ var sloppy_func = new A(source);
+ assertTrue(undefined !== sloppy_func.prototype);
+ CheckFunction(sloppy_func, false);
+
+ var sloppy_func1 = new A("return 312;");
+ assertTrue(%HaveSameMap(sloppy_func, sloppy_func1));
+
+ // Strict function
+ var strict_func = new A("'use strict'; " + source);
+ assertFalse(%HaveSameMap(strict_func, sloppy_func));
+ CheckFunction(strict_func, false);
+
+ var strict_func1 = new A("'use strict'; return 312;");
+ assertTrue(%HaveSameMap(strict_func, strict_func1));
+
+ // Strong function
+ var strong_func = new A("'use strong'; " + source);
+ assertFalse(%HaveSameMap(strong_func, sloppy_func));
+ assertFalse(%HaveSameMap(strong_func, strict_func));
+ CheckFunction(strong_func, true);
+
+ var strong_func1 = new A("'use strong'; return 312;");
+ assertTrue(%HaveSameMap(strong_func, strong_func1));
+
+ gc();
})();
(function() {
class A extends Boolean {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -111,19 +168,23 @@ function checkPrototypeChain(object, constructors) {
assertTrue(o.valueOf());
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A(false);
assertTrue(%HaveSameMap(o, o1));
+
+ gc();
})();
function TestErrorSubclassing(error) {
class A extends error {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -142,9 +203,12 @@ function TestErrorSubclassing(error) {
assertEquals(error.name + ": message", o.toString());
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A("achtung!");
assertTrue(%HaveSameMap(o, o1));
+
+ gc();
}
@@ -162,10 +226,11 @@ function TestErrorSubclassing(error) {
(function() {
class A extends Number {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -178,19 +243,23 @@ function TestErrorSubclassing(error) {
assertEquals(153, o.valueOf());
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A(312);
assertTrue(%HaveSameMap(o, o1));
+
+ gc();
})();
(function() {
class A extends Date {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -203,22 +272,26 @@ function TestErrorSubclassing(error) {
assertEquals(1234567890, o.getTime());
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A(2015, 10, 29);
assertEquals(2015, o1.getFullYear());
assertEquals(10, o1.getMonth());
assertEquals(29, o1.getDate());
assertTrue(%HaveSameMap(o, o1));
+
+ gc();
})();
(function() {
class A extends String {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -232,19 +305,23 @@ function TestErrorSubclassing(error) {
assertEquals("foo", o.valueOf());
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A("bar");
assertTrue(%HaveSameMap(o, o1));
+
+ gc();
})();
(function() {
class A extends RegExp {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -263,38 +340,81 @@ function TestErrorSubclassing(error) {
assertEquals(10, o.lastIndex);
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
+
+ var o1 = new A(7);
+ assertTrue(%HaveSameMap(o, o1));
+
+ gc();
+})();
+
+
+(function TestArraySubclassing() {
+ class A extends Array {
+ constructor(...args) {
+ assertFalse(new.target === undefined);
+ super(...args);
+ this.a = 42;
+ this.d = 4.2;
+ this.o = {foo:153};
+ }
+ }
+
+ var o = new Array(13);
+ assertTrue(o instanceof Object);
+ assertTrue(o instanceof Array);
+ assertEquals("object", typeof o);
+ checkPrototypeChain(o, [Array, Object]);
+ assertEquals(13, o.length);
+
+ var o = new A(10);
+ assertTrue(o instanceof Object);
+ assertTrue(o instanceof Array);
+ assertTrue(o instanceof A);
+ assertEquals("object", typeof o);
+ checkPrototypeChain(o, [A, Array, Object]);
+ assertEquals(10, o.length);
+ assertEquals(42, o.a);
+ assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A(7);
assertTrue(%HaveSameMap(o, o1));
})();
-function TestArraySubclassing(array) {
+var TypedArray = Uint8Array.__proto__;
+
+function TestTypedArraySubclassing(array) {
class A extends array {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
var o = new array(13);
assertTrue(o instanceof Object);
+ assertTrue(o instanceof TypedArray);
assertTrue(o instanceof array);
assertEquals("object", typeof o);
- checkPrototypeChain(o, [array, Object]);
+ checkPrototypeChain(o, [array, TypedArray, Object]);
assertEquals(13, o.length);
var o = new A(10);
assertTrue(o instanceof Object);
+ assertTrue(o instanceof TypedArray);
assertTrue(o instanceof array);
assertTrue(o instanceof A);
assertEquals("object", typeof o);
- checkPrototypeChain(o, [A, array, Object]);
+ checkPrototypeChain(o, [A, array, TypedArray, Object]);
assertEquals(10, o.length);
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A(7);
assertTrue(%HaveSameMap(o, o1));
@@ -302,16 +422,15 @@ function TestArraySubclassing(array) {
(function() {
- TestArraySubclassing(Array);
- TestArraySubclassing(Int8Array);
- TestArraySubclassing(Uint8Array);
- TestArraySubclassing(Uint8ClampedArray);
- TestArraySubclassing(Int16Array);
- TestArraySubclassing(Uint16Array);
- TestArraySubclassing(Int32Array);
- TestArraySubclassing(Uint32Array);
- TestArraySubclassing(Float32Array);
- TestArraySubclassing(Float64Array);
+ TestTypedArraySubclassing(Int8Array);
+ TestTypedArraySubclassing(Uint8Array);
+ TestTypedArraySubclassing(Uint8ClampedArray);
+ TestTypedArraySubclassing(Int16Array);
+ TestTypedArraySubclassing(Uint16Array);
+ TestTypedArraySubclassing(Int32Array);
+ TestTypedArraySubclassing(Uint32Array);
+ TestTypedArraySubclassing(Float32Array);
+ TestTypedArraySubclassing(Float64Array);
})();
@@ -320,10 +439,11 @@ function TestMapSetSubclassing(container, is_map) {
class A extends container {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -358,9 +478,12 @@ function TestMapSetSubclassing(container, is_map) {
}
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A();
assertTrue(%HaveSameMap(o, o1));
+
+ gc();
}
@@ -375,10 +498,11 @@ function TestMapSetSubclassing(container, is_map) {
(function() {
class A extends ArrayBuffer {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -392,6 +516,7 @@ function TestMapSetSubclassing(container, is_map) {
assertEquals(16, o.byteLength);
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A("bar");
assertTrue(%HaveSameMap(o, o1));
@@ -423,16 +548,19 @@ function TestMapSetSubclassing(container, is_map) {
assertEquals(-1, int32view[1]);
assertEquals(0xfffffffe, uint32view[0]);
assertEquals(0xffffffff, uint32view[1]);
+
+ gc();
})();
(function() {
class A extends DataView {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
}
}
@@ -449,55 +577,145 @@ function TestMapSetSubclassing(container, is_map) {
assertEquals(0xbebafeca, o.getUint32(0, true));
assertEquals(42, o.a);
assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
var o1 = new A(buffer);
assertTrue(%HaveSameMap(o, o1));
+ gc();
})();
(function() {
- // TODO(ishell): remove once GeneratorFunction is available.
- var GeneratorFunction = (function*() {}).__proto__.constructor;
+ var GeneratorFunction = (function*() {}).constructor;
class A extends GeneratorFunction {
constructor(...args) {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(...args);
+ // Strong functions are not extensible, so don't add fields.
+ if (args[args.length - 1].indexOf("use strong") >= 0) {
+ assertThrows(()=>{ this.a = 10; }, TypeError);
+ return;
+ }
this.a = 42;
this.d = 4.2;
+ this.o = {foo:153};
+ }
+ }
+ var sloppy_func = new A("yield 153;");
+ var strict_func = new A("'use strict'; yield 153;");
+ // Unfortunately the difference is not observable from outside.
+ assertThrows("sloppy_func.caller");
+ assertThrows("strict_f.caller");
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(sloppy_func, "caller"));
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(strict_func, "caller"));
+
+ function CheckFunction(func, is_strong) {
+ assertEquals("function", typeof func);
+ assertTrue(func instanceof Object);
+ assertTrue(func instanceof Function);
+ assertTrue(func instanceof GeneratorFunction);
+ assertTrue(func instanceof A);
+ checkPrototypeChain(func, [A, GeneratorFunction, Function, Object]);
+ if (!is_strong) {
+ assertEquals(42, func.a);
+ assertEquals(4.2, func.d);
+ assertEquals(153, func.o.foo);
+
+ assertTrue(undefined !== func.prototype);
+ func.prototype.bar = "func.bar";
+ var obj = func(); // Generator object.
+ assertTrue(obj instanceof Object);
+ assertTrue(obj instanceof func);
+ assertEquals("object", typeof obj);
+ assertEquals("func.bar", obj.bar);
+ delete func.prototype.bar;
+
+ assertPropertiesEqual({done: false, value: 1}, obj.next());
+ assertPropertiesEqual({done: false, value: 1}, obj.next());
+ assertPropertiesEqual({done: false, value: 2}, obj.next());
+ assertPropertiesEqual({done: false, value: 3}, obj.next());
+ assertPropertiesEqual({done: false, value: 5}, obj.next());
+ assertPropertiesEqual({done: false, value: 8}, obj.next());
+ assertPropertiesEqual({done: true, value: undefined}, obj.next());
}
}
- var generator_func = new A("var index = 0; while (index < 5) { yield ++index; }");
- assertTrue(generator_func instanceof Object);
- assertTrue(generator_func instanceof Function);
- assertTrue(generator_func instanceof GeneratorFunction);
- assertTrue(generator_func instanceof A);
- assertEquals("function", typeof generator_func);
- checkPrototypeChain(generator_func, [A, GeneratorFunction, Function, Object]);
- assertEquals(42, generator_func.a);
- assertEquals(4.2, generator_func.d);
-
- var o = new generator_func();
+
+ var source = "yield 1; yield 1; yield 2; yield 3; yield 5; yield 8;";
+
+ // Sloppy generator function
+ var sloppy_func = new A(source);
+ assertTrue(undefined !== sloppy_func.prototype);
+ CheckFunction(sloppy_func, false);
+
+ var sloppy_func1 = new A("yield 312;");
+ assertTrue(%HaveSameMap(sloppy_func, sloppy_func1));
+
+ // Strict generator function
+ var strict_func = new A("'use strict'; " + source);
+ assertFalse(%HaveSameMap(strict_func, sloppy_func));
+ CheckFunction(strict_func, false);
+
+ var strict_func1 = new A("'use strict'; yield 312;");
+ assertTrue(%HaveSameMap(strict_func, strict_func1));
+
+ // Strong generator function
+ var strong_func = new A("'use strong'; " + source);
+ assertFalse(%HaveSameMap(strong_func, sloppy_func));
+ assertFalse(%HaveSameMap(strong_func, strict_func));
+ CheckFunction(strong_func, true);
+
+ var strong_func1 = new A("'use strong'; yield 312;");
+ assertTrue(%HaveSameMap(strong_func, strong_func1));
+
+ gc();
+})();
+
+
+(function() {
+ class A extends Promise {
+ constructor(...args) {
+ assertFalse(new.target === undefined);
+ super(...args);
+ this.a = 42;
+ this.d = 4.2;
+ this.o = {foo:153};
+ }
+ }
+
+ var o = new A(function(resolve, reject) {
+ resolve("ok");
+ });
assertTrue(o instanceof Object);
- assertTrue(o instanceof generator_func);
+ assertTrue(o instanceof Promise);
+ assertTrue(o instanceof A);
assertEquals("object", typeof o);
+ checkPrototypeChain(o, [A, Promise, Object]);
+ assertEquals(42, o.a);
+ assertEquals(4.2, o.d);
+ assertEquals(153, o.o.foo);
+ o.then(
+ function(val) { assertEquals("ok", val); },
+ function(reason) { assertUnreachable(); })
+ .catch(function(reason) { %AbortJS("catch handler called: " + reason); });
+
+ var o1 = new A(function(resolve, reject) {
+ reject("fail");
+ });
+ o1.then(
+ function(val) { assertUnreachable(); },
+ function(reason) { assertEquals("fail", reason); })
+ .catch(function(reason) { %AbortJS("catch handler called: " + reason); });
+ assertTrue(%HaveSameMap(o, o1));
- assertPropertiesEqual({done: false, value: 1}, o.next());
- assertPropertiesEqual({done: false, value: 2}, o.next());
- assertPropertiesEqual({done: false, value: 3}, o.next());
- assertPropertiesEqual({done: false, value: 4}, o.next());
- assertPropertiesEqual({done: false, value: 5}, o.next());
- assertPropertiesEqual({done: true, value: undefined}, o.next());
-
- var generator_func1 = new A("return 0;");
- assertTrue(%HaveSameMap(generator_func, generator_func1));
+ gc();
})();
(function() {
class A extends Boolean {
constructor() {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super(true);
this.a00 = 0
this.a01 = 0
@@ -524,7 +742,7 @@ function TestMapSetSubclassing(container, is_map) {
class B extends A {
constructor() {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super();
this.b00 = 0
this.b01 = 0
@@ -551,7 +769,7 @@ function TestMapSetSubclassing(container, is_map) {
class C extends B {
constructor() {
- assertTrue(%IsConstructCall());
+ assertFalse(new.target === undefined);
super();
this.c00 = 0
this.c01 = 0
@@ -584,6 +802,8 @@ function TestMapSetSubclassing(container, is_map) {
assertTrue(o instanceof C);
assertEquals("object", typeof o);
checkPrototypeChain(o, [C, B, A, Boolean, Object]);
+
+ gc();
})();
@@ -604,3 +824,133 @@ function TestMapSetSubclassing(container, is_map) {
class A extends Symbol {}
assertThrows("new A");
})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var o = Reflect.construct(
+ Number, [{valueOf() { f.prototype=p2; return 10; }}], f);
+
+ assertTrue(o.__proto__ === f.prototype);
+ assertTrue(p2 === f.prototype);
+ assertFalse(p === o.__proto__);
+ assertEquals(10, Number.prototype.valueOf.call(o));
+})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var o = Reflect.construct(
+ String, [{toString() { f.prototype=p2; return "biep"; }}], f);
+
+ assertTrue(o.__proto__ === f.prototype);
+ assertTrue(p2 === o.__proto__);
+ assertFalse(p === o.__proto__);
+ assertEquals("biep", String.prototype.toString.call(o));
+})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var o = Reflect.construct(
+ Date, [{valueOf() { f.prototype=p2; return 1447836899614; }}], f);
+
+ assertTrue(o.__proto__ === f.prototype);
+ assertTrue(p2 === f.prototype);
+ assertFalse(p === o.__proto__);
+ assertEquals(new Date(1447836899614).toString(),
+ Date.prototype.toString.call(o));
+})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var o = Reflect.construct(
+ Date, [2015, {valueOf() { f.prototype=p2; return 10; }}], f);
+
+ assertTrue(o.__proto__ === f.prototype);
+ assertTrue(p2 === f.prototype);
+ assertFalse(p === o.__proto__);
+ assertEquals(new Date(2015, 10).getYear(), Date.prototype.getYear.call(o));
+ assertEquals(new Date(2015, 10).getMonth(), Date.prototype.getMonth.call(o));
+})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var o = Reflect.construct(
+ DataView, [new ArrayBuffer(100),
+ {valueOf(){ f.prototype=p2; return 5; }}], f);
+
+ var byteOffset = Object.getOwnPropertyDescriptor(
+ DataView.prototype, "byteOffset").get;
+ var byteLength = Object.getOwnPropertyDescriptor(
+ DataView.prototype, "byteLength").get;
+
+ assertTrue(o.__proto__ === f.prototype);
+ assertTrue(p2 === f.prototype);
+ assertFalse(p === o.__proto__);
+ assertEquals(5, byteOffset.call(o));
+ assertEquals(95, byteLength.call(o));
+})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var o = Reflect.construct(
+ DataView, [new ArrayBuffer(100),
+ 30, {valueOf() { f.prototype=p2; return 5; }}], f);
+
+ var byteOffset = Object.getOwnPropertyDescriptor(
+ DataView.prototype, "byteOffset").get;
+ var byteLength = Object.getOwnPropertyDescriptor(
+ DataView.prototype, "byteLength").get;
+
+ assertTrue(o.__proto__ === f.prototype);
+ assertTrue(p2 === f.prototype);
+ assertFalse(p === o.__proto__);
+ assertEquals(30, byteOffset.call(o));
+ assertEquals(5, byteLength.call(o));
+})();
+
+
+(function() {
+ function f() {}
+
+ var p = f.prototype;
+ var p2 = {};
+ var p3 = {};
+
+ var log = [];
+
+ var pattern = {toString() {
+ log.push("tostring");
+ f.prototype = p3; return "biep" }};
+
+ Object.defineProperty(pattern, Symbol.match, {
+ get() { log.push("match"); f.prototype = p2; return false; }});
+
+ var o = Reflect.construct(RegExp, [pattern], f);
+ assertEquals(["match", "tostring"], log);
+ assertEquals(/biep/, o);
+ assertTrue(o.__proto__ === p2);
+ assertTrue(f.prototype === p3);
+})();
diff --git a/deps/v8/test/mjsunit/es6/classof-proxy.js b/deps/v8/test/mjsunit/es6/classof-proxy.js
new file mode 100644
index 0000000000..c3bc985bb9
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classof-proxy.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-proxies
+
+function test_function(o) {
+ if (%_ClassOf(o) === "Function") {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+var non_callable = new Proxy({}, {});
+var callable = new Proxy(function(){}.__proto__, {});
+var constructable = new Proxy(function(){}, {});
+
+assertFalse(test_function(non_callable));
+assertTrue(test_function(callable));
+assertTrue(test_function(constructable));
+
+%OptimizeFunctionOnNextCall(test_function);
+
+assertFalse(test_function(non_callable));
+assertTrue(test_function(callable));
+assertTrue(test_function(constructable));
diff --git a/deps/v8/test/mjsunit/es6/debug-blockscopes.js b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
index 3f890ebd54..d3c36207f1 100644
--- a/deps/v8/test/mjsunit/es6/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --debug-eval-readonly-locals
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
@@ -525,15 +526,12 @@ function shadowing_1() {
{
let i = 5;
debugger;
- assertEqualsUnlessOptimized(27, i, shadowing_1);
}
assertEquals(0, i);
- debugger;
- assertEqualsUnlessOptimized(27, i, shadowing_1);
}
listener_delegate = function (exec_state) {
- exec_state.frame(0).evaluate("i = 27");
+ assertEqualsUnlessOptimized(5, exec_state.frame(0).evaluate("i").value());
}
shadowing_1();
EndTest();
@@ -546,13 +544,12 @@ function shadowing_2() {
{
let j = 5;
debugger;
- assertEqualsUnlessOptimized(27, j, shadowing_2);
}
- assertEqualsUnlessOptimized(0, i, shadowing_2);
}
listener_delegate = function (exec_state) {
- exec_state.frame(0).evaluate("j = 27");
+ assertEqualsUnlessOptimized(0, exec_state.frame(0).evaluate("i").value());
+ assertEqualsUnlessOptimized(5, exec_state.frame(0).evaluate("j").value());
}
shadowing_2();
EndTest();
diff --git a/deps/v8/test/mjsunit/es6/debug-break-default-constructor.js b/deps/v8/test/mjsunit/es6/debug-break-default-constructor.js
index a06c3b52de..fc8bebd13d 100644
--- a/deps/v8/test/mjsunit/es6/debug-break-default-constructor.js
+++ b/deps/v8/test/mjsunit/es6/debug-break-default-constructor.js
@@ -14,7 +14,7 @@ var step_count = 0;
function listener(event, execState, eventData, data) {
if (event != Debug.DebugEvent.Break) return;
try {
- execState.prepareStep(Debug.StepAction.StepInto);
+ execState.prepareStep(Debug.StepAction.StepIn);
var s = execState.frame().sourceLineText();
step_count++;
assertTrue(s.indexOf('// ' + step_count + '.') >= 0);
diff --git a/deps/v8/test/mjsunit/es6/debug-evaluate-arrow-function-receiver.js b/deps/v8/test/mjsunit/es6/debug-evaluate-arrow-function-receiver.js
new file mode 100644
index 0000000000..ce7201df9c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-evaluate-arrow-function-receiver.js
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test that debug-evaluate can find the correct this value for an arrow
+// function, if "this" is referenced within the arrow function scope.
+
+Debug = debug.Debug
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ for (var i = 0; i < exec_state.frameCount() - 1; i++) {
+ var frame = exec_state.frame(i);
+ var this_value = frame.evaluate("this").value();
+ var expected = frame.sourceLineText().match(/\/\/ (.*$)/)[1];
+ print(expected, this_value, frame.sourceLineText());
+ assertEquals(String(expected), String(this_value));
+ }
+ break_count++;
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+// Context-allocated receiver.
+function f() {
+ debugger; // foo
+ return () => {
+ debugger; // foo
+ with ({}) {
+ return () => {
+ debugger; // foo
+ try {
+ throw new Error();
+ } catch (e) {
+ return () => {
+ (() => this); // bind this.
+ debugger; // foo
+ return () => {
+ debugger; // undefined
+ return g.call("goo"); // undefined
+ }
+ };
+ }
+ };
+ }
+ };
+}
+
+// Stack-allocated receiver.
+function g() {
+ debugger; // goo
+ return () => {
+ debugger; // undefined
+ with ({}) {
+ return () => {
+ debugger; // undefined
+ try {
+ throw new Error();
+ } catch (e) {
+ return () => {
+ debugger; // undefined
+ return f.call("foo"); // undefined
+ };
+ }
+ };
+ }
+ };
+}
+
+Debug.setListener(listener);
+
+var h = f.call("foo");
+for (var i = 0; i < 20; i++) h = h();
+var h = g.call("goo");
+for (var i = 0; i < 20; i++) h = h();
+
+function x() {
+ (() => this); // bind this.
+ function y() {
+ (() => {
+ (() => this); // bind this.
+ debugger; // Y
+ })(); // Y
+ }
+ y.call("Y"); // X
+}
+x.call("X");
+
+function u() {
+ (() => this);
+ function v() {
+ (() => {
+ debugger; // undefined
+ })(); // V
+ }
+ v.call("V"); // U
+}
+u.call("U");
+
+(() => {
+ (() => this);
+ debugger; // [object global]
+})();
+
+Debug.setListener(null);
+
+assertEquals(55, break_count);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js
new file mode 100644
index 0000000000..043c5f10f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-1.js
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-reflect --expose-debug-as debug --allow-natives-syntax
+
+// Test that live-editing a frame that uses new.target fails.
+
+Debug = debug.Debug
+var calls = 0;
+var exceptions = 0;
+var results = [];
+var replace_again;
+
+eval(`
+ function LogNewTarget() {
+ calls++;
+ ReplaceOnce();
+ results.push(true);
+ results.push(new.target);
+ }
+`);
+
+function Dummy() {}
+
+function Replace(fun, original, patch) {
+ %ExecuteInDebugContext(function() {
+ var change_log = [];
+ try {
+ var script = Debug.findScript(fun);
+ var patch_pos = script.source.indexOf(original);
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(
+ script, patch_pos, original.length, patch, change_log);
+ } catch (e) {
+ assertEquals("BLOCKED_NO_NEW_TARGET_ON_RESTART",
+ change_log[0].functions_on_stack[0].replace_problem);
+ assertInstanceof(e, Debug.LiveEdit.Failure);
+ exceptions++;
+ }
+ });
+}
+
+function ReplaceOnce() {
+ if (replace_again) {
+ replace_again = false;
+ Replace(LogNewTarget, "true", "false");
+ }
+}
+
+function Revert() {
+ Replace(LogNewTarget, "false", "true");
+}
+
+replace_again = true;
+ReplaceOnce();
+new LogNewTarget();
+Revert();
+assertEquals(1, calls);
+assertEquals(0, exceptions);
+assertEquals([false, LogNewTarget], results);
+
+replace_again = true;
+LogNewTarget();
+
+replace_again = true;
+new LogNewTarget();
+
+replace_again = true;
+Reflect.construct(LogNewTarget, [], Dummy);
+
+assertEquals(
+ [false, LogNewTarget, true, undefined, true, LogNewTarget, true, Dummy],
+ results);
+assertEquals(4, calls); // No restarts
+assertEquals(3, exceptions); // Replace failed.
diff --git a/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-2.js b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-2.js
new file mode 100644
index 0000000000..8c6dc7e7e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-2.js
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test that live-editing a frame to introduce new.target fails.
+
+Debug = debug.Debug
+var calls = 0;
+var exceptions = 0;
+var results = [];
+var replace_again;
+
+eval(`
+ function LogNewTarget() {
+ calls++;
+ ReplaceOnce();
+ results.push(true);
+ }
+`);
+
+function Replace(fun, original, patch) {
+ %ExecuteInDebugContext(function() {
+ var change_log = [];
+ try {
+ var script = Debug.findScript(fun);
+ var patch_pos = script.source.indexOf(original);
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(
+ script, patch_pos, original.length, patch, change_log);
+ } catch (e) {
+ assertEquals("BLOCKED_NO_NEW_TARGET_ON_RESTART",
+ change_log[0].functions_on_stack[0].replace_problem);
+ assertInstanceof(e, Debug.LiveEdit.Failure);
+ exceptions++;
+ }
+ });
+}
+
+function ReplaceOnce(x) {
+ if (replace_again) {
+ replace_again = false;
+ Replace(LogNewTarget, "true", "new.target");
+ }
+}
+
+function Revert() {
+ Replace(LogNewTarget, "new.target", "true");
+}
+
+replace_again = true;
+ReplaceOnce();
+new LogNewTarget();
+Revert();
+assertEquals(1, calls);
+assertEquals(0, exceptions);
+assertEquals([LogNewTarget], results);
+
+replace_again = true;
+new LogNewTarget();
+assertEquals(2, calls); // No restarts
+assertEquals(1, exceptions); // Replace failed.
+assertEquals([LogNewTarget, true], results);
diff --git a/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-3.js b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-3.js
new file mode 100644
index 0000000000..40facd3167
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-liveedit-new-target-3.js
@@ -0,0 +1,73 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test that live-editing a frame above one that uses new.target succeeds.
+
+Debug = debug.Debug
+var wrapper_calls = 0;
+var construct_calls = 0;
+var exceptions = 0;
+var results = [];
+var replace_again;
+
+eval(`
+ function LogNewTarget(arg) {
+ construct_calls++;
+ results.push(new.target);
+ }
+ function Wrapper() {
+ wrapper_calls++;
+ ReplaceOnce();
+ new LogNewTarget(true);
+ }
+`);
+
+function Replace(fun, original, patch) {
+ %ExecuteInDebugContext(function() {
+ var change_log = [];
+ try {
+ var script = Debug.findScript(fun);
+ var patch_pos = script.source.indexOf(original);
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(
+ script, patch_pos, original.length, patch, change_log);
+ } catch (e) {
+ exceptions++;
+ }
+ });
+}
+
+function ReplaceOnce(x) {
+ if (replace_again) {
+ replace_again = false;
+ Replace(Wrapper, "true", "false");
+ }
+}
+
+function Revert() {
+ Replace(Wrapper, "false", "true");
+}
+
+replace_again = true;
+ReplaceOnce();
+Wrapper();
+Revert();
+assertEquals(1, construct_calls);
+assertEquals(1, wrapper_calls);
+assertEquals(0, exceptions); // Replace succeeds
+assertEquals([LogNewTarget], results);
+
+Wrapper();
+assertEquals(2, construct_calls);
+assertEquals(2, wrapper_calls);
+assertEquals(0, exceptions); // Replace succeeds
+assertEquals([LogNewTarget, LogNewTarget], results);
+
+replace_again = true;
+Wrapper();
+assertEquals(3, construct_calls);
+assertEquals(4, wrapper_calls); // Restarts
+assertEquals(0, exceptions); // Replace succeeds
+assertEquals([LogNewTarget, LogNewTarget, LogNewTarget], results);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reentry.js b/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
index fbe54242dd..a97ce81012 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --promise-extra
// Test reentry of special try catch for Promises.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
index a0036cfd0f..ed4b2c435e 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we listen to uncaught exceptions and
// the Promise is rejected in a chained closure after it has been resolved.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
index fd4770ebee..e1a653889d 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we listen to all exceptions and
// there is a catch handler for the to-be-rejected Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
index 2ff13d5605..922449261b 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we only listen to uncaught exceptions, the Promise
// is rejected, and a catch handler is installed right before the rejection.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
index d3fd9f3ae7..afb46fea8f 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the to-be-rejected Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
index 0b0c0c8e38..63e3b8678d 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we listen to all exceptions and
// there is a catch handler for the to-be-rejected Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
index ac23b48b6f..b542bc69dd 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we only listen to uncaught exceptions and
// there is no catch handler for the to-be-rejected Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
index fa263458c4..8775df687d 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when a Promise is rejected, which is caught by a custom
// promise, which has a number for reject closure. We expect an Exception debug
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
index 6b7dc1a77c..b6c06df49e 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when a Promise is rejected, which is caught by a
// custom promise, which throws a new exception in its reject handler.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
index 4c57cf0237..d058d41b96 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when a Promise is rejected, which is caught by a custom
// promise, which has undefined for reject closure. We expect an Exception
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js b/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
index 4f3891b187..906969e105 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
@@ -12,7 +12,7 @@ function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
breaks.push(exec_state.frame(0).sourceLineText().trimLeft());
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
} catch (e) {
exception = e;
}
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js b/deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js
index 8548a2badd..8083c17103 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js
@@ -27,7 +27,7 @@ function listener(event, exec_state, event_data, data) {
"Expected: // Break " + break_count + ".");
++break_count;
if (break_count !== expected_breaks) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
}
} catch(e) {
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
index bd6d343f82..3b7c48c1cf 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we listen to all exceptions and
// there is a catch handler for the exception thrown in a Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
index ac79aba769..aa7e584320 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we only listen to uncaught exceptions, the Promise
// throws, and a catch handler is installed right before throwing.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
index 0ad9ce48a2..a424ccc9f7 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the exception thrown in a Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
index c4bc6c44e3..bfe0bedbac 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we listen to all exceptions and
// there is no catch handler for the exception thrown in a Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
index ba82a1f8cb..8dff592f33 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the exception thrown in a Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
index bd39a155cc..349d014701 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when an exception is thrown inside a Promise, which is
// caught by a custom promise, which throws a new exception in its reject
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
index c88feb9c39..69ee01ee41 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
// Test debug events when an exception is thrown inside a Promise, which is
// caught by a custom promise, which has no reject handler.
@@ -48,7 +48,7 @@ function listener(event, exec_state, event_data, data) {
} else if (expected_events == 0) {
// All of the frames on the stack are from native Javascript.
assertEquals(0, exec_state.frameCount());
- assertEquals("undefined is not a function",
+ assertEquals("(var).reject is not a function",
event_data.exception().message);
} else {
assertUnreachable();
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js b/deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js
index c368414ffc..6c887ab08d 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js
+++ b/deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js
@@ -14,7 +14,7 @@ var stepCount = 0;
function listener(event, execState, eventData, data) {
if (event == Debug.DebugEvent.Break) {
if (!done) {
- execState.prepareStep(Debug.StepAction.StepInto);
+ execState.prepareStep(Debug.StepAction.StepIn);
var s = execState.frame().sourceLineText();
assertTrue(s.indexOf('// ' + stepCount + '.') !== -1);
stepCount++;
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-constructor.js b/deps/v8/test/mjsunit/es6/debug-step-into-constructor.js
index 1e903256fd..96cdc93159 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-into-constructor.js
+++ b/deps/v8/test/mjsunit/es6/debug-step-into-constructor.js
@@ -12,7 +12,7 @@ var done, stepCount;
function listener(event, execState, eventData, data) {
if (event == Debug.DebugEvent.Break) {
if (!done) {
- execState.prepareStep(Debug.StepAction.StepInto);
+ execState.prepareStep(Debug.StepAction.StepIn);
var s = execState.frame().sourceLineText();
assertTrue(s.indexOf('// ' + stepCount + '.') !== -1);
stepCount++;
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js b/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js
new file mode 100644
index 0000000000..599fe05715
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-regexp-subclass
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ assertTrue(
+ event_data.sourceLineText().indexOf(`Break ${break_count++}.`) > 0);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function customSplit() {
+ return "x"; // Break 2.
+} // Break 3.
+var o = {};
+o[Symbol.split] = customSplit;
+
+Debug.setListener(listener);
+debugger; // Break 0.
+var result = "".split(o); // Break 1.
+Debug.setListener(null); // Break 4.
+
+assertEquals("x", result);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js b/deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js
index 08938f7751..5551843cb2 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js
@@ -6,113 +6,95 @@
Debug = debug.Debug
-var exception = false;
+var exception = null;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- if (breaks == 0) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
- breaks = 1;
- } else if (breaks <= 3) {
- breaks++;
- // Check whether we break at the expected line.
- print(event_data.sourceLineText());
- assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
- exec_state.prepareStep(Debug.StepAction.StepIn, 3);
- }
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ print(event_data.sourceLineText());
+ assertTrue(
+ event_data.sourceLineText().indexOf(`B${breaks++}`) > 0);
}
} catch (e) {
- exception = true;
+ print(e);
+ quit();
+ exception = e;
}
}
function cb_set(num) {
- print("element " + num); // Expected to step to this point.
- return true;
-}
+ print("element " + num); // B2 B5 B8
+ return true; // B3 B6 B9
+} // B4 B7 B10
function cb_map(key, val) {
- print("key " + key + ", value " + val); // Expected to step to this point.
- return true;
-}
+ print("key " + key + ", value " + val); // B2 B5 B8
+ return true; // B3 B6 B9
+} // B4 B7 B10
var s = new Set();
s.add(1);
s.add(2);
s.add(3);
-s.add(4);
var m = new Map();
m.set('foo', 1);
m.set('bar', 2);
m.set('baz', 3);
-m.set('bat', 4);
-
-Debug.setListener(listener);
var breaks = 0;
-debugger;
-s.forEach(cb_set);
-assertFalse(exception);
-assertEquals(4, breaks);
+Debug.setListener(listener);
+debugger; // B0
+s.forEach(cb_set); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
breaks = 0;
-debugger;
-m.forEach(cb_map);
-assertFalse(exception);
-assertEquals(4, breaks);
-
-Debug.setListener(null);
-
+Debug.setListener(listener);
+debugger; // B0
+m.forEach(cb_map); // B1
+Debug.setListener(null); // B11
+assertNull(exception);
+assertEquals(12, breaks);
// Test two levels of builtin callbacks:
// Array.forEach calls a callback function, which by itself uses
// Array.forEach with another callback function.
-function second_level_listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- if (breaks == 0) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 3);
- breaks = 1;
- } else if (breaks <= 16) {
- breaks++;
- // Check whether we break at the expected line.
- assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
- // Step two steps further every four breaks to skip the
- // forEach call in the first level of recurision.
- var step = (breaks % 4 == 1) ? 6 : 3;
- exec_state.prepareStep(Debug.StepAction.StepIn, step);
- }
- }
- } catch (e) {
- exception = true;
- }
-}
+function cb_set_2(num) {
+ print("element " + num); // B3 B6 B9 B15 B18 B21 B27 B30 B33
+ return true; // B4 B7 B10 B16 B19 B22 B28 B31 B34
+} // B5 B8 B11 B17 B20 B23 B29 B32 B35
+
+function cb_map_2(k, v) {
+ print(`key ${k}, value ${v}`); // B3 B6 B9 B15 B18 B21 B27 B30 B33
+ return true; // B4 B7 B10 B16 B19 B22 B28 B31 B34
+} // B5 B8 B11 B17 B20 B23 B29 B32 B35
function cb_set_foreach(num) {
- s.forEach(cb_set);
- print("back to the first level of recursion.");
-}
+ s.forEach(cb_set_2); // B2 B14 B26
+ print("back."); // B12 B24 B36
+} // B13 B25 B37
function cb_map_foreach(key, val) {
- m.forEach(cb_set);
- print("back to the first level of recursion.");
-}
-
-Debug.setListener(second_level_listener);
+ m.forEach(cb_map_2); // B2 B14 B26
+ print("back."); // B12 B24 B36
+} // B13 B25 B37
breaks = 0;
-debugger;
-s.forEach(cb_set_foreach);
-assertFalse(exception);
-assertEquals(17, breaks);
+Debug.setListener(listener);
+debugger; // B0
+s.forEach(cb_set_foreach); // B1
+Debug.setListener(null); // B38
+assertNull(exception);
+assertEquals(39, breaks);
breaks = 0;
-debugger;
-m.forEach(cb_map_foreach);
-assertFalse(exception);
-assertEquals(17, breaks);
-
-Debug.setListener(null);
+Debug.setListener(listener);
+debugger; // B0
+m.forEach(cb_map_foreach); // B1
+Debug.setListener(null); // B38
+assertNull(exception);
+assertEquals(39, breaks);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-generators.js b/deps/v8/test/mjsunit/es6/debug-stepin-generators.js
index f48c5ef75f..081dfb7063 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-generators.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-generators.js
@@ -15,7 +15,7 @@ function listener(event, exec_state, event_data, data) {
print(source);
if (/stop stepping/.test(source)) return;
if (/yield/.test(source)) yields++;
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
} catch (e) {
print(e, e.stack);
exception = e;
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
index 98510ff52b..6a7c5536dc 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
@@ -26,9 +26,9 @@ function listener(event, exec_state, event_data, data) {
"Unexpected pause at: " + source + "\n" +
"Expected: // Break " + break_count + ".");
if (source.indexOf("StepOver.") !== -1) {
- exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
} else {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
++break_count;
}
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js b/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js
new file mode 100644
index 0000000000..f500faeee2
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-string-template.js
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ var entry = "";
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ entry += exec_state.frame(i).sourceLineText().substr(-1);
+ entry += exec_state.frame(i).sourceColumn();
+ }
+ log.push(entry);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function u(x) {
+ return x.toUpperCase(); // d
+} // e
+
+var n = 3;
+
+var o = {
+ toString: function() {
+ return "D"; // f
+ } // g
+}
+
+
+
+Debug.setListener(listener);
+debugger; // a
+var s = `1 ${u("a")} 2 ${u("b")} 3 ${n} 4 ${o}`; // b
+Debug.setListener(null); // c
+
+assertNull(exception);
+
+assertEquals([
+ "a0",
+ "b0",
+ "d2b13",
+ "e0b13",
+ "b25",
+ "d2b25",
+ "e0b25",
+ "f4b44",
+ "g2b44",
+ "c0"
+], log);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
index 001f7053fd..932840a6f9 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
@@ -72,7 +72,7 @@ function listener(event, exec_state, event_data, data) {
var match = line.match(/\/\/ Break (\w)$/);
assertEquals(2, match.length);
log.push(match[1] + col);
- exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
break_count++;
} catch (e) {
exception = e;
@@ -86,6 +86,9 @@ Debug.setListener(null); // Break z
print("log:\n"+ JSON.stringify(log));
// The let declaration differs from var in that the loop variable
// is declared in every iteration.
+// TODO(verwaest): For-of has hacky position numbers for Symbol.iterator and
+// .next. Restore to proper positions once the CallPrinter can disambiguate
+// based on other values.
var expected = [
// Entry
"a2","b2",
@@ -99,12 +102,12 @@ var expected = [
"f12","f7","F4","f7","F4","f7","F4","f7",
// For-in-let: get enumerable, next, body, next, ...
"g16","g11","G4","g11","G4","g11","G4","g11",
- // For-of-var: next(), body, next(), body, ...
- "h16","H4","h16","H4","h16","H4","h16",
- // For-of: next(), body, next(), body, ...
- "i12","I4","i12","I4","i12","I4","i12",
- // For-of-let: next(), body, next(), ...
- "j16","J4","j16","J4","j16","J4","j16",
+ // For-of-var: [Symbol.iterator](), next(), body, next(), body, ...
+ "h16","h14","h15","H4","h15","H4","h15","H4","h15",
+ // For-of: [Symbol.iterator](), next(), body, next(), body, ...
+ "i12","i10","i11","I4","i11","I4","i11","I4","i11",
+ // For-of-let: [Symbol.iterator](), next(), body, next(), ...
+ "j16","j14","j15","J4","j15","J4","j15","J4","j15",
// For-var: var decl, condition, body, next, condition, body, ...
"k7","k20","K4","k26","k20","K4","k26","k20","K4","k26","k20",
// For: init, condition, body, next, condition, body, ...
diff --git a/deps/v8/test/mjsunit/es6/generators-parsing.js b/deps/v8/test/mjsunit/es6/generators-parsing.js
index e4408365d3..f3f8cad086 100644
--- a/deps/v8/test/mjsunit/es6/generators-parsing.js
+++ b/deps/v8/test/mjsunit/es6/generators-parsing.js
@@ -124,8 +124,8 @@ assertThrows("function* g() { yield 3 + yield 4; }", SyntaxError);
// Yield is still a future-reserved-word in strict mode
assertThrows("function f() { \"use strict\"; var yield = 13; }", SyntaxError);
-// The name of the NFE is let-bound in G, so is invalid.
-assertThrows("function* g() { yield (function yield() {}); }", SyntaxError);
+// The name of the NFE is bound in the generator expression, so is invalid.
+assertThrows("function f() { (function* yield() {}); }", SyntaxError);
// In generators, yield is invalid as a formal argument name.
assertThrows("function* g(yield) { yield (10); }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/es6/instanceof-proxies.js b/deps/v8/test/mjsunit/es6/instanceof-proxies.js
new file mode 100644
index 0000000000..cc720ad8fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/instanceof-proxies.js
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Flags: --harmony-proxies --allow-natives-syntax
+
+// Test instanceof with proxies.
+
+(function TestInstanceOfWithProxies() {
+ function foo(x) {
+ return x instanceof Array;
+ }
+ assertTrue(foo([]));
+ assertFalse(foo({}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo([]));
+ assertFalse(foo({}));
+
+ var handler = {
+ getPrototypeOf: function(target) { return Array.prototype; }
+ };
+ var p = new Proxy({}, handler);
+ assertTrue(foo(p));
+ var o = {};
+ o.__proto__ = p;
+ assertTrue(foo(o));
+
+ // Make sure we are also correct if the handler throws.
+ handler.getPrototypeOf = function(target) {
+ throw "uncooperative";
+ }
+ assertThrows("foo(o)");
+
+ // Including if the optimized function has a catch handler.
+ function foo_catch(x) {
+ try {
+ x instanceof Array;
+ } catch(e) {
+ assertEquals("uncooperative", e);
+ return true;
+ }
+ return false;
+ }
+ assertTrue(foo_catch(o));
+ %OptimizeFunctionOnNextCall(foo_catch);
+ assertTrue(foo_catch(o));
+ handler.getPrototypeOf = function(target) { return Array.prototype; }
+ assertFalse(foo_catch(o));
+})();
+
+
+(function testInstanceOfWithRecursiveProxy() {
+ // Make sure we gracefully deal with recursive proxies.
+ var proxy = new Proxy({},{});
+ proxy.__proto__ = proxy;
+ // instanceof will cause an inifinite prototype walk.
+ assertThrows(() => { proxy instanceof Object }, RangeError);
+
+ var proxy2 = new Proxy({}, {getPrototypeOf() { return proxy2 }});
+ assertThrows(() => { proxy instanceof Object }, RangeError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/iteration-semantics.js b/deps/v8/test/mjsunit/es6/iteration-semantics.js
index f29e6e011b..6466ac5e26 100644
--- a/deps/v8/test/mjsunit/es6/iteration-semantics.js
+++ b/deps/v8/test/mjsunit/es6/iteration-semantics.js
@@ -307,35 +307,36 @@ assertEquals(5,
try_control(integers_until(10),
function(x) { return (x == 5) ? x : "continue" }));
+// TODO(neis,cbruni): Enable once the corresponding traps work again.
// Proxy results, with getters.
-function transparent_proxy(x) {
- return Proxy.create({
- get: function(receiver, name) { return x[name]; }
- });
-}
-assertEquals([1, 2],
- fold(append, [],
- results([one_time_getter({ value: 1 }, 'done', false),
- one_time_getter({ done: false }, 'value', 2),
- { value: 37, done: true },
- never_getter(never_getter({}, 'done'), 'value')]
- .map(transparent_proxy))));
+// function transparent_proxy(x) {
+// return new Proxy({}, {
+// get: function(receiver, name) { return x[name]; }
+// });
+// }
+// assertEquals([1, 2],
+// fold(append, [],
+// results([one_time_getter({ value: 1 }, 'done', false),
+// one_time_getter({ done: false }, 'value', 2),
+// { value: 37, done: true },
+// never_getter(never_getter({}, 'done'), 'value')]
+// .map(transparent_proxy))));
// Proxy iterators.
-function poison_proxy_after(iterable, n) {
- var iterator = iterable[Symbol.iterator]();
- return wrap_iterator(Proxy.create({
- get: function(receiver, name) {
- if (name == 'next' && n-- < 0) throw "unreachable";
- return iterator[name];
- },
- // Needed for integers_until(10)'s this.n++.
- set: function(receiver, name, val) {
- return iterator[name] = val;
- }
- }));
-}
-assertEquals(45, fold(sum, 0, poison_proxy_after(integers_until(10), 10)));
+// function poison_proxy_after(iterable, n) {
+// var iterator = iterable[Symbol.iterator]();
+// return wrap_iterator(new Proxy({}, {
+// get: function(receiver, name) {
+// if (name == 'next' && n-- < 0) throw "unreachable";
+// return iterator[name];
+// },
+// // Needed for integers_until(10)'s this.n++.
+// set: function(receiver, name, val) {
+// return iterator[name] = val;
+// }
+// }));
+// }
+// assertEquals(45, fold(sum, 0, poison_proxy_after(integers_until(10), 10)));
function test_iterator_result_object_non_object(value, descr) {
diff --git a/deps/v8/test/mjsunit/es6/legacy-subclassing.js b/deps/v8/test/mjsunit/es6/legacy-subclassing.js
new file mode 100644
index 0000000000..dbf666d07c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/legacy-subclassing.js
@@ -0,0 +1,38 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noharmony-species
+
+// Before Symbol.species was added, ArrayBuffer subclasses constructed
+// ArrayBuffers, and Array subclasses constructed Arrays, but TypedArray and
+// Promise subclasses constructed an instance of the subclass.
+
+'use strict';
+
+assertEquals(undefined, Symbol.species);
+
+class MyArray extends Array { }
+let myArray = new MyArray();
+assertEquals(MyArray, myArray.constructor);
+assertEquals(Array, myArray.map(x => x + 1).constructor);
+assertEquals(Array, myArray.concat().constructor);
+
+class MyUint8Array extends Uint8Array { }
+Object.defineProperty(MyUint8Array.prototype, "BYTES_PER_ELEMENT", {value: 1});
+let myTypedArray = new MyUint8Array(3);
+assertEquals(MyUint8Array, myTypedArray.constructor);
+assertEquals(MyUint8Array, myTypedArray.map(x => x + 1).constructor);
+
+class MyArrayBuffer extends ArrayBuffer { }
+let myBuffer = new MyArrayBuffer(0);
+assertEquals(MyArrayBuffer, myBuffer.constructor);
+assertEquals(ArrayBuffer, myBuffer.slice().constructor);
+
+class MyPromise extends Promise { }
+let myPromise = new MyPromise(() => {});
+assertEquals(MyPromise, myPromise.constructor);
+assertEquals(MyPromise, myPromise.then().constructor);
+
+// However, subarray instantiates members of the parent class
+assertEquals(Uint8Array, myTypedArray.subarray(1).constructor);
diff --git a/deps/v8/test/mjsunit/es6/new-target.js b/deps/v8/test/mjsunit/es6/new-target.js
index 9ecff815fa..8a06ff6c89 100644
--- a/deps/v8/test/mjsunit/es6/new-target.js
+++ b/deps/v8/test/mjsunit/es6/new-target.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-reflect --harmony-destructuring --harmony-rest-parameters
+// Flags: --harmony-reflect --harmony-destructuring-bind
(function TestClass() {
diff --git a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
new file mode 100644
index 0000000000..b56a4b56dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Before Unicode RegExps are shipped, we shouldn't have the 'unicode'
+// property on RegExp.prototype, or read it from 'flags'.
+// mjsunit/es6/regexp-flags tests that the property is there when the
+// flag is on.
+
+// Flags: --harmony-regexp
+
+'use strict';
+
+assertFalse(RegExp.prototype.hasOwnProperty('unicode'));
+
+// If we were going to be really strict, we could have a test like this,
+// with the assertTrue replaced by assertFalse, since flags shouldn't
+// Get the 'unicode' property. However, it is probably OK to omit this
+// detailed fix.
+var x = /a/;
+var y = false;
+Object.defineProperty(x, 'unicode', { get() { y = true; } });
+assertEquals("", x.flags);
+assertTrue(y);
diff --git a/deps/v8/test/mjsunit/es6/object-tostring.js b/deps/v8/test/mjsunit/es6/object-tostring.js
index c73a7686cd..4d6090faf1 100644
--- a/deps/v8/test/mjsunit/es6/object-tostring.js
+++ b/deps/v8/test/mjsunit/es6/object-tostring.js
@@ -1,8 +1,8 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tostring
+// Flags: --harmony-tostring --harmony-proxies
var global = this;
@@ -41,7 +41,7 @@ function testToStringTag(className) {
Object.defineProperty(obj, Symbol.toStringTag, {
get: function() { throw className; }
});
- assertThrows(function() {
+ assertThrowsEquals(function() {
Object.prototype.toString.call(obj);
}, className);
@@ -137,3 +137,25 @@ function testObjectToStringOwnNonStringValue() {
assertEquals("[object Object]", ({}).toString.call(obj));
}
testObjectToStringOwnNonStringValue();
+
+
+// Proxies
+
+function assertTag(tag, obj) {
+ assertEquals("[object " + tag + "]", Object.prototype.toString.call(obj));
+}
+
+assertTag("Object", new Proxy({}, {}));
+assertTag("Array", new Proxy([], {}));
+assertTag("Function", new Proxy(() => 42, {}));
+assertTag("Foo", new Proxy(() => 42, {get() {return "Foo"}}));
+assertTag("Function", new Proxy(() => 42, {get() {return 666}}));
+
+revocable = Proxy.revocable([], {});
+revocable.revoke();
+assertThrows(() => Object.prototype.toString.call(revocable.proxy), TypeError);
+
+handler = {};
+revocable = Proxy.revocable([], handler);
+handler.get = () => revocable.revoke();
+assertThrows(() => Object.prototype.toString.call(revocable.proxy), TypeError);
diff --git a/deps/v8/test/mjsunit/es6/promise-internal-setter.js b/deps/v8/test/mjsunit/es6/promise-internal-setter.js
index 83e4738316..20d361f623 100644
--- a/deps/v8/test/mjsunit/es6/promise-internal-setter.js
+++ b/deps/v8/test/mjsunit/es6/promise-internal-setter.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --promise-extra
+
'use strict';
Object.defineProperties(Object.prototype, {
diff --git a/deps/v8/test/mjsunit/es6/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index 341242f8d9..e4c8b389e8 100644
--- a/deps/v8/test/mjsunit/es6/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-tostring
+// Flags: --allow-natives-syntax --harmony-tostring --promise-extra
// Make sure we don't rely on functions patchable by monkeys.
var call = Function.prototype.call.call.bind(Function.prototype.call)
@@ -47,6 +47,8 @@ function clear(o) {
clear(o.__proto__)
var properties = getOwnPropertyNames(o)
for (var i in properties) {
+ // Do not clobber Object.prototype.toString, which is used by tests.
+ if (properties[i] === "toString") continue;
clearProp(o, properties[i])
}
}
@@ -93,13 +95,30 @@ function assertAsync(b, s) {
--asyncAssertsExpected
}
+function assertLater(f, name) {
+ assertFalse(f()); // should not be true synchronously
+ ++asyncAssertsExpected;
+ var iterations = 0;
+ function runAssertion() {
+ if (f()) {
+ print(name, "succeeded");
+ --asyncAssertsExpected;
+ } else if (iterations++ < 10) {
+ %EnqueueMicrotask(runAssertion);
+ } else {
+ %AbortJS(name + " FAILED!");
+ }
+ }
+ %EnqueueMicrotask(runAssertion);
+}
+
function assertAsyncDone(iteration) {
var iteration = iteration || 0;
%EnqueueMicrotask(function() {
if (asyncAssertsExpected === 0)
assertAsync(true, "all")
else if (iteration > 10) // Shouldn't take more.
- assertAsync(false, "all")
+ assertAsync(false, "all... " + asyncAssertsExpected)
else
assertAsyncDone(iteration + 1)
});
@@ -176,8 +195,9 @@ function assertAsyncDone(iteration) {
var p1 = Promise.accept(5)
var p2 = Promise.accept(p1)
var p3 = Promise.accept(p2)
+ // Note: Chain now has then-style semantics, here and in future tests.
p3.chain(
- function(x) { assertAsync(x === p2, "resolved/chain") },
+ function(x) { assertAsync(x === 5, "resolved/chain") },
assertUnreachable
)
assertAsyncRan()
@@ -199,8 +219,8 @@ function assertAsyncDone(iteration) {
var p2 = Promise.accept(p1)
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "rejected/chain") },
- assertUnreachable
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "rejected/chain") }
)
assertAsyncRan()
})();
@@ -221,7 +241,7 @@ function assertAsyncDone(iteration) {
var p2 = Promise.accept(p1)
var p3 = Promise.accept(p2)
p3.chain(function(x) { return x }, assertUnreachable).chain(
- function(x) { assertAsync(x === p1, "resolved/chain/chain") },
+ function(x) { assertAsync(x === 5, "resolved/chain/chain") },
assertUnreachable
)
assertAsyncRan()
@@ -353,7 +373,7 @@ function assertAsyncDone(iteration) {
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "resolved/thenable/chain") },
+ function(x) { assertAsync(x === 5, "resolved/thenable/chain") },
assertUnreachable
)
assertAsyncRan()
@@ -375,8 +395,8 @@ function assertAsyncDone(iteration) {
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "rejected/thenable/chain") },
- assertUnreachable
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "rejected/thenable/chain") }
)
assertAsyncRan()
})();
@@ -398,7 +418,7 @@ function assertAsyncDone(iteration) {
var p2 = Promise.accept(p1)
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "chain/resolve") },
+ function(x) { assertAsync(x === 5, "chain/resolve") },
assertUnreachable
)
deferred.resolve(5)
@@ -408,8 +428,8 @@ function assertAsyncDone(iteration) {
(function() {
var deferred = Promise.defer()
var p1 = deferred.promise
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
p3.then(
function(x) { assertAsync(x === 5, "then/resolve") },
assertUnreachable
@@ -424,8 +444,8 @@ function assertAsyncDone(iteration) {
var p2 = Promise.accept(p1)
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "chain/reject") },
- assertUnreachable
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "chain/reject") }
)
deferred.reject(5)
assertAsyncRan()
@@ -474,7 +494,7 @@ function assertAsyncDone(iteration) {
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "chain/resolve/thenable") },
+ function(x) { assertAsync(x === 5, "chain/resolve/thenable") },
assertUnreachable
)
deferred.resolve(5)
@@ -500,8 +520,8 @@ function assertAsyncDone(iteration) {
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
var p3 = Promise.accept(p2)
p3.chain(
- function(x) { assertAsync(x === p2, "chain/reject/thenable") },
- assertUnreachable
+ assertUnreachable,
+ function(x) { assertAsync(x === 5, "chain/reject/thenable") }
)
deferred.reject(5)
assertAsyncRan()
@@ -526,7 +546,7 @@ function assertAsyncDone(iteration) {
var deferred = Promise.defer()
var p3 = deferred.promise
p3.chain(
- function(x) { assertAsync(x === p2, "chain/resolve2") },
+ function(x) { assertAsync(x === 5, "chain/resolve2") },
assertUnreachable
)
deferred.resolve(p2)
@@ -578,7 +598,7 @@ function assertAsyncDone(iteration) {
var deferred = Promise.defer()
var p3 = deferred.promise
p3.chain(
- function(x) { assertAsync(x === p2, "chain/resolve/thenable2") },
+ function(x) { assertAsync(x === 5, "chain/resolve/thenable2") },
assertUnreachable
)
deferred.resolve(p2)
@@ -623,8 +643,8 @@ function assertAsyncDone(iteration) {
var p = deferred.promise
deferred.resolve(p)
p.chain(
- function(x) { assertAsync(x === p, "cyclic/deferred/chain") },
- assertUnreachable
+ assertUnreachable,
+ function(r) { assertAsync(r instanceof TypeError, "cyclic/deferred/then") }
)
assertAsyncRan()
})();
@@ -741,7 +761,6 @@ function assertAsyncDone(iteration) {
assertAsyncRan()
})();
-
(function() {
'use strict';
var getCalls = 0;
@@ -1033,7 +1052,8 @@ function assertAsyncDone(iteration) {
log = ""
MyPromise.all([21, Promise.accept(22), 23, MyPromise.accept(24), 25, 26])
- assertTrue(log === "nx24nnx21nnnnx23nnnx25nnx26n", "subclass/all/self")
+ assertTrue(log === "nx24nnx21nnx[object Promise]nnx23nnnx25nnx26n",
+ "subclass/all/self")
})();
(function() {
@@ -1059,4 +1079,39 @@ function assertAsyncDone(iteration) {
"subclass/resolve/descendant with transplanted own constructor");
}());
+(function() {
+ var thenCalled = false;
+
+ var resolve;
+ var promise = new Promise(function(res) { resolve = res; });
+ resolve({ then() { thenCalled = true; throw new Error(); } });
+ assertLater(function() { return thenCalled; }, "resolve-with-thenable");
+});
+
+(function() {
+ var calledWith;
+
+ var resolve;
+ var p1 = (new Promise(function(res) { resolve = res; }));
+ var p2 = p1.then(function(v) {
+ return {
+ then(resolve, reject) { resolve({ then() { calledWith = v }}); }
+ };
+ });
+
+ resolve({ then(resolve) { resolve(2); } });
+ assertLater(function() { return calledWith === 2; },
+ "resolve-with-thenable2");
+})();
+
+(function() {
+ var p = Promise.resolve();
+ var callCount = 0;
+ defineProperty(p, "constructor", {
+ get: function() { ++callCount; return Promise; }
+ });
+ p.then();
+ assertEquals(1, callCount);
+})();
+
assertAsyncDone()
diff --git a/deps/v8/test/mjsunit/es6/regexp-constructor.js b/deps/v8/test/mjsunit/es6/regexp-constructor.js
new file mode 100644
index 0000000000..e3b7efa0e7
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regexp-constructor.js
@@ -0,0 +1,99 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-subclass
+
+"use strict";
+
+function should_not_be_called() {
+ throw new Error("should not be called");
+}
+
+(function() {
+ var r = new RegExp("biep");
+ assertTrue(r === RegExp(r));
+ assertFalse(r === new RegExp(r));
+ r[Symbol.match] = false;
+ Object.defineProperty(r, "source", {get: should_not_be_called});
+ Object.defineProperty(r, "flags", {get: should_not_be_called});
+ assertFalse(r === RegExp(r));
+})();
+
+(function() {
+ class A extends RegExp {
+ get source() { throw new Error("should not be called") }
+ get flags() { throw new Error("should not be called") }
+ }
+
+ var r = new A("biep");
+ var r2 = RegExp(r);
+
+ assertFalse(r === r2);
+ assertEquals(r, r2);
+ assertTrue(A.prototype === r.__proto__);
+ assertTrue(RegExp.prototype === r2.__proto__);
+
+ var r3 = RegExp(r);
+ assertFalse(r3 === r);
+ assertEquals(r3, r);
+
+ var r4 = new A(r2);
+ assertFalse(r4 === r2);
+ assertEquals(r4, r2);
+ assertTrue(A.prototype === r4.__proto__);
+
+ r[Symbol.match] = false;
+ var r5 = new A(r);
+ assertFalse(r5 === r);
+ assertEquals(r5, r);
+ assertTrue(A.prototype === r5.__proto__);
+})();
+
+(function() {
+ var log = [];
+ var match = {
+ get source() { log.push("source"); return "biep"; },
+ get flags() { log.push("flags"); return "i"; }
+ };
+ Object.defineProperty(match, Symbol.match,
+ {get() { log.push("match"); return true; }});
+ var r = RegExp(match);
+ assertEquals(["match", "source", "flags"], log);
+ assertFalse(r === match);
+ assertEquals(/biep/i, r);
+})();
+
+(function() {
+ var log = [];
+ var match = {
+ get source() { log.push("source"); return "biep"; },
+ get flags() { log.push("flags"); return "i"; }
+ };
+ Object.defineProperty(match, Symbol.match,
+ {get() { log.push("match"); return true; }});
+ match.constructor = RegExp;
+ var r = RegExp(match);
+ assertEquals(["match"], log);
+ assertTrue(r === match);
+})();
+
+(function() {
+ var r = RegExp("biep", "i");
+ r[Symbol.match] = false;
+ var r2 = RegExp(r, "g");
+ assertFalse(r === r2);
+ assertEquals(/biep/i, r);
+ assertEquals(/biep/g, r2);
+})();
+
+(function() {
+ class A extends RegExp {
+ get ["constructor"]() { log.push("constructor"); return RegExp; }
+ }
+ var r = new A("biep");
+ var log = [];
+ var r2 = RegExp(r);
+ assertEquals(["constructor"], log);
+ assertTrue(r === r2);
+})();
diff --git a/deps/v8/test/mjsunit/es6/regexp-flags.js b/deps/v8/test/mjsunit/es6/regexp-flags.js
index 98070fb735..2f1222197d 100644
--- a/deps/v8/test/mjsunit/es6/regexp-flags.js
+++ b/deps/v8/test/mjsunit/es6/regexp-flags.js
@@ -50,7 +50,11 @@ assertEquals(4, get_count);
function testName(name) {
- assertThrows(() => RegExp.prototype[name], TypeError);
+ if (name === "sticky") {
+ assertEquals(undefined, RegExp.prototype[name]);
+ } else {
+ assertThrows(() => RegExp.prototype[name], TypeError);
+ }
assertEquals(
"get " + name,
Object.getOwnPropertyDescriptor(RegExp.prototype, name).get.name);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4211.js b/deps/v8/test/mjsunit/es6/regress/regress-4211.js
index d83c1a71f9..e276bb4333 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4211.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4211.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-rest-parameters
assertThrows("()=>{}()", SyntaxError);
assertThrows("x=>{}()", SyntaxError);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-468661.js b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
index 543a87c1df..4a42350930 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-468661.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
@@ -44,7 +44,7 @@ function listener(event, exec_state, event_data, data) {
++break_count;
if (break_count !== expected_breaks) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
print("Next step prepared");
}
}
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-508074.js b/deps/v8/test/mjsunit/es6/regress/regress-508074.js
index 93f82cfd0c..f4d1a44255 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-508074.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-508074.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
// Flags: --allow-natives-syntax
var f = (a, b, ...c) => {
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-513474.js b/deps/v8/test/mjsunit/es6/regress/regress-513474.js
index ec4bc84a30..98a052c549 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-513474.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-513474.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
-
(function(...a) { function f() { eval() } })();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js b/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js
index 9b66a7e08b..3144b39830 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --promise-extra
var x = 0;
var y = 0;
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js b/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
index 9bb313ffbe..2b0b7eace7 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr493566.js
@@ -2,36 +2,43 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-proxies
+// Flags: --harmony-proxies --harmony-reflect
"use strict";
-
-
var global = this;
-
(function TestGlobalReceiver() {
class A {
- s() {
- super.bla = 10;
+ s(value) {
+ super.bla = value;
}
}
- new A().s.call(global);
+ var a = new A();
+ a.s(9);
+ assertEquals(undefined, global.bla);
+ assertEquals(9, a.bla);
+
+ a = new A();
+ a.s.call(global, 10);
assertEquals(10, global.bla);
+ assertEquals(undefined, a.bla);
})();
(function TestProxyProto() {
var calls = 0;
var handler = {
- getPropertyDescriptor: function(name) {
+ set(t, p, v, r) {
calls++;
+ return Reflect.set(t, p, v, r);
+ },
+ getPropertyDescriptor(target, name) {
+ calls += 10;
return undefined;
}
};
-
- var proto = {};
- var proxy = Proxy.create(handler, proto);
+ var target = {};
+ var proxy = new Proxy(target, handler);
var object = {
__proto__: proxy,
setX(v) {
@@ -43,15 +50,23 @@ var global = this;
};
object.setX(1);
+ assertEquals(1, object.x);
+ assertEquals(1, Object.getOwnPropertyDescriptor(object, 'x').value);
+ assertEquals(1, calls);
+
+ calls = 0;
+ object.setX.call(proxy, 2);
+ assertEquals(2, target.x);
assertEquals(1, Object.getOwnPropertyDescriptor(object, 'x').value);
assertEquals(1, calls);
var sym = Symbol();
+ calls = 0;
object.setSymbol.call(global, sym, 2);
assertEquals(2, Object.getOwnPropertyDescriptor(global, sym).value);
// We currently do not invoke proxy traps for symbols
assertEquals(1, calls);
-})();
+});
(function TestProxyReceiver() {
@@ -62,19 +77,28 @@ var global = this;
};
var calls = 0;
+ var target = {target:1};
var handler = {
- getPropertyDescriptor(name) {
- assertUnreachable();
- },
- set(receiver, name, value) {
+ getOwnPropertyDescriptor(t, name) {
calls++;
- assertEquals(proxy, receiver);
- assertEquals('y', name);
- assertEquals(3, value);
+ },
+ defineProperty(t, name, desc) {
+ calls += 10;
+ t[name] = desc.value;
+ return true;
+ },
+ set(target, name, value) {
+ assertUnreachable();
}
};
+ var proxy = new Proxy(target, handler);
- var proxy = Proxy.create(handler);
+ assertEquals(undefined, object.y);
+ object.setY(10);
+ assertEquals(10, object.y);
+
+ // Change the receiver to the proxy, but the set is called on the global.
object.setY.call(proxy, 3);
- assertEquals(1, calls);
+ assertEquals(3, target.y);
+ assertEquals(11, calls);
})();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js b/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
index 2c10e19315..8d843ee694 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr512574.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
function f({}) {
for (var v in []);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js b/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js
new file mode 100644
index 0000000000..59932f6b4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js
@@ -0,0 +1,13 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g() { return { val: new.target }; }
+function f() { return (new g()).val; }
+
+assertEquals(g, f());
+assertEquals(g, f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals(g, f());
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-new-target-context.js b/deps/v8/test/mjsunit/es6/regress/regress-new-target-context.js
new file mode 100644
index 0000000000..eadf6e3aec
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-new-target-context.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test access of the new.target value in functions that also allocate local
+// function contexts of varying sizes, making sure the value is not clobbered.
+
+function makeFun(n) {
+ var source = "(function f" + n + "() { ";
+ for (var i = 0; i < n; ++i) source += "var v" + i + "; ";
+ source += "(function() { 0 ";
+ for (var i = 0; i < n; ++i) source += "+ v" + i + " ";
+ source += "})(); return { value: new.target }; })";
+ return eval(source);
+}
+
+// Exercise fast case.
+var a = makeFun(4);
+assertEquals(a, new a().value);
+assertEquals(undefined, a().value);
+
+// Exercise slow case.
+var b = makeFun(128);
+assertEquals(b, new b().value);
+assertEquals(undefined, b().value);
diff --git a/deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js b/deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js
index 2042a27f6b..c9b81661dc 100644
--- a/deps/v8/test/mjsunit/harmony/rest-params-lazy-parsing.js
+++ b/deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
// Flags: --min-preparse-length=0
function variadic(co, ...values) {
diff --git a/deps/v8/test/mjsunit/harmony/rest-params.js b/deps/v8/test/mjsunit/es6/rest-params.js
index 6ceb87e331..9afe9b409e 100644
--- a/deps/v8/test/mjsunit/harmony/rest-params.js
+++ b/deps/v8/test/mjsunit/es6/rest-params.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
-
(function testRestIndex() {
assertEquals(5, (function(...args) { return args.length; })(1,2,3,4,5));
assertEquals(4, (function(a, ...args) { return args.length; })(1,2,3,4,5));
@@ -219,3 +217,26 @@ var O = {
function(){ eval("(class{foo(a, ...rest) {'use strict';}});") },
SyntaxError);
})();
+
+(function TestRestArrayPattern() {
+ function f(...[a, b, c]) { return a + b + c; }
+ assertEquals(6, f(1, 2, 3));
+ assertEquals("123", f(1, "2", 3));
+ assertEquals(NaN, f(1));
+
+ var f2 = (...[a, b, c]) => a + b + c;
+ assertEquals(6, f2(1, 2, 3));
+ assertEquals("123", f2(1, "2", 3));
+ assertEquals(NaN, f2(1));
+})();
+
+(function TestRestObjectPattern() {
+ function f(...{length, 0: firstName, 1: lastName}) {
+ return `Hello ${lastName}, ${firstName}! Called with ${length} args!`;
+ }
+ assertEquals("Hello Ross, Bob! Called with 4 args!", f("Bob", "Ross", 0, 0));
+
+ var f2 = (...{length, 0: firstName, 1: lastName}) =>
+ `Hello ${lastName}, ${firstName}! Called with ${length} args!`;
+ assertEquals("Hello Ross, Bob! Called with 4 args!", f2("Bob", "Ross", 0, 0));
+})();
diff --git a/deps/v8/test/mjsunit/es6/spread-call-new-class.js b/deps/v8/test/mjsunit/es6/spread-call-new-class.js
index fc4770de7c..1fdf25b616 100644
--- a/deps/v8/test/mjsunit/es6/spread-call-new-class.js
+++ b/deps/v8/test/mjsunit/es6/spread-call-new-class.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-rest-parameters
+// Flags: --harmony-sloppy
(function testConstructClassStrict() {
diff --git a/deps/v8/test/mjsunit/es6/spread-call-super-property.js b/deps/v8/test/mjsunit/es6/spread-call-super-property.js
index b7326294fe..b298a69aa1 100644
--- a/deps/v8/test/mjsunit/es6/spread-call-super-property.js
+++ b/deps/v8/test/mjsunit/es6/spread-call-super-property.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-rest-parameters
+// Flags: --harmony-sloppy
(function testCallSuperPropertyStrict() {
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/string-repeat.js b/deps/v8/test/mjsunit/es6/string-repeat.js
index 15caea14f3..d61aec066c 100644
--- a/deps/v8/test/mjsunit/es6/string-repeat.js
+++ b/deps/v8/test/mjsunit/es6/string-repeat.js
@@ -65,6 +65,12 @@ assertThrows('"a".repeat(Number.POSITIVE_INFINITY)', RangeError);
assertThrows('"a".repeat(Math.pow(2, 30))', RangeError);
assertThrows('"a".repeat(Math.pow(2, 40))', RangeError);
+// Handling empty strings
+assertThrows('"".repeat(-1)', RangeError);
+assertThrows('"".repeat(Number.POSITIVE_INFINITY)', RangeError);
+assertEquals("", "".repeat(Math.pow(2, 30)));
+assertEquals("", "".repeat(Math.pow(2, 40)));
+
var myobj = {
toString: function() {
return "abc";
diff --git a/deps/v8/test/mjsunit/es6/string-search.js b/deps/v8/test/mjsunit/es6/string-search.js
new file mode 100644
index 0000000000..dc029826ad
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/string-search.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-subclass
+
+var pattern = {};
+pattern[Symbol.search] = function(string) {
+ return string.length;
+};
+// Check object coercible fails.
+assertThrows(() => String.prototype.search.call(null, pattern),
+ TypeError);
+// Override is called.
+assertEquals(5, "abcde".search(pattern));
+// Non-callable override.
+pattern[Symbol.search] = "dumdidum";
+assertThrows(() => "abcde".search(pattern), TypeError);
+
+assertEquals("[Symbol.search]", RegExp.prototype[Symbol.search].name);
diff --git a/deps/v8/test/mjsunit/es6/super.js b/deps/v8/test/mjsunit/es6/super.js
index f93b259fd2..67cb45f590 100644
--- a/deps/v8/test/mjsunit/es6/super.js
+++ b/deps/v8/test/mjsunit/es6/super.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// Flags: --harmony-destructuring --harmony-rest-parameters --harmony-sloppy
+// Flags: --harmony-destructuring-bind --harmony-sloppy
(function TestSuperNamedLoads() {
function Base() { }
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index 58142cf27f..d502a83681 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -526,3 +526,39 @@ function TestComparison() {
}
}
TestComparison();
+
+
+// Make sure that throws occur in the context of the Symbol function.
+function TestContext() {
+ var r = Realm.create();
+ var rSymbol = Realm.eval(r, "Symbol");
+ var rError = Realm.eval(r, "TypeError");
+
+ function verifier(symbol, error) {
+ try {
+ new symbol();
+ } catch(e) {
+ return e.__proto__ === error.__proto__;
+ }
+ assertTrue(false); // should never get here.
+ }
+
+ assertTrue(verifier(Symbol, TypeError()));
+ assertTrue(verifier(rSymbol, rError()));
+ assertFalse(verifier(Symbol, rError()));
+ assertFalse(verifier(rSymbol, TypeError()));
+}
+TestContext();
+
+
+function TestStringify(expected, input) {
+ assertEquals(expected, JSON.stringify(input));
+ assertEquals(expected, JSON.stringify(input, null, 0));
+}
+
+TestStringify(undefined, Symbol("a"));
+TestStringify('[{}]', [Object(Symbol())]);
+var symbol_wrapper = Object(Symbol("a"))
+TestStringify('{}', symbol_wrapper);
+symbol_wrapper.a = 1;
+TestStringify('{"a":1}', symbol_wrapper);
diff --git a/deps/v8/test/mjsunit/es6/templates.js b/deps/v8/test/mjsunit/es6/templates.js
index 621b06074e..3c4584d337 100644
--- a/deps/v8/test/mjsunit/es6/templates.js
+++ b/deps/v8/test/mjsunit/es6/templates.js
@@ -697,3 +697,22 @@ var global = this;
assertArrayEquals(["get0"], log);
assertArrayEquals([1], tagged);
})();
+
+
+// Since the first argument to the tag function is always an array,
+// eval calls will always just return that array.
+(function testEvalTagStrict() {
+ "use strict";
+ var f = (x) => eval`a${x}b`;
+ var result = f();
+ assertEquals(["a", "b"], result);
+ assertSame(result, f());
+})();
+
+
+(function testEvalTagSloppy() {
+ var f = (x) => eval`a${x}b`;
+ var result = f();
+ assertEquals(["a", "b"], result);
+ assertSame(result, f());
+})();
diff --git a/deps/v8/test/mjsunit/es6/typed-array-iterator.js b/deps/v8/test/mjsunit/es6/typed-array-iterator.js
index 9903b0abae..0b27625c5c 100644
--- a/deps/v8/test/mjsunit/es6/typed-array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/typed-array-iterator.js
@@ -9,23 +9,22 @@ var constructors = [Uint8Array, Int8Array,
Float32Array, Float64Array,
Uint8ClampedArray];
-function TestTypedArrayPrototype(constructor) {
- assertTrue(constructor.prototype.hasOwnProperty('entries'));
- assertTrue(constructor.prototype.hasOwnProperty('values'));
- assertTrue(constructor.prototype.hasOwnProperty('keys'));
- assertTrue(constructor.prototype.hasOwnProperty(Symbol.iterator));
-
- assertFalse(constructor.prototype.propertyIsEnumerable('entries'));
- assertFalse(constructor.prototype.propertyIsEnumerable('values'));
- assertFalse(constructor.prototype.propertyIsEnumerable('keys'));
- assertFalse(constructor.prototype.propertyIsEnumerable(Symbol.iterator));
-
- assertEquals(Array.prototype.entries, constructor.prototype.entries);
- assertEquals(Array.prototype[Symbol.iterator], constructor.prototype.values);
- assertEquals(Array.prototype.keys, constructor.prototype.keys);
- assertEquals(Array.prototype[Symbol.iterator], constructor.prototype[Symbol.iterator]);
-}
-constructors.forEach(TestTypedArrayPrototype);
+var TypedArrayPrototype = Uint8Array.prototype.__proto__;
+
+assertTrue(TypedArrayPrototype.hasOwnProperty('entries'));
+assertTrue(TypedArrayPrototype.hasOwnProperty('values'));
+assertTrue(TypedArrayPrototype.hasOwnProperty('keys'));
+assertTrue(TypedArrayPrototype.hasOwnProperty(Symbol.iterator));
+
+assertFalse(TypedArrayPrototype.propertyIsEnumerable('entries'));
+assertFalse(TypedArrayPrototype.propertyIsEnumerable('values'));
+assertFalse(TypedArrayPrototype.propertyIsEnumerable('keys'));
+assertFalse(TypedArrayPrototype.propertyIsEnumerable(Symbol.iterator));
+
+assertEquals(Array.prototype.entries, TypedArrayPrototype.entries);
+assertEquals(Array.prototype[Symbol.iterator], TypedArrayPrototype.values);
+assertEquals(Array.prototype.keys, TypedArrayPrototype.keys);
+assertEquals(Array.prototype[Symbol.iterator], TypedArrayPrototype[Symbol.iterator]);
function TestTypedArrayValues(constructor) {
diff --git a/deps/v8/test/mjsunit/es6/typedarray-of.js b/deps/v8/test/mjsunit/es6/typedarray-of.js
index cf57615d12..a6df29a0dd 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-of.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-of.js
@@ -4,6 +4,8 @@
// Based on Mozilla Array.of() tests at http://dxr.mozilla.org/mozilla-central/source/js/src/jit-test/tests/collections
+'use strict';
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -51,28 +53,29 @@ function TestTypedArrayOf(constructor) {
assertEquals(aux.length, a.length);
assertArrayEquals(aux, a);
- // %TypedArray%.of can be transplanted to other constructors.
+ // %TypedArray%.of can be called on subclasses of TypedArrays
var hits = 0;
- function Bag(length) {
- assertEquals(arguments.length, 1);
- assertEquals(length, 2);
- this.length = length;
- hits++;
+ class Bag extends constructor {
+ constructor(length) {
+ super(length);
+ assertEquals(arguments.length, 1);
+ assertEquals(length, 2);
+ hits++;
+ }
}
- Bag.of = constructor.of;
hits = 0;
- a = Bag.of("zero", "one");
+ a = Bag.of(5, 6);
assertEquals(1, hits);
assertEquals(2, a.length);
- assertArrayEquals(["zero", "one"], a);
+ assertArrayEquals([5, 6], a);
assertEquals(Bag.prototype, a.__proto__);
hits = 0;
- actual = constructor.of.call(Bag, "zero", "one");
+ var actual = constructor.of.call(Bag, 5, 6);
assertEquals(1, hits);
assertEquals(2, a.length);
- assertArrayEquals(["zero", "one"], a);
+ assertArrayEquals([5, 6], a);
assertEquals(Bag.prototype, a.__proto__);
// %TypedArray%.of does not trigger prototype setters.
@@ -90,26 +93,27 @@ function TestTypedArrayOf(constructor) {
// invoked.
// Setter on the newly created object.
- function Pack() {
- Object.defineProperty(this, "length", {
- set: function (v) { status = "fail"; }
- });
+ class Pack extends constructor {
+ constructor(length) {
+ super(length);
+ Object.defineProperty(this, "length", {
+ set: function (v) { status = "fail"; }
+ });
+ }
}
- Pack.of = constructor.of;
- var pack = Pack.of("wolves", "cards", "cigarettes", "lies");
+ var pack = Pack.of(5, 6, 7, 8);
assertEquals("pass", status);
// when the setter is on the new object's prototype
- function Bevy() {}
+ class Bevy extends constructor {}
Object.defineProperty(Bevy.prototype, "length", {
set: function (v) { status = "fail"; }
});
- Bevy.of = constructor.of;
- var bevy = Bevy.of("quail");
+ var bevy = Bevy.of(3);
assertEquals("pass", status);
// Check superficial features of %TypedArray%.of.
- var desc = Object.getOwnPropertyDescriptor(constructor, "of");
+ var desc = Object.getOwnPropertyDescriptor(constructor.__proto__, "of");
assertEquals(desc.configurable, false);
assertEquals(desc.enumerable, false);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-proto.js b/deps/v8/test/mjsunit/es6/typedarray-proto.js
index 558cb0ad7a..346b2ea63d 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-proto.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-proto.js
@@ -4,12 +4,10 @@
// Test that the methods for different TypedArray types have the same
// identity.
-// TODO(dehrenberg): Test that the TypedArray proto hierarchy is set
-// up properly.
-// TODO(dehrenberg): subarray is currently left out because that still
-// uses per-type methods. When that's fixed, stop leaving it out.
-var typedArrayConstructors = [
+'use strict';
+
+let typedArrayConstructors = [
Uint8Array,
Int8Array,
Uint16Array,
@@ -20,23 +18,57 @@ var typedArrayConstructors = [
Float32Array,
Float64Array];
+let TypedArray = Uint8Array.__proto__;
+let TypedArrayPrototype = TypedArray.prototype;
+
+assertEquals(TypedArray.__proto__, Function.prototype);
+assertEquals(TypedArrayPrototype.__proto__, Object.prototype);
+
+// There are extra own class properties due to it simply being a function
+let classProperties = new Set([
+ "length", "name", "arguments", "caller", "prototype", "BYTES_PER_ELEMENT"
+]);
+let instanceProperties = new Set([
+ "BYTES_PER_ELEMENT", "constructor", "prototype",
+ // length is also an instance property as a temporary workaround to
+ // BUG(chromium:579905). TODO(littledan): remove the workaround
+ "length"
+]);
+
function functionProperties(object) {
return Object.getOwnPropertyNames(object).filter(function(name) {
return typeof Object.getOwnPropertyDescriptor(object, name).value
- == "function"
- && name != 'constructor' && name != 'subarray';
+ == "function" && name != 'constructor';
});
}
-var typedArrayMethods = functionProperties(Uint8Array.prototype);
-var typedArrayClassMethods = functionProperties(Uint8Array);
+let typedArrayMethods = functionProperties(Uint8Array.prototype);
+let typedArrayClassMethods = functionProperties(Uint8Array);
-for (var constructor of typedArrayConstructors) {
- for (var method of typedArrayMethods) {
- assertEquals(constructor.prototype[method],
- Uint8Array.prototype[method], method);
+for (let constructor of typedArrayConstructors) {
+ for (let property of Object.getOwnPropertyNames(constructor.prototype)) {
+ assertTrue(instanceProperties.has(property), property);
}
- for (var classMethod of typedArrayClassMethods) {
- assertEquals(constructor[method], Uint8Array[method], classMethod);
+ for (let property of Object.getOwnPropertyNames(constructor)) {
+ assertTrue(classProperties.has(property), property);
}
}
+
+// Abstract %TypedArray% class can't be constructed directly
+
+assertThrows(() => new TypedArray(), TypeError);
+
+// The "prototype" property is nonconfigurable, nonenumerable, nonwritable,
+// both for %TypedArray% and for all subclasses
+
+let desc = Object.getOwnPropertyDescriptor(TypedArray, "prototype");
+assertFalse(desc.writable);
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+
+for (let constructor of typedArrayConstructors) {
+ let desc = Object.getOwnPropertyDescriptor(constructor, "prototype");
+ assertFalse(desc.writable);
+ assertFalse(desc.configurable);
+ assertFalse(desc.enumerable);
+}
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index a45b6308f3..c43ba1c4bf 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -270,7 +270,7 @@ function TestTypedArray(constr, elementSize, typicalElement) {
assertEquals("[object " + constr.name + "]",
Object.prototype.toString.call(a));
var desc = Object.getOwnPropertyDescriptor(
- constr.prototype, Symbol.toStringTag);
+ constr.prototype.__proto__, Symbol.toStringTag);
assertTrue(desc.configurable);
assertFalse(desc.enumerable);
assertFalse(!!desc.writable);
@@ -418,17 +418,13 @@ var typedArrayConstructors = [
function TestPropertyTypeChecks(constructor) {
function CheckProperty(name) {
assertThrows(function() { 'use strict'; new constructor(10)[name] = 0; })
- var d = Object.getOwnPropertyDescriptor(constructor.prototype, name);
+ var d = Object.getOwnPropertyDescriptor(constructor.prototype.__proto__, name);
var o = {};
assertThrows(function() {d.get.call(o);}, TypeError);
for (var i = 0; i < typedArrayConstructors.length; i++) {
var ctor = typedArrayConstructors[i];
var a = new ctor(10);
- if (ctor === constructor) {
- d.get.call(a); // shouldn't throw
- } else {
- assertThrows(function() {d.get.call(a);}, TypeError);
- }
+ d.get.call(a); // shouldn't throw
}
}
diff --git a/deps/v8/test/mjsunit/harmony/array-includes-to-object-sloppy.js b/deps/v8/test/mjsunit/es7/array-includes-to-object-sloppy.js
index 0f5d731237..eb7ccf8723 100644
--- a/deps/v8/test/mjsunit/harmony/array-includes-to-object-sloppy.js
+++ b/deps/v8/test/mjsunit/es7/array-includes-to-object-sloppy.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-includes
-
// Ported from
// https://github.com/tc39/Array.prototype.includes/blob/master/test/number-this.js
// using https://www.npmjs.org/package/test262-to-mjsunit
diff --git a/deps/v8/test/mjsunit/harmony/array-includes-to-object-strict.js b/deps/v8/test/mjsunit/es7/array-includes-to-object-strict.js
index ee87136244..ffefa88f99 100644
--- a/deps/v8/test/mjsunit/harmony/array-includes-to-object-strict.js
+++ b/deps/v8/test/mjsunit/es7/array-includes-to-object-strict.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-includes
-
// Ported from
// https://github.com/tc39/Array.prototype.includes/blob/master/test/number-this.js
// using https://www.npmjs.org/package/test262-to-mjsunit
diff --git a/deps/v8/test/mjsunit/harmony/array-includes.js b/deps/v8/test/mjsunit/es7/array-includes.js
index 2cdd1123d7..303042a4c1 100644
--- a/deps/v8/test/mjsunit/harmony/array-includes.js
+++ b/deps/v8/test/mjsunit/es7/array-includes.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-includes
-
// Largely ported from
// https://github.com/tc39/Array.prototype.includes/tree/master/test
// using https://www.npmjs.org/package/test262-to-mjsunit with further edits
diff --git a/deps/v8/test/mjsunit/es7/object-observe.js b/deps/v8/test/mjsunit/es7/object-observe.js
index 5a252a3745..712f5a6415 100644
--- a/deps/v8/test/mjsunit/es7/object-observe.js
+++ b/deps/v8/test/mjsunit/es7/object-observe.js
@@ -350,10 +350,10 @@ Object.freeze(obj);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
+ { object: obj, type: 'preventExtensions' },
{ object: obj, type: 'reconfigure', name: 'a' },
{ object: obj, type: 'reconfigure', name: 'b' },
{ object: obj, type: 'reconfigure', name: 'c' },
- { object: obj, type: 'preventExtensions' },
]);
reset();
@@ -387,9 +387,9 @@ Object.seal(obj);
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
+ { object: obj, type: 'preventExtensions' },
{ object: obj, type: 'reconfigure', name: 'a' },
{ object: obj, type: 'reconfigure', name: 'b' },
- { object: obj, type: 'preventExtensions' },
]);
reset();
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-443982.js b/deps/v8/test/mjsunit/es7/regress/regress-443982.js
index 5a2e9cd6db..e04f14c0c6 100644
--- a/deps/v8/test/mjsunit/es7/regress/regress-443982.js
+++ b/deps/v8/test/mjsunit/es7/regress/regress-443982.js
@@ -1,6 +1,8 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-object-observe
var records;
function observer(r) {
diff --git a/deps/v8/test/mjsunit/harmony/typed-array-includes.js b/deps/v8/test/mjsunit/es7/typed-array-includes.js
index 017a4301ee..8556d97113 100644
--- a/deps/v8/test/mjsunit/harmony/typed-array-includes.js
+++ b/deps/v8/test/mjsunit/es7/typed-array-includes.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-array-includes
-
// Largely ported from
// https://github.com/tc39/Array.prototype.includes/tree/master/test/built-ins/TypedArray/prototype/includes
// using https://www.npmjs.org/package/test262-to-mjsunit with further edits
diff --git a/deps/v8/test/mjsunit/fast-prototype.js b/deps/v8/test/mjsunit/fast-prototype.js
index 2fb476ccb1..7432ecce9d 100644
--- a/deps/v8/test/mjsunit/fast-prototype.js
+++ b/deps/v8/test/mjsunit/fast-prototype.js
@@ -50,8 +50,9 @@ function DoProtoMagic(proto, set__proto__) {
(new Sub()).__proto__ = proto;
} else {
Sub.prototype = proto;
- // Need to instantiate Sub to mark .prototype as prototype.
- new Sub();
+ // Need to instantiate Sub to mark .prototype as prototype. Make sure the
+ // instantiated object is used so that the allocation is not optimized away.
+ %DebugPrint(new Sub());
}
}
diff --git a/deps/v8/test/mjsunit/for-in-opt.js b/deps/v8/test/mjsunit/for-in-opt.js
index a6ee0ec81e..e458e1d537 100644
--- a/deps/v8/test/mjsunit/for-in-opt.js
+++ b/deps/v8/test/mjsunit/for-in-opt.js
@@ -17,6 +17,7 @@ function f(o) {
assertEquals(["0"], f("a"));
assertEquals(["0"], f("a"));
+
%OptimizeFunctionOnNextCall(f);
assertEquals(["0","1","2"], f("bla"));
@@ -27,15 +28,15 @@ var deopt_has = false;
var deopt_enum = false;
var handler = {
- enumerate: function(target) {
+ enumerate(target) {
if (deopt_enum) {
%DeoptimizeFunction(f2);
deopt_enum = false;
}
- return keys;
+ return keys[Symbol.iterator]();
},
- getPropertyDescriptor: function(k) {
+ has(target, k) {
if (deopt_has) {
%DeoptimizeFunction(f2);
deopt_has = false;
@@ -46,7 +47,7 @@ var handler = {
};
-var proxy = Proxy.create(handler);
+var proxy = new Proxy({}, handler);
var o = {__proto__: proxy};
function f2(o) {
@@ -65,10 +66,12 @@ function check_f2() {
check_f2();
check_f2();
+
// Test lazy deopt after GetPropertyNamesFast
%OptimizeFunctionOnNextCall(f2);
deopt_enum = true;
check_f2();
+
// Test lazy deopt after FILTER_KEY
%OptimizeFunctionOnNextCall(f2);
deopt_has = true;
@@ -81,18 +84,19 @@ function f3(o) {
f3({__proto__:{x:1}});
f3({__proto__:{x:1}});
+
%OptimizeFunctionOnNextCall(f3);
f3(undefined);
f3(null);
// Reliable repro for an issue previously flushed out by GC stress.
var handler2 = {
- getPropertyDescriptor: function(k) {
+ getPropertyDescriptor(target, k) {
has_keys.push(k);
return {value: 10, configurable: true, writable: false, enumerable: true};
}
}
-var proxy2 = Proxy.create(handler2);
+var proxy2 = new Proxy({}, handler2);
var o2 = {__proto__: proxy2};
var p = {x: "x"}
@@ -104,14 +108,18 @@ function f4(o, p) {
}
return result;
}
+
function check_f4() {
assertEquals(keys, f4(o, p));
assertEquals(keys, has_keys);
has_keys.length = 0;
}
+
check_f4();
check_f4();
+
%OptimizeFunctionOnNextCall(f4);
+
p.y = "y"; // Change map, cause eager deopt.
check_f4();
@@ -128,18 +136,18 @@ function listener(event, exec_state, event_data, data) {
}
var handler3 = {
- enumerate: function(target) {
- return ["a", "b"];
+ enumerate(target) {
+ return ["a", "b"][Symbol.iterator]();
},
- getPropertyDescriptor: function(k) {
+ has(target, k) {
if (k == "a") count++;
if (x) %ScheduleBreak();
return {value: 10, configurable: true, writable: false, enumerable: true};
}
};
-var proxy3 = Proxy.create(handler3);
+var proxy3 = new Proxy({}, handler3);
var o3 = {__proto__: proxy3};
function f5() {
diff --git a/deps/v8/test/mjsunit/function-bind.js b/deps/v8/test/mjsunit/function-bind.js
index ca1ed7e489..826986943b 100644
--- a/deps/v8/test/mjsunit/function-bind.js
+++ b/deps/v8/test/mjsunit/function-bind.js
@@ -27,7 +27,8 @@
// Flags: --allow-natives-syntax
-// Tests the Function.prototype.bind (ES 15.3.4.5) method.
+// Tests the Function.prototype.bind method.
+
// Simple tests.
function foo(x, y, z) {
@@ -39,24 +40,29 @@ assertEquals(3, foo.length);
var f = foo.bind(foo);
assertEquals([foo, 3, 1], f(1, 2, 3));
assertEquals(3, f.length);
+assertEquals("function () { [native code] }", f.toString());
f = foo.bind(foo, 1);
assertEquals([foo, 3, 1], f(2, 3));
assertEquals(2, f.length);
+assertEquals("function () { [native code] }", f.toString());
f = foo.bind(foo, 1, 2);
assertEquals([foo, 3, 1], f(3));
assertEquals(1, f.length);
+assertEquals("function () { [native code] }", f.toString());
f = foo.bind(foo, 1, 2, 3);
assertEquals([foo, 3, 1], f());
assertEquals(0, f.length);
+assertEquals("function () { [native code] }", f.toString());
// Test that length works correctly even if more than the actual number
// of arguments are given when binding.
f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
assertEquals([foo, 9, 1], f());
assertEquals(0, f.length);
+assertEquals("function () { [native code] }", f.toString());
// Use a different bound object.
var obj = {x: 42, y: 43};
@@ -76,6 +82,7 @@ assertEquals(1, f.length);
f = f_bound_this.bind(obj, 2);
assertEquals(3, f());
assertEquals(0, f.length);
+assertEquals('[object Function]', Object.prototype.toString.call(f));
// Test chained binds.
diff --git a/deps/v8/test/mjsunit/get-caller-js-function-throws.js b/deps/v8/test/mjsunit/get-caller-js-function-throws.js
deleted file mode 100644
index 42b098aee9..0000000000
--- a/deps/v8/test/mjsunit/get-caller-js-function-throws.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --noalways-opt --nostress-opt
-
-// Ensure that "real" js functions that call GetCallerJSFunction get an
-// exception, since they are not stubs.
-(function() {
- var a = function() {
- return %_GetCallerJSFunction();
- }
- assertThrows(a);
-}());
diff --git a/deps/v8/test/mjsunit/get-caller-js-function.js b/deps/v8/test/mjsunit/get-caller-js-function.js
deleted file mode 100644
index 5c7af64814..0000000000
--- a/deps/v8/test/mjsunit/get-caller-js-function.js
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --turbo-filter=* --nostress-opt
-
-// Test that for fully optimized but non inlined code, GetCallerJSFunction walks
-// up a single stack frame to get the calling function. Full optimization elides
-// the check in the runtime version of the intrinsic that would throw since the
-// caller isn't a stub. It's a bit of a hack, but allows minimal testing of the
-// intrinsic without writing a full-blown cctest.
-(function() {
- var a = function() {
- return %_GetCallerJSFunction();
- };
- var b = function() {
- return a();
- };
- %OptimizeFunctionOnNextCall(a);
- assertEquals(b, b());
-}());
diff --git a/deps/v8/test/mjsunit/get-prototype-of.js b/deps/v8/test/mjsunit/get-prototype-of.js
index 47edcb0a77..0a43b4cc26 100644
--- a/deps/v8/test/mjsunit/get-prototype-of.js
+++ b/deps/v8/test/mjsunit/get-prototype-of.js
@@ -82,12 +82,12 @@ var functions = [
// DataView,
Date,
Error,
- Float32Array,
- Float64Array,
+ // Float32Array, prototype is %TypedArray%
+ // Float64Array,
Function,
- Int16Array,
- Int32Array,
- Int8Array,
+ // Int16Array,
+ // Int32Array,
+ // Int8Array,
Map,
Number,
Object,
@@ -96,10 +96,10 @@ var functions = [
Set,
String,
// Symbol, not constructible
- Uint16Array,
- Uint32Array,
- Uint8Array,
- Uint8ClampedArray,
+ // Uint16Array,
+ // Uint32Array,
+ // Uint8Array,
+ // Uint8ClampedArray,
WeakMap,
WeakSet,
];
diff --git a/deps/v8/test/mjsunit/global-const-var-conflicts.js b/deps/v8/test/mjsunit/global-const-var-conflicts.js
index 3b87e3d7be..960b3d3753 100644
--- a/deps/v8/test/mjsunit/global-const-var-conflicts.js
+++ b/deps/v8/test/mjsunit/global-const-var-conflicts.js
@@ -28,6 +28,8 @@
// Check that dynamically introducing conflicting consts/vars
// is silently ignored (and does not lead to exceptions).
+// Flags: --legacy-const
+
var caught = 0;
eval("const a");
diff --git a/deps/v8/test/mjsunit/harmony/array-concat.js b/deps/v8/test/mjsunit/harmony/array-concat.js
index 71b6790bc7..cabdf2df08 100644
--- a/deps/v8/test/mjsunit/harmony/array-concat.js
+++ b/deps/v8/test/mjsunit/harmony/array-concat.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-concat-spreadable
+// Flags: --harmony-concat-spreadable --harmony-proxies --harmony-reflect
(function testArrayConcatArity() {
"use strict";
@@ -705,4 +705,170 @@ function testConcatTypedArray(type, elems, modulo) {
var r4 = [0].concat(arr3, arr3);
assertEquals(1 + arr3.length * 2, r4.length);
assertEquals(expectedTrace, trace);
+
+ // Clean up.
+ delete Array.prototype[123];
+ delete Array.prototype["123"];
+ delete Array.prototype["moe"];
+})();
+
+
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Tests with proxies
+
+// Note: concat does not currently support species so there is no difference
+// between [].concat(foo) and Array.prototype.concat.apply(foo).
+
+
+var log = [];
+var logger = {};
+var handler = new Proxy({}, logger);
+
+logger.get = function(t, trap, r) {
+ return function(...args) {
+ log.push([trap, ...args]);
+ return Reflect[trap](...args);
+ }
+};
+
+
+(function testUnspreadableNonArrayLikeProxy() {
+ var target = {0: "a", 1: "b"};
+ var obj = new Proxy(target, handler);
+
+ log.length = 0;
+ assertEquals([obj], [].concat(obj));
+ assertEquals(1, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+
+ log.length = 0;
+ assertEquals([obj], Array.prototype.concat.apply(obj));
+ assertEquals(1, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+})();
+
+
+(function testSpreadableNonArrayLikeProxy() {
+ var target = {0: "a", 1: "b", [Symbol.isConcatSpreadable]: "truish"};
+ var obj = new Proxy(target, handler);
+
+ log.length = 0;
+ assertEquals([], [].concat(obj));
+ assertEquals(2, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "length", obj], log[1]);
+
+ log.length = 0;
+ assertEquals([], Array.prototype.concat.apply(obj));
+ assertEquals(2, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "length", obj], log[1]);
+
+ target.length = 3;
+
+ log.length = 0;
+ assertEquals(["a", "b", undefined], [].concat(obj));
+ assertEquals(7, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "length", obj], log[1]);
+ assertEquals(["has", target, "0"], log[2]);
+ assertEquals(["get", target, "0", obj], log[3]);
+ assertEquals(["has", target, "1"], log[4]);
+ assertEquals(["get", target, "1", obj], log[5]);
+ assertEquals(["has", target, "2"], log[6]);
+
+ log.length = 0;
+ assertEquals(["a", "b", undefined], Array.prototype.concat.apply(obj));
+ assertEquals(7, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "length", obj], log[1]);
+ assertEquals(["has", target, "0"], log[2]);
+ assertEquals(["get", target, "0", obj], log[3]);
+ assertEquals(["has", target, "1"], log[4]);
+ assertEquals(["get", target, "1", obj], log[5]);
+ assertEquals(["has", target, "2"], log[6]);
+})();
+
+
+(function testUnspreadableArrayLikeProxy() {
+ var target = ["a", "b"];
+ target[Symbol.isConcatSpreadable] = "";
+ var obj = new Proxy(target, handler);
+
+ log.length = 0;
+ assertEquals([obj], [].concat(obj));
+ assertEquals(1, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+
+ log.length = 0;
+ assertEquals([obj], Array.prototype.concat.apply(obj));
+ assertEquals(1, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+})();
+
+
+(function testSpreadableArrayLikeProxy() {
+ var target = ["a", "b"];
+ target[Symbol.isConcatSpreadable] = undefined;
+ var obj = new Proxy(target, handler);
+
+ log.length = 0;
+ assertEquals(["a", "b"], [].concat(obj));
+ assertEquals(6, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "length", obj], log[1]);
+ assertEquals(["has", target, "0"], log[2]);
+ assertEquals(["get", target, "0", obj], log[3]);
+ assertEquals(["has", target, "1"], log[4]);
+ assertEquals(["get", target, "1", obj], log[5]);
+
+ log.length = 0;
+ assertEquals(["a", "b"], Array.prototype.concat.apply(obj));
+ assertEquals(6, log.length);
+ for (var i in log) assertSame(target, log[i][1]);
+ assertEquals(["get", target, Symbol.isConcatSpreadable, obj], log[0]);
+ assertEquals(["get", target, "length", obj], log[1]);
+ assertEquals(["has", target, "0"], log[2]);
+ assertEquals(["get", target, "0", obj], log[3]);
+ assertEquals(["has", target, "1"], log[4]);
+ assertEquals(["get", target, "1", obj], log[5]);
+})();
+
+
+(function testSpreadableArrayLikeProxyWithNontrivialLength() {
+ var getTrap = function(t, key) {
+ if (key === "length") return {[Symbol.toPrimitive]() {return 3}};
+ if (key === "2") return "baz";
+ if (key === "3") return "bar";
+ };
+ var target = [];
+ var obj = new Proxy(target, {get: getTrap, has: () => true});
+
+ assertEquals([undefined, undefined, "baz"], [].concat(obj));
+ assertEquals([undefined, undefined, "baz"], Array.prototype.concat.apply(obj))
+})();
+
+
+(function testSpreadableArrayLikeProxyWithBogusLength() {
+ var getTrap = function(t, key) {
+ if (key === "length") return Symbol();
+ if (key === "2") return "baz";
+ if (key === "3") return "bar";
+ };
+ var target = [];
+ var obj = new Proxy(target, {get: getTrap, has: () => true});
+
+ assertThrows(() => [].concat(obj), TypeError);
+ assertThrows(() => Array.prototype.concat.apply(obj), TypeError);
})();
diff --git a/deps/v8/test/mjsunit/harmony/array-species.js b/deps/v8/test/mjsunit/harmony/array-species.js
new file mode 100644
index 0000000000..75a45aaf59
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species.js
@@ -0,0 +1,156 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species --harmony-proxies
+
+// Test the ES2015 @@species feature
+
+'use strict';
+
+// Subclasses of Array construct themselves under map, etc
+
+class MyArray extends Array { }
+
+assertEquals(MyArray, new MyArray().map(()=>{}).constructor);
+assertEquals(MyArray, new MyArray().filter(()=>{}).constructor);
+assertEquals(MyArray, new MyArray().slice().constructor);
+assertEquals(MyArray, new MyArray().splice().constructor);
+
+// Subclasses can override @@species to return the another class
+
+class MyOtherArray extends Array {
+ static get [Symbol.species]() { return MyArray; }
+}
+
+assertEquals(MyArray, new MyOtherArray().map(()=>{}).constructor);
+assertEquals(MyArray, new MyOtherArray().filter(()=>{}).constructor);
+assertEquals(MyArray, new MyOtherArray().slice().constructor);
+assertEquals(MyArray, new MyOtherArray().splice().constructor);
+
+// Array methods on non-arrays return arrays
+
+class MyNonArray extends Array {
+ static get [Symbol.species]() { return MyObject; }
+}
+
+class MyObject { }
+
+assertEquals(MyObject,
+ Array.prototype.map.call(new MyNonArray(), ()=>{}).constructor);
+assertEquals(MyObject,
+ Array.prototype.filter.call(new MyNonArray(), ()=>{}).constructor);
+assertEquals(MyObject,
+ Array.prototype.slice.call(new MyNonArray()).constructor);
+assertEquals(MyObject,
+ Array.prototype.splice.call(new MyNonArray()).constructor);
+
+assertEquals(undefined,
+ Array.prototype.map.call(new MyNonArray(), ()=>{}).length);
+assertEquals(undefined,
+ Array.prototype.filter.call(new MyNonArray(), ()=>{}).length);
+// slice and splice actually do explicitly define the length for some reason
+assertEquals(0, Array.prototype.slice.call(new MyNonArray()).length);
+assertEquals(0, Array.prototype.splice.call(new MyNonArray()).length);
+
+// Cross-realm Arrays build same-realm arrays
+
+var realm = Realm.create();
+assertEquals(Array,
+ Array.prototype.map.call(
+ Realm.eval(realm, "[]"), ()=>{}).constructor);
+assertFalse(Array === Realm.eval(realm, "[]").map(()=>{}).constructor);
+assertFalse(Array === Realm.eval(realm, "[].map(()=>{}).constructor"));
+
+// Defaults when constructor or @@species is missing or non-constructor
+
+class MyDefaultArray extends Array {
+ static get [Symbol.species]() { return undefined; }
+}
+assertEquals(Array, new MyDefaultArray().map(()=>{}).constructor);
+
+class MyOtherDefaultArray extends Array { }
+assertEquals(MyOtherDefaultArray,
+ new MyOtherDefaultArray().map(()=>{}).constructor);
+MyOtherDefaultArray.prototype.constructor = undefined;
+assertEquals(Array, new MyOtherDefaultArray().map(()=>{}).constructor);
+
+// Exceptions propagated when getting constructor @@species throws
+
+class SpeciesError extends Error { }
+class ConstructorError extends Error { }
+class MyThrowingArray extends Array {
+ static get [Symbol.species]() { throw new SpeciesError; }
+}
+assertThrows(() => new MyThrowingArray().map(()=>{}), SpeciesError);
+Object.defineProperty(MyThrowingArray.prototype, 'constructor', {
+ get() { throw new ConstructorError; }
+});
+assertThrows(() => new MyThrowingArray().map(()=>{}), ConstructorError);
+
+// Previously unexpected errors from setting properties in arrays throw
+
+class FrozenArray extends Array {
+ constructor(...args) {
+ super(...args);
+ Object.freeze(this);
+ }
+}
+assertThrows(() => new FrozenArray([1]).map(()=>0), TypeError);
+assertThrows(() => new FrozenArray([1]).filter(()=>true), TypeError);
+assertThrows(() => new FrozenArray([1]).slice(0, 1), TypeError);
+assertThrows(() => new FrozenArray([1]).splice(0, 1), TypeError);
+
+// Verify call counts and constructor parameters
+
+var count;
+var params;
+class MyObservedArray extends Array {
+ constructor(...args) {
+ super(...args);
+ params = args;
+ }
+ static get [Symbol.species]() {
+ count++
+ return this;
+ }
+}
+
+count = 0;
+params = undefined;
+assertEquals(MyObservedArray,
+ new MyObservedArray().map(()=>{}).constructor);
+assertEquals(1, count);
+assertArrayEquals([0], params);
+
+count = 0;
+params = undefined;
+assertEquals(MyObservedArray,
+ new MyObservedArray().filter(()=>{}).constructor);
+assertEquals(1, count);
+assertArrayEquals([0], params);
+
+count = 0;
+params = undefined;
+assertEquals(MyObservedArray,
+ new MyObservedArray().slice().constructor);
+// TODO(littledan): Should be 1
+assertEquals(2, count);
+assertArrayEquals([0], params);
+
+count = 0;
+params = undefined;
+assertEquals(MyObservedArray,
+ new MyObservedArray().splice().constructor);
+// TODO(littledan): Should be 1
+assertEquals(2, count);
+assertArrayEquals([0], params);
+
+// @@species constructor can be a Proxy, and the realm access doesn't
+// crash
+
+class MyProxyArray extends Array { }
+let ProxyArray = new Proxy(MyProxyArray, {});
+MyProxyArray.constructor = ProxyArray;
+
+assertEquals(MyProxyArray, new ProxyArray().map(()=>{}).constructor);
diff --git a/deps/v8/test/mjsunit/harmony/arraybuffer-species.js b/deps/v8/test/mjsunit/harmony/arraybuffer-species.js
new file mode 100644
index 0000000000..0445a4b648
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/arraybuffer-species.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// ArrayBuffer.prototype.slice makes subclass and checks length
+
+class MyArrayBuffer extends ArrayBuffer { }
+assertEquals(MyArrayBuffer, new MyArrayBuffer(0).slice().constructor);
+
+class MyShortArrayBuffer extends ArrayBuffer {
+ constructor(length) { super(length - 1); }
+}
+assertThrows(() => new MyShortArrayBuffer(5).slice(0, 4), TypeError);
+
+class SingletonArrayBuffer extends ArrayBuffer {
+ constructor(...args) {
+ if (SingletonArrayBuffer.cached) return SingletonArrayBuffer.cached;
+ super(...args);
+ SingletonArrayBuffer.cached = this;
+ }
+}
+assertThrows(() => new SingletonArrayBuffer(5).slice(0, 4), TypeError);
+
+class NonArrayBuffer extends ArrayBuffer {
+ constructor() {
+ return {};
+ }
+}
+assertThrows(() => new NonArrayBuffer(5).slice(0, 4), TypeError);
+
+// Species fallback is ArrayBuffer
+class UndefinedArrayBuffer extends ArrayBuffer { }
+UndefinedArrayBuffer.prototype.constructor = undefined;
+assertEquals(ArrayBuffer, new UndefinedArrayBuffer(0).slice().constructor);
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index dbd372fa6d..4b9c9f6c66 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -77,12 +77,13 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
(function TestBadIndex() {
var sab = new SharedArrayBuffer(8);
var si32a = new Int32Array(sab);
+ var si32a2 = new Int32Array(sab, 4);
// Non-integer indexes are converted to an integer first, so they should all
// operate on index 0.
[undefined, null, false, 'hi', {}].forEach(function(i) {
- var name = String(i);
+ var name = String(i);
testAtomicOp(Atomics.compareExchange, si32a, i, 0, name);
testAtomicOp(Atomics.load, si32a, i, 0, name);
testAtomicOp(Atomics.store, si32a, i, 0, name);
@@ -109,6 +110,20 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
assertEquals(undefined, Atomics.exchange(si32a, i, 0), name);
});
+ // Out-of-bounds indexes for offset-array
+ [-1, 1, 100].forEach(function(i) {
+ var name = String(i);
+ assertEquals(undefined, Atomics.compareExchange(si32a2, i, 0, 0), name);
+ assertEquals(undefined, Atomics.load(si32a2, i), name);
+ assertEquals(undefined, Atomics.store(si32a2, i, 0), name);
+ assertEquals(undefined, Atomics.add(si32a2, i, 0), name);
+ assertEquals(undefined, Atomics.sub(si32a2, i, 0), name);
+ assertEquals(undefined, Atomics.and(si32a2, i, 0), name);
+ assertEquals(undefined, Atomics.or(si32a2, i, 0), name);
+ assertEquals(undefined, Atomics.xor(si32a2, i, 0), name);
+ assertEquals(undefined, Atomics.exchange(si32a2, i, 0), name);
+ });
+
// Monkey-patch length and make sure these functions still return undefined.
Object.defineProperty(si32a, 'length', {get: function() { return 1000; }});
[2, 100].forEach(function(i) {
@@ -128,39 +143,53 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
(function TestGoodIndex() {
var sab = new SharedArrayBuffer(64);
var si32a = new Int32Array(sab);
+ var si32a2 = new Int32Array(sab, 32);
var valueOf = {valueOf: function(){ return 3;}};
var toString = {toString: function(){ return '3';}};
[3, 3.5, '3', '3.5', valueOf, toString].forEach(function(i) {
var name = String(i);
-
- testAtomicOp(Atomics.compareExchange, si32a, i, 3, name);
- testAtomicOp(Atomics.load, si32a, i, 3, name);
- testAtomicOp(Atomics.store, si32a, i, 3, name);
- testAtomicOp(Atomics.add, si32a, i, 3, name);
- testAtomicOp(Atomics.sub, si32a, i, 3, name);
- testAtomicOp(Atomics.and, si32a, i, 3, name);
- testAtomicOp(Atomics.or, si32a, i, 3, name);
- testAtomicOp(Atomics.xor, si32a, i, 3, name);
- testAtomicOp(Atomics.exchange, si32a, i, 3, name);
+ [si32a, si32a2].forEach(function(array) {
+ testAtomicOp(Atomics.compareExchange, array, i, 3, name);
+ testAtomicOp(Atomics.load, array, i, 3, name);
+ testAtomicOp(Atomics.store, array, i, 3, name);
+ testAtomicOp(Atomics.add, array, i, 3, name);
+ testAtomicOp(Atomics.sub, array, i, 3, name);
+ testAtomicOp(Atomics.and, array, i, 3, name);
+ testAtomicOp(Atomics.or, array, i, 3, name);
+ testAtomicOp(Atomics.xor, array, i, 3, name);
+ testAtomicOp(Atomics.exchange, array, i, 3, name);
+ })
});
})();
+function clearArray(sab) {
+ var ui8 = new Uint8Array(sab);
+ for (var i = 0; i < sab.byteLength; ++i) {
+ ui8[i] = 0;
+ }
+}
+
(function TestCompareExchange() {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- // sta[i] == 0, CAS will store
- assertEquals(0, Atomics.compareExchange(sta, i, 0, 50), name);
- assertEquals(50, sta[i], name);
-
- // sta[i] == 50, CAS will not store
- assertEquals(50, Atomics.compareExchange(sta, i, 0, 100), name);
- assertEquals(50, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ // array[i] == 0, CAS will store
+ assertEquals(0, Atomics.compareExchange(array, i, 0, 50), name);
+ assertEquals(50, array[i], name);
+
+ // array[i] == 50, CAS will not store
+ assertEquals(50, Atomics.compareExchange(array, i, 0, 100), name);
+ assertEquals(50, array[i], name);
+ }
+ })
});
})();
@@ -168,13 +197,18 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- sta[i] = 0;
- assertEquals(0, Atomics.load(sta, i), name);
- sta[i] = 50;
- assertEquals(50, Atomics.load(sta, i), name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = 0;
+ assertEquals(0, Atomics.load(array, i), name);
+ array[i] = 50;
+ assertEquals(50, Atomics.load(array, i), name);
+ }
+ })
});
})();
@@ -182,14 +216,19 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- assertEquals(50, Atomics.store(sta, i, 50), name);
- assertEquals(50, sta[i], name);
-
- assertEquals(100, Atomics.store(sta, i, 100), name);
- assertEquals(100, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ assertEquals(50, Atomics.store(array, i, 50), name);
+ assertEquals(50, array[i], name);
+
+ assertEquals(100, Atomics.store(array, i, 100), name);
+ assertEquals(100, array[i], name);
+ }
+ })
});
})();
@@ -197,14 +236,19 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- assertEquals(0, Atomics.add(sta, i, 50), name);
- assertEquals(50, sta[i], name);
-
- assertEquals(50, Atomics.add(sta, i, 70), name);
- assertEquals(120, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ assertEquals(0, Atomics.add(array, i, 50), name);
+ assertEquals(50, array[i], name);
+
+ assertEquals(50, Atomics.add(array, i, 70), name);
+ assertEquals(120, array[i], name);
+ }
+ })
});
})();
@@ -212,15 +256,20 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- sta[i] = 120;
- assertEquals(120, Atomics.sub(sta, i, 50), name);
- assertEquals(70, sta[i], name);
-
- assertEquals(70, Atomics.sub(sta, i, 70), name);
- assertEquals(0, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = 120;
+ assertEquals(120, Atomics.sub(array, i, 50), name);
+ assertEquals(70, array[i], name);
+
+ assertEquals(70, Atomics.sub(array, i, 70), name);
+ assertEquals(0, array[i], name);
+ }
+ })
});
})();
@@ -228,15 +277,20 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- sta[i] = 0x3f;
- assertEquals(0x3f, Atomics.and(sta, i, 0x30), name);
- assertEquals(0x30, sta[i], name);
-
- assertEquals(0x30, Atomics.and(sta, i, 0x20), name);
- assertEquals(0x20, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+    var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = 0x3f;
+ assertEquals(0x3f, Atomics.and(array, i, 0x30), name);
+ assertEquals(0x30, array[i], name);
+
+ assertEquals(0x30, Atomics.and(array, i, 0x20), name);
+ assertEquals(0x20, array[i], name);
+ }
+ })
});
})();
@@ -244,15 +298,20 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- sta[i] = 0x30;
- assertEquals(0x30, Atomics.or(sta, i, 0x1c), name);
- assertEquals(0x3c, sta[i], name);
-
- assertEquals(0x3c, Atomics.or(sta, i, 0x09), name);
- assertEquals(0x3d, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = 0x30;
+ assertEquals(0x30, Atomics.or(array, i, 0x1c), name);
+ assertEquals(0x3c, array[i], name);
+
+ assertEquals(0x3c, Atomics.or(array, i, 0x09), name);
+ assertEquals(0x3d, array[i], name);
+ }
+ })
});
})();
@@ -260,15 +319,20 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- sta[i] = 0x30;
- assertEquals(0x30, Atomics.xor(sta, i, 0x1c), name);
- assertEquals(0x2c, sta[i], name);
-
- assertEquals(0x2c, Atomics.xor(sta, i, 0x09), name);
- assertEquals(0x25, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = 0x30;
+ assertEquals(0x30, Atomics.xor(array, i, 0x1c), name);
+ assertEquals(0x2c, array[i], name);
+
+ assertEquals(0x2c, Atomics.xor(array, i, 0x09), name);
+ assertEquals(0x25, array[i], name);
+ }
+ })
});
})();
@@ -276,15 +340,20 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
- var name = Object.prototype.toString.call(sta);
- for (var i = 0; i < 10; ++i) {
- sta[i] = 0x30;
- assertEquals(0x30, Atomics.exchange(sta, i, 0x1c), name);
- assertEquals(0x1c, sta[i], name);
-
- assertEquals(0x1c, Atomics.exchange(sta, i, 0x09), name);
- assertEquals(0x09, sta[i], name);
- }
+ var sta2 = new t.constr(sab, 5 * t.constr.BYTES_PER_ELEMENT);
+
+ [sta, sta2].forEach(function(array) {
+ clearArray(array.buffer);
+ var name = Object.prototype.toString.call(array);
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = 0x30;
+ assertEquals(0x30, Atomics.exchange(array, i, 0x1c), name);
+ assertEquals(0x1c, array[i], name);
+
+ assertEquals(0x1c, Atomics.exchange(array, i, 0x09), name);
+ assertEquals(0x09, array[i], name);
+ }
+ })
});
})();
diff --git a/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js b/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
index 292d073c81..c95123167c 100644
--- a/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
+++ b/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
@@ -6,116 +6,68 @@
// Var-let conflict in a function throws, even if the var is in an eval
-let caught = false;
-
// Throws at the top level of a function
-try {
- (function() {
- let x = 1;
- eval('var x = 2');
- })()
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ let x = 1;
+ eval('var x');
+}, TypeError);
// If the eval is in its own block scope, throws
-caught = false;
-try {
- (function() {
- let y = 1;
- { eval('var y = 2'); }
- })()
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ let y = 1;
+ { eval('var y'); }
+}, TypeError);
// If the let is in its own block scope, with the eval, throws
-caught = false
-try {
- (function() {
- {
- let x = 1;
- eval('var x = 2');
- }
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ {
+ let x = 1;
+ eval('var x');
+ }
+}, TypeError);
// Legal if the let is no longer visible
-caught = false
-try {
- (function() {
- {
- let x = 1;
- }
- eval('var x = 2');
- })();
-} catch (e) {
- caught = true;
-}
-assertFalse(caught);
+assertDoesNotThrow(function() {
+ {
+ let x = 1;
+ }
+ eval('var x');
+});
// All the same works for const:
// Throws at the top level of a function
-try {
- (function() {
- const x = 1;
- eval('var x = 2');
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ const x = 1;
+ eval('var x');
+}, TypeError);
// If the eval is in its own block scope, throws
-caught = false;
-try {
- (function() {
- const y = 1;
- { eval('var y = 2'); }
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ const y = 1;
+ { eval('var y'); }
+}, TypeError);
// If the const is in its own block scope, with the eval, throws
-caught = false
-try {
- (function() {
- {
- const x = 1;
- eval('var x = 2');
- }
- })();
-} catch (e) {
- caught = true;
-}
-assertTrue(caught);
+assertThrows(function() {
+ {
+ const x = 1;
+ eval('var x');
+ }
+}, TypeError);
// Legal if the const is no longer visible
-caught = false
-try {
- (function() {
- {
- const x = 1;
- }
- eval('var x = 2');
- })();
-} catch (e) {
- caught = true;
-}
-assertFalse(caught);
+assertDoesNotThrow(function() {
+ {
+ const x = 1;
+ }
+ eval('var x');
+});
// In global scope
-caught = false;
+let caught = false;
try {
let z = 1;
- eval('var z = 2');
+ eval('var z');
} catch (e) {
caught = true;
}
@@ -138,7 +90,7 @@ caught = false;
try {
(function() {
with ({x: 1}) {
- eval("var x = 2;");
+ eval("var x");
}
})();
} catch (e) {
@@ -152,7 +104,7 @@ try {
(function() {
let x;
with ({x: 1}) {
- eval("var x = 2;");
+ eval("var x");
}
})();
} catch (e) {
diff --git a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
index ff895d5b8a..4fa79c2149 100644
--- a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
+++ b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
@@ -3,8 +3,7 @@
// found in the LICENSE file.
// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
-// Flags: --harmony-sloppy-function --harmony-destructuring
-// Flags: --harmony-rest-parameters
+// Flags: --harmony-sloppy-function --harmony-destructuring-bind
// Test Annex B 3.3 semantics for functions declared in blocks in sloppy mode.
// http://www.ecma-international.org/ecma-262/6.0/#sec-block-level-function-declarations-web-legacy-compatibility-semantics
diff --git a/deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js b/deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js
new file mode 100644
index 0000000000..bfc75bd23f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-step-destructuring-assignment.js
@@ -0,0 +1,86 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-destructuring-assignment
+// Flags: --harmony-destructuring-bind
+
+var exception = null;
+var Debug = debug.Debug;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var source = exec_state.frame(0).sourceLineText();
+ print(source);
+ assertTrue(source.indexOf(`// B${break_count++}`) > 0);
+ if (source.indexOf("assertEquals") > 0) {
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } else {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+ } catch (e) {
+ exception = e;
+ print(e);
+ }
+};
+
+Debug.setListener(listener);
+
+function f() {
+ var a, b, c, d;
+ debugger; // B0
+ [ // B1
+ a, // B3
+ b, // B4
+ c = 3 // B5
+ ] = [1, 2]; // B2
+ assertEquals({a:1,b:2,c:3}, {a, b, c}); // B6
+
+ [ // B7
+ a, // B9
+ [
+ b, // B10
+ c // B11
+ ],
+ d // B12
+ ] = [5, [6, 7], 8]; // B8
+ assertEquals({a:5,b:6,c:7,d:8}, {a, b, c, d}); // B13
+
+ [ // B14
+ a, // B16
+ b, // B17
+ ...c // B18
+ ] = [1, 2, 3, 4]; // B15
+ assertEquals({a:1,b:2,c:[3,4]}, {a, b, c}); // B19
+
+ ({ // B20
+ a, // B22
+ b, // B23
+ c = 7 // B24
+ } = {a: 5, b: 6}); // B21
+ assertEquals({a:5,b:6,c:7}, {a, b, c}); // B25
+
+ ({ // B26
+ a, // B28
+ b = return1(), // B29
+ c = return1() // B30
+ } = {a: 5, b: 6}); // B27
+ assertEquals({a:5,b:6,c:1}, {a, b, c}); // B33
+
+ ({ // B34
+ x : a, // B36
+ y : b, // B37
+ z : c = 3 // B38
+ } = {x: 1, y: 2}); // B35
+ assertEquals({a:1,b:2,c:3}, {a, b, c}); // B39
+} // B40
+
+function return1() {
+ return 1; // B31
+} // B32
+
+f();
+Debug.setListener(null); // B41
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js b/deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js
new file mode 100644
index 0000000000..a78431bb02
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-step-destructuring-bind.js
@@ -0,0 +1,110 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-destructuring-bind
+
+var exception = null;
+var Debug = debug.Debug;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var source = exec_state.frame(0).sourceLineText();
+ print(source, break_count);
+ assertTrue(source.indexOf(`B${break_count++}`) > 0);
+ if (source.indexOf("assertEquals") > 0) {
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } else {
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ }
+ } catch (e) {
+ exception = e;
+ print(e);
+ }
+};
+
+Debug.setListener(listener);
+
+var id = x => x; // B9 B10 B36 B37
+
+function test() {
+ debugger; // B0
+ function fx1([
+ a, // B2
+ b // B3
+ ]) {
+ assertEquals([1, 2], [a, b]); // B4
+ } // B5
+ fx1([1, 2, 3]); // B1
+
+ function f2([
+ a, // B7
+ b = id(3) // B8
+ ]) {
+ assertEquals([4, 3], [a, b]); // B11
+ } // B12
+ f2([4]); // B6
+
+ function f3({
+ x: a, // B14
+ y: b // B15
+ }) {
+ assertEquals([5, 6], [a, b]); // B16
+ } // B17
+ f3({y: 6, x: 5}); // B13
+
+ function f4([
+ a, // B19
+ {
+ b, // B20
+ c, // B21
+ }
+ ]) {
+ assertEquals([2, 4, 6], [a, b, c]); // B22
+ } // B23
+ f4([2, {c: 6, b: 4}]); // B18
+
+ function f5([
+ {
+ a, // B25
+ b = 7 // B26
+ },
+ c = 3 // B27
+ ] = [{a:1}]) {
+ assertEquals([1, 7, 3], [a, b, c]); // B28
+ } // B29
+ f5(); // B24
+
+ var name = "x"; // B30
+ function f6({
+ [id(name)]: a, // B34 B35
+ b = a // B38
+ }) {
+ assertEquals([9, 9], [a, b]); // B39
+ } // B40
+ var o6 = {}; // B31
+ o6[name] = 9; // B32
+ f6(o6); // B33
+
+ try {
+ throw [3, 4]; // B41
+ } catch ([
+ a, // B42
+ b, // B43
+ c = 6 // B44
+ ]) {
+ assertEquals([3, 4, 6], [a, b, c]); // B45
+ }
+
+ var { // B46
+ x: a, // B47
+ y: b = 9 // B48
+ } = { x: 4 };
+ assertEquals([4, 9], [a, b]); // B49
+} // B50
+
+test();
+Debug.setListener(null); // B51
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js b/deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js
new file mode 100644
index 0000000000..6ebf7ba726
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-stepin-default-parameters.js
@@ -0,0 +1,46 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-default-parameters
+
+Debug = debug.Debug
+
+var exception = null;
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ var entry = "";
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ entry += exec_state.frame(i).sourceLineText().substr(-1);
+ entry += exec_state.frame(i).sourceColumn();
+ }
+ log.push(entry);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function default_arg(x) {
+ return "default"; // d
+} // e
+
+function f(arg0 = default_arg()) { // f
+ return arg0; // g
+} // h
+
+
+Debug.setListener(listener);
+debugger; // a
+var result = f(); // b
+Debug.setListener(null); // c
+
+assertNull(exception);
+assertEquals("default", result);
+
+assertEquals(["a0","b0","f18b13","d2f18b13","e0f18b13","g2b13","h0b13","c0"],
+ log);
diff --git a/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js b/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js
new file mode 100644
index 0000000000..0689801a4f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js
@@ -0,0 +1,78 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-proxies
+
+Debug = debug.Debug
+
+var exception = null;
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ print(event_data.sourceLineText());
+ var entry = "";
+ for (var i = 0; i < exec_state.frameCount(); i++) {
+ entry += exec_state.frame(i).sourceLineText().substr(-1);
+ entry += exec_state.frame(i).sourceColumn();
+ }
+ log.push(entry);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
+ } catch (e) {
+ exception = e;
+ }
+};
+
+var target = {};
+var handler = {
+ has: function(target, name) {
+ return true; // h
+ }, // i
+ get: function(target, name) {
+ return 42; // j
+ }, // k
+ set: function(target, name, value) {
+ return false; // l
+ }, // m
+ enumerate: function(target) {
+ function* keys() { // n
+ yield "foo"; // o
+ yield "bar"; // p
+ } // q
+ return keys(); // r
+ }, // s
+}
+
+var proxy = new Proxy(target, handler);
+
+Debug.setListener(listener);
+debugger; // a
+var has = "step" in proxy; // b
+var get = proxy.step; // c
+proxy.step = 43; // d
+for (var i in proxy) { // e
+ log.push(i); // f
+}
+
+Debug.setListener(null); // g
+
+assertNull(exception);
+assertTrue(has);
+assertEquals(42, get);
+
+assertEquals([
+ "a0",
+ "b0", "h4b20", "i2b20", // [[Has]]
+ "c0", "j4c15", "k2c15", // [[Get]]
+ "d0", "l4d11", "m2d11", // [[Set]]
+ "e14", "r4e14", "q4r11e14", "s2e14", // for-in [[Enumerate]]
+ "o6e14", "q4e14", "p6e14", "q4e14", "q4e14", // exhaust iterator
+ "e9", // for-in-body
+ "h4e9","i2e9", // [[Has]] property
+ "f2","foo", "e9", // for-in-body
+ "h4e9","i2e9", // [[Has]]property
+ "f2","bar", "e9", // for-in-body
+ "g0"
+], log);
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js b/deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js
index 526ab44c9c..47cca5c95b 100644
--- a/deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js
+++ b/deps/v8/test/mjsunit/harmony/default-parameters-destructuring.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-default-parameters --harmony-destructuring
+// Flags: --harmony-default-parameters --harmony-destructuring-bind
(function TestSloppyEvalScoping() {
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters.js b/deps/v8/test/mjsunit/harmony/default-parameters.js
index 106cf8cde5..8d1eb8b096 100644
--- a/deps/v8/test/mjsunit/harmony/default-parameters.js
+++ b/deps/v8/test/mjsunit/harmony/default-parameters.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-default-parameters --harmony-rest-parameters
+// Flags: --harmony-default-parameters
(function TestDefaults() {
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js b/deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js
new file mode 100644
index 0000000000..8915eb97a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/destructuring-assignment-lazy.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-destructuring-assignment --harmony-destructuring-bind
+// Flags: --min-preparse-length=0
+
+function f() {
+ var a, b;
+ [ a, b ] = [1, 2];
+ assertEquals(1, a);
+ assertEquals(2, b);
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-assignment.js b/deps/v8/test/mjsunit/harmony/destructuring-assignment.js
new file mode 100644
index 0000000000..bc8c424d8b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/destructuring-assignment.js
@@ -0,0 +1,482 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-destructuring-assignment --harmony-destructuring-bind
+
+// script-level tests
+var ox, oy = {}, oz;
+({
+ x: ox,
+ y: oy.value,
+ y2: oy["value2"],
+ z: ({ set v(val) { oz = val; } }).v
+} = {
+ x: "value of x",
+ y: "value of y1",
+ y2: "value of y2",
+ z: "value of z"
+});
+assertEquals("value of x", ox);
+assertEquals("value of y1", oy.value);
+assertEquals("value of y2", oy.value2);
+assertEquals("value of z", oz);
+
+[ox, oy.value, oy["value2"], ...{ set v(val) { oz = val; } }.v] = [
+ 1007,
+ 798432,
+ 555,
+ 1, 2, 3, 4, 5
+];
+assertEquals(ox, 1007);
+assertEquals(oy.value, 798432);
+assertEquals(oy.value2, 555);
+assertEquals(oz, [1, 2, 3, 4, 5]);
+
+
+(function testInFunction() {
+ var x, y = {}, z;
+ ({
+ x: x,
+ y: y.value,
+ y2: y["value2"],
+ z: ({ set v(val) { z = val; } }).v
+ } = {
+ x: "value of x",
+ y: "value of y1",
+ y2: "value of y2",
+ z: "value of z"
+ });
+ assertEquals("value of x", x);
+ assertEquals("value of y1", y.value);
+ assertEquals("value of y2", y.value2);
+ assertEquals("value of z", z);
+
+ [x, y.value, y["value2"], ...{ set v(val) { z = val; } }.v] = [
+ 1007,
+ 798432,
+ 555,
+ 1, 2, 3, 4, 5
+ ];
+ assertEquals(x, 1007);
+ assertEquals(y.value, 798432);
+ assertEquals(y.value2, 555);
+ assertEquals(z, [1, 2, 3, 4, 5]);
+})();
+
+
+(function testArrowFunctionInitializers() {
+ var fn = (config = {
+ value: defaults.value,
+ nada: { nada: defaults.nada } = { nada: "nothing" }
+ } = { value: "BLAH" }) => config;
+ var defaults = {};
+ assertEquals({ value: "BLAH" }, fn());
+ assertEquals("BLAH", defaults.value);
+ assertEquals("nothing", defaults.nada);
+})();
+
+
+(function testArrowFunctionInitializers2() {
+ var fn = (config = [
+ defaults.value,
+ { nada: defaults.nada } = { nada: "nothing" }
+ ] = ["BLAH"]) => config;
+ var defaults = {};
+ assertEquals(["BLAH"], fn());
+ assertEquals("BLAH", defaults.value);
+ assertEquals("nothing", defaults.nada);
+})();
+
+
+(function testFunctionInitializers() {
+ function fn(config = {
+ value: defaults.value,
+ nada: { nada: defaults.nada } = { nada: "nothing" }
+ } = { value: "BLAH" }) {
+ return config;
+ }
+ var defaults = {};
+ assertEquals({ value: "BLAH" }, fn());
+ assertEquals("BLAH", defaults.value);
+ assertEquals("nothing", defaults.nada);
+})();
+
+
+(function testFunctionInitializers2() {
+ function fn(config = [
+ defaults.value,
+ { nada: defaults.nada } = { nada: "nothing" }
+ ] = ["BLAH"]) { return config; }
+ var defaults = {};
+ assertEquals(["BLAH"], fn());
+ assertEquals("BLAH", defaults.value);
+ assertEquals("nothing", defaults.nada);
+})();
+
+
+(function testDeclarationInitializers() {
+ var defaults = {};
+ var { value } = { value: defaults.value } = { value: "BLAH" };
+ assertEquals("BLAH", value);
+ assertEquals("BLAH", defaults.value);
+})();
+
+
+(function testDeclarationInitializers2() {
+ var defaults = {};
+ var [value] = [defaults.value] = ["BLAH"];
+ assertEquals("BLAH", value);
+ assertEquals("BLAH", defaults.value);
+})();
+
+
+(function testObjectLiteralProperty() {
+ var ext = {};
+ var obj = {
+ a: { b: ext.b, c: ext["c"], d: { set v(val) { ext.d = val; } }.v } = {
+ b: "b", c: "c", d: "d" }
+ };
+ assertEquals({ b: "b", c: "c", d: "d" }, ext);
+ assertEquals({ a: { b: "b", c: "c", d: "d" } }, obj);
+})();
+
+
+(function testArrayLiteralProperty() {
+ var ext = {};
+ var obj = [
+ ...[ ext.b, ext["c"], { set v(val) { ext.d = val; } }.v ] = [
+ "b", "c", "d" ]
+ ];
+ assertEquals({ b: "b", c: "c", d: "d" }, ext);
+ assertEquals([ "b", "c", "d" ], obj);
+})();
+
+
+// TODO(caitp): add similar test for ArrayPatterns, once Proxies support
+// delegating symbol-keyed get/set.
+(function testObjectPatternOperationOrder() {
+ var steps = [];
+ var store = {};
+ function computePropertyName(name) {
+ steps.push("compute name: " + name);
+ return name;
+ }
+ function loadValue(descr, value) {
+ steps.push("load: " + descr + " > " + value);
+ return value;
+ }
+ function storeValue(descr, name, value) {
+ steps.push("store: " + descr + " = " + value);
+ store[name] = value;
+ }
+ var result = {
+ get a() { assertUnreachable(); },
+ set a(value) { storeValue("result.a", "a", value); },
+ get b() { assertUnreachable(); },
+ set b(value) { storeValue("result.b", "b", value); }
+ };
+
+ ({
+ obj: {
+ x: result.a = 10,
+ [computePropertyName("y")]: result.b = false,
+ } = {}
+ } = { obj: {
+ get x() { return loadValue(".temp.obj.x", undefined); },
+ set x(value) { assertUnreachable(); },
+ get y() { return loadValue(".temp.obj.y", undefined); },
+ set y(value) { assertUnreachable(); }
+ }});
+
+ assertPropertiesEqual({
+ a: 10,
+ b: false
+ }, store);
+
+ assertArrayEquals([
+ "load: .temp.obj.x > undefined",
+ "store: result.a = 10",
+
+ "compute name: y",
+ "load: .temp.obj.y > undefined",
+ "store: result.b = false"
+ ], steps);
+
+ steps = [];
+
+ ({
+ obj: {
+ x: result.a = 50,
+ [computePropertyName("y")]: result.b = "hello",
+ } = {}
+ } = { obj: {
+ get x() { return loadValue(".temp.obj.x", 20); },
+ set x(value) { assertUnreachable(); },
+ get y() { return loadValue(".temp.obj.y", true); },
+ set y(value) { assertUnreachable(); }
+ }});
+
+ assertPropertiesEqual({
+ a: 20,
+ b: true
+ }, store);
+
+ assertArrayEquals([
+ "load: .temp.obj.x > 20",
+ "store: result.a = 20",
+ "compute name: y",
+ "load: .temp.obj.y > true",
+ "store: result.b = true",
+ ], steps);
+})();
+
+// Credit to Mike Pennisi and other Test262 contributors for originally writing
+// the tests the following are based on.
+(function testArrayElision() {
+ var value = [1, 2, 3, 4, 5, 6, 7, 8, 9];
+ var a, obj = {};
+ var result = [, a, , obj.b, , ...obj["rest"]] = value;
+
+ assertEquals(result, value);
+ assertEquals(2, a);
+ assertEquals(4, obj.b);
+ assertArrayEquals([6, 7, 8, 9], obj.rest);
+})();
+
+(function testArrayElementInitializer() {
+ function test(value, initializer, expected) {
+ var a, obj = {};
+ var initialized = false;
+ var shouldBeInitialized = value[0] === undefined;
+ assertEquals(value, [ a = (initialized = true, initializer) ] = value);
+ assertEquals(expected, a);
+ assertEquals(shouldBeInitialized, initialized);
+
+ var initialized2 = false;
+ assertEquals(value, [ obj.a = (initialized2 = true, initializer) ] = value);
+ assertEquals(expected, obj.a);
+ assertEquals(shouldBeInitialized, initialized2);
+ }
+
+ test([], "BAM!", "BAM!");
+ test([], "BOOP!", "BOOP!");
+ test([null], 123, null);
+ test([undefined], 456, 456);
+ test([,], "PUPPIES", "PUPPIES");
+
+ (function accept_IN() {
+ var value = [], x;
+ assertEquals(value, [ x = 'x' in {} ] = value);
+ assertEquals(false, x);
+ })();
+
+ (function ordering() {
+ var x = 0, a, b, value = [];
+ assertEquals(value, [ a = x += 1, b = x *= 2 ] = value);
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(2, x);
+ })();
+
+ (function yieldExpression() {
+ var value = [], it, result, x;
+ it = (function*() {
+ result = [ x = yield ] = value;
+ })();
+ var next = it.next();
+
+ assertEquals(undefined, result);
+ assertEquals(undefined, next.value);
+ assertEquals(false, next.done);
+ assertEquals(undefined, x);
+
+ next = it.next(86);
+
+ assertEquals(value, result);
+ assertEquals(undefined, next.value);
+ assertEquals(true, next.done);
+ assertEquals(86, x);
+ })();
+
+ (function yieldIdentifier() {
+ var value = [], yield = "BOOP!", x;
+ assertEquals(value, [ x = yield ] = value);
+ assertEquals("BOOP!", x);
+ })();
+
+ assertThrows(function let_TDZ() {
+ "use strict";
+ var x;
+ [ x = y ] = [];
+ let y;
+ }, ReferenceError);
+})();
+
+
+(function testArrayElementNestedPattern() {
+ assertThrows(function nestedArrayRequireObjectCoercibleNull() {
+ var x; [ [ x ] ] = [ null ];
+ }, TypeError);
+
+ assertThrows(function nestedArrayRequireObjectCoercibleUndefined() {
+ var x; [ [ x ] ] = [ undefined ];
+ }, TypeError);
+
+ assertThrows(function nestedArrayRequireObjectCoercibleUndefined2() {
+ var x; [ [ x ] ] = [ ];
+ }, TypeError);
+
+ assertThrows(function nestedArrayRequireObjectCoercibleUndefined3() {
+ var x; [ [ x ] ] = [ , ];
+ }, TypeError);
+
+ assertThrows(function nestedObjectRequireObjectCoercibleNull() {
+ var x; [ { x } ] = [ null ];
+ }, TypeError);
+
+ assertThrows(function nestedObjectRequireObjectCoercibleUndefined() {
+ var x; [ { x } ] = [ undefined ];
+ }, TypeError);
+
+ assertThrows(function nestedObjectRequireObjectCoercibleUndefined2() {
+ var x; [ { x } ] = [ ];
+ }, TypeError);
+
+ assertThrows(function nestedObjectRequireObjectCoercibleUndefined3() {
+ var x; [ { x } ] = [ , ];
+ }, TypeError);
+
+ (function nestedArray() {
+ var x, value = [ [ "zap", "blonk" ] ];
+ assertEquals(value, [ [ , x ] ] = value);
+ assertEquals("blonk", x);
+ })();
+
+ (function nestedObject() {
+ var x, value = [ { a: "zap", b: "blonk" } ];
+ assertEquals(value, [ { b: x } ] = value);
+ assertEquals("blonk", x);
+ })();
+})();
+
+(function testArrayRestElement() {
+ (function testBasic() {
+ var x, rest, array = [1, 2, 3];
+ assertEquals(array, [x, ...rest] = array);
+ assertEquals(1, x);
+ assertEquals([2, 3], rest);
+
+ array = [4, 5, 6];
+ assertEquals(array, [, ...rest] = array);
+ assertEquals([5, 6], rest);
+
+ })();
+
+ (function testNestedRestObject() {
+ var value = [1, 2, 3], x;
+ assertEquals(value, [...{ 1: x }] = value);
+ assertEquals(2, x);
+ })();
+
+ (function iterable() {
+ var count = 0;
+ var x, y, z;
+ function* g() {
+ count++;
+ yield;
+ count++;
+ yield;
+ count++;
+ yield;
+ }
+ var it = g();
+ assertEquals(it, [...x] = it);
+ assertEquals([undefined, undefined, undefined], x);
+ assertEquals(3, count);
+
+ it = [g()];
+ assertEquals(it, [ [...y] ] = it);
+ assertEquals([undefined, undefined, undefined], y);
+ assertEquals(6, count);
+
+ it = { a: g() };
+ assertEquals(it, { a: [...z] } = it);
+ assertEquals([undefined, undefined, undefined], z);
+ assertEquals(9, count);
+ })();
+})();
+
+(function testRequireObjectCoercible() {
+ assertThrows(() => ({} = undefined), TypeError);
+ assertThrows(() => ({} = null), TypeError);
+ assertThrows(() => [] = undefined, TypeError);
+ assertThrows(() => [] = null, TypeError);
+ assertEquals("test", ({} = "test"));
+ assertEquals("test", [] = "test");
+ assertEquals(123, ({} = 123));
+})();
+
+(function testConstReassignment() {
+ "use strict";
+ const c = "untouchable";
+ assertThrows(() => { [ c ] = [ "nope!" ]; }, TypeError);
+ assertThrows(() => { [ [ c ] ] = [ [ "nope!" ] ]; }, TypeError);
+ assertThrows(() => { [ { c } ] = [ { c: "nope!" } ]; }, TypeError);
+ assertThrows(() => { ({ c } = { c: "nope!" }); }, TypeError);
+ assertThrows(() => { ({ a: { c } } = { a: { c: "nope!" } }); }, TypeError);
+ assertThrows(() => { ({ a: [ c ] } = { a: [ "nope!" ] }); }, TypeError);
+ assertEquals("untouchable", c);
+})();
+
+(function testForIn() {
+ var log = [];
+ var x = {};
+ var object = {
+ "Apenguin": 1,
+ "\u{1F382}cake": 2,
+ "Bpuppy": 3,
+ "Cspork": 4
+ };
+ for ([x.firstLetter, ...x.rest] in object) {
+ if (x.firstLetter === "A") {
+ assertEquals(["p", "e", "n", "g", "u", "i", "n"], x.rest);
+ continue;
+ }
+ if (x.firstLetter === "C") {
+ assertEquals(["s", "p", "o", "r", "k"], x.rest);
+ break;
+ }
+ log.push({ firstLetter: x.firstLetter, rest: x.rest });
+ }
+ assertEquals([
+ { firstLetter: "\u{1F382}", rest: ["c", "a", "k", "e"] },
+ { firstLetter: "B", rest: ["p", "u", "p", "p", "y"] },
+ ], log);
+})();
+
+(function testForOf() {
+ var log = [];
+ var x = {};
+ var names = [
+ "Apenguin",
+ "\u{1F382}cake",
+ "Bpuppy",
+ "Cspork"
+ ];
+ for ([x.firstLetter, ...x.rest] of names) {
+ if (x.firstLetter === "A") {
+ assertEquals(["p", "e", "n", "g", "u", "i", "n"], x.rest);
+ continue;
+ }
+ if (x.firstLetter === "C") {
+ assertEquals(["s", "p", "o", "r", "k"], x.rest);
+ break;
+ }
+ log.push({ firstLetter: x.firstLetter, rest: x.rest });
+ }
+ assertEquals([
+ { firstLetter: "\u{1F382}", rest: ["c", "a", "k", "e"] },
+ { firstLetter: "B", rest: ["p", "u", "p", "p", "y"] },
+ ], log);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
index 82158438b4..5b90fb17a9 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount-nolazy.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
// Flags: --no-lazy --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
index cc0e278acb..140ed9da52 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring-parameters-literalcount.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
// Flags: --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/harmony/destructuring.js b/deps/v8/test/mjsunit/harmony/destructuring.js
index 7192d7aa5b..50f27857ec 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
-// Flags: --harmony-default-parameters --harmony-rest-parameters
+// Flags: --harmony-destructuring-bind
+// Flags: --harmony-default-parameters
(function TestObjectLiteralPattern() {
var { x : x, y : y, get, set } = { x : 1, y : 2, get: 3, set: 4 };
@@ -991,8 +991,9 @@
function f20({x}) { function x() { return 2 }; return x(); }
assertEquals(2, f20({x: 1}));
+ // Function hoisting is blocked by the conflicting x declaration
function f21({x}) { { function x() { return 2 } } return x(); }
- assertEquals(2, f21({x: 1}));
+ assertThrows(() => f21({x: 1}), TypeError);
var g1 = ({x}) => { var x = 2; return x };
assertEquals(2, g1({x: 1}));
@@ -1025,7 +1026,7 @@
var g20 = ({x}) => { function x() { return 2 }; return x(); }
assertEquals(2, g20({x: 1}));
var g21 = ({x}) => { { function x() { return 2 } } return x(); }
- assertEquals(2, g21({x: 1}));
+ assertThrows(() => g21({x: 1}), TypeError);
assertThrows("'use strict'; function f(x) { let x = 0; }; f({});", SyntaxError);
assertThrows("'use strict'; function f({x}) { let x = 0; }; f({});", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions.js b/deps/v8/test/mjsunit/harmony/do-expressions.js
index c3f9e0cd86..e7e513a230 100644
--- a/deps/v8/test/mjsunit/harmony/do-expressions.js
+++ b/deps/v8/test/mjsunit/harmony/do-expressions.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-do-expressions --harmony-sloppy-let --allow-natives-syntax
-// Flags: --harmony-default-parameters --harmony-destructuring
+// Flags: --harmony-default-parameters --harmony-destructuring-bind
// Flags: --harmony-completion
function returnValue(v) { return v; }
@@ -263,6 +263,36 @@ function TestHoisting() {
TestHoisting();
+// v8:4661
+
+function tryFinallySimple() { (do { try {} finally {} }); }
+tryFinallySimple();
+tryFinallySimple();
+tryFinallySimple();
+tryFinallySimple();
+
+var finallyRanCount = 0;
+function tryFinallyDoExpr() {
+ return (do {
+ try {
+ throw "BOO";
+ } catch (e) {
+ "Caught: " + e + " (" + finallyRanCount + ")"
+ } finally {
+ ++finallyRanCount;
+ }
+ });
+}
+assertEquals("Caught: BOO (0)", tryFinallyDoExpr());
+assertEquals(1, finallyRanCount);
+assertEquals("Caught: BOO (1)", tryFinallyDoExpr());
+assertEquals(2, finallyRanCount);
+assertEquals("Caught: BOO (2)", tryFinallyDoExpr());
+assertEquals(3, finallyRanCount);
+assertEquals("Caught: BOO (3)", tryFinallyDoExpr());
+assertEquals(4, finallyRanCount);
+
+
function TestOSR() {
var numbers = do {
let nums = [];
diff --git a/deps/v8/test/mjsunit/harmony/function-name.js b/deps/v8/test/mjsunit/harmony/function-name.js
new file mode 100644
index 0000000000..8ca5d8209a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/function-name.js
@@ -0,0 +1,161 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-function-name
+
+(function testVariableDeclarationsFunction() {
+ 'use strict';
+ var a = function(){};
+ assertEquals('a', a.name);
+ let b = () => {};
+ assertEquals('b', b.name);
+ const c = ((function(){}));
+ assertEquals('c', c.name);
+
+ var x = function(){}, y = () => {}, z = function withName() {};
+ assertEquals('x', x.name);
+ assertEquals('y', y.name);
+ assertEquals('withName', z.name);
+})();
+
+(function testVariableDeclarationsClass() {
+ 'use strict';
+ var a = class {};
+ assertEquals('a', a.name);
+ let b = ((class {}));
+ assertEquals('b', b.name);
+ // Should not overwrite name property.
+ const c = class { static name() { } }
+ assertEquals('function', typeof c.name);
+
+ var x = class {}, y = class NamedClass {};
+ assertEquals('x', x.name);
+ assertEquals('NamedClass', y.name);
+})();
+
+(function testObjectProperties() {
+ 'use strict';
+ var obj = {
+ a: function() {},
+ b: () => {},
+ c() { },
+ get d() { },
+ set d(val) { },
+ x: function withName() { },
+ y: class { },
+ z: class ClassName { },
+ 42: function() {},
+ 4.2: function() {},
+ __proto__: function() {},
+ };
+
+ assertEquals('a', obj.a.name);
+ assertEquals('b', obj.b.name);
+ assertEquals('c', obj.c.name);
+ var dDescriptor = Object.getOwnPropertyDescriptor(obj, 'd');
+ assertEquals('get d', dDescriptor.get.name);
+ assertEquals('set d', dDescriptor.set.name);
+ assertEquals('withName', obj.x.name);
+ assertEquals('y', obj.y.name);
+ assertEquals('ClassName', obj.z.name);
+ assertEquals('42', obj[42].name);
+ assertEquals('4.2', obj[4.2].name);
+ assertEquals('', obj.__proto__.name);
+})();
+
+(function testClassProperties() {
+ 'use strict';
+ class C {
+ a() { }
+ static b() { }
+ get c() { }
+ set c(val) { }
+ 42() { }
+ static 43() { }
+ get 44() { }
+ set 44(val) { }
+ };
+
+ assertEquals('a', C.prototype.a.name);
+ assertEquals('b', C.b.name);
+ var descriptor = Object.getOwnPropertyDescriptor(C.prototype, 'c');
+ assertEquals('get c', descriptor.get.name);
+ assertEquals('set c', descriptor.set.name);
+ assertEquals('42', C.prototype[42].name);
+ assertEquals('43', C[43].name);
+ var descriptor = Object.getOwnPropertyDescriptor(C.prototype, '44');
+ assertEquals('get 44', descriptor.get.name);
+ assertEquals('set 44', descriptor.set.name);
+})();
+
+// TODO(adamk): Make computed property names work.
+(function testComputedProperties() {
+ 'use strict';
+ var a = 'a';
+ var sym1 = Symbol('1');
+ var sym2 = Symbol('2');
+ var obj = {
+ [a]: function() {},
+ [sym1]: function() {},
+ [sym2]: function withName() {},
+ };
+
+ // Should be 'a'
+ assertEquals('', obj[a].name);
+ // Should be '[1]'
+ assertEquals('', obj[sym1].name);
+ assertEquals('withName', obj[sym2].name);
+
+ class C {
+ [a]() { }
+ [sym1]() { }
+ static [sym2]() { }
+ }
+
+ // Should be 'a'
+ assertEquals('', C.prototype[a].name);
+ // Should be '[1]'
+ assertEquals('', C.prototype[sym1].name);
+ // Should be '[2]'
+ assertEquals('', C[sym2].name);
+})();
+
+
+(function testAssignment() {
+ var basicFn, arrowFn, generatorFn, classLit;
+
+ basicFn = function() { return true; };
+ assertEquals('basicFn', basicFn.name);
+ var basicFn2 = basicFn;
+ assertEquals('basicFn', basicFn2.name);
+ basicFn = function functionWithName() { };
+ assertEquals("functionWithName", basicFn.name);
+
+ arrowFn = x => x;
+ assertEquals('arrowFn', arrowFn.name);
+ var arrowFn2 = arrowFn;
+ assertEquals('arrowFn', arrowFn2.name);
+
+ generatorFn = function*() { yield true; };
+ assertEquals('generatorFn', generatorFn.name);
+ var generatorFn2 = generatorFn;
+ assertEquals('generatorFn', generatorFn2.name);
+ generatorFn = function* generatorWithName() { };
+ assertEquals("generatorWithName", generatorFn.name);
+
+ classLit = class { constructor() {} };
+ assertEquals('classLit', classLit.name);
+ var classLit2 = classLit;
+ assertEquals('classLit', classLit2.name);
+ classLit = class classWithName { constructor() {} };
+ assertEquals('classWithName', classLit.name);
+ classLit = class { constructor() {} static name() {} };
+ assertEquals('function', typeof classLit.name);
+ classLit = class { constructor() {} static get name() { return true; } };
+ assertTrue(classLit.name);
+ classLit = class { constructor() {} static ['name']() {} };
+ assertEquals('function', typeof classLit.name);
+ classLit = class { constructor() {} static get ['name']() { return true; } };
+ assertTrue(classLit.name);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
index 06c0a25ba1..3a73e0a9b8 100644
--- a/deps/v8/test/mjsunit/harmony/futex.js
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -46,7 +46,8 @@
})();
(function TestInvalidIndex() {
- var i32a = new Int32Array(new SharedArrayBuffer(16));
+ var sab = new SharedArrayBuffer(16);
+ var i32a = new Int32Array(sab);
// Valid indexes are 0-3.
[-1, 4, 100].forEach(function(invalidIndex) {
@@ -59,6 +60,16 @@
invalidIndex));
});
+ i32a = new Int32Array(sab, 8);
+ [-1, 2, 100].forEach(function(invalidIndex) {
+ assertEquals(undefined, Atomics.futexWait(i32a, invalidIndex, 0));
+ assertEquals(undefined, Atomics.futexWake(i32a, invalidIndex, 0));
+ var validIndex = 0;
+ assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0,
+ validIndex));
+ assertEquals(undefined, Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0,
+ invalidIndex));
+ });
})();
(function TestWaitTimeout() {
@@ -71,8 +82,13 @@
})();
(function TestWaitNotEqual() {
- var i32a = new Int32Array(new SharedArrayBuffer(16));
+ var sab = new SharedArrayBuffer(16);
+ var i32a = new Int32Array(sab);
assertEquals(Atomics.NOTEQUAL, Atomics.futexWait(i32a, 0, 42));
+
+ i32a = new Int32Array(sab, 8);
+ i32a[0] = 1;
+ assertEquals(Atomics.NOTEQUAL, Atomics.futexWait(i32a, 0, 0));
})();
(function TestWaitNegativeTimeout() {
@@ -90,14 +106,14 @@ if (this.Worker) {
var i32a = new Int32Array(sab);
var workerScript =
- `onmessage = function(sab) {
- var i32a = new Int32Array(sab);
+ `onmessage = function(msg) {
+ var i32a = new Int32Array(msg.sab, msg.offset);
var result = Atomics.futexWait(i32a, 0, 0, ${timeout});
postMessage(result);
};`;
var worker = new Worker(workerScript);
- worker.postMessage(sab, [sab]);
+ worker.postMessage({sab: sab, offset: offset}, [sab]);
// Spin until the worker is waiting on the futex.
while (%AtomicsFutexNumWaitersForTesting(i32a, 0) != 1) {}
@@ -105,6 +121,29 @@ if (this.Worker) {
Atomics.futexWake(i32a, 0, 1);
assertEquals(Atomics.OK, worker.getMessage());
worker.terminate();
+
+ var worker2 = new Worker(workerScript);
+ var offset = 8;
+ var i32a2 = new Int32Array(sab, offset);
+ worker2.postMessage({sab: sab, offset: offset}, [sab]);
+
+ // Spin until the worker is waiting on the futex.
+ while (%AtomicsFutexNumWaitersForTesting(i32a2, 0) != 1) {}
+ Atomics.futexWake(i32a2, 0, 1);
+ assertEquals(Atomics.OK, worker2.getMessage());
+ worker2.terminate();
+
+ // Futex should work when index and buffer views are different, but
+ // the real address is the same.
+ var worker3 = new Worker(workerScript);
+ i32a2 = new Int32Array(sab, 4);
+ worker3.postMessage({sab: sab, offset: 8}, [sab]);
+
+ // Spin until the worker is waiting on the futex.
+ while (%AtomicsFutexNumWaitersForTesting(i32a2, 1) != 1) {}
+ Atomics.futexWake(i32a2, 1, 1);
+ assertEquals(Atomics.OK, worker3.getMessage());
+ worker3.terminate();
};
// Test various infinite timeouts
@@ -266,6 +305,33 @@ if (this.Worker) {
for (id = 0; id < 4; ++id) {
assertEquals(Atomics.OK, workers[id].getMessage());
+ }
+
+ // Test futexWakeOrRequeue on offset typed array
+ var offset = 16;
+ sab = new SharedArrayBuffer(24);
+ i32a = new Int32Array(sab);
+ var i32a2 = new Int32Array(sab, offset);
+
+ for (id = 0; id < 4; id++) {
+ workers[id].postMessage({sab: sab, id: id}, [sab]);
+ }
+
+ while (%AtomicsFutexNumWaitersForTesting(i32a2, 0) != 4) { }
+
+ index1 = 0;
+ index2 = 1;
+ assertEquals(4, %AtomicsFutexNumWaitersForTesting(i32a2, index1));
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a2, index2));
+
+ assertEquals(2, Atomics.futexWakeOrRequeue(i32a2, index1, 2, 0, index2));
+ assertEquals(2, %AtomicsFutexNumWaitersForTesting(i32a2, index2));
+ assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a2, index1));
+
+ assertEquals(2, Atomics.futexWake(i32a2, index2, 2));
+
+ for (id = 0; id < 4; ++id) {
+ assertEquals(Atomics.OK, workers[id].getMessage());
workers[id].terminate();
}
diff --git a/deps/v8/test/mjsunit/harmony/private-symbols.js b/deps/v8/test/mjsunit/harmony/private-symbols.js
new file mode 100644
index 0000000000..369c222897
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-symbols.js
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+
+
+var symbol = %CreatePrivateSymbol("private");
+
+
+// Private symbols must never be listed.
+
+var object = {};
+object[symbol] = 42;
+for (var key of Object.keys(object)) assertUnreachable();
+for (var key of Object.getOwnPropertySymbols(object)) assertUnreachable();
+for (var key of Object.getOwnPropertyNames(object)) assertUnreachable();
+for (var key of Reflect.ownKeys(object)) assertUnreachable();
+for (var key of Reflect.enumerate(object)) assertUnreachable();
+for (var key in object) assertUnreachable();
+
+var object2 = {__proto__: object};
+for (var key of Object.keys(object2)) assertUnreachable();
+for (var key of Object.getOwnPropertySymbols(object2)) assertUnreachable();
+for (var key of Object.getOwnPropertyNames(object2)) assertUnreachable();
+for (var key of Reflect.ownKeys(object2)) assertUnreachable();
+for (var key of Reflect.enumerate(object2)) assertUnreachable();
+for (var key in object2) assertUnreachable();
+
+
+// Private symbols must never leak to proxy traps.
+
+var proxy = new Proxy({}, new Proxy({}, {get() {return () => {throw 666}}}));
+var object = {__proto__: proxy};
+
+// [[Set]]
+assertEquals(42, proxy[symbol] = 42);
+assertThrows(function() { "use strict"; proxy[symbol] = 42 }, TypeError);
+assertEquals(false, Reflect.set(proxy, symbol, 42));
+assertEquals(42, object[symbol] = 42);
+assertEquals(43, (function() {"use strict"; return object[symbol] = 43})());
+assertEquals(true, Reflect.set(object, symbol, 44));
+
+// [[DefineOwnProperty]]
+assertEquals(false, Reflect.defineProperty(proxy, symbol, {}));
+assertThrows(() => Object.defineProperty(proxy, symbol, {}), TypeError);
+assertEquals(true, Reflect.defineProperty(object, symbol, {}));
+assertEquals(object, Object.defineProperty(object, symbol, {}));
+
+// [[Delete]]
+assertEquals(true, delete proxy[symbol]);
+assertEquals(true, (function() {"use strict"; return delete proxy[symbol]})());
+assertEquals(true, Reflect.deleteProperty(proxy, symbol));
+assertEquals(true, delete object[symbol]);
+assertEquals(true, (function() {"use strict"; return delete object[symbol]})());
+assertEquals(true, Reflect.deleteProperty(object, symbol));
+
+// [[GetOwnPropertyDescriptor]]
+assertEquals(undefined, Object.getOwnPropertyDescriptor(proxy, symbol));
+assertEquals(undefined, Reflect.getOwnPropertyDescriptor(proxy, symbol));
+assertFalse(Object.prototype.hasOwnProperty.call(proxy, symbol));
+assertEquals(undefined, Object.getOwnPropertyDescriptor(object, symbol));
+assertEquals(undefined, Reflect.getOwnPropertyDescriptor(object, symbol));
+assertFalse(Object.prototype.hasOwnProperty.call(object, symbol));
+
+// [[Has]]
+assertFalse(symbol in proxy);
+assertFalse(Reflect.has(proxy, symbol));
+assertFalse(symbol in object);
+assertFalse(Reflect.has(object, symbol));
+
+// [[Get]]
+assertEquals(undefined, proxy[symbol]);
+assertEquals(undefined, Reflect.get(proxy, symbol));
+assertEquals(undefined, Reflect.get(proxy, symbol, 42));
+assertEquals(undefined, object[symbol]);
+assertEquals(undefined, Reflect.get(object, symbol));
+assertEquals(undefined, Reflect.get(object, symbol, 42));
diff --git a/deps/v8/test/mjsunit/harmony/promise-species.js b/deps/v8/test/mjsunit/harmony/promise-species.js
new file mode 100644
index 0000000000..12244f291a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/promise-species.js
@@ -0,0 +1,42 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species --allow-natives-syntax
+
+// Test that Promises use @@species appropriately
+
+// Another constructor with no species will not be instantiated
+var test = new Promise(function(){});
+var bogoCount = 0;
+function bogusConstructor() { bogoCount++; }
+test.constructor = bogusConstructor;
+assertTrue(Promise.resolve(test) instanceof Promise);
+assertFalse(Promise.resolve(test) instanceof bogusConstructor);
+// Tests that chromium:575314 is fixed thoroughly
+Promise.resolve(test).catch(e => %AbortJS("Error " + e)).then(() => {
+ if (bogoCount != 0) %AbortJS("bogoCount was " + bogoCount + " should be 0");
+});
+
+// If there is a species, it will be instantiated
+// @@species will be read exactly once, and the constructor is called with a
+// function
+var count = 0;
+var params;
+class MyPromise extends Promise {
+ constructor(...args) {
+ super(...args);
+ params = args;
+ }
+ static get [Symbol.species]() {
+ count++
+ return this;
+ }
+}
+
+var myPromise = MyPromise.resolve().then();
+assertEquals(1, count);
+assertEquals(1, params.length);
+assertEquals('function', typeof(params[0]));
+assertTrue(myPromise instanceof MyPromise);
+assertTrue(myPromise instanceof Promise);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-accesschecks.js b/deps/v8/test/mjsunit/harmony/proxies-accesschecks.js
new file mode 100644
index 0000000000..209d4329f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-accesschecks.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var realm = Realm.create();
+
+this.__proto__ = new Proxy({}, {
+ getPrototypeOf() { assertUnreachable() },
+ get() { assertUnreachable() }
+});
+
+var other_type_error = Realm.eval(realm, "TypeError");
+assertThrows(() => Realm.eval(realm, "Realm.global(0).foo"), other_type_error);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-apply.js b/deps/v8/test/mjsunit/harmony/proxies-apply.js
new file mode 100644
index 0000000000..4ddffe73b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-apply.js
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+(function testNonCallable() {
+ var proxy = new Proxy({},{});
+ assertThrows(function(){ proxy() }, TypeError);
+
+ var proxy2 = new Proxy(proxy, {});
+ assertThrows(function(){ proxy2() }, TypeError);
+})();
+
+(function testCallProxyFallbackNoArguments() {
+ var called = false;
+ var target = function() {
+ called = true;
+ }
+ var proxy = new Proxy(target, {});
+ assertFalse(called);
+ proxy();
+ assertTrue(called);
+
+ called = false;
+ var proxy2 = new Proxy(proxy, {});
+ assertFalse(called);
+ proxy2();
+ assertTrue(called);
+})();
+
+(function testCallProxyFallback1Argument() {
+ var called = false;
+ var target = function(a) {
+ called = true;
+ assertEquals('1', a);
+ }
+ var proxy = new Proxy(target, {});
+ assertFalse(called);
+ proxy('1');
+ assertTrue(called);
+})();
+
+(function testCallProxyFallback2Arguments() {
+ var called = false;
+ var target = function(a, b) {
+ called = true;
+ assertEquals('1', a);
+ assertEquals('2', b);
+ }
+ var proxy = new Proxy(target, {});
+ assertFalse(called);
+ proxy('1', '2');
+ assertTrue(called);
+})();
+
+(function testCallProxyFallbackChangedReceiver() {
+ var apply_receiver = {receiver:true};
+ var seen_receiver = undefined;
+ var target = function() {
+ seen_receiver = this;
+ }
+ var proxy = new Proxy(target, {});
+ assertEquals(undefined, seen_receiver);
+ Reflect.apply(proxy, apply_receiver, [1,2,3,4]);
+ assertSame(apply_receiver, seen_receiver);
+})();
+
+(function testCallProxyTrap() {
+ var called_target = false;
+ var called_handler = false;
+ var target = function(a, b) {
+ called_target = true;
+ assertEquals(1, a);
+ assertEquals(2, b);
+ }
+ var handler = {
+ apply: function(target, this_arg, args) {
+ target.apply(this_arg, args);
+ called_handler = true;
+ }
+ }
+ var proxy = new Proxy(target, handler);
+ assertFalse(called_target);
+ assertFalse(called_handler);
+ Reflect.apply(proxy, {rec:1}, [1,2]);
+ assertTrue(called_target);
+ assertTrue(called_handler);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-bind.js b/deps/v8/test/mjsunit/harmony/proxies-bind.js
new file mode 100644
index 0000000000..9e4c5b79c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-bind.js
@@ -0,0 +1,137 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+// Tests the interaction of Function.prototype.bind with proxies.
+
+
+// (Helper)
+
+var log = [];
+var logger = {};
+var handler = new Proxy({}, logger);
+
+logger.get = function(t, trap, r) {
+ return function() {
+ log.push([trap, ...arguments]);
+ return Reflect[trap](...arguments);
+ }
+};
+
+
+// Simple case
+
+var target = function(a, b, c) { "use strict"; return this };
+var proxy = new Proxy(target, handler);
+var this_value = Symbol();
+
+log.length = 0;
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(2, result.length);
+assertEquals(target.__proto__, result.__proto__);
+assertEquals(this_value, result());
+assertEquals(5, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+assertEquals(["getPrototypeOf", target], log[0]);
+assertEquals(["getOwnPropertyDescriptor", target, "length"], log[1]);
+assertEquals(["get", target, "length", proxy], log[2]);
+assertEquals(["get", target, "name", proxy], log[3]);
+assertEquals(["apply", target, this_value, ["foo"]], log[4]);
+assertEquals(new target(), new result());
+
+
+// Custom prototype
+
+log.length = 0;
+target.__proto__ = {radio: "gaga"};
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(2, result.length);
+assertSame(target.__proto__, result.__proto__);
+assertEquals(this_value, result());
+assertEquals(5, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+assertEquals(["getPrototypeOf", target], log[0]);
+assertEquals(["getOwnPropertyDescriptor", target, "length"], log[1]);
+assertEquals(["get", target, "length", proxy], log[2]);
+assertEquals(["get", target, "name", proxy], log[3]);
+assertEquals(["apply", target, this_value, ["foo"]], log[4]);
+
+
+// Custom length
+
+handler = {
+ get() {return 42},
+ getOwnPropertyDescriptor() {return {configurable: true}}
+};
+proxy = new Proxy(target, handler);
+
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(41, result.length);
+assertEquals(this_value, result());
+
+
+// Long length
+
+handler = {
+ get() {return Math.pow(2, 100)},
+ getOwnPropertyDescriptor() {return {configurable: true}}
+};
+proxy = new Proxy(target, handler);
+
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(Math.pow(2, 100) - 1, result.length);
+assertEquals(this_value, result());
+
+
+// Very long length
+
+handler = {
+ get() {return 1/0},
+ getOwnPropertyDescriptor() {return {configurable: true}}
+};
+proxy = new Proxy(target, handler);
+
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(1/0, result.length);
+assertEquals(this_value, result());
+
+
+// Non-integer length
+
+handler = {
+ get() {return 4.2},
+ getOwnPropertyDescriptor() {return {configurable: true}}
+};
+proxy = new Proxy(target, handler);
+
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(3, result.length);
+assertEquals(this_value, result());
+
+
+// Undefined length
+
+handler = {
+ get() {},
+ getOwnPropertyDescriptor() {return {configurable: true}}
+};
+proxy = new Proxy(target, handler);
+
+result = Function.prototype.bind.call(proxy, this_value, "foo");
+assertEquals(0, result.length);
+assertEquals(this_value, result());
+
+
+// Non-callable
+
+assertThrows(() => Function.prototype.bind.call(new Proxy({}, {})), TypeError);
+assertThrows(() => Function.prototype.bind.call(new Proxy([], {})), TypeError);
+
+
+// Non-constructable
+
+result = Function.prototype.bind.call(() => 42, this_value, "foo");
+assertEquals(42, result());
+assertThrows(() => new result());
diff --git a/deps/v8/test/mjsunit/harmony/proxies-construct.js b/deps/v8/test/mjsunit/harmony/proxies-construct.js
new file mode 100644
index 0000000000..6e02a47bd0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-construct.js
@@ -0,0 +1,158 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+(function testNonConstructable() {
+ var proxy = new Proxy({},{});
+ assertThrows(function(){ new proxy() }, TypeError);
+
+ var proxy2 = new Proxy(proxy, {});
+ assertThrows(function(){ proxy2() }, TypeError);
+})();
+
+(function testFailingConstructRevoked() {
+ var pair = Proxy.revocable(Array, {});
+ var instance = new pair.proxy();
+ pair.revoke();
+ assertThrows(function(){ new pair.proxy() }, TypeError);
+})();
+
+(function testFailingGetTrap() {
+ var handler = {
+ get() {
+ throw TypeError();
+ }
+ }
+  var proxy = new Proxy({}, handler);
+ var proxy2 = new Proxy({}, proxy);
+ assertThrows(function(){ new proxy2() }, TypeError);
+})();
+
+(function testConstructFallback() {
+ var called = false;
+ function Target() {
+ called = true;
+ this.property1 = 'value1';
+ };
+ Target.prototype = {};
+ var proxy = new Proxy(Target, {});
+
+ assertFalse(called);
+ var instance = new proxy();
+ assertTrue(called);
+ assertEquals('value1', instance.property1);
+ assertSame(Target.prototype, Reflect.getPrototypeOf(instance));
+
+ var proxy2 = new Proxy(proxy, {});
+ called = false;
+ var instance2 = new proxy2();
+ assertTrue(called);
+ assertEquals('value1', instance2.property1);
+ assertSame(Target.prototype, Reflect.getPrototypeOf(instance));
+})();
+
+(function testConstructTrapDirectReturn() {
+ function Target(a, b) {
+ this.sum = a + b;
+ };
+ var handler = {
+ construct(t, c, args) {
+ return { sum: 42 };
+ }
+ };
+ var proxy = new Proxy(Target, handler);
+ assertEquals(42, (new proxy(1, 2)).sum);
+})();
+
+(function testConstructTrap() {
+ function Target(arg1, arg2) {
+ this.arg1 = arg1;
+ this.arg2 = arg2;
+ }
+ var seen_target, seen_arguments, seen_new_target;
+ var handler = {
+ construct(target, args, new_target) {
+ seen_target = target;
+ seen_arguments = args;
+ seen_new_target = new_target;
+ return Reflect.construct(target, args, new_target);
+ }
+ }
+ var proxy = new Proxy(Target, handler);
+ var instance = new proxy('a', 'b');
+ assertEquals(Target, seen_target);
+ assertEquals(['a','b'], seen_arguments);
+ assertEquals(proxy, seen_new_target);
+ assertEquals('a', instance.arg1);
+ assertEquals('b', instance.arg2);
+
+ var instance2 = Reflect.construct(proxy, ['a1', 'b1'], Array);
+ assertEquals(Target, seen_target);
+ assertEquals(['a1', 'b1'], seen_arguments);
+ assertEquals(Array, seen_new_target);
+ assertEquals('a1', instance2.arg1);
+ assertEquals('b1', instance2.arg2);
+})();
+
+(function testConstructCrossRealm() {
+ var realm1 = Realm.create();
+ var handler = {
+ construct(target, args, new_target) {
+ return args;
+ }
+ };
+ var OtherProxy = Realm.eval(realm1, "Proxy");
+ var otherArrayPrototype = Realm.eval(realm1, 'Array.prototype');
+
+ // Proxy and handler are from this realm.
+ var proxy = new Proxy(Array, handler);
+ var result = new proxy();
+ assertSame(Array.prototype, Reflect.getPrototypeOf(result));
+
+ // Proxy is from this realm, handler is from realm1.
+ var otherProxy = new OtherProxy(Array, handler);
+ var otherResult = new otherProxy();
+ assertSame(Array.prototype, Reflect.getPrototypeOf(otherResult));
+
+ // Proxy and handler are from realm1.
+ var otherProxy2 = Realm.eval(realm1, 'new Proxy('+
+ 'Array, { construct(target, args, new_target) { return args }} )');
+ var otherResult2 = new otherProxy2();
+ assertSame(Array.prototype, Reflect.getPrototypeOf(otherResult2));
+})();
+
+(function testReflectConstructCrossRealm() {
+ var realm1 = Realm.create();
+ var realm2 = Realm.create();
+ var realm3 = Realm.create();
+ var realm4 = Realm.create();
+
+ var argsRealm1 = Realm.eval(realm1, '[]');
+ var ProxyRealm2 = Realm.eval(realm2, 'Proxy');
+ var constructorRealm3 = Realm.eval(realm3, '(function(){})');
+ var handlerRealm4 = Realm.eval(realm4,
+ '({ construct(target, args, new_target) {return args} })');
+
+ var proxy = new ProxyRealm2(constructorRealm3, handlerRealm4);
+
+ // Check that the arguments array returned by handlerRealm4 is created in the
+ // realm of the Reflect.construct function.
+ var result = Reflect.construct(proxy, argsRealm1);
+ assertSame(Array.prototype, Reflect.getPrototypeOf(result));
+
+ var ReflectConstructRealm1 = Realm.eval(realm1, 'Reflect.construct');
+ var result2 = ReflectConstructRealm1(proxy, argsRealm1);
+ assertSame(Realm.eval(realm1, 'Array.prototype'),
+ Reflect.getPrototypeOf(result2));
+
+ var result3 = ReflectConstructRealm1(proxy, []);
+ assertSame(Realm.eval(realm1, 'Array.prototype'),
+ Reflect.getPrototypeOf(result3));
+
+ var ReflectConstructRealm2 = Realm.eval(realm2, 'Reflect.construct');
+ var result4 = ReflectConstructRealm2(proxy, argsRealm1);
+ assertSame(Realm.eval(realm2, 'Array.prototype'),
+ Reflect.getPrototypeOf(result4));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js b/deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js
new file mode 100644
index 0000000000..5530a60fed
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-cross-realm-exception.js
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+
+// Do not read out the prototype from a cross-realm object.
+var realm = Realm.create();
+
+__proto__ = {};
+assertEquals(null,
+ Realm.eval(realm, "3; Reflect.getPrototypeOf(Realm.global(0))"));
+assertFalse(Realm.eval(realm, "3; Realm.global(0) instanceof Object"));
+
+__proto__ = new Proxy({}, { getPrototypeOf() { assertUnreachable() } });
+assertEquals(null,
+ Realm.eval(realm, "1; Reflect.getPrototypeOf(Realm.global(0))"));
+assertFalse(Realm.eval(realm, "1; Realm.global(0) instanceof Object"));
+
+// Test that the instanceof check works in optimized code.
+var test = Realm.eval(realm,
+ "()=>{1.1; return Realm.global(0) instanceof Object; }");
+assertFalse(test());
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+assertFalse(test());
+
+__proto__ = {};
+__proto__ = new Proxy({}, { get(t, p, r) { assertUnreachable() } });
+assertEquals(null,
+ Realm.eval(realm, "2; Reflect.getPrototypeOf(Realm.global(0))"));
+assertFalse(Realm.eval(realm, "2; Realm.global(0) instanceof Object"));
+
+
+__proto__ = {};
+__proto__.__proto__ = new Proxy({}, {
+ getPrototypeOf() { assertUnreachable() }
+});
+assertEquals(null,
+ Realm.eval(realm, "4; Reflect.getPrototypeOf(Realm.global(0))"));
+assertFalse(Realm.eval(realm, "4; Realm.global(0) instanceof Object"));
+
+// 2-level proxy indirection
+__proto__ = {};
+__proto__ = new Proxy({},
+ new Proxy({}, {
+ get() { assertUnreachable() }
+ })
+);
+assertEquals(null,
+ Realm.eval(realm, "5; Reflect.getPrototypeOf(Realm.global(0))"));
+assertFalse(Realm.eval(realm, "5; Realm.global(0) instanceof Object"));
diff --git a/deps/v8/test/mjsunit/harmony/proxies-define-property.js b/deps/v8/test/mjsunit/harmony/proxies-define-property.js
new file mode 100644
index 0000000000..27f23be173
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-define-property.js
@@ -0,0 +1,84 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+// Check basic call to trap.
+
+var g_target, g_name, g_desc;
+var handler = {
+ defineProperty: function(target, name, desc) {
+ g_target = target;
+ g_name = name;
+ g_desc = desc;
+ return true;
+ }
+}
+var target = {}
+var proxy = new Proxy(target, handler);
+var desc = { value: 1, writable: true, configurable: true, enumerable: true };
+Object.defineProperty(proxy, "foo", desc);
+assertSame(target, g_target);
+assertEquals("foo", g_name);
+assertEquals(desc, g_desc);
+
+// Check specific steps in the spec
+
+// Step 4: revoked handler
+var pair = Proxy.revocable(target, handler);
+Object.defineProperty(proxy, "foo2", desc);
+assertSame(target, g_target);
+assertEquals("foo2", g_name);
+assertEquals(desc, g_desc);
+pair.revoke();
+assertThrows('Object.defineProperty(pair.proxy, "bar", desc);', TypeError);
+
+// Step 6: Trap isn't callable.
+handler.defineProperty = 1;
+assertThrows("Object.defineProperty(proxy, 'foo', {value: 2})", TypeError);
+
+// Step 7: Trap is undefined.
+handler.defineProperty = undefined;
+Object.defineProperty(proxy, "prop1", desc);
+assertEquals(desc, Object.getOwnPropertyDescriptor(target, "prop1"));
+var target2 = {};
+var proxy2 = new Proxy(target2, {});
+Object.defineProperty(proxy2, "prop2", desc);
+assertEquals(desc, Object.getOwnPropertyDescriptor(target2, "prop2"));
+
+// Step 9: Property name is passed to the trap as a string.
+handler.defineProperty = function(t, name, d) { g_name = name; return true; };
+Object.defineProperty(proxy, 0, desc);
+assertTrue(typeof g_name === "string");
+assertEquals("0", g_name);
+
+// Step 10: Trap returns false.
+handler.defineProperty = function(t, n, d) { return false; }
+assertThrows("Object.defineProperty(proxy, 'foo', desc)", TypeError);
+
+// Step 15a: Trap returns true for adding a property to a non-extensible target.
+handler.defineProperty = function(t, n, d) { return true; }
+Object.preventExtensions(target);
+assertThrows("Object.defineProperty(proxy, 'foo', desc)", TypeError);
+
+// Step 15b: Trap returns true for adding a non-configurable property.
+target = {};
+proxy = new Proxy(target, handler);
+desc = {value: 1, writable: true, configurable: false, enumerable: true};
+assertThrows("Object.defineProperty(proxy, 'foo', desc)", TypeError);
+// No exception is thrown if a non-configurable property exists on the target.
+Object.defineProperty(target, "nonconf",
+ {value: 1, writable: true, configurable: false});
+Object.defineProperty(proxy, "nonconf", {value: 2, configurable: false});
+
+// Step 16a: Trap returns true for non-compatible property descriptor.
+Object.defineProperty(target, "foo",
+ {value: 1, writable: false, configurable: false});
+assertThrows("Object.defineProperty(proxy, 'foo', {value: 2})", TypeError);
+
+// Step 16b: Trap returns true for overwriting a configurable property
+// with a non-configurable descriptor.
+target.bar = "baz";
+assertThrows("Object.defineProperty(proxy, 'bar', {configurable: false})",
+ TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-delete-property.js b/deps/v8/test/mjsunit/harmony/proxies-delete-property.js
new file mode 100644
index 0000000000..27f9c059cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-delete-property.js
@@ -0,0 +1,190 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+
+var properties =
+ ["bla", "0", 1, Symbol(), {[Symbol.toPrimitive]() {return "a"}}];
+
+
+function TestForwarding(handler, myDelete, shouldThrow) {
+ var target = {};
+ var proxy = new Proxy(target, handler);
+
+ assertFalse(target.hasOwnProperty("doesnotexist"));
+ assertTrue(myDelete(proxy, "doesnotexist"));
+
+ for (p of properties) {
+ target[p] = 42;
+ assertTrue(myDelete(proxy, p));
+ assertFalse(target.hasOwnProperty(p));
+ }
+
+ for (p of properties) {
+ Object.defineProperty(target, p, {value: 42, configurable: false});
+ if (shouldThrow) {
+ assertThrows(() => myDelete(proxy, p), TypeError);
+ } else {
+ assertFalse(myDelete(proxy, p));
+ }
+ assertTrue(target.hasOwnProperty(p));
+ }
+};
+
+
+(function () {
+ // No trap.
+
+ var handler = {};
+
+ TestForwarding(handler,
+ (o, p) => delete o[p], false);
+ TestForwarding(handler,
+ (o, p) => Reflect.deleteProperty(o, p), false);
+ TestForwarding(handler,
+ (o, p) => {"use strict"; return delete o[p]}, true);
+ TestForwarding(handler,
+ (o, p) => {"use strict"; return Reflect.deleteProperty(o, p)}, false);
+})();
+
+
+(function () {
+ // "Undefined" trap.
+
+ var handler = { deleteProperty: null };
+
+ TestForwarding(handler,
+ (o, p) => delete o[p], false);
+ TestForwarding(handler,
+ (o, p) => Reflect.deleteProperty(o, p), false);
+ TestForwarding(handler,
+ (o, p) => {"use strict"; return delete o[p]}, true);
+ TestForwarding(handler,
+ (o, p) => {"use strict"; return Reflect.deleteProperty(o, p)}, false);
+})();
+
+
+(function () {
+ // Invalid trap.
+
+ var target = {};
+ var handler = { deleteProperty: true };
+ var proxy = new Proxy(target, handler);
+
+ assertThrows(() => delete proxy[0], TypeError);
+ assertThrows(() => Reflect.deleteProperty(proxy, 0), TypeError);
+})();
+
+
+function TestTrappingTrueish(myDelete) {
+ var handler = { deleteProperty() {return 42} };
+ var target = {};
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish and target doesn't own property.
+ for (p of properties) {
+ assertTrue(myDelete(proxy, p));
+ }
+
+ // Trap returns trueish and target property is configurable.
+ for (p of properties) {
+ target[p] = 42;
+ assertTrue(myDelete(proxy, p));
+ }
+
+ // Trap returns trueish but target property is not configurable.
+ for (p of properties) {
+ Object.defineProperty(target, p, {value: 42, configurable: false});
+ assertThrows(() => myDelete(proxy, p), TypeError);
+ }
+};
+
+
+TestTrappingTrueish(
+ (o, p) => delete o[p]);
+TestTrappingTrueish(
+ (o, p) => Reflect.deleteProperty(o, p));
+TestTrappingTrueish(
+ (o, p) => {"use strict"; return delete o[p]});
+TestTrappingTrueish(
+ (o, p) => {"use strict"; return Reflect.deleteProperty(o, p)});
+
+
+function TestTrappingTrueish2(myDelete) {
+ var handler = {
+ deleteProperty(target, p) {
+ Object.defineProperty(target, p, {configurable: false});
+ return 42
+ }
+ };
+ var target = {};
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish but target property is not configurable. In contrast
+ // to above, here the target property was configurable before the trap call.
+ for (p of properties) {
+ target[p] = 42;
+ assertThrows(() => myDelete(proxy, p), TypeError);
+ }
+};
+
+
+TestTrappingTrueish2(
+ (o, p) => delete o[p]);
+TestTrappingTrueish2(
+ (o, p) => Reflect.deleteProperty(o, p));
+TestTrappingTrueish2(
+ (o, p) => {"use strict"; return delete o[p]});
+TestTrappingTrueish2(
+ (o, p) => {"use strict"; return Reflect.deleteProperty(o, p)});
+
+
+function TestTrappingFalsish(myDelete, shouldThrow) {
+ var handler = { deleteProperty() {return ""} };
+ var target = {};
+ var proxy = new Proxy(target, handler);
+
+ var properties =
+ ["bla", "0", 1, Symbol(), {[Symbol.toPrimitive]() {return "a"}}];
+
+ // Trap returns falsish and target doesn't own property.
+ for (p of properties) {
+ if (shouldThrow) {
+ assertThrows(() => myDelete(proxy, p), TypeError);
+ } else {
+ assertFalse(myDelete(proxy, p));
+ }
+ }
+
+ // Trap returns falsish and target property is configurable.
+ for (p of properties) {
+ target[p] = 42;
+ if (shouldThrow) {
+ assertThrows(() => myDelete(proxy, p), TypeError);
+ } else {
+ assertFalse(myDelete(proxy, p));
+ }
+ }
+
+ // Trap returns falsish and target property is not configurable.
+ for (p of properties) {
+ Object.defineProperty(target, p, {value: 42, configurable: false});
+ if (shouldThrow) {
+ assertThrows(() => myDelete(proxy, p), TypeError);
+ } else {
+ assertFalse(myDelete(proxy, p));
+ }
+ }
+};
+
+
+TestTrappingFalsish(
+ (o, p) => delete o[p], false);
+TestTrappingFalsish(
+ (o, p) => Reflect.deleteProperty(o, p), false);
+TestTrappingFalsish(
+ (o, p) => {"use strict"; return delete o[p]}, true);
+TestTrappingFalsish(
+ (o, p) => {"use strict"; return Reflect.deleteProperty(o, p)}, false);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-enumerate.js b/deps/v8/test/mjsunit/harmony/proxies-enumerate.js
new file mode 100644
index 0000000000..82464d0c7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-enumerate.js
@@ -0,0 +1,109 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var target = {
+ "target_one": 1
+};
+target.__proto__ = {
+ "target_two": 2
+};
+var handler = {
+ enumerate: function(target) {
+ function* keys() {
+ yield "foo";
+ yield "bar";
+ }
+ return keys();
+ },
+ // For-in calls "has" on every iteration, so for TestForIn() below to
+ // detect all results of the "enumerate" trap, "has" must return true.
+ has: function(target, name) {
+ return true;
+ }
+}
+
+var proxy = new Proxy(target, handler);
+
+function TestForIn(receiver, expected) {
+ var result = [];
+ for (var k in receiver) {
+ result.push(k);
+ }
+ assertEquals(expected, result);
+}
+
+TestForIn(proxy, ["foo", "bar"]);
+
+// Test revoked proxy.
+var pair = Proxy.revocable(target, handler);
+TestForIn(pair.proxy, ["foo", "bar"]);
+pair.revoke();
+assertThrows(()=>{ TestForIn(pair.proxy, ["foo", "bar"]) }, TypeError);
+
+// Properly call traps on proxies on the prototype chain.
+var receiver = {
+ "receiver_one": 1
+};
+receiver.__proto__ = proxy;
+TestForIn(receiver, ["receiver_one", "foo", "bar"]);
+
+// Fall through to default behavior when trap is undefined.
+handler.enumerate = undefined;
+TestForIn(proxy, ["target_one", "target_two"]);
+delete handler.enumerate;
+TestForIn(proxy, ["target_one", "target_two"]);
+
+// Non-string keys must be filtered.
+function TestNonStringKey(key) {
+ handler.enumerate = function(target) {
+ function* keys() { yield key; }
+ return keys();
+ }
+ assertThrows("for (var k in proxy) {}", TypeError);
+}
+
+TestNonStringKey(1);
+TestNonStringKey(3.14);
+TestNonStringKey(Symbol("foo"));
+TestNonStringKey({bad: "value"});
+TestNonStringKey(null);
+TestNonStringKey(undefined);
+TestNonStringKey(true);
+
+(function testProtoProxyEnumerate() {
+ var keys = ['a', 'b', 'c', 'd'];
+ var handler = {
+ enumerate() { return keys[Symbol.iterator]() },
+ has(target, key) { return false }
+ };
+ var proxy = new Proxy({}, handler);
+ var seen_keys = [];
+ for (var i in proxy) {
+ seen_keys.push(i);
+ }
+ assertEquals([], seen_keys);
+
+ handler.has = function(target, key) { return true };
+ for (var i in proxy) {
+ seen_keys.push(i);
+ }
+ assertEquals(keys, seen_keys);
+
+ o = {__proto__:proxy};
+ handler.has = function(target, key) { return false };
+ seen_keys = [];
+ for (var i in o) {
+ seen_keys.push(i);
+ }
+ assertEquals([], seen_keys);
+
+ handler.has = function(target, key) { return true };
+ seen_keys = [];
+ for (var i in o) {
+ seen_keys.push(i);
+ }
+ assertEquals(keys, seen_keys);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js b/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
index 7b2af722f2..72ab092a88 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-example-membrane.js
@@ -155,7 +155,7 @@ function createSimpleMembrane(target) {
}
var baseHandler = createHandler(obj);
- var handler = Proxy.create(Object.freeze({
+ var handler = new Proxy({}, Object.freeze({
get: function(receiver, name) {
return function() {
var arg = (name === "get" || name == "set") ? arguments[1] : "";
@@ -188,7 +188,7 @@ function createSimpleMembrane(target) {
return Proxy.createFunction(handler, callTrap, constructTrap);
} else {
var prototype = wrap(Object.getPrototypeOf(obj));
- return Proxy.create(handler, prototype);
+ return new Proxy(prototype, handler);
}
}
@@ -311,7 +311,7 @@ function createMembrane(wetTarget) {
if (dryResult) { return dryResult; }
var wetHandler = createHandler(wet);
- var dryRevokeHandler = Proxy.create(Object.freeze({
+ var dryRevokeHandler = new Proxy({}, Object.freeze({
get: function(receiver, name) {
return function() {
var arg = (name === "get" || name == "set") ? arguments[1] : "";
@@ -348,7 +348,7 @@ function createMembrane(wetTarget) {
Proxy.createFunction(dryRevokeHandler, callTrap, constructTrap);
} else {
dryResult =
- Proxy.create(dryRevokeHandler, asDry(Object.getPrototypeOf(wet)));
+ new Proxy(asDry(Object.getPrototypeOf(wet)), dryRevokeHandler);
}
wet2dry.set(wet, dryResult);
dry2wet.set(dryResult, wet);
@@ -378,7 +378,7 @@ function createMembrane(wetTarget) {
if (wetResult) { return wetResult; }
var dryHandler = createHandler(dry);
- var wetRevokeHandler = Proxy.create(Object.freeze({
+ var wetRevokeHandler = new Proxy({}, Object.freeze({
get: function(receiver, name) {
return function() {
var arg = (name === "get" || name == "set") ? arguments[1] : "";
@@ -415,7 +415,7 @@ function createMembrane(wetTarget) {
Proxy.createFunction(wetRevokeHandler, callTrap, constructTrap);
} else {
wetResult =
- Proxy.create(wetRevokeHandler, asWet(Object.getPrototypeOf(dry)));
+ new Proxy(asWet(Object.getPrototypeOf(dry)), wetRevokeHandler);
}
dry2wet.set(dry, wetResult);
wet2dry.set(wetResult, dry);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-for.js b/deps/v8/test/mjsunit/harmony/proxies-for.js
index e98c34afe5..aea9bd6c21 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-for.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-for.js
@@ -31,13 +31,17 @@
// Helper.
function TestWithProxies(test, x, y, z) {
- test(Proxy.create, x, y, z)
- test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+ test(function(h){ return new Proxy({}, h) }, x, y, z)
+ test(function(h) {
+ return new Proxy(function() {}, h)
+ }, x, y, z)
}
// Iterate over a proxy.
+Array.prototype.values = function() { return this[Symbol.iterator]() }
+
function TestForIn(properties, handler) {
TestWithProxies(TestForIn2, properties, handler)
}
@@ -50,30 +54,19 @@ function TestForIn2(create, properties, handler) {
}
TestForIn(["0", "a"], {
- enumerate: function() { return [0, "a"] }
+ enumerate() { return ["0", "a"].values() },
+ has(target, property) { return true }
})
TestForIn(["null", "a"], {
- enumerate: function() { return this.enumerate2() },
- enumerate2: function() { return [null, "a"] }
-})
-
-TestForIn(["b", "d"], {
- getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
- getPropertyDescriptor: function(k) {
- switch (k) {
- case "a": return {enumerable: false, value: "3", configurable: true};
- case "b": return {enumerable: true, get get() {}, configurable: true};
- case "c": return {value: 4, configurable: true};
- case "d": return {get enumerable() { return true }, configurable: true};
- default: return undefined;
- }
- }
+ enumerate() { return this.enumerate2() },
+ enumerate2() { return ["null", "a"].values() },
+ has(target, property) { return true }
})
-TestForIn(["b", "a", "0", "c"], Proxy.create({
+TestForIn(["b", "a", "0", "c"], new Proxy({}, {
get: function(pr, pk) {
- return function() { return ["b", "a", 0, "c"] }
+ return function() { return ["b", "a", "0", "c"].values() }
}
}))
@@ -101,31 +94,14 @@ function TestForInDerived2(create, properties, handler) {
}
TestForInDerived(["0", "a"], {
- enumerate: function() { return [0, "a"] },
- getPropertyDescriptor: function(k) {
- return k == "0" || k == "a" ? {configurable: true} : undefined
- }
+ enumerate: function() { return ["0", "a"].values() },
+ has: function(t, k) { return k == "0" || k == "a" }
})
TestForInDerived(["null", "a"], {
enumerate: function() { return this.enumerate2() },
- enumerate2: function() { return [null, "a"] },
- getPropertyDescriptor: function(k) {
- return k == "null" || k == "a" ? {configurable: true} : undefined
- }
-})
-
-TestForInDerived(["b", "d"], {
- getPropertyNames: function() { return ["a", "b", "c", "d", "e"] },
- getPropertyDescriptor: function(k) {
- switch (k) {
- case "a": return {enumerable: false, value: "3", configurable: true};
- case "b": return {enumerable: true, get get() {}, configurable: true};
- case "c": return {value: 4, configurable: true};
- case "d": return {get enumerable() { return true }, configurable: true};
- default: return undefined;
- }
- }
+ enumerate2: function() { return ["null", "a"].values() },
+ has: function(t, k) { return k == "null" || k == "a" }
})
@@ -139,8 +115,8 @@ function TestForInThrow(handler) {
function TestForInThrow2(create, handler) {
var p = create(handler)
var o = Object.create(p)
- assertThrows(function(){ for (var x in p) {} }, "myexn")
- assertThrows(function(){ for (var x in o) {} }, "myexn")
+ assertThrowsEquals(function(){ for (var x in p) {} }, "myexn")
+ assertThrowsEquals(function(){ for (var x in o) {} }, "myexn")
}
TestForInThrow({
@@ -152,23 +128,14 @@ TestForInThrow({
enumerate2: function() { throw "myexn" }
})
-TestForInThrow({
- getPropertyNames: function() { throw "myexn" }
-})
-
-TestForInThrow({
- getPropertyNames: function() { return ["a"] },
- getPropertyDescriptor: function() { throw "myexn" }
-})
-
-TestForInThrow(Proxy.create({
+TestForInThrow(new Proxy({}, {
get: function(pr, pk) {
return function() { throw "myexn" }
}
}));
(function() {
- var p = Proxy.create({enumerate:function() { return [0]; }});
+ var p = new Proxy({}, {enumerate:function() { return ["0"].values(); }});
var o = [0];
o.__proto__ = p;
var keys = [];
@@ -177,7 +144,6 @@ TestForInThrow(Proxy.create({
})();
(function () {
- var p = Proxy.create({getOwnPropertyNames:
- function() { return [1, Symbol(), 2] }});
+ var p = new Proxy({}, {ownKeys: function() { return ["1", Symbol(), "2"] }});
assertEquals(["1","2"], Object.getOwnPropertyNames(p));
})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-function.js b/deps/v8/test/mjsunit/harmony/proxies-function.js
index 113ea79f46..b2498b8665 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-function.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-function.js
@@ -28,747 +28,741 @@
// Flags: --harmony-proxies --allow-natives-syntax
-// Helper.
+// TODO(neis): These tests are temporarily commented out because of ongoing
+// changes to the implementation of proxies.
-function CreateFrozen(handler, callTrap, constructTrap) {
- if (handler.fix === undefined) handler.fix = function() { return {} }
- var f = Proxy.createFunction(handler, callTrap, constructTrap)
- Object.freeze(f)
- return f
-}
-
-// Ensures that checking the "length" property of a function proxy doesn't
-// crash due to lack of a [[Get]] method.
-var handler = {
- get : function(r, n) { return n == "length" ? 2 : undefined }
-}
-
-
-// Calling (call, Function.prototype.call, Function.prototype.apply,
-// Function.prototype.bind).
-
-var global_object = this
-var receiver
-
-function TestCall(isStrict, callTrap) {
- assertEquals(42, callTrap(5, 37))
- assertSame(isStrict ? undefined : global_object, receiver)
-
- var handler = {
- get: function(r, k) {
- return k == "length" ? 2 : Function.prototype[k]
- }
- }
- var f = Proxy.createFunction(handler, callTrap)
- var o = {f: f}
- global_object.f = f
-
- receiver = 333
- assertEquals(42, f(11, 31))
- assertSame(isStrict ? undefined : global_object, receiver)
- receiver = 333
- assertEquals(42, o.f(10, 32))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, o["f"](9, 33))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, (1, o).f(8, 34))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, (1, o)["f"](7, 35))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, f.call(o, 32, 10))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, f.call(undefined, 33, 9))
- assertSame(isStrict ? undefined : global_object, receiver)
- receiver = 333
- assertEquals(42, f.call(null, 33, 9))
- assertSame(isStrict ? null : global_object, receiver)
- receiver = 333
- assertEquals(44, f.call(2, 21, 23))
- assertSame(2, receiver.valueOf())
- receiver = 333
- assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
- assertSame(isStrict ? null : global_object, receiver)
- assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
- assertEquals(2, receiver.valueOf())
- receiver = 333
- assertEquals(32, f.apply(o, [16, 16]))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Call(f, o, 11, 31));
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Call(f, null, 11, 31));
- assertSame(isStrict ? null : global_object, receiver)
- receiver = 333
- assertEquals(42, %Apply(f, o, [11, 31], 0, 2))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Apply(f, null, [11, 31], 0, 2))
- assertSame(isStrict ? null : global_object, receiver)
- receiver = 333
- assertEquals(42, %_Call(f, o, 11, 31))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %_Call(f, null, 11, 31))
- assertSame(isStrict ? null : global_object, receiver)
-
- var ff = Function.prototype.bind.call(f, o, 12)
- assertTrue(ff.length <= 1) // TODO(rossberg): Not spec'ed yet, be lax.
- receiver = 333
- assertEquals(42, ff(30))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(33, Function.prototype.call.call(ff, {}, 21))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(23, %Call(ff, {}, 11));
- assertSame(o, receiver)
- receiver = 333
- assertEquals(23, %Call(ff, {}, 11, 3));
- assertSame(o, receiver)
- receiver = 333
- assertEquals(24, %Apply(ff, {}, [12, 13], 0, 1))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(24, %Apply(ff, {}, [12, 13], 0, 2))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(34, %_Call(ff, {}, 22))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(34, %_Call(ff, {}, 22, 3))
- assertSame(o, receiver)
-
- var fff = Function.prototype.bind.call(ff, o, 30)
- assertEquals(0, fff.length)
- receiver = 333
- assertEquals(42, fff())
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, Function.prototype.call.call(fff, {}))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, Function.prototype.apply.call(fff, {}))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Call(fff, {}));
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Call(fff, {}, 11, 3))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Apply(fff, {}, [], 0, 0))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Apply(fff, {}, [12, 13], 0, 0))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %Apply(fff, {}, [12, 13], 0, 2))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %_Call(fff, {}))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %_Call(fff, {}, 3, 4, 5))
- assertSame(o, receiver)
-
- var f = CreateFrozen({}, callTrap)
- receiver = 333
- assertEquals(42, f(11, 31))
- assertSame(isStrict ? undefined : global_object, receiver)
- var o = {f: f}
- receiver = 333
- assertEquals(42, o.f(10, 32))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, o["f"](9, 33))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, (1, o).f(8, 34))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, (1, o)["f"](7, 35))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(23, %Call(f, o, 11, 12))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(27, %Apply(f, o, [12, 13, 14], 1, 2))
- assertSame(o, receiver)
- receiver = 333
- assertEquals(42, %_Call(f, o, 18, 24))
- assertSame(o, receiver)
-}
-
-TestCall(false, function(x, y) {
- receiver = this
- return x + y
-})
-
-TestCall(true, function(x, y) {
- "use strict"
- receiver = this
- return x + y
-})
-
-TestCall(false, function() {
- receiver = this
- return arguments[0] + arguments[1]
-})
-
-TestCall(false, Proxy.createFunction(handler, function(x, y) {
- receiver = this
- return x + y
-}))
-
-TestCall(true, Proxy.createFunction(handler, function(x, y) {
- "use strict"
- receiver = this
- return x + y
-}))
-
-TestCall(false, CreateFrozen(handler, function(x, y) {
- receiver = this
- return x + y
-}))
-
-
-
-// Using intrinsics as call traps.
-
-function TestCallIntrinsic(type, callTrap) {
- var f = Proxy.createFunction({}, callTrap)
- var x = f()
- assertTrue(typeof x == type)
-}
-
-TestCallIntrinsic("boolean", Boolean)
-TestCallIntrinsic("number", Number)
-TestCallIntrinsic("string", String)
-TestCallIntrinsic("object", Object)
-TestCallIntrinsic("function", Function)
-
-
-
-// Throwing from call trap.
-
-function TestCallThrow(callTrap) {
- var f = Proxy.createFunction({}, callTrap)
- assertThrows(function(){ f(11) }, "myexn")
- assertThrows(function(){ ({x: f}).x(11) }, "myexn")
- assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
- assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
- assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
- assertThrows(function(){ %Call(f, {}) }, "myexn")
- assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
- assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
- assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
- assertThrows(function(){ %_Call(f, {}) }, "myexn")
- assertThrows(function(){ %_Call(f, {}, 1, 2) }, "myexn")
-
- var f = CreateFrozen({}, callTrap)
- assertThrows(function(){ f(11) }, "myexn")
- assertThrows(function(){ ({x: f}).x(11) }, "myexn")
- assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
- assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
- assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
- assertThrows(function(){ %Call(f, {}) }, "myexn")
- assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
- assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
- assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
- assertThrows(function(){ %_Call(f, {}) }, "myexn")
- assertThrows(function(){ %_Call(f, {}, 1, 2) }, "myexn")
-}
-
-TestCallThrow(function() { throw "myexn" })
-TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
-
-
-
-// Construction (new).
-
-var prototype = {myprop: 0}
-var receiver
-
-var handlerWithPrototype = {
- fix: function() { return { prototype: { value: prototype } }; },
- get: function(r, n) {
- if (n == "length") return 2;
- assertEquals("prototype", n);
- return prototype;
- }
-}
-
-var handlerSansPrototype = {
- fix: function() { return { length: { value: 2 } } },
- get: function(r, n) {
- if (n == "length") return 2;
- assertEquals("prototype", n);
- return undefined;
- }
-}
-
-function ReturnUndef(x, y) {
- "use strict";
- receiver = this;
- this.sum = x + y;
-}
-
-function ReturnThis(x, y) {
- "use strict";
- receiver = this;
- this.sum = x + y;
- return this;
-}
-
-function ReturnNew(x, y) {
- "use strict";
- receiver = this;
- return {sum: x + y};
-}
-
-function ReturnNewWithProto(x, y) {
- "use strict";
- receiver = this;
- var result = Object.create(prototype);
- result.sum = x + y;
- return result;
-}
-
-function TestConstruct(proto, constructTrap) {
- TestConstruct2(proto, constructTrap, handlerWithPrototype)
- TestConstruct2(proto, constructTrap, handlerSansPrototype)
-}
-
-function TestConstruct2(proto, constructTrap, handler) {
- var f = Proxy.createFunction(handler, function() {}, constructTrap)
- var o = new f(11, 31)
- assertEquals(undefined, receiver)
- assertEquals(42, o.sum)
- assertSame(proto, Object.getPrototypeOf(o))
-
- var f = CreateFrozen(handler, function() {}, constructTrap)
- var o = new f(11, 32)
- assertEquals(undefined, receiver)
- assertEquals(43, o.sum)
- assertSame(proto, Object.getPrototypeOf(o))
-}
-
-TestConstruct(Object.prototype, ReturnNew)
-TestConstruct(prototype, ReturnNewWithProto)
-
-TestConstruct(Object.prototype, Proxy.createFunction(handler, ReturnNew))
-TestConstruct(prototype, Proxy.createFunction(handler, ReturnNewWithProto))
-
-TestConstruct(Object.prototype, CreateFrozen(handler, ReturnNew))
-TestConstruct(prototype, CreateFrozen(handler, ReturnNewWithProto))
-
-
-
-// Construction with derived construct trap.
-
-function TestConstructFromCall(proto, returnsThis, callTrap) {
- TestConstructFromCall2(prototype, returnsThis, callTrap, handlerWithPrototype)
- TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
-}
-
-function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
- // TODO(rossberg): handling of prototype for derived construct trap will be
- // fixed in a separate change. Commenting out checks below for now.
- var f = Proxy.createFunction(handler, callTrap)
- var o = new f(11, 31)
- if (returnsThis) assertEquals(o, receiver)
- assertEquals(42, o.sum)
- // assertSame(proto, Object.getPrototypeOf(o))
-
- var g = CreateFrozen(handler, callTrap)
- // assertSame(f.prototype, g.prototype)
- var o = new g(11, 32)
- if (returnsThis) assertEquals(o, receiver)
- assertEquals(43, o.sum)
- // assertSame(proto, Object.getPrototypeOf(o))
-}
-
-TestConstructFromCall(Object.prototype, true, ReturnUndef)
-TestConstructFromCall(Object.prototype, true, ReturnThis)
-TestConstructFromCall(Object.prototype, false, ReturnNew)
-TestConstructFromCall(prototype, false, ReturnNewWithProto)
-
-TestConstructFromCall(Object.prototype, true,
- Proxy.createFunction(handler, ReturnUndef))
-TestConstructFromCall(Object.prototype, true,
- Proxy.createFunction(handler, ReturnThis))
-TestConstructFromCall(Object.prototype, false,
- Proxy.createFunction(handler, ReturnNew))
-TestConstructFromCall(prototype, false,
- Proxy.createFunction(handler, ReturnNewWithProto))
-
-TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
-TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
-TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
-TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
-
-ReturnUndef.prototype = prototype
-ReturnThis.prototype = prototype
-ReturnNew.prototype = prototype
-ReturnNewWithProto.prototype = prototype
-
-TestConstructFromCall(prototype, true, ReturnUndef)
-TestConstructFromCall(prototype, true, ReturnThis)
-TestConstructFromCall(Object.prototype, false, ReturnNew)
-TestConstructFromCall(prototype, false, ReturnNewWithProto)
-
-TestConstructFromCall(Object.prototype, true,
- Proxy.createFunction(handler, ReturnUndef))
-TestConstructFromCall(Object.prototype, true,
- Proxy.createFunction(handler, ReturnThis))
-TestConstructFromCall(Object.prototype, false,
- Proxy.createFunction(handler, ReturnNew))
-TestConstructFromCall(prototype, false,
- Proxy.createFunction(handler, ReturnNewWithProto))
-
-TestConstructFromCall(prototype, true,
- Proxy.createFunction(handlerWithPrototype, ReturnUndef))
-TestConstructFromCall(prototype, true,
- Proxy.createFunction(handlerWithPrototype, ReturnThis))
-TestConstructFromCall(Object.prototype, false,
- Proxy.createFunction(handlerWithPrototype, ReturnNew))
-TestConstructFromCall(prototype, false,
- Proxy.createFunction(handlerWithPrototype,
- ReturnNewWithProto))
-
-TestConstructFromCall(prototype, true,
- CreateFrozen(handlerWithPrototype, ReturnUndef))
-TestConstructFromCall(prototype, true,
- CreateFrozen(handlerWithPrototype, ReturnThis))
-TestConstructFromCall(Object.prototype, false,
- CreateFrozen(handlerWithPrototype, ReturnNew))
-TestConstructFromCall(prototype, false,
- CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
-
-
-
-// Throwing from the construct trap.
-
-function TestConstructThrow(trap) {
- TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
- trap))
- TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
- function() {},
- trap))
-}
-
-function TestConstructThrow2(f) {
- assertThrows(function(){ new f(11) }, "myexn")
- Object.freeze(f)
- assertThrows(function(){ new f(11) }, "myexn")
-}
-
-TestConstructThrow(function() { throw "myexn" })
-TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
-TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
-
-
-
-// Using function proxies as getters and setters.
-
-var value
-var receiver
-
-function TestAccessorCall(getterCallTrap, setterCallTrap) {
- var handler = { fix: function() { return {} } }
- var pgetter = Proxy.createFunction(handler, getterCallTrap)
- var psetter = Proxy.createFunction(handler, setterCallTrap)
-
- var o = {}
- var oo = Object.create(o)
- Object.defineProperty(o, "a", {get: pgetter, set: psetter})
- Object.defineProperty(o, "b", {get: pgetter})
- Object.defineProperty(o, "c", {set: psetter})
- Object.defineProperty(o, "3", {get: pgetter, set: psetter})
- Object.defineProperty(oo, "a", {value: 43})
-
- receiver = ""
- assertEquals(42, o.a)
- assertSame(o, receiver)
- receiver = ""
- assertEquals(42, o.b)
- assertSame(o, receiver)
- receiver = ""
- assertEquals(undefined, o.c)
- assertEquals("", receiver)
- receiver = ""
- assertEquals(42, o["a"])
- assertSame(o, receiver)
- receiver = ""
- assertEquals(42, o[3])
- assertSame(o, receiver)
-
- receiver = ""
- assertEquals(43, oo.a)
- assertEquals("", receiver)
- receiver = ""
- assertEquals(42, oo.b)
- assertSame(oo, receiver)
- receiver = ""
- assertEquals(undefined, oo.c)
- assertEquals("", receiver)
- receiver = ""
- assertEquals(43, oo["a"])
- assertEquals("", receiver)
- receiver = ""
- assertEquals(42, oo[3])
- assertSame(oo, receiver)
-
- receiver = ""
- assertEquals(50, o.a = 50)
- assertSame(o, receiver)
- assertEquals(50, value)
- receiver = ""
- assertEquals(51, o.b = 51)
- assertEquals("", receiver)
- assertEquals(50, value) // no setter
- assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
- receiver = ""
- assertEquals(52, o.c = 52)
- assertSame(o, receiver)
- assertEquals(52, value)
- receiver = ""
- assertEquals(53, o["a"] = 53)
- assertSame(o, receiver)
- assertEquals(53, value)
- receiver = ""
- assertEquals(54, o[3] = 54)
- assertSame(o, receiver)
- assertEquals(54, value)
-
- value = 0
- receiver = ""
- assertEquals(60, oo.a = 60)
- assertEquals("", receiver)
- assertEquals(0, value) // oo has own 'a'
- assertEquals(61, oo.b = 61)
- assertSame("", receiver)
- assertEquals(0, value) // no setter
- assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
- receiver = ""
- assertEquals(62, oo.c = 62)
- assertSame(oo, receiver)
- assertEquals(62, value)
- receiver = ""
- assertEquals(63, oo["c"] = 63)
- assertSame(oo, receiver)
- assertEquals(63, value)
- receiver = ""
- assertEquals(64, oo[3] = 64)
- assertSame(oo, receiver)
- assertEquals(64, value)
-}
-
-TestAccessorCall(
- function() { receiver = this; return 42 },
- function(x) { receiver = this; value = x }
-)
-
-TestAccessorCall(
- function() { "use strict"; receiver = this; return 42 },
- function(x) { "use strict"; receiver = this; value = x }
-)
-
-TestAccessorCall(
- Proxy.createFunction({}, function() { receiver = this; return 42 }),
- Proxy.createFunction({}, function(x) { receiver = this; value = x })
-)
-
-TestAccessorCall(
- CreateFrozen({}, function() { receiver = this; return 42 }),
- CreateFrozen({}, function(x) { receiver = this; value = x })
-)
-
-
-
-// Passing a proxy function to higher-order library functions.
-
-function TestHigherOrder(f) {
- assertEquals(6, [6, 2].map(f)[0])
- assertEquals(4, [5, 2].reduce(f, 4))
- assertTrue([1, 2].some(f))
- assertEquals("a.b.c", "a.b.c".replace(".", f))
-}
-
-TestHigherOrder(function(x) { return x })
-TestHigherOrder(function(x) { "use strict"; return x })
-TestHigherOrder(Proxy.createFunction({}, function(x) { return x }))
-TestHigherOrder(CreateFrozen({}, function(x) { return x }))
-
-
-
-// TODO(rossberg): Ultimately, I want to have the following test function
-// run through, but it currently fails on so many cases (some not even
-// involving proxies), that I leave that for later...
-/*
-function TestCalls() {
- var handler = {
- get: function(r, k) {
- return k == "length" ? 2 : Function.prototype[k]
- }
- }
- var bind = Function.prototype.bind
- var o = {}
-
- var traps = [
- function(x, y) {
- return {receiver: this, result: x + y, strict: false}
- },
- function(x, y) { "use strict";
- return {receiver: this, result: x + y, strict: true}
- },
- function() {
- var x = arguments[0], y = arguments[1]
- return {receiver: this, result: x + y, strict: false}
- },
- Proxy.createFunction(handler, function(x, y) {
- return {receiver: this, result: x + y, strict: false}
- }),
- Proxy.createFunction(handler, function() {
- var x = arguments[0], y = arguments[1]
- return {receiver: this, result: x + y, strict: false}
- }),
- Proxy.createFunction(handler, function(x, y) { "use strict"
- return {receiver: this, result: x + y, strict: true}
- }),
- CreateFrozen(handler, function(x, y) {
- return {receiver: this, result: x + y, strict: false}
- }),
- CreateFrozen(handler, function(x, y) { "use strict"
- return {receiver: this, result: x + y, strict: true}
- }),
- ]
- var creates = [
- function(trap) { return trap },
- function(trap) { return CreateFrozen({}, callTrap) },
- function(trap) { return Proxy.createFunction(handler, callTrap) },
- function(trap) {
- return Proxy.createFunction(handler, CreateFrozen({}, callTrap))
- },
- function(trap) {
- return Proxy.createFunction(handler, Proxy.createFunction(handler, callTrap))
- },
- ]
- var binds = [
- function(f, o, x, y) { return f },
- function(f, o, x, y) { return bind.call(f, o) },
- function(f, o, x, y) { return bind.call(f, o, x) },
- function(f, o, x, y) { return bind.call(f, o, x, y) },
- function(f, o, x, y) { return bind.call(f, o, x, y, 5) },
- function(f, o, x, y) { return bind.call(bind.call(f, o), {}, x, y) },
- function(f, o, x, y) { return bind.call(bind.call(f, o, x), {}, y) },
- function(f, o, x, y) { return bind.call(bind.call(f, o, x, y), {}, 5) },
- ]
- var calls = [
- function(f, x, y) { return f(x, y) },
- function(f, x, y) { var g = f; return g(x, y) },
- function(f, x, y) { with ({}) return f(x, y) },
- function(f, x, y) { var g = f; with ({}) return g(x, y) },
- function(f, x, y, o) { with (o) return f(x, y) },
- function(f, x, y, o) { return f.call(o, x, y) },
- function(f, x, y, o) { return f.apply(o, [x, y]) },
- function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
- function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
- function(f, x, y, o) { return %_Call(f, o, x, y) },
- function(f, x, y, o) { return %Call(f, o, x, y) },
- function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
- function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
- function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
- function(f, x, y, o) { if (typeof o == "object") return o["f"](x, y) },
- function(f, x, y, o) { if (typeof o == "object") return (1, o).f(x, y) },
- function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
- ]
- var receivers = [o, global_object, undefined, null, 2, "bla", true]
- var expectedSloppies = [o, global_object, global_object, global_object]
-
- for (var t = 0; t < traps.length; ++t) {
- for (var i = 0; i < creates.length; ++i) {
- for (var j = 0; j < binds.length; ++j) {
- for (var k = 0; k < calls.length; ++k) {
- for (var m = 0; m < receivers.length; ++m) {
- for (var n = 0; n < receivers.length; ++n) {
- var bound = receivers[m]
- var receiver = receivers[n]
- var func = binds[j](creates[i](traps[t]), bound, 31, 11)
- var expected = j > 0 ? bound : receiver
- var expectedSloppy = expectedSloppies[j > 0 ? m : n]
- o.f = func
- global_object.f = func
- var x = calls[k](func, 11, 31, receiver)
- if (x !== undefined) {
- assertEquals(42, x.result)
- if (calls[k].length < 4)
- assertSame(x.strict ? undefined : global_object, x.receiver)
- else if (x.strict)
- assertSame(expected, x.receiver)
- else if (expectedSloppy === undefined)
- assertSame(expected, x.receiver.valueOf())
- else
- assertSame(expectedSloppy, x.receiver)
- }
- }
- }
- }
- }
- }
- }
-}
-
-TestCalls()
-*/
-
-var realms = [Realm.create(), Realm.create()];
-Realm.shared = {};
-
-Realm.eval(realms[0], "function f() { return this; };");
-Realm.eval(realms[0], "Realm.shared.f = f;");
-Realm.eval(realms[0], "Realm.shared.fg = this;");
-Realm.eval(realms[1], "function g() { return this; };");
-Realm.eval(realms[1], "Realm.shared.g = g;");
-Realm.eval(realms[1], "Realm.shared.gg = this;");
-
-var fp = Proxy.createFunction({}, Realm.shared.f);
-var gp = Proxy.createFunction({}, Realm.shared.g);
-
-for (var i = 0; i < 10; i++) {
- assertEquals(Realm.shared.fg, fp());
- assertEquals(Realm.shared.gg, gp());
-
- with (this) {
- assertEquals(this, fp());
- assertEquals(this, gp());
- }
-
- with ({}) {
- assertEquals(Realm.shared.fg, fp());
- assertEquals(Realm.shared.gg, gp());
- }
-}
+//// Ensures that checking the "length" property of a function proxy doesn't
+//// crash due to lack of a [[Get]] method.
+//var handler = {
+// get : function(r, n) { return n == "length" ? 2 : undefined }
+//}
+//
+//
+//// Calling (call, Function.prototype.call, Function.prototype.apply,
+//// Function.prototype.bind).
+//
+//var global_object = this
+//var receiver
+//
+//function TestCall(isStrict, callTrap) {
+// assertEquals(42, callTrap(5, 37))
+// assertSame(isStrict ? undefined : global_object, receiver)
+//
+// var handler = {
+// get: function(r, k) {
+// return k == "length" ? 2 : Function.prototype[k]
+// }
+// }
+// var f = Proxy.createFunction(handler, callTrap)
+// var o = {f: f}
+// global_object.f = f
+//
+// receiver = 333
+// assertEquals(42, f(11, 31))
+// assertSame(isStrict ? undefined : global_object, receiver)
+// receiver = 333
+// assertEquals(42, o.f(10, 32))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, o["f"](9, 33))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, (1, o).f(8, 34))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, (1, o)["f"](7, 35))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, f.call(o, 32, 10))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, f.call(undefined, 33, 9))
+// assertSame(isStrict ? undefined : global_object, receiver)
+// receiver = 333
+// assertEquals(42, f.call(null, 33, 9))
+// assertSame(isStrict ? null : global_object, receiver)
+// receiver = 333
+// assertEquals(44, f.call(2, 21, 23))
+// assertSame(2, receiver.valueOf())
+// receiver = 333
+// assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
+// assertSame(isStrict ? null : global_object, receiver)
+// assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
+// assertEquals(2, receiver.valueOf())
+// receiver = 333
+// assertEquals(32, f.apply(o, [16, 16]))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Call(f, o, 11, 31));
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Call(f, null, 11, 31));
+// assertSame(isStrict ? null : global_object, receiver)
+// receiver = 333
+// assertEquals(42, %Apply(f, o, [11, 31], 0, 2))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Apply(f, null, [11, 31], 0, 2))
+// assertSame(isStrict ? null : global_object, receiver)
+// receiver = 333
+// assertEquals(42, %_Call(f, o, 11, 31))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %_Call(f, null, 11, 31))
+// assertSame(isStrict ? null : global_object, receiver)
+//
+// var ff = Function.prototype.bind.call(f, o, 12)
+// assertTrue(ff.length <= 1) // TODO(rossberg): Not spec'ed yet, be lax.
+// receiver = 333
+// assertEquals(42, ff(30))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(33, Function.prototype.call.call(ff, {}, 21))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(23, %Call(ff, {}, 11));
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(23, %Call(ff, {}, 11, 3));
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(24, %Apply(ff, {}, [12, 13], 0, 1))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(24, %Apply(ff, {}, [12, 13], 0, 2))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(34, %_Call(ff, {}, 22))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(34, %_Call(ff, {}, 22, 3))
+// assertSame(o, receiver)
+//
+// var fff = Function.prototype.bind.call(ff, o, 30)
+// assertEquals(0, fff.length)
+// receiver = 333
+// assertEquals(42, fff())
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, Function.prototype.call.call(fff, {}))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, Function.prototype.apply.call(fff, {}))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Call(fff, {}));
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Call(fff, {}, 11, 3))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Apply(fff, {}, [], 0, 0))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Apply(fff, {}, [12, 13], 0, 0))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %Apply(fff, {}, [12, 13], 0, 2))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %_Call(fff, {}))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %_Call(fff, {}, 3, 4, 5))
+// assertSame(o, receiver)
+//
+// var f = CreateFrozen({}, callTrap)
+// receiver = 333
+// assertEquals(42, f(11, 31))
+// assertSame(isStrict ? undefined : global_object, receiver)
+// var o = {f: f}
+// receiver = 333
+// assertEquals(42, o.f(10, 32))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, o["f"](9, 33))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, (1, o).f(8, 34))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, (1, o)["f"](7, 35))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(23, %Call(f, o, 11, 12))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(27, %Apply(f, o, [12, 13, 14], 1, 2))
+// assertSame(o, receiver)
+// receiver = 333
+// assertEquals(42, %_Call(f, o, 18, 24))
+// assertSame(o, receiver)
+//}
+//
+//TestCall(false, function(x, y) {
+// receiver = this
+// return x + y
+//})
+//
+//TestCall(true, function(x, y) {
+// "use strict"
+// receiver = this
+// return x + y
+//})
+//
+//TestCall(false, function() {
+// receiver = this
+// return arguments[0] + arguments[1]
+//})
+//
+//TestCall(false, Proxy.createFunction(handler, function(x, y) {
+// receiver = this
+// return x + y
+//}))
+//
+//TestCall(true, Proxy.createFunction(handler, function(x, y) {
+// "use strict"
+// receiver = this
+// return x + y
+//}))
+//
+//TestCall(false, CreateFrozen(handler, function(x, y) {
+// receiver = this
+// return x + y
+//}))
+//
+//
+//
+//// Using intrinsics as call traps.
+//
+//function TestCallIntrinsic(type, callTrap) {
+// var f = Proxy.createFunction({}, callTrap)
+// var x = f()
+// assertTrue(typeof x == type)
+//}
+//
+//TestCallIntrinsic("boolean", Boolean)
+//TestCallIntrinsic("number", Number)
+//TestCallIntrinsic("string", String)
+//TestCallIntrinsic("object", Object)
+//TestCallIntrinsic("function", Function)
+//
+//
+//
+//// Throwing from call trap.
+//
+//function TestCallThrow(callTrap) {
+// var f = Proxy.createFunction({}, callTrap)
+// assertThrows(function(){ f(11) }, "myexn")
+// assertThrows(function(){ ({x: f}).x(11) }, "myexn")
+// assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
+// assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+// assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+// assertThrows(function(){ %Call(f, {}) }, "myexn")
+// assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
+// assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
+// assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
+// assertThrows(function(){ %_Call(f, {}) }, "myexn")
+// assertThrows(function(){ %_Call(f, {}, 1, 2) }, "myexn")
+//
+// var f = CreateFrozen({}, callTrap)
+// assertThrows(function(){ f(11) }, "myexn")
+// assertThrows(function(){ ({x: f}).x(11) }, "myexn")
+// assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
+// assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
+// assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
+// assertThrows(function(){ %Call(f, {}) }, "myexn")
+// assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
+// assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
+// assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
+// assertThrows(function(){ %_Call(f, {}) }, "myexn")
+// assertThrows(function(){ %_Call(f, {}, 1, 2) }, "myexn")
+//}
+//
+//TestCallThrow(function() { throw "myexn" })
+//TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+//TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
+//
+//
+//
+//// Construction (new).
+//
+//var prototype = {myprop: 0}
+//var receiver
+//
+//var handlerWithPrototype = {
+// fix: function() { return { prototype: { value: prototype } }; },
+// get: function(r, n) {
+// if (n == "length") return 2;
+// assertEquals("prototype", n);
+// return prototype;
+// }
+//}
+//
+//var handlerSansPrototype = {
+// fix: function() { return { length: { value: 2 } } },
+// get: function(r, n) {
+// if (n == "length") return 2;
+// assertEquals("prototype", n);
+// return undefined;
+// }
+//}
+//
+//function ReturnUndef(x, y) {
+// "use strict";
+// receiver = this;
+// this.sum = x + y;
+//}
+//
+//function ReturnThis(x, y) {
+// "use strict";
+// receiver = this;
+// this.sum = x + y;
+// return this;
+//}
+//
+//function ReturnNew(x, y) {
+// "use strict";
+// receiver = this;
+// return {sum: x + y};
+//}
+//
+//function ReturnNewWithProto(x, y) {
+// "use strict";
+// receiver = this;
+// var result = Object.create(prototype);
+// result.sum = x + y;
+// return result;
+//}
+//
+//function TestConstruct(proto, constructTrap) {
+// TestConstruct2(proto, constructTrap, handlerWithPrototype)
+// TestConstruct2(proto, constructTrap, handlerSansPrototype)
+//}
+//
+//function TestConstruct2(proto, constructTrap, handler) {
+// var f = Proxy.createFunction(handler, function() {}, constructTrap)
+// var o = new f(11, 31)
+// assertEquals(undefined, receiver)
+// assertEquals(42, o.sum)
+// assertSame(proto, Object.getPrototypeOf(o))
+//
+// var f = CreateFrozen(handler, function() {}, constructTrap)
+// var o = new f(11, 32)
+// assertEquals(undefined, receiver)
+// assertEquals(43, o.sum)
+// assertSame(proto, Object.getPrototypeOf(o))
+//}
+//
+//TestConstruct(Object.prototype, ReturnNew)
+//TestConstruct(prototype, ReturnNewWithProto)
+//
+//TestConstruct(Object.prototype, Proxy.createFunction(handler, ReturnNew))
+//TestConstruct(prototype, Proxy.createFunction(handler, ReturnNewWithProto))
+//
+//TestConstruct(Object.prototype, CreateFrozen(handler, ReturnNew))
+//TestConstruct(prototype, CreateFrozen(handler, ReturnNewWithProto))
+//
+//
+//
+//// Construction with derived construct trap.
+//
+//function TestConstructFromCall(proto, returnsThis, callTrap) {
+// TestConstructFromCall2(prototype, returnsThis, callTrap, handlerWithPrototype)
+// TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
+//}
+//
+//function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
+// // TODO(rossberg): handling of prototype for derived construct trap will be
+// // fixed in a separate change. Commenting out checks below for now.
+// var f = Proxy.createFunction(handler, callTrap)
+// var o = new f(11, 31)
+// if (returnsThis) assertEquals(o, receiver)
+// assertEquals(42, o.sum)
+// // assertSame(proto, Object.getPrototypeOf(o))
+//
+// var g = CreateFrozen(handler, callTrap)
+// // assertSame(f.prototype, g.prototype)
+// var o = new g(11, 32)
+// if (returnsThis) assertEquals(o, receiver)
+// assertEquals(43, o.sum)
+// // assertSame(proto, Object.getPrototypeOf(o))
+//}
+//
+//TestConstructFromCall(Object.prototype, true, ReturnUndef)
+//TestConstructFromCall(Object.prototype, true, ReturnThis)
+//TestConstructFromCall(Object.prototype, false, ReturnNew)
+//TestConstructFromCall(prototype, false, ReturnNewWithProto)
+//
+//TestConstructFromCall(Object.prototype, true,
+// Proxy.createFunction(handler, ReturnUndef))
+//TestConstructFromCall(Object.prototype, true,
+// Proxy.createFunction(handler, ReturnThis))
+//TestConstructFromCall(Object.prototype, false,
+// Proxy.createFunction(handler, ReturnNew))
+//TestConstructFromCall(prototype, false,
+// Proxy.createFunction(handler, ReturnNewWithProto))
+//
+//TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
+//TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
+//TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
+//TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
+//
+//ReturnUndef.prototype = prototype
+//ReturnThis.prototype = prototype
+//ReturnNew.prototype = prototype
+//ReturnNewWithProto.prototype = prototype
+//
+//TestConstructFromCall(prototype, true, ReturnUndef)
+//TestConstructFromCall(prototype, true, ReturnThis)
+//TestConstructFromCall(Object.prototype, false, ReturnNew)
+//TestConstructFromCall(prototype, false, ReturnNewWithProto)
+//
+//TestConstructFromCall(Object.prototype, true,
+// Proxy.createFunction(handler, ReturnUndef))
+//TestConstructFromCall(Object.prototype, true,
+// Proxy.createFunction(handler, ReturnThis))
+//TestConstructFromCall(Object.prototype, false,
+// Proxy.createFunction(handler, ReturnNew))
+//TestConstructFromCall(prototype, false,
+// Proxy.createFunction(handler, ReturnNewWithProto))
+//
+//TestConstructFromCall(prototype, true,
+// Proxy.createFunction(handlerWithPrototype, ReturnUndef))
+//TestConstructFromCall(prototype, true,
+// Proxy.createFunction(handlerWithPrototype, ReturnThis))
+//TestConstructFromCall(Object.prototype, false,
+// Proxy.createFunction(handlerWithPrototype, ReturnNew))
+//TestConstructFromCall(prototype, false,
+// Proxy.createFunction(handlerWithPrototype,
+// ReturnNewWithProto))
+//
+//TestConstructFromCall(prototype, true,
+// CreateFrozen(handlerWithPrototype, ReturnUndef))
+//TestConstructFromCall(prototype, true,
+// CreateFrozen(handlerWithPrototype, ReturnThis))
+//TestConstructFromCall(Object.prototype, false,
+// CreateFrozen(handlerWithPrototype, ReturnNew))
+//TestConstructFromCall(prototype, false,
+// CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
+//
+//
+//
+//// Throwing from the construct trap.
+//
+//function TestConstructThrow(trap) {
+// TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
+// trap))
+// TestConstructThrow2(Proxy.createFunction({ fix: function() {return {};} },
+// function() {},
+// trap))
+//}
+//
+//function TestConstructThrow2(f) {
+// assertThrows(function(){ new f(11) }, "myexn")
+// Object.freeze(f)
+// assertThrows(function(){ new f(11) }, "myexn")
+//}
+//
+//TestConstructThrow(function() { throw "myexn" })
+//TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+//TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
+//
+//
+//
+//// Using function proxies as getters and setters.
+//
+//var value
+//var receiver
+//
+//function TestAccessorCall(getterCallTrap, setterCallTrap) {
+// var handler = { fix: function() { return {} } }
+// var pgetter = Proxy.createFunction(handler, getterCallTrap)
+// var psetter = Proxy.createFunction(handler, setterCallTrap)
+//
+// var o = {}
+// var oo = Object.create(o)
+// Object.defineProperty(o, "a", {get: pgetter, set: psetter})
+// Object.defineProperty(o, "b", {get: pgetter})
+// Object.defineProperty(o, "c", {set: psetter})
+// Object.defineProperty(o, "3", {get: pgetter, set: psetter})
+// Object.defineProperty(oo, "a", {value: 43})
+//
+// receiver = ""
+// assertEquals(42, o.a)
+// assertSame(o, receiver)
+// receiver = ""
+// assertEquals(42, o.b)
+// assertSame(o, receiver)
+// receiver = ""
+// assertEquals(undefined, o.c)
+// assertEquals("", receiver)
+// receiver = ""
+// assertEquals(42, o["a"])
+// assertSame(o, receiver)
+// receiver = ""
+// assertEquals(42, o[3])
+// assertSame(o, receiver)
+//
+// receiver = ""
+// assertEquals(43, oo.a)
+// assertEquals("", receiver)
+// receiver = ""
+// assertEquals(42, oo.b)
+// assertSame(oo, receiver)
+// receiver = ""
+// assertEquals(undefined, oo.c)
+// assertEquals("", receiver)
+// receiver = ""
+// assertEquals(43, oo["a"])
+// assertEquals("", receiver)
+// receiver = ""
+// assertEquals(42, oo[3])
+// assertSame(oo, receiver)
+//
+// receiver = ""
+// assertEquals(50, o.a = 50)
+// assertSame(o, receiver)
+// assertEquals(50, value)
+// receiver = ""
+// assertEquals(51, o.b = 51)
+// assertEquals("", receiver)
+// assertEquals(50, value) // no setter
+// assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
+// receiver = ""
+// assertEquals(52, o.c = 52)
+// assertSame(o, receiver)
+// assertEquals(52, value)
+// receiver = ""
+// assertEquals(53, o["a"] = 53)
+// assertSame(o, receiver)
+// assertEquals(53, value)
+// receiver = ""
+// assertEquals(54, o[3] = 54)
+// assertSame(o, receiver)
+// assertEquals(54, value)
+//
+// value = 0
+// receiver = ""
+// assertEquals(60, oo.a = 60)
+// assertEquals("", receiver)
+// assertEquals(0, value) // oo has own 'a'
+// assertEquals(61, oo.b = 61)
+// assertSame("", receiver)
+// assertEquals(0, value) // no setter
+// assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
+// receiver = ""
+// assertEquals(62, oo.c = 62)
+// assertSame(oo, receiver)
+// assertEquals(62, value)
+// receiver = ""
+// assertEquals(63, oo["c"] = 63)
+// assertSame(oo, receiver)
+// assertEquals(63, value)
+// receiver = ""
+// assertEquals(64, oo[3] = 64)
+// assertSame(oo, receiver)
+// assertEquals(64, value)
+//}
+//
+//TestAccessorCall(
+// function() { receiver = this; return 42 },
+// function(x) { receiver = this; value = x }
+//)
+//
+//TestAccessorCall(
+// function() { "use strict"; receiver = this; return 42 },
+// function(x) { "use strict"; receiver = this; value = x }
+//)
+//
+//TestAccessorCall(
+// Proxy.createFunction({}, function() { receiver = this; return 42 }),
+// Proxy.createFunction({}, function(x) { receiver = this; value = x })
+//)
+//
+//TestAccessorCall(
+// CreateFrozen({}, function() { receiver = this; return 42 }),
+// CreateFrozen({}, function(x) { receiver = this; value = x })
+//)
+//
+//
+//
+//// Passing a proxy function to higher-order library functions.
+//
+//function TestHigherOrder(f) {
+// assertEquals(6, [6, 2].map(f)[0])
+// assertEquals(4, [5, 2].reduce(f, 4))
+// assertTrue([1, 2].some(f))
+// assertEquals("a.b.c", "a.b.c".replace(".", f))
+//}
+//
+//TestHigherOrder(function(x) { return x })
+//TestHigherOrder(function(x) { "use strict"; return x })
+//TestHigherOrder(Proxy.createFunction({}, function(x) { return x }))
+//TestHigherOrder(CreateFrozen({}, function(x) { return x }))
+//
+//
+//
+//// TODO(rossberg): Ultimately, I want to have the following test function
+//// run through, but it currently fails on so many cases (some not even
+//// involving proxies), that I leave that for later...
+///*
+//function TestCalls() {
+// var handler = {
+// get: function(r, k) {
+// return k == "length" ? 2 : Function.prototype[k]
+// }
+// }
+// var bind = Function.prototype.bind
+// var o = {}
+//
+// var traps = [
+// function(x, y) {
+// return {receiver: this, result: x + y, strict: false}
+// },
+// function(x, y) { "use strict";
+// return {receiver: this, result: x + y, strict: true}
+// },
+// function() {
+// var x = arguments[0], y = arguments[1]
+// return {receiver: this, result: x + y, strict: false}
+// },
+// Proxy.createFunction(handler, function(x, y) {
+// return {receiver: this, result: x + y, strict: false}
+// }),
+// Proxy.createFunction(handler, function() {
+// var x = arguments[0], y = arguments[1]
+// return {receiver: this, result: x + y, strict: false}
+// }),
+// Proxy.createFunction(handler, function(x, y) { "use strict"
+// return {receiver: this, result: x + y, strict: true}
+// }),
+// CreateFrozen(handler, function(x, y) {
+// return {receiver: this, result: x + y, strict: false}
+// }),
+// CreateFrozen(handler, function(x, y) { "use strict"
+// return {receiver: this, result: x + y, strict: true}
+// }),
+// ]
+// var creates = [
+// function(trap) { return trap },
+// function(trap) { return CreateFrozen({}, callTrap) },
+// function(trap) { return Proxy.createFunction(handler, callTrap) },
+// function(trap) {
+// return Proxy.createFunction(handler, CreateFrozen({}, callTrap))
+// },
+// function(trap) {
+// return Proxy.createFunction(handler, Proxy.createFunction(handler, callTrap))
+// },
+// ]
+// var binds = [
+// function(f, o, x, y) { return f },
+// function(f, o, x, y) { return bind.call(f, o) },
+// function(f, o, x, y) { return bind.call(f, o, x) },
+// function(f, o, x, y) { return bind.call(f, o, x, y) },
+// function(f, o, x, y) { return bind.call(f, o, x, y, 5) },
+// function(f, o, x, y) { return bind.call(bind.call(f, o), {}, x, y) },
+// function(f, o, x, y) { return bind.call(bind.call(f, o, x), {}, y) },
+// function(f, o, x, y) { return bind.call(bind.call(f, o, x, y), {}, 5) },
+// ]
+// var calls = [
+// function(f, x, y) { return f(x, y) },
+// function(f, x, y) { var g = f; return g(x, y) },
+// function(f, x, y) { with ({}) return f(x, y) },
+// function(f, x, y) { var g = f; with ({}) return g(x, y) },
+// function(f, x, y, o) { with (o) return f(x, y) },
+// function(f, x, y, o) { return f.call(o, x, y) },
+// function(f, x, y, o) { return f.apply(o, [x, y]) },
+// function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
+// function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
+// function(f, x, y, o) { return %_Call(f, o, x, y) },
+// function(f, x, y, o) { return %Call(f, o, x, y) },
+// function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
+// function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
+// function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
+// function(f, x, y, o) { if (typeof o == "object") return o["f"](x, y) },
+// function(f, x, y, o) { if (typeof o == "object") return (1, o).f(x, y) },
+// function(f, x, y, o) { if (typeof o == "object") return (1, o)["f"](x, y) },
+// ]
+// var receivers = [o, global_object, undefined, null, 2, "bla", true]
+// var expectedSloppies = [o, global_object, global_object, global_object]
+//
+// for (var t = 0; t < traps.length; ++t) {
+// for (var i = 0; i < creates.length; ++i) {
+// for (var j = 0; j < binds.length; ++j) {
+// for (var k = 0; k < calls.length; ++k) {
+// for (var m = 0; m < receivers.length; ++m) {
+// for (var n = 0; n < receivers.length; ++n) {
+// var bound = receivers[m]
+// var receiver = receivers[n]
+// var func = binds[j](creates[i](traps[t]), bound, 31, 11)
+// var expected = j > 0 ? bound : receiver
+// var expectedSloppy = expectedSloppies[j > 0 ? m : n]
+// o.f = func
+// global_object.f = func
+// var x = calls[k](func, 11, 31, receiver)
+// if (x !== undefined) {
+// assertEquals(42, x.result)
+// if (calls[k].length < 4)
+// assertSame(x.strict ? undefined : global_object, x.receiver)
+// else if (x.strict)
+// assertSame(expected, x.receiver)
+// else if (expectedSloppy === undefined)
+// assertSame(expected, x.receiver.valueOf())
+// else
+// assertSame(expectedSloppy, x.receiver)
+// }
+// }
+// }
+// }
+// }
+// }
+// }
+//}
+//
+//TestCalls()
+//*/
+//
+//var realms = [Realm.create(), Realm.create()];
+//Realm.shared = {};
+//
+//Realm.eval(realms[0], "function f() { return this; };");
+//Realm.eval(realms[0], "Realm.shared.f = f;");
+//Realm.eval(realms[0], "Realm.shared.fg = this;");
+//Realm.eval(realms[1], "function g() { return this; };");
+//Realm.eval(realms[1], "Realm.shared.g = g;");
+//Realm.eval(realms[1], "Realm.shared.gg = this;");
+//
+//var fp = Proxy.createFunction({}, Realm.shared.f);
+//var gp = Proxy.createFunction({}, Realm.shared.g);
+//
+//for (var i = 0; i < 10; i++) {
+// assertEquals(Realm.shared.fg, fp());
+// assertEquals(Realm.shared.gg, gp());
+//
+// with (this) {
+// assertEquals(this, fp());
+// assertEquals(this, gp());
+// }
+//
+// with ({}) {
+// assertEquals(Realm.shared.fg, fp());
+// assertEquals(Realm.shared.gg, gp());
+// }
+//}
diff --git a/deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js b/deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js
new file mode 100644
index 0000000000..f7dff61908
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-get-own-property-descriptor.js
@@ -0,0 +1,129 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var target = {};
+var configurable_desc = {
+ value: 123,
+ configurable: true,
+ writable: true,
+ enumerable: false,
+};
+Object.defineProperty(target, "configurable", configurable_desc);
+var nonconfigurable_desc = {
+ value: 234,
+ configurable: false,
+ writable: false,
+ enumerable: true
+}
+Object.defineProperty(target, "nonconfigurable", nonconfigurable_desc);
+
+var proxied_desc = {
+ value: 345,
+ configurable: true
+};
+
+var handler = {
+ "getOwnPropertyDescriptor": function(target, name) {
+ if (name === "proxied") {
+ return proxied_desc;
+ }
+ if (name === "return_null") {
+ return null;
+ }
+ return Object.getOwnPropertyDescriptor(target, name);
+ }
+};
+
+var proxy = new Proxy(target, handler);
+var proxy_without_handler = new Proxy(target, {});
+
+// Checking basic functionality:
+
+assertEquals(configurable_desc,
+ Object.getOwnPropertyDescriptor(proxy, "configurable"));
+assertEquals(nonconfigurable_desc,
+ Object.getOwnPropertyDescriptor(proxy, "nonconfigurable"));
+assertEquals({ value: proxied_desc.value,
+ configurable: proxied_desc.configurable,
+ enumerable: false,
+ writable: false },
+ Object.getOwnPropertyDescriptor(proxy, "proxied"));
+assertEquals(configurable_desc,
+ Object.getOwnPropertyDescriptor(proxy_without_handler,
+ "configurable"));
+assertEquals(nonconfigurable_desc,
+ Object.getOwnPropertyDescriptor(proxy_without_handler,
+ "nonconfigurable"));
+
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "return_null")');
+
+handler.getOwnPropertyDescriptor = undefined;
+assertEquals(configurable_desc,
+ Object.getOwnPropertyDescriptor(proxy, "configurable"));
+
+// Checking invariants mentioned explicitly by the ES spec:
+
+// (Inv-1) "A property cannot be reported as non-existent, if it exists as a
+// non-configurable own property of the target object."
+handler.getOwnPropertyDescriptor = function(target, name) { return undefined; };
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "nonconfigurable")');
+assertEquals(undefined, Object.getOwnPropertyDescriptor(proxy, "configurable"));
+
+// (Inv-2) "A property cannot be reported as non-configurable, if it does not
+// exist as an own property of the target object or if it exists as a
+// configurable own property of the target object."
+handler.getOwnPropertyDescriptor = function(target, name) {
+ return {value: 234, configurable: false, enumerable: true};
+};
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "nonexistent")');
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "configurable")');
+assertEquals(
+ false,
+ Object.getOwnPropertyDescriptor(proxy, "nonconfigurable").configurable);
+
+// (Inv-3) "A property cannot be reported as non-existent, if it exists as an
+// own property of the target object and the target object is not extensible."
+Object.seal(target);
+handler.getOwnPropertyDescriptor = function(target, name) { return undefined; };
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "configurable")');
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "nonconfigurable")');
+assertEquals(undefined, Object.getOwnPropertyDescriptor(proxy, "nonexistent"));
+
+// (Inv-4) "A property cannot be reported as existent, if it does not exist as
+// an own property of the target object and the target object is not
+// extensible."
+var existent_desc = {value: "yes"};
+handler.getOwnPropertyDescriptor = function() { return existent_desc; };
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "nonexistent")');
+assertEquals(
+ {value: "yes", writable: false, enumerable: false, configurable: false},
+ Object.getOwnPropertyDescriptor(proxy, "configurable"));
+
+// Checking individual bailout points in the implementation:
+
+// Step 6: Trap is not callable.
+handler.getOwnPropertyDescriptor = {};
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "configurable")');
+
+// Step 8: Trap throws.
+handler.getOwnPropertyDescriptor = function() { throw "ball"; };
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "configurable")');
+
+// Step 9: Trap result is neither undefined nor an object.
+handler.getOwnPropertyDescriptor = function() { return 1; }
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "configurable")');
+
+// Step 11b: See (Inv-1) above.
+// Step 11e: See (Inv-3) above.
+
+// Step 16: Incompatible PropertyDescriptor; a non-configurable property
+// cannot be reported as configurable. (Inv-4) above checks more cases.
+handler.getOwnPropertyDescriptor = function(target, name) {
+ return {value: 456, configurable: true, writable: true}
+};
+assertThrows('Object.getOwnPropertyDescriptor(proxy, "nonconfigurable")');
+
+// Step 17: See (Inv-2) above.
diff --git a/deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js b/deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js
new file mode 100644
index 0000000000..36f67356d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-get-prototype-of.js
@@ -0,0 +1,93 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var target = { target: 1 };
+target.__proto__ = {};
+var handler = { handler: 1 };
+var proxy = new Proxy(target, handler);
+
+assertSame(Object.getPrototypeOf(proxy), target.__proto__ );
+
+target.__proto__ = [];
+assertSame(Object.getPrototypeOf(proxy), target.__proto__);
+
+handler.getPrototypeOf = function() {
+ return 1;
+}
+assertThrows(function() { Object.getPrototypeOf(proxy) }, TypeError);
+
+var target_prototype = {a:1, b:2};
+handler.getPrototypeOf = function() {
+ return target_prototype ;
+}
+assertSame(Object.getPrototypeOf(proxy), target_prototype);
+
+// Test with proxy target:
+var proxy2 = new Proxy(proxy, {'handler':1});
+assertSame(Object.getPrototypeOf(proxy2), target_prototype);
+
+// Test with Proxy handler:
+var proxy3_prototype = {'proto3':true};
+var handler_proxy = new Proxy({
+ getPrototypeOf: function() { return proxy3_prototype }
+}, {});
+var proxy3 = new Proxy(target, handler_proxy);
+assertSame(Object.getPrototypeOf(proxy3), proxy3_prototype);
+
+
+// Some tests with Object.prototype.isPrototypeOf
+
+(function () {
+ var object = {};
+ var handler = {};
+ var proto = new Proxy({}, handler);
+ object.__proto__ = proto;
+
+ assertTrue(proto.isPrototypeOf(object));
+ assertTrue(Object.prototype.isPrototypeOf.call(proto, object));
+
+ handler.getPrototypeOf = function () { return Object.prototype };
+ assertTrue(proto.isPrototypeOf(object));
+ assertTrue(Object.prototype.isPrototypeOf.call(proto, object));
+ assertTrue(Object.prototype.isPrototypeOf(object));
+ assertFalse(Object.prototype.isPrototypeOf.call(Array.prototype, object));
+ assertFalse(Array.prototype.isPrototypeOf(object));
+
+ handler.getPrototypeOf = function () { return object };
+ assertTrue(Object.prototype.isPrototypeOf.call(proto, object));
+ assertTrue(proto.isPrototypeOf(object));
+ assertTrue(Object.prototype.isPrototypeOf.call(object, object));
+ assertTrue(object.isPrototypeOf(object));
+
+ handler.getPrototypeOf = function () { throw "foo" };
+ assertTrue(proto.isPrototypeOf(object));
+ assertTrue(Object.prototype.isPrototypeOf.call(proto, object));
+ assertThrows(()=> Object.prototype.isPrototypeOf(object));
+ assertThrows(()=> Object.prototype.isPrototypeOf.call(Array.prototype, object));
+ assertThrows(()=> Array.prototype.isPrototypeOf(object));
+})();
+
+(function () {
+ var handler = {};
+ var object = new Proxy({}, handler);
+ var proto = {};
+
+ assertFalse(Object.prototype.isPrototypeOf.call(object, object));
+ assertFalse(Object.prototype.isPrototypeOf.call(proto, object));
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, object));
+
+ handler.getPrototypeOf = function () { return proto };
+ assertTrue(Object.prototype.isPrototypeOf.call(proto, object));
+ assertFalse(Object.prototype.isPrototypeOf.call({}, object));
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, object));
+
+ handler.getPrototypeOf = function () { return object };
+ assertTrue(Object.prototype.isPrototypeOf.call(object, object));
+
+ handler.getPrototypeOf = function () { throw "foo" };
+ assertThrows(()=> Object.prototype.isPrototypeOf.call(object, object));
+ assertThrows(()=> Object.prototype.isPrototypeOf(object));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-get.js b/deps/v8/test/mjsunit/harmony/proxies-get.js
new file mode 100644
index 0000000000..04ebd31257
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-get.js
@@ -0,0 +1,127 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+
+(function testBasicFunctionality() {
+ var target = {
+ target_one: 1,
+ property: "value"
+ };
+
+ var handler = {handler:1};
+
+ var proxy = new Proxy(target, handler);
+ assertEquals("value", proxy.property);
+ assertEquals(undefined, proxy.nothing);
+ assertEquals(undefined, proxy.handler);
+
+ handler.get = function() { return "value 2" };
+ assertEquals("value 2", proxy.property);
+ assertEquals("value 2", proxy.nothing);
+ assertEquals("value 2", proxy.handler);
+
+ var handler2 = new Proxy({get: function() { return "value 3" }},{});
+ var proxy2 = new Proxy(target, handler2);
+ assertEquals("value 3", proxy2.property);
+ assertEquals("value 3", proxy2.nothing);
+ assertEquals("value 3", proxy2.handler);
+})();
+
+(function testThrowOnGettingTrap() {
+ var handler = new Proxy({}, {get: function(){ throw Error() }});
+ var proxy = new Proxy({}, handler);
+ assertThrows("proxy.property", Error);
+})();
+
+(function testFallback() {
+ var target = {property:"value"};
+ var proxy = new Proxy(target, {});
+ assertEquals("value", proxy.property);
+ assertEquals(undefined, proxy.property2);
+})();
+
+(function testFallbackUndefinedTrap() {
+ var handler = new Proxy({}, {get: function(){ return undefined }});
+ var target = {property:"value"};
+ var proxy = new Proxy(target, handler);
+ assertEquals("value", proxy.property);
+ assertEquals(undefined, proxy.property2);
+})();
+
+(function testFailingInvariant() {
+ var target = {};
+ var handler = { get: function(r, p){ if (p != "key4") return "value" }}
+ var proxy = new Proxy(target, handler);
+ assertEquals("value", proxy.property);
+ assertEquals("value", proxy.key);
+ assertEquals("value", proxy.key2);
+ assertEquals("value", proxy.key3);
+
+ // Define a non-configurable, non-writeable property on the target for
+ // which the handler will return a different value.
+ Object.defineProperty(target, "key", {
+ configurable: false,
+ writable: false,
+ value: "different value"
+ });
+ assertEquals("value", proxy.property);
+ assertThrows(function(){ proxy.key }, TypeError);
+ assertEquals("value", proxy.key2);
+ assertEquals("value", proxy.key3);
+
+ // Define a non-configurable getter on the target for which the handler
+ // will return a value, according to the spec we do not throw.
+ Object.defineProperty(target, "key2", {
+ configurable: false,
+ get: function() { return "different value" }
+ });
+ assertEquals("value", proxy.property);
+ assertThrows(function(){ proxy.key }, TypeError);
+ assertEquals("value", proxy.key2);
+ assertEquals("value", proxy.key3);
+
+ // Define a non-configurable setter without a corresponding getter on the
+ // target for which the handler will return a value.
+ Object.defineProperty(target, "key3", {
+ configurable: false,
+ set: function() { }
+ });
+ assertEquals("value", proxy.property);
+ assertThrows(function(){ proxy.key }, TypeError);
+ assertEquals("value", proxy.key2);
+ assertThrows(function(){ proxy.key3 }, TypeError);
+
+ // Define a non-configurable setter without a corresponding getter on the
+ // target for which the handler will return undefined.
+ Object.defineProperty(target, "key4", {
+ configurable: false,
+ set: function() { }
+ });
+ assertSame(undefined, proxy.key4);
+})();
+
+(function testGetInternalIterators() {
+ var log = [];
+ var array = [1,2,3,4,5]
+ var origIt = array[Symbol.iterator]();
+ var it = new Proxy(origIt, {
+ get(t, name) {
+ log.push(`[[Get]](iterator, ${String(name)})`);
+ return Reflect.get(t, name);
+ },
+ set(t, name, val) {
+ log.push(`[[Set]](iterator, ${String(name)}, ${String(val)})`);
+ return Reflect.set(t, name, val);
+ }
+ });
+
+ assertThrows(function() {
+ for (var v of it) log.push(v);
+ }, TypeError);
+ assertEquals([
+ "[[Get]](iterator, Symbol(Symbol.iterator))",
+ "[[Get]](iterator, next)"
+ ], log);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-global-reference.js b/deps/v8/test/mjsunit/harmony/proxies-global-reference.js
new file mode 100644
index 0000000000..1b77e66fdf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-global-reference.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var failing_proxy = new Proxy({}, new Proxy({}, {
+ get() { throw "No trap should fire" }}));
+
+Object.setPrototypeOf(Object.prototype, failing_proxy);
+assertThrows(()=>a, TypeError);
+
+Object.setPrototypeOf(this, failing_proxy);
+assertThrows(()=>a, TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-has-own-property.js b/deps/v8/test/mjsunit/harmony/proxies-has-own-property.js
new file mode 100644
index 0000000000..1455d2b273
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-has-own-property.js
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var handler = {};
+var target = {a:1};
+var proxy = new Proxy(target, handler);
+
+assertTrue(target.hasOwnProperty('a'));
+assertTrue(proxy.hasOwnProperty('a'));
+assertFalse(target.hasOwnProperty('b'));
+assertFalse(proxy.hasOwnProperty('b'));
+
+
+handler.has = function() { assertUnreachable() }
+handler.getOwnPropertyDescriptor = function () {}
+
+assertTrue(target.hasOwnProperty('a'));
+assertFalse(proxy.hasOwnProperty('a'));
+assertFalse(target.hasOwnProperty('b'));
+assertFalse(proxy.hasOwnProperty('b'));
+
+
+handler.getOwnPropertyDescriptor = function() { return {configurable: true} }
+
+assertTrue(target.hasOwnProperty('a'));
+assertTrue(proxy.hasOwnProperty('a'));
+assertFalse(target.hasOwnProperty('b'));
+assertTrue(proxy.hasOwnProperty('b'));
+
+
+handler.getOwnPropertyDescriptor = function() { throw Error(); }
+
+assertTrue(target.hasOwnProperty('a'));
+assertThrows(function(){ proxy.hasOwnProperty('a') }, Error);
+assertFalse(target.hasOwnProperty('b'));
+assertThrows(function(){ proxy.hasOwnProperty('b') }, Error);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-has.js b/deps/v8/test/mjsunit/harmony/proxies-has.js
new file mode 100644
index 0000000000..b7848e8ae3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-has.js
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var target = {
+ "target_one": 1
+};
+target.__proto__ = {
+ "target_two": 2
+};
+var handler = {
+ has: function(target, name) {
+ return name == "present";
+ }
+}
+
+var proxy = new Proxy(target, handler);
+
+// Test simple cases.
+assertTrue("present" in proxy);
+assertFalse("nonpresent" in proxy);
+
+// Test interesting algorithm steps:
+
+// Step 7: Fall through to target if trap is undefined.
+handler.has = undefined;
+assertTrue("target_one" in proxy);
+assertTrue("target_two" in proxy);
+assertFalse("in_your_dreams" in proxy);
+
+// Step 8: Result is converted to boolean.
+var result = 1;
+handler.has = function(t, n) { return result; }
+assertTrue("foo" in proxy);
+result = {};
+assertTrue("foo" in proxy);
+result = undefined;
+assertFalse("foo" in proxy);
+result = "string";
+assertTrue("foo" in proxy);
+
+// Step 9b i. Trap result must confirm presence of non-configurable properties
+// of the target.
+Object.defineProperty(target, "nonconf", {value: 1, configurable: false});
+result = false;
+assertThrows("'nonconf' in proxy", TypeError);
+
+// Step 9b iii. Trap result must confirm presence of all own properties of
+// non-extensible targets.
+Object.preventExtensions(target);
+assertThrows("'nonconf' in proxy", TypeError);
+assertThrows("'target_one' in proxy", TypeError);
+assertFalse("target_two" in proxy);
+assertFalse("in_your_dreams" in proxy);
+
+// Regression test for crbug.com/570120 (stray JSObject::cast).
+(function TestHasPropertyFastPath() {
+ var proxy = new Proxy({}, {});
+ var object = Object.create(proxy);
+ object.hasOwnProperty(0);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-hash.js b/deps/v8/test/mjsunit/harmony/proxies-hash.js
index 65d2d3c564..830facb28d 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-hash.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-hash.js
@@ -31,10 +31,11 @@
// Helper.
function TestWithProxies(test, construct, handler) {
- test(construct, handler, Proxy.create)
- test(construct, handler, function(h) {
- return Proxy.createFunction(h, function() {})
- })
+ test(construct, handler, function(h) { return new Proxy({}, h) })
+ // TODO(cbruni): Adapt and enable once we have [[Call]] working.
+ // test(construct, handler, function(h) {
+ // return Proxy.createFunction(h, function() {})
+ // })
}
diff --git a/deps/v8/test/mjsunit/harmony/proxies-integrity.js b/deps/v8/test/mjsunit/harmony/proxies-integrity.js
new file mode 100644
index 0000000000..9ed6005d6b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-integrity.js
@@ -0,0 +1,213 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+
+
+function toKey(x) {
+ if (typeof x === "symbol") return x;
+ return String(x);
+}
+
+
+const noconf = {configurable: false};
+const noconf_nowrite = {configurable: false, writable: false};
+
+
+var symbol = Symbol();
+
+
+var log = [];
+var logger = {};
+var handler = new Proxy({}, logger);
+
+logger.get = function(t, trap, r) {
+ return function() {
+ log.push([trap, ...arguments]);
+ return Reflect[trap](...arguments);
+ }
+};
+
+
+(function Seal() {
+ var target = [];
+ var proxy = new Proxy(target, handler);
+ log.length = 0;
+
+ target.wurst = 42;
+ target[0] = true;
+ Object.defineProperty(target, symbol, {get: undefined});
+
+ Object.seal(proxy);
+ assertEquals(6, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["preventExtensions", target], log[0]);
+ assertArrayEquals(
+ ["ownKeys", target], log[1]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey(0), noconf], log[2]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey("length"), noconf], log[3]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey("wurst"), noconf], log[4]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey(symbol), noconf], log[5]);
+})();
+
+
+(function Freeze() {
+ var target = [];
+ var proxy = new Proxy(target, handler);
+ log.length = 0;
+
+ target.wurst = 42;
+ target[0] = true;
+ Object.defineProperty(target, symbol, {get: undefined});
+
+ Object.freeze(proxy);
+ assertEquals(10, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["preventExtensions", target], log[0]);
+ assertArrayEquals(
+ ["ownKeys", target], log[1]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(0)], log[2]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey(0), noconf_nowrite], log[3]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey("length")], log[4]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey("length"), noconf_nowrite], log[5]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey("wurst")], log[6]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey("wurst"), noconf_nowrite], log[7]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(symbol)], log[8]);
+ assertArrayEquals(
+ ["defineProperty", target, toKey(symbol), noconf], log[9]);
+})();
+
+
+(function IsSealed() {
+ var target = [];
+ var proxy = new Proxy(target, handler);
+
+ target.wurst = 42;
+ target[0] = true;
+ Object.defineProperty(target, symbol, {get: undefined});
+
+ // Extensible.
+
+ log.length = 0;
+
+ Object.isSealed(proxy);
+ assertEquals(1, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["isExtensible", target], log[0]);
+
+ // Not extensible but not sealed.
+
+ log.length = 0;
+ Object.preventExtensions(target);
+
+ Object.isSealed(proxy);
+ assertEquals(3, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["isExtensible", target], log[0]);
+ assertArrayEquals(
+ ["ownKeys", target], log[1]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(0)], log[2]);
+
+ // Sealed.
+
+ log.length = 0;
+ Object.seal(target);
+
+ Object.isSealed(proxy);
+ assertEquals(6, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["isExtensible", target], log[0]);
+ assertArrayEquals(
+ ["ownKeys", target], log[1]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(0)], log[2]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey("length")], log[3]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey("wurst")], log[4]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(symbol)], log[5]);
+})();
+
+
+(function IsFrozen() {
+ var target = [];
+ var proxy = new Proxy(target, handler);
+
+ target.wurst = 42;
+ target[0] = true;
+ Object.defineProperty(target, symbol, {get: undefined});
+
+ // Extensible.
+
+ log.length = 0;
+
+ Object.isFrozen(proxy);
+ assertEquals(1, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["isExtensible", target], log[0]);
+
+ // Not extensible but not frozen.
+
+ log.length = 0;
+ Object.preventExtensions(target);
+
+ Object.isFrozen(proxy);
+ assertEquals(3, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["isExtensible", target], log[0]);
+ assertArrayEquals(
+ ["ownKeys", target], log[1]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(0)], log[2]);
+
+ // Frozen.
+
+ log.length = 0;
+ Object.freeze(target);
+
+ Object.isFrozen(proxy);
+ assertEquals(6, log.length)
+ for (var i in log) assertSame(target, log[i][1]);
+
+ assertArrayEquals(
+ ["isExtensible", target], log[0]);
+ assertArrayEquals(
+ ["ownKeys", target], log[1]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(0)], log[2]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey("length")], log[3]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey("wurst")], log[4]);
+ assertArrayEquals(
+ ["getOwnPropertyDescriptor", target, toKey(symbol)], log[5]);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-is-extensible.js b/deps/v8/test/mjsunit/harmony/proxies-is-extensible.js
new file mode 100644
index 0000000000..f597d0d0a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-is-extensible.js
@@ -0,0 +1,74 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+
+(function () {
+ // No trap.
+
+ var target = {};
+ var handler = {};
+ var proxy = new Proxy(target, handler);
+
+ assertTrue(Reflect.isExtensible(target));
+ assertTrue(Reflect.isExtensible(proxy));
+ assertTrue(Reflect.preventExtensions(proxy));
+ assertFalse(Reflect.isExtensible(target));
+ assertFalse(Reflect.isExtensible(proxy));
+})();
+
+
+(function () {
+ // "Undefined" trap.
+
+ var target = {};
+ var handler = { isExtensible: null };
+ var proxy = new Proxy(target, handler);
+
+ assertTrue(Reflect.isExtensible(target));
+ assertTrue(Reflect.isExtensible(proxy));
+ assertTrue(Reflect.preventExtensions(proxy));
+ assertFalse(Reflect.isExtensible(target));
+ assertFalse(Reflect.isExtensible(proxy));
+})();
+
+
+(function () {
+ // Invalid trap.
+
+ var target = {};
+ var handler = { isExtensible: true };
+ var proxy = new Proxy(target, handler);
+
+ assertThrows(() => {Reflect.isExtensible(proxy)}, TypeError);
+})();
+
+
+(function () {
+ var target = {};
+ var handler = { isExtensible() {return "bla"} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish and target is extensible.
+ assertTrue(Reflect.isExtensible(proxy));
+
+ // Trap returns trueish but target is not extensible.
+ Reflect.preventExtensions(target);
+ assertThrows(() => {Reflect.isExtensible(proxy)}, TypeError);
+})();
+
+
+(function () {
+ var target = {};
+ var handler = { isExtensible() {return 0} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns falsish but target is extensible.
+ assertThrows(() => {Reflect.isExtensible(proxy)}, TypeError);
+
+ // Trap returns falsish and target is not extensible.
+ Reflect.preventExtensions(target);
+ assertFalse(Reflect.isExtensible(proxy));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-json.js b/deps/v8/test/mjsunit/harmony/proxies-json.js
index eba10a1453..19a13298df 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-json.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-json.js
@@ -25,7 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-proxies
+// Flags: --harmony-proxies --harmony-reflect
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// JSON.stringify
+
function testStringify(expected, object) {
// Test fast case that bails out to slow case.
@@ -34,23 +40,30 @@ function testStringify(expected, object) {
assertEquals(expected, JSON.stringify(object, undefined, 0));
}
-// Test serializing a proxy, function proxy and objects that contain them.
+
+// Test serializing a proxy, a function proxy, and objects that contain them.
+
var handler1 = {
get: function(target, name) {
return name.toUpperCase();
},
- enumerate: function(target) {
+ ownKeys: function() {
return ['a', 'b', 'c'];
},
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
+ getOwnPropertyDescriptor: function() {
+ return { enumerable: true, configurable: true };
}
}
-var proxy1 = Proxy.create(handler1);
+var proxy1 = new Proxy({}, handler1);
testStringify('{"a":"A","b":"B","c":"C"}', proxy1);
-var proxy_fun = Proxy.createFunction(handler1, function() { return 1; });
+var proxy_fun = new Proxy(() => {}, handler1);
+assertTrue(typeof(proxy_fun) === 'function');
+testStringify(undefined, proxy_fun);
+testStringify('[1,null]', [1, proxy_fun]);
+
+handler1.apply = function() { return 666; };
testStringify(undefined, proxy_fun);
testStringify('[1,null]', [1, proxy_fun]);
@@ -63,98 +76,121 @@ testStringify('{"a":123,"b":{"a":"A","b":"B","c":"C"},"c":true}', parent1b);
var parent1c = [123, proxy1, true];
testStringify('[123,{"a":"A","b":"B","c":"C"},true]', parent1c);
+
// Proxy with side effect.
+
var handler2 = {
get: function(target, name) {
delete parent2.c;
return name.toUpperCase();
},
- enumerate: function(target) {
+ ownKeys: function() {
return ['a', 'b', 'c'];
},
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
+ getOwnPropertyDescriptor: function() {
+ return { enumerable: true, configurable: true };
}
}
-var proxy2 = Proxy.create(handler2);
+var proxy2 = new Proxy({}, handler2);
var parent2 = { a: "delete", b: proxy2, c: "remove" };
var expected2 = '{"a":"delete","b":{"a":"A","b":"B","c":"C"}}';
assertEquals(expected2, JSON.stringify(parent2));
parent2.c = "remove"; // Revert side effect.
assertEquals(expected2, JSON.stringify(parent2, undefined, 0));
-// Proxy with a get function that uses the first argument.
+
+// Proxy with a get function that uses the receiver argument.
+
var handler3 = {
- get: function(target, name) {
- if (name == 'valueOf') return function() { return "proxy" };
- return name + "(" + target + ")";
+ get: function(target, name, receiver) {
+ if (name == 'valueOf' || name === Symbol.toPrimitive) {
+ return function() { return "proxy" };
+ };
+ if (typeof name !== 'symbol') return name + "(" + receiver + ")";
},
- enumerate: function(target) {
+ ownKeys: function() {
return ['a', 'b', 'c'];
},
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
+ getOwnPropertyDescriptor: function() {
+ return { enumerable: true, configurable: true };
}
}
-var proxy3 = Proxy.create(handler3);
+var proxy3 = new Proxy({}, handler3);
var parent3 = { x: 123, y: proxy3 }
testStringify('{"x":123,"y":{"a":"a(proxy)","b":"b(proxy)","c":"c(proxy)"}}',
parent3);
+
// Empty proxy.
+
var handler4 = {
get: function(target, name) {
return 0;
},
enumerate: function(target) {
- return [];
+ return [][Symbol.iterator]();
+ },
+ has: function() {
+ return true;
},
getOwnPropertyDescriptor: function(target, name) {
return { enumerable: false };
}
}
-var proxy4 = Proxy.create(handler4);
+var proxy4 = new Proxy({}, handler4);
testStringify('{}', proxy4);
testStringify('{"a":{}}', { a: proxy4 });
+
// Proxy that provides a toJSON function that uses this.
+
var handler5 = {
get: function(target, name) {
if (name == 'z') return 97000;
return function(key) { return key.charCodeAt(0) + this.z; };
},
enumerate: function(target) {
- return ['toJSON', 'z'];
+ return ['toJSON', 'z'][Symbol.iterator]();
+ },
+ has: function() {
+ return true;
},
getOwnPropertyDescriptor: function(target, name) {
return { enumerable: true };
}
}
-var proxy5 = Proxy.create(handler5);
+var proxy5 = new Proxy({}, handler5);
testStringify('{"a":97097}', { a: proxy5 });
+
// Proxy that provides a toJSON function that returns undefined.
+
var handler6 = {
get: function(target, name) {
return function(key) { return undefined; };
},
enumerate: function(target) {
- return ['toJSON'];
+ return ['toJSON'][Symbol.iterator]();
+ },
+ has: function() {
+ return true;
},
getOwnPropertyDescriptor: function(target, name) {
return { enumerable: true };
}
}
-var proxy6 = Proxy.create(handler6);
+var proxy6 = new Proxy({}, handler6);
testStringify('[1,null,true]', [1, proxy6, true]);
testStringify('{"a":1,"c":true}', {a: 1, b: proxy6, c: true});
+
// Object containing a proxy that changes the parent's properties.
+
var handler7 = {
get: function(target, name) {
delete parent7.a;
@@ -162,17 +198,313 @@ var handler7 = {
parent7.e = "5";
return name.toUpperCase();
},
- enumerate: function(target) {
+ ownKeys: function() {
return ['a', 'b', 'c'];
},
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
+ getOwnPropertyDescriptor: function() {
+ return { enumerable: true, configurable: true };
}
}
-var proxy7 = Proxy.create(handler7);
+var proxy7 = new Proxy({}, handler7);
var parent7 = { a: "1", b: proxy7, c: "3", d: "4" };
assertEquals('{"a":"1","b":{"a":"A","b":"B","c":"C"},"d":"4"}',
JSON.stringify(parent7));
assertEquals('{"b":{"a":"A","b":"B","c":"C"},"d":"4","e":"5"}',
JSON.stringify(parent7));
+
+
+// (Proxy handler to log trap calls)
+
+var log = [];
+var logger = {};
+var handler = new Proxy({}, logger);
+
+logger.get = function(t, trap, r) {
+ return function() {
+ log.push([trap, ...arguments]);
+ return Reflect[trap](...arguments);
+ }
+};
+
+
+// Object is a callable proxy
+
+log.length = 0;
+var target = () => 42;
+var proxy = new Proxy(target, handler);
+assertTrue(typeof proxy === 'function');
+
+assertEquals(undefined, JSON.stringify(proxy));
+assertEquals(1, log.length)
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["get", target, "toJSON", proxy], log[0]);
+
+
+// Object is a non-callable non-arraylike proxy
+
+log.length = 0;
+var target = {foo: 42}
+var proxy = new Proxy(target, handler);
+assertFalse(Array.isArray(proxy));
+
+assertEquals('{"foo":42}', JSON.stringify(proxy));
+assertEquals(4, log.length)
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(
+ ["get", target, "toJSON", proxy], log[0]);
+assertEquals(
+ ["ownKeys", target], log[1]); // EnumerableOwnNames
+assertEquals(
+ ["getOwnPropertyDescriptor", target, "foo"], log[2]); // EnumerableOwnNames
+assertEquals(
+ ["get", target, "foo", proxy], log[3]);
+
+
+// Object is an arraylike proxy
+
+log.length = 0;
+var target = [42];
+var proxy = new Proxy(target, handler);
+assertTrue(Array.isArray(proxy));
+
+assertEquals('[42]', JSON.stringify(proxy));
+assertEquals(3, log.length)
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["get", target, "toJSON", proxy], log[0]);
+assertEquals(["get", target, "length", proxy], log[1]);
+assertEquals(["get", target, "0", proxy], log[2]);
+
+
+// Replacer is a callable proxy
+
+log.length = 0;
+var object = {0: "foo", 1: 666};
+var target = (key, val) => key == "1" ? val + 42 : val;
+var proxy = new Proxy(target, handler);
+assertTrue(typeof proxy === 'function');
+
+assertEquals('{"0":"foo","1":708}', JSON.stringify(object, proxy));
+assertEquals(3, log.length)
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(4, log[0].length)
+assertEquals("apply", log[0][0]);
+assertEquals("", log[0][3][0]);
+assertEquals({0: "foo", 1: 666}, log[0][3][1]);
+assertEquals(4, log[1].length)
+assertEquals("apply", log[1][0]);
+assertEquals(["0", "foo"], log[1][3]);
+assertEquals(4, log[2].length)
+assertEquals("apply", log[2][0]);
+assertEquals(["1", 666], log[2][3]);
+
+
+// Replacer is an arraylike proxy
+
+log.length = 0;
+var object = {0: "foo", 1: 666};
+var target = [0];
+var proxy = new Proxy(target, handler);
+assertTrue(Array.isArray(proxy));
+
+assertEquals('{"0":"foo"}', JSON.stringify(object, proxy));
+assertEquals(2, log.length)
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["get", target, "length", proxy], log[0]);
+assertEquals(["get", target, "0", proxy], log[1]);
+
+
+// Replacer is an arraylike proxy and object is an array
+
+log.length = 0;
+var object = ["foo", 42];
+var target = [0];
+var proxy = new Proxy(target, handler);
+assertTrue(Array.isArray(proxy));
+
+assertEquals('["foo",42]', JSON.stringify(object, proxy));
+assertEquals(2, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["get", target, "length", proxy], log[0]);
+assertEquals(["get", target, "0", proxy], log[1]);
+
+
+// Replacer is an arraylike proxy with a non-trivial length
+
+var getTrap = function(t, key) {
+ if (key === "length") return {[Symbol.toPrimitive]() {return 42}};
+ if (key === "41") return "foo";
+ if (key === "42") return "bar";
+};
+var target = [];
+var proxy = new Proxy(target, {get: getTrap});
+assertTrue(Array.isArray(proxy));
+var object = {foo: true, bar: 666};
+assertEquals('{"foo":true}', JSON.stringify(object, proxy));
+
+
+// Replacer is an arraylike proxy with a bogus length
+
+var getTrap = function(t, key) {
+ if (key === "length") return Symbol();
+ if (key === "41") return "foo";
+ if (key === "42") return "bar";
+};
+var target = [];
+var proxy = new Proxy(target, {get: getTrap});
+assertTrue(Array.isArray(proxy));
+var object = {foo: true, bar: 666};
+assertThrows(() => JSON.stringify(object, proxy), TypeError);
+
+
+// Replacer returns a non-callable non-arraylike proxy
+
+log.length = 0;
+var object = ["foo", 42];
+var target = {baz: 5};
+var proxy = new Proxy(target, handler);
+var replacer = (key, val) => key === "1" ? proxy : val;
+
+assertEquals('["foo",{"baz":5}]', JSON.stringify(object, replacer));
+assertEquals(3, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["ownKeys", target], log[0]);
+assertEquals(["getOwnPropertyDescriptor", target, "baz"], log[1]);
+
+
+// Replacer returns an arraylike proxy
+
+log.length = 0;
+var object = ["foo", 42];
+var target = ["bar"];
+var proxy = new Proxy(target, handler);
+var replacer = (key, val) => key === "1" ? proxy : val;
+
+assertEquals('["foo",["bar"]]', JSON.stringify(object, replacer));
+assertEquals(2, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["get", target, "length", proxy], log[0]);
+assertEquals(["get", target, "0", proxy], log[1]);
+
+
+// Replacer returns an arraylike proxy with a non-trivial length
+
+var getTrap = function(t, key) {
+ if (key === "length") return {[Symbol.toPrimitive]() {return 3}};
+ if (key === "2") return "baz";
+ if (key === "3") return "bar";
+};
+var target = [];
+var proxy = new Proxy(target, {get: getTrap});
+var replacer = (key, val) => key === "goo" ? proxy : val;
+var object = {foo: true, goo: false};
+assertEquals('{"foo":true,"goo":[null,null,"baz"]}',
+ JSON.stringify(object, replacer));
+
+
+// Replacer returns an arraylike proxy with a bogus length
+
+var getTrap = function(t, key) {
+ if (key === "length") return Symbol();
+ if (key === "2") return "baz";
+ if (key === "3") return "bar";
+};
+var target = [];
+var proxy = new Proxy(target, {get: getTrap});
+var replacer = (key, val) => key === "goo" ? proxy : val;
+var object = {foo: true, goo: false};
+assertThrows(() => JSON.stringify(object, replacer), TypeError);
+
+
+// Replacer returns a callable proxy
+
+log.length = 0;
+var target = () => 666;
+var proxy = new Proxy(target, handler);
+var replacer = (key, val) => key === "1" ? proxy : val;
+
+assertEquals('["foo",null]', JSON.stringify(["foo", 42], replacer));
+assertEquals(0, log.length);
+
+assertEquals('{"0":"foo"}', JSON.stringify({0: "foo", 1: 42}, replacer));
+assertEquals(0, log.length);
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// JSON.parse
+
+
+// Reviver is a callable proxy
+
+log.length = 0;
+var target = () => 42;
+var proxy = new Proxy(target, handler);
+assertTrue(typeof proxy === "function");
+
+assertEquals(42, JSON.parse("[true, false]", proxy));
+assertEquals(3, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(4, log[0].length);
+assertEquals("apply", log[0][0]);
+assertEquals(["0", true], log[0][3]);
+assertEquals(4, log[1].length);
+assertEquals("apply", log[1][0]);
+assertEquals(["1", false], log[1][3]);
+assertEquals(4, log[2].length);
+assertEquals("apply", log[2][0]);
+assertEquals(["", [42, 42]], log[2][3]);
+
+
+// Reviver plants a non-arraylike proxy into a yet-to-be-visited property
+
+log.length = 0;
+var target = {baz: 42};
+var proxy = new Proxy(target, handler);
+var reviver = function(p, v) {
+ if (p === "baz") return 5;
+ if (p === "foo") this.bar = proxy;
+ return v;
+}
+
+assertEquals({foo: 0, bar: proxy}, JSON.parse('{"foo":0,"bar":1}', reviver));
+assertEquals(4, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["ownKeys", target], log[0]);
+assertEquals(["getOwnPropertyDescriptor", target, "baz"], log[1]);
+assertEquals(["get", target, "baz", proxy], log[2]);
+assertEquals(["defineProperty", target, "baz",
+ {value: 5, configurable: true, writable: true, enumerable: true}], log[3]);
+
+
+// Reviver plants an arraylike proxy into a yet-to-be-visited property
+
+log.length = 0;
+var target = [42];
+var proxy = new Proxy(target, handler);
+assertTrue(Array.isArray(proxy));
+var reviver = function(p, v) {
+ if (p === "0") return undefined;
+ if (p === "foo") this.bar = proxy;
+ return v;
+}
+
+var result = JSON.parse('{"foo":0,"bar":1}', reviver);
+assertEquals({foo: 0, bar: proxy}, result);
+assertSame(result.bar, proxy);
+assertEquals(3, log.length);
+for (var i in log) assertSame(target, log[i][1]);
+
+assertEquals(["get", target, "length", proxy], log[0]);
+assertEquals(["get", target, "0", proxy], log[1]);
+assertEquals(["deleteProperty", target, "0"], log[2]);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-keys.js b/deps/v8/test/mjsunit/harmony/proxies-keys.js
new file mode 100644
index 0000000000..61a39f4972
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-keys.js
@@ -0,0 +1,41 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var target = {
+ target: 1
+};
+target.__proto__ = {
+ target_proto: 2
+};
+
+var handler = {
+ ownKeys: function(target) {
+ return ["foo", "bar", Symbol("baz"), "non-enum", "not-found"];
+ },
+ getOwnPropertyDescriptor: function(target, name) {
+ if (name == "non-enum") return {configurable: true};
+ if (name == "not-found") return undefined;
+ return {enumerable: true, configurable: true};
+ }
+}
+
+var proxy = new Proxy(target, handler);
+
+// Object.keys() ignores symbols and non-enumerable keys.
+assertEquals(["foo", "bar"], Object.keys(proxy));
+
+// Edge case: no properties left after filtering.
+handler.getOwnPropertyDescriptor = undefined;
+assertEquals([], Object.keys(proxy));
+
+// Throwing shouldn't crash.
+handler.getOwnPropertyDescriptor = function() { throw new Number(1); };
+assertThrows("Object.keys(proxy)", Number);
+
+// Fall through to target if there is no trap.
+handler.ownKeys = undefined;
+assertEquals(["target"], Object.keys(proxy));
+assertEquals(["target"], Object.keys(target));
diff --git a/deps/v8/test/mjsunit/harmony/proxies-object-assign.js b/deps/v8/test/mjsunit/harmony/proxies-object-assign.js
new file mode 100644
index 0000000000..154f8c15f6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-object-assign.js
@@ -0,0 +1,30 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var handler = {
+ ownKeys: function(t) { return ["a", "b"]; },
+ getOwnPropertyDescriptor: function(t, p) {
+ return {enumerable: true, configurable: true}
+ },
+ get: function(t, p) {
+ return 1;
+ }
+};
+
+var proxy = new Proxy({}, handler);
+
+var o = {};
+
+Object.assign(o, proxy);
+
+assertEquals({"a": 1, "b": 1}, o);
+
+(function TestStringSources() {
+ var source = "abc";
+ var target = {};
+ Object.assign(target, source);
+ assertEquals({0: "a", 1: "b", 2: "c"}, target);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js b/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js
new file mode 100644
index 0000000000..6a7ae64d78
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js
@@ -0,0 +1,84 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+var target = {
+ "target_one": 1
+};
+target.__proto__ = {
+ "target_proto_two": 2
+};
+var handler = {
+ ownKeys: function(target) {
+ return ["foo", "bar"];
+ }
+}
+
+var proxy = new Proxy(target, handler);
+
+// Simple case.
+assertEquals(["foo", "bar"], Reflect.ownKeys(proxy));
+
+// Test interesting steps of the algorithm:
+
+// Step 6: Fall through to target.[[OwnPropertyKeys]] if the trap is undefined.
+handler.ownKeys = undefined;
+assertEquals(["target_one"], Reflect.ownKeys(proxy));
+
+// Step 7: Throwing traps don't crash.
+handler.ownKeys = function(target) { throw 1; };
+assertThrows("Reflect.ownKeys(proxy)");
+
+// Step 8: CreateListFromArrayLike error cases:
+// Returning a non-Object throws.
+var keys = 1;
+handler.ownKeys = function(target) { return keys; };
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = "string";
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = Symbol("foo");
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = null;
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+
+// "length" property is honored.
+keys = { 0: "a", 1: "b", 2: "c" };
+keys.length = 0;
+assertEquals([], Reflect.ownKeys(proxy));
+keys.length = 1;
+assertEquals(["a"], Reflect.ownKeys(proxy));
+keys.length = 3;
+assertEquals(["a", "b", "c"], Reflect.ownKeys(proxy));
+// The spec wants to allow lengths up to 2^53, but we can't allocate arrays
+// of that size, so we throw even for smaller values.
+keys.length = Math.pow(2, 33);
+assertThrows("Reflect.ownKeys(proxy)", RangeError);
+
+// Non-Name results throw.
+keys = [1];
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = [{}];
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = [{toString: function() { return "foo"; }}];
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = [null];
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+
+// Step 17a: The trap result must include all non-configurable keys.
+Object.defineProperty(target, "nonconf", {value: 1, configurable: false});
+keys = ["foo"];
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = ["nonconf"];
+assertEquals(keys, Reflect.ownKeys(proxy));
+
+// Step 19a: The trap result must all keys of a non-extensible target.
+Object.preventExtensions(target);
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
+keys = ["nonconf", "target_one"];
+assertEquals(keys, Reflect.ownKeys(proxy));
+
+// Step 20: The trap result must not add keys to a non-extensible target.
+keys = ["nonconf", "target_one", "fantasy"];
+assertThrows("Reflect.ownKeys(proxy)", TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js b/deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js
new file mode 100644
index 0000000000..0d6ae4c101
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-prevent-extensions.js
@@ -0,0 +1,87 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+
+(function () {
+ // No trap.
+
+ var target = {};
+ var handler = {};
+ var proxy = new Proxy(target, handler);
+
+ assertTrue(Reflect.isExtensible(target));
+ assertTrue(Reflect.isExtensible(proxy));
+ assertTrue(Reflect.preventExtensions(proxy));
+ assertFalse(Reflect.isExtensible(target));
+ assertFalse(Reflect.isExtensible(proxy));
+})();
+
+
+(function () {
+ // "Undefined" trap.
+
+ var target = {};
+ var handler = { preventExtensions: null };
+ var proxy = new Proxy(target, handler);
+
+ assertTrue(Reflect.isExtensible(target));
+ assertTrue(Reflect.isExtensible(proxy));
+ assertTrue(Reflect.preventExtensions(proxy));
+ assertFalse(Reflect.isExtensible(target));
+ assertFalse(Reflect.isExtensible(proxy));
+})();
+
+
+(function () {
+ // Invalid trap.
+
+ var target = {};
+ var handler = { preventExtensions: 42 };
+ var proxy = new Proxy(target, handler);
+
+ assertThrows(() => {Reflect.preventExtensions(proxy)}, TypeError);
+})();
+
+
+(function () {
+ var target = {};
+ var handler = { isExtensible() {return "bla"} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish and target is extensible.
+ assertTrue(Reflect.isExtensible(proxy));
+
+ // Trap returns trueish but target is not extensible.
+ Reflect.preventExtensions(target);
+ assertThrows(() => {Reflect.isExtensible(proxy)}, TypeError);
+})();
+
+
+(function () {
+ // Trap returns falsish.
+
+ var target = {};
+ var handler = { preventExtensions() {return 0} };
+ var proxy = new Proxy(target, handler);
+
+ assertFalse(Reflect.preventExtensions(proxy));
+ Reflect.preventExtensions(target);
+ assertFalse(Reflect.preventExtensions(proxy));
+})();
+
+
+(function () {
+ var target = {};
+ var handler = { preventExtensions() {return Symbol()} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish but target is extensible.
+ assertThrows(() => {Reflect.preventExtensions(proxy)}, TypeError);
+
+ // Trap returns trueish and target is not extensible.
+ Reflect.preventExtensions(target);
+ assertTrue(Reflect.preventExtensions(proxy));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js b/deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js
new file mode 100644
index 0000000000..b1742e20b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-property-is-enumerable.js
@@ -0,0 +1,30 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var handler = {};
+var target = { a: 1 };
+var proxy = new Proxy(target, handler);
+
+assertTrue(target.propertyIsEnumerable('a'));
+assertTrue(proxy.propertyIsEnumerable('a'));
+assertFalse(target.propertyIsEnumerable('b'));
+assertFalse(proxy.propertyIsEnumerable('b'));
+
+handler.getOwnPropertyDescriptor = function(target, prop) {
+ return { configurable: true, enumerable: true, value: 10 };
+}
+assertTrue(target.propertyIsEnumerable('a'));
+assertTrue(proxy.propertyIsEnumerable('a'));
+assertFalse(target.propertyIsEnumerable('b'));
+assertTrue(proxy.propertyIsEnumerable('b'));
+
+handler.getOwnPropertyDescriptor = function(target, prop) {
+ return { configurable: true, enumerable: false, value: 10 };
+}
+assertTrue(target.propertyIsEnumerable('a'));
+assertFalse(proxy.propertyIsEnumerable('a'));
+assertFalse(target.propertyIsEnumerable('b'));
+assertFalse(proxy.propertyIsEnumerable('b'));
diff --git a/deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js b/deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js
new file mode 100644
index 0000000000..e88476dd50
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-prototype-handler-stackoverflow.js
@@ -0,0 +1,118 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect --stack-size=100
+
+// Test that traps that involve walking the target object's prototype chain
+// don't overflow the stack when the original proxy is on that chain.
+
+(function TestGetPrototype() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try { return p.__proto__; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestSetPrototype() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try { p.__proto__ = p; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestHasProperty() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try {
+ return Reflect.has(p, "foo");
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestSet() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try { p.foo = 1; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestGet() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try { return p.foo; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestEnumerate() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try { for (var x in p) {} } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestIsExtensible() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try {
+ return Reflect.isExtensible(p);
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestPreventExtensions() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try {
+ Reflect.preventExtensions(p);
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestGetOwnPropertyDescriptor() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try {
+ return Object.getOwnPropertyDescriptor(p, "foo");
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestDeleteProperty() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try { delete p.foo; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestDefineProperty() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try {
+ Object.defineProperty(p, "foo", {value: "bar"});
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestOwnKeys() {
+ var handler = {};
+ var p = new Proxy({}, handler);
+ handler.__proto__ = p;
+ try {
+ return Reflect.ownKeys(p);
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestCall() {
+ var handler = {};
+ var p = new Proxy(function() {}, handler);
+ handler.__proto__ = p;
+ try { return p(); } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestConstruct() {
+ var handler = {};
+ var p = new Proxy(function() { this.foo = 1; }, handler);
+ handler.__proto__ = p;
+ try { return new p(); } catch(e) { assertInstanceof(e, RangeError); }
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js b/deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js
new file mode 100644
index 0000000000..ba55f6aad9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-prototype-target-stackoverflow.js
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+// Test that traps that involve walking the target object's prototype chain
+// don't overflow the stack when the original proxy is on that chain.
+
+(function TestGetPrototype() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ try { return p.__proto__; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestSetPrototype() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ try { p.__proto__ = p; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestHasProperty() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ try {
+ return Reflect.has(p, "foo");
+ } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestSet() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ try { p.foo = 1; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestGet() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ try { return p.foo; } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+(function TestEnumerate() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ try { for (var x in p) {} } catch(e) { assertInstanceof(e, RangeError); }
+})();
+
+// The following traps don't involve the target object's prototype chain;
+// we test them anyway for completeness.
+
+(function TestIsExtensible() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ return Reflect.isExtensible(p);
+})();
+
+(function TestPreventExtensions() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ Reflect.preventExtensions(p);
+})();
+
+(function TestGetOwnPropertyDescriptor() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ return Object.getOwnPropertyDescriptor(p, "foo");
+})();
+
+(function TestDeleteProperty() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ delete p.foo;
+})();
+
+(function TestDefineProperty() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ Object.defineProperty(p, "foo", {value: "bar"});
+})();
+
+(function TestOwnKeys() {
+ var p = new Proxy({}, {});
+ p.__proto__ = p;
+ return Reflect.ownKeys(p);
+})();
+
+(function TestCall() {
+ var p = new Proxy(function() {}, {});
+ p.__proto__ = p;
+ return p();
+})();
+
+(function TestConstruct() {
+ var p = new Proxy(function() { this.foo = 1; }, {});
+ p.__proto__ = p;
+ return new p();
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-revocable.js b/deps/v8/test/mjsunit/harmony/proxies-revocable.js
new file mode 100644
index 0000000000..d0d0f781d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-revocable.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+
+traps = [
+ "getPrototypeOf", "setPrototypeOf", "isExtensible", "preventExtensions",
+ "getOwnPropertyDescriptor", "has", "get", "set", "deleteProperty",
+ "defineProperty", "ownKeys", "apply", "construct"
+];
+// TODO(neis): Fix enumerate.
+
+var {proxy, revoke} = Proxy.revocable({}, {});
+assertEquals(0, revoke.length);
+
+assertEquals(undefined, revoke());
+for (var trap of traps) {
+ assertThrows(() => Reflect[trap](proxy), TypeError);
+}
+
+assertEquals(undefined, revoke());
+for (var trap of traps) {
+ assertThrows(() => Reflect[trap](proxy), TypeError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js b/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js
new file mode 100644
index 0000000000..bc60ff492c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js
@@ -0,0 +1,122 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+var target = { target: 1 };
+target.__proto__ = {};
+var handler = { handler: 1 };
+var proxy = new Proxy(target, handler);
+
+assertSame(Object.getPrototypeOf(proxy), target.__proto__ );
+
+
+assertThrows(function() { Object.setPrototypeOf(proxy, undefined) }, TypeError);
+assertThrows(function() { Object.setPrototypeOf(proxy, 1) }, TypeError);
+
+var prototype = [1];
+assertSame(proxy, Object.setPrototypeOf(proxy, prototype));
+assertSame(prototype, Object.getPrototypeOf(proxy));
+assertSame(prototype, Object.getPrototypeOf(target));
+
+var pair = Proxy.revocable(target, handler);
+assertSame(pair.proxy, Object.setPrototypeOf(pair.proxy, prototype));
+assertSame(prototype, Object.getPrototypeOf(pair.proxy));
+pair.revoke();
+assertThrows('Object.setPrototypeOf(pair.proxy, prototype)', TypeError);
+
+handler.setPrototypeOf = function(target, proto) {
+ return false;
+};
+assertThrows(function() { Object.setPrototypeOf(proxy, {a:1}) }, TypeError);
+
+handler.setPrototypeOf = function(target, proto) {
+ return undefined;
+};
+assertThrows(function() { Object.setPrototypeOf(proxy, {a:2}) }, TypeError);
+
+handler.setPrototypeOf = function(proto) {};
+assertThrows(function() { Object.setPrototypeOf(proxy, {a:3}) }, TypeError);
+
+handler.setPrototypeOf = function(target, proto) {
+ throw Error();
+};
+assertThrows(function() { Object.setPrototypeOf(proxy, {a:4}) }, Error);
+
+var seen_prototype;
+var seen_target;
+handler.setPrototypeOf = function(target, proto) {
+ seen_target = target;
+ seen_prototype = proto;
+ return true;
+}
+assertSame(Object.setPrototypeOf(proxy, {a:5}), proxy);
+assertSame(target, seen_target);
+assertEquals({a:5}, seen_prototype);
+
+(function setPrototypeProxyTarget() {
+ var target = { target: 1 };
+ target.__proto__ = {};
+ var handler = {};
+ var handler2 = {};
+ var target2 = new Proxy(target, handler2);
+ var proxy2 = new Proxy(target2, handler);
+ assertSame(Object.getPrototypeOf(proxy2), target.__proto__ );
+
+ var prototype = [2,3];
+ assertSame(proxy2, Object.setPrototypeOf(proxy2, prototype));
+ assertSame(prototype, Object.getPrototypeOf(proxy2));
+ assertSame(prototype, Object.getPrototypeOf(target));
+})();
+
+(function testProxyTrapInconsistent() {
+ var target = { target: 1 };
+ target.__proto__ = {};
+ var handler = {};
+ var handler2 = {
+ };
+
+ var target2 = new Proxy(target, handler);
+ var proxy2 = new Proxy(target2, handler2);
+
+ // If the final target is extensible we can set any prototype.
+ var prototype = [1];
+ Reflect.setPrototypeOf(proxy2, prototype);
+ assertSame(prototype, Reflect.getPrototypeOf(target));
+
+ handler2.setPrototypeOf = function(target, value) {
+ Reflect.setPrototypeOf(target, value);
+ return true;
+ };
+ prototype = [2];
+ Reflect.setPrototypeOf(proxy2, prototype);
+ assertSame(prototype, Reflect.getPrototypeOf(target));
+
+ // Prevent getting the target's prototype used to check the invariant.
+ var gotPrototype = false;
+ handler.getPrototypeOf = function() {
+ gotPrototype = true;
+ throw TypeError()
+ };
+ // If the target is extensible we do not check the invariant.
+ prototype = [3];
+ Reflect.setPrototypeOf(proxy2, prototype);
+ assertFalse(gotPrototype);
+ assertSame(prototype, Reflect.getPrototypeOf(target));
+
+ // Changing the prototype of a non-extensible target will trigger the
+ // invariant-check and throw in the above handler.
+ Reflect.preventExtensions(target);
+ assertThrows(() => {Reflect.setPrototypeOf(proxy2, [4])}, TypeError);
+ assertTrue(gotPrototype);
+ assertEquals([3], Reflect.getPrototypeOf(target));
+
+ // Setting the prototype of a non-extensible target is fine if the prototype
+ // doesn't change.
+ delete handler.getPrototypeOf;
+ Reflect.setPrototypeOf(proxy2, prototype);
+ // Changing the prototype will throw.
+ prototype = [5];
+ assertThrows(() => {Reflect.setPrototypeOf(proxy2, prototype)}, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-set.js b/deps/v8/test/mjsunit/harmony/proxies-set.js
new file mode 100644
index 0000000000..2fec115a10
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/proxies-set.js
@@ -0,0 +1,312 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies --harmony-reflect
+
+
+function sloppyDefaultSet(o, p, v) { return o[p] = v }
+function sloppyReflectSet(o, p, v) { return Reflect.set(o, p, v) }
+function strictDefaultSet(o, p, v) { "use strict"; return o[p] = v }
+function strictReflectSet(o, p, v) { "use strict"; return Reflect.set(o, p, v) }
+
+sloppyDefaultSet.shouldThrow = false;
+sloppyReflectSet.shouldThrow = false;
+strictDefaultSet.shouldThrow = true;
+strictReflectSet.shouldThrow = false;
+
+sloppyDefaultSet.returnsBool = false;
+sloppyReflectSet.returnsBool = true;
+strictDefaultSet.returnsBool = false;
+strictReflectSet.returnsBool = true;
+
+
+function assertTrueIf(flag, x) { if (flag) assertTrue(x) }
+function assertFalseIf(flag, x) { if (flag) assertFalse(x) }
+function assertSetFails(mySet, o, p, v) {
+ if (mySet.shouldThrow) {
+ assertThrows(() => mySet(o, p, v), TypeError);
+ } else {
+ assertFalseIf(mySet.returnsBool, mySet(o, p, v));
+ }
+}
+
+
+function dataDescriptor(x) {
+ return {value: x, writable: true, enumerable: true, configurable: true};
+}
+
+
+function toKey(x) {
+ if (typeof x === "symbol") return x;
+ return String(x);
+}
+
+
+var properties =
+ ["bla", "0", 1, Symbol(), {[Symbol.toPrimitive]() {return "a"}}];
+
+
+function TestForwarding(handler, mySet) {
+ assertTrue(undefined == handler.set);
+ assertTrue(undefined == handler.getOwnPropertyDescriptor);
+ assertTrue(undefined == handler.defineProperty);
+
+ var target = {};
+ var proxy = new Proxy(target, handler);
+
+ // Property does not exist on target.
+ for (var p of properties) {
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 42));
+ assertSame(42, target[p]);
+ }
+
+ // Property exists as writable data on target.
+ for (var p of properties) {
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0));
+ assertSame(0, target[p]);
+ }
+
+ // Property exists as non-writable data on target.
+ for (var p of properties) {
+ Object.defineProperty(target, p,
+ {value: 42, configurable: true, writable: false});
+ assertSetFails(mySet, proxy, p, 42);
+ assertSetFails(mySet, proxy, p, 0);
+ assertEquals(42, target[p]);
+ }
+};
+
+(function () {
+ // No trap.
+ var handler = {};
+ TestForwarding(handler, sloppyDefaultSet);
+ TestForwarding(handler, sloppyReflectSet);
+ TestForwarding(handler, strictDefaultSet);
+ TestForwarding(handler, strictReflectSet);
+})();
+
+(function () {
+ // "Undefined" trap.
+ var handler = { set: null };
+ TestForwarding(handler, sloppyDefaultSet);
+ TestForwarding(handler, sloppyReflectSet);
+ TestForwarding(handler, strictDefaultSet);
+ TestForwarding(handler, strictReflectSet);
+})();
+
+
+function TestForwarding2(mySet) {
+ // Check that setting on a proxy without "set" trap correctly triggers its
+ // "getOwnProperty" trap and its "defineProperty" trap.
+
+ var target = {};
+ var handler = {};
+ var observations = [];
+ var proxy = new Proxy(target, handler);
+
+ handler.getOwnPropertyDescriptor = function() {
+ observations.push(arguments);
+ return Reflect.getOwnPropertyDescriptor(...arguments);
+ }
+
+ handler.defineProperty = function() {
+ observations.push(arguments);
+ return Reflect.defineProperty(...arguments);
+ }
+
+ for (var p of properties) {
+ mySet(proxy, p, 42);
+ assertEquals(2, observations.length)
+ assertArrayEquals([target, toKey(p)], observations[0]);
+ assertSame(target, observations[0][0]);
+ assertArrayEquals([target, toKey(p), dataDescriptor(42)], observations[1]);
+ assertSame(target, observations[1][0]);
+ observations = [];
+
+ mySet(proxy, p, 42);
+ assertEquals(2, observations.length)
+ assertArrayEquals([target, toKey(p)], observations[0]);
+ assertSame(target, observations[0][0]);
+ assertArrayEquals([target, toKey(p), {value: 42}], observations[1]);
+ assertSame(target, observations[1][0]);
+ observations = [];
+ }
+}
+
+TestForwarding2(sloppyDefaultSet);
+TestForwarding2(sloppyReflectSet);
+TestForwarding2(strictDefaultSet);
+TestForwarding2(strictReflectSet);
+
+
+function TestInvalidTrap(proxy, mySet) {
+ for (var p of properties) {
+ assertThrows(() => mySet(proxy, p, 42), TypeError);
+ }
+}
+
+(function () {
+ var target = {};
+ var handler = { set: true };
+ var proxy = new Proxy(target, handler);
+
+ TestInvalidTrap(proxy, sloppyDefaultSet);
+ TestInvalidTrap(proxy, sloppyReflectSet);
+ TestInvalidTrap(proxy, strictDefaultSet);
+ TestInvalidTrap(proxy, strictReflectSet);
+})();
+
+
+function TestTrappingFalsish(mySet) {
+ var target = {};
+ var handler = { set() {return ""} };
+ var proxy = new Proxy(target, handler);
+
+ for (var p of properties) {
+ assertSetFails(mySet, proxy, p, 42);
+ }
+}
+
+TestTrappingFalsish(sloppyDefaultSet);
+TestTrappingFalsish(sloppyReflectSet);
+TestTrappingFalsish(strictDefaultSet);
+TestTrappingFalsish(strictReflectSet);
+
+
+function TestTrappingTrueish(mySet) {
+ var target = {};
+ var handler = { set() {return 42} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish and property does not exist in target.
+ for (var p of properties) {
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0));
+ }
+
+ // Trap returns trueish and target property is configurable or writable data.
+ for (var p of properties) {
+ Object.defineProperty(target, p, {configurable: true, writable: true});
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0));
+ Object.defineProperty(target, p, {configurable: true, writable: false});
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0));
+ Object.defineProperty(target, p, {configurable: false, writable: true});
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0));
+ }
+}
+
+TestTrappingTrueish(sloppyDefaultSet);
+TestTrappingTrueish(sloppyReflectSet);
+TestTrappingTrueish(strictDefaultSet);
+TestTrappingTrueish(strictReflectSet);
+
+
+function TestTrappingTrueish2(mySet) {
+ var target = {};
+ var handler = { set() {return 42} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish but target property is frozen data.
+ for (var p of properties) {
+ Object.defineProperty(target, p, {
+ configurable: false, writable: false, value: 0
+ });
+ assertThrows(() => mySet(proxy, p, 666), TypeError); // New value.
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0)); // Old value.
+ }
+};
+
+TestTrappingTrueish2(sloppyDefaultSet);
+TestTrappingTrueish2(sloppyReflectSet);
+TestTrappingTrueish2(strictDefaultSet);
+TestTrappingTrueish2(strictReflectSet);
+
+
+function TestTrappingTrueish3(mySet) {
+ var target = {};
+ var handler = { set() {return 42} };
+ var proxy = new Proxy(target, handler);
+
+ // Trap returns trueish and target property is configurable accessor.
+ for (var p of properties) {
+ Object.defineProperty(target, p, { configurable: true, set: undefined });
+ assertTrueIf(mySet.returnsBool, mySet(proxy, p, 0));
+ }
+
+ // Trap returns trueish and target property is non-configurable accessor.
+ for (var p of properties) {
+ Object.defineProperty(target, p, { configurable: false, set: undefined });
+ assertThrows(() => mySet(proxy, p, 0));
+ }
+};
+
+TestTrappingTrueish3(sloppyDefaultSet);
+TestTrappingTrueish3(sloppyReflectSet);
+TestTrappingTrueish3(strictDefaultSet);
+TestTrappingTrueish3(strictReflectSet);
+
+
+function TestTrapReceiverArgument(mySet) {
+ var target = {};
+ var handler = {};
+ var observations = [];
+ var proxy = new Proxy(target, handler);
+ var object = Object.create(proxy);
+
+ handler.set = function() {
+ observations.push(arguments);
+ return Reflect.set(...arguments);
+ }
+
+ for (var p of properties) {
+ mySet(object, p, 42);
+ assertEquals(1, observations.length)
+ assertArrayEquals([target, toKey(p), 42, object], observations[0]);
+ assertSame(target, observations[0][0]);
+ assertSame(object, observations[0][3]);
+ observations = [];
+ }
+};
+
+TestTrapReceiverArgument(sloppyDefaultSet);
+TestTrapReceiverArgument(sloppyReflectSet);
+TestTrapReceiverArgument(strictDefaultSet);
+TestTrapReceiverArgument(strictReflectSet);
+
+
+(function TestTrapReceiverArgument2() {
+ // Check that non-object receiver is passed through as well.
+
+ var target = {};
+ var handler = {};
+ var observations = [];
+ var proxy = new Proxy(target, handler);
+
+ handler.set = function() {
+ observations.push(arguments);
+ return Reflect.set(...arguments);
+ }
+
+ for (var p of properties) {
+ for (var receiver of [null, undefined, 1]) {
+ Reflect.set(proxy, p, 42, receiver);
+ assertEquals(1, observations.length)
+ assertArrayEquals([target, toKey(p), 42, receiver], observations[0]);
+ assertSame(target, observations[0][0]);
+ assertSame(receiver, observations[0][3]);
+ observations = [];
+ }
+ }
+
+ var object = Object.create(proxy);
+ for (var p of properties) {
+ for (var receiver of [null, undefined, 1]) {
+ Reflect.set(object, p, 42, receiver);
+ assertEquals(1, observations.length);
+ assertArrayEquals([target, toKey(p), 42, receiver], observations[0]);
+ assertSame(target, observations[0][0]);
+ assertSame(receiver, observations[0][3]);
+ observations = [];
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-symbols.js b/deps/v8/test/mjsunit/harmony/proxies-symbols.js
deleted file mode 100644
index 52353c036d..0000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-symbols.js
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-proxies
-
-
-// Helper.
-
-function TestWithProxies(test, x, y, z) {
- test(Proxy.create, x, y, z)
- test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
-}
-
-
-// No symbols should leak to proxy traps.
-
-function TestNoSymbolsToTrap(handler) {
- TestWithProxies(TestNoSymbolsToTrap2, handler)
-}
-
-function TestNoSymbolsToTrap2(create, handler) {
- var p = create(handler)
- var o = Object.create(p)
- var symbol = Symbol("secret")
-
- assertFalse(symbol in p)
- assertFalse(symbol in o)
- assertEquals(undefined, p[symbol])
- assertEquals(undefined, o[symbol])
- assertEquals(47, p[symbol] = 47)
- assertEquals(47, o[symbol] = 47)
- assertFalse(delete p[symbol])
- assertTrue(delete o[symbol])
- assertTrue(delete o[symbol])
- assertFalse({}.hasOwnProperty.call(p, symbol))
- assertFalse({}.hasOwnProperty.call(o, symbol))
- assertEquals(undefined, Object.getOwnPropertyDescriptor(p, symbol))
- assertEquals(undefined, Object.getOwnPropertyDescriptor(o, symbol))
-}
-
-
-TestNoSymbolsToTrap({
- has: assertUnreachable,
- hasOwn: assertUnreachable,
- get: assertUnreachable,
- set: assertUnreachable,
- delete: assertUnreachable,
- getPropertyDescriptor: assertUnreachable,
- getOwnPropertyDescriptor: assertUnreachable,
- defineProperty: assertUnreachable
-})
-
-
-// All symbols returned from proxy traps should be filtered.
-
-function TestNoSymbolsFromTrap(handler) {
- TestWithProxies(TestNoSymbolsFromTrap2, handler)
-}
-
-function TestNoSymbolsFromTrap2(create, handler) {
- var p = create(handler)
- var o = Object.create(p)
-
- assertEquals(0, Object.keys(p).length)
- assertEquals(0, Object.keys(o).length)
- assertEquals(0, Object.getOwnPropertyNames(p).length)
- assertEquals(0, Object.getOwnPropertyNames(o).length)
- for (var n in p) assertUnreachable()
- for (var n in o) assertUnreachable()
-}
-
-
-function MakeSymbolArray() {
- return [Symbol(), Symbol("a")]
-}
-
-TestNoSymbolsFromTrap({
- enumerate: MakeSymbolArray,
- keys: MakeSymbolArray,
- getPropertyNames: MakeSymbolArray,
- getOwnPropertyNames: MakeSymbolArray
-})
diff --git a/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js b/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
index 8a03ef481e..c87492c61d 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
@@ -5,22 +5,18 @@
// Flags: --harmony-proxies
-// TODO(arv): Once proxies can intercept symbols, add more tests.
-
-
function TestBasics() {
var log = [];
- var proxy = Proxy.create({
- getPropertyDescriptor: function(key) {
- log.push(key);
- if (key === 'x') {
- return {
- value: 1,
- configurable: true
- };
- }
- return undefined;
+ var proxy = new Proxy({}, {
+ get: function(target, key) {
+ log.push("get " + String(key));
+ if (key === 'x') return 1;
+ },
+ has: function(target, key) {
+ log.push("has " + String(key));
+ if (key === 'x') return true;
+ return false;
}
});
@@ -30,27 +26,24 @@ function TestBasics() {
assertEquals(1, x);
}
- // One 'x' for HasBinding and one for GetBindingValue
- assertEquals(['assertEquals', 'x', 'x'], log);
+ assertEquals(['has assertEquals', 'has x', 'get Symbol(Symbol.unscopables)',
+ 'get x'], log);
}
TestBasics();
function TestInconsistent() {
var log = [];
- var calls = 0;
- var proxy = Proxy.create({
- getPropertyDescriptor: function(key) {
- log.push(key);
- if (key === 'x' && calls < 1) {
- calls++;
- return {
- value: 1,
- configurable: true
- };
- }
+ var proxy = new Proxy({}, {
+ get: function(target, key) {
+ log.push("get " + String(key));
return undefined;
+ },
+ has: function(target, key) {
+ log.push("has " + String(key));
+ if (key === 'x') return true;
+ return false;
}
});
@@ -60,8 +53,8 @@ function TestInconsistent() {
assertEquals(void 0, x);
}
- // One 'x' for HasBinding and one for GetBindingValue
- assertEquals(['assertEquals', 'x', 'x'], log);
+ assertEquals(['has assertEquals', 'has x', 'get Symbol(Symbol.unscopables)',
+ 'get x'], log);
}
TestInconsistent();
@@ -72,19 +65,14 @@ function TestUseProxyAsUnscopables() {
x: 2
};
var calls = 0;
- var proxy = Proxy.create({
- has: function(key) {
+ var proxy = new Proxy({}, {
+ has: function() {
assertUnreachable();
},
- getPropertyDescriptor: function(key) {
- calls++;
+ get: function(target, key) {
assertEquals('x', key);
- return {
- value: calls === 2 ? true : undefined,
- configurable: true,
- enumerable: true,
- writable: true,
- };
+ calls++;
+ return calls === 2 ? true : undefined;
}
});
@@ -110,11 +98,11 @@ function TestThrowInHasUnscopables() {
function CustomError() {}
var calls = 0;
- var proxy = Proxy.create({
- has: function(key) {
+ var proxy = new Proxy({}, {
+ has: function() {
assertUnreachable();
},
- getPropertyDescriptor: function(key) {
+ get: function(target, key) {
if (calls++ === 0) {
throw new CustomError();
}
@@ -136,8 +124,11 @@ TestThrowInHasUnscopables();
var global = this;
function TestGlobalShouldIgnoreUnscopables() {
global.x = 1;
- var proxy = Proxy.create({
- getPropertyDescriptor: function() {
+ var proxy = new Proxy({}, {
+ get: function() {
+ assertUnreachable();
+ },
+ has: function() {
assertUnreachable();
}
});
diff --git a/deps/v8/test/mjsunit/harmony/proxies-with.js b/deps/v8/test/mjsunit/harmony/proxies-with.js
index 94de25e3ea..1aa13adea6 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-with.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-with.js
@@ -31,8 +31,10 @@
// Helper.
function TestWithProxies(test, x, y, z) {
- test(Proxy.create, x, y, z)
- test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+ test(function(h) { return new Proxy({}, h) }, x, y, z)
+ test(function(h) {
+ return new Proxy(function() {}, h)
+ }, x, y, z)
}
@@ -49,69 +51,40 @@ var key = ""
function TestWithGet2(create, handler) {
var b = "local"
- var p = create(handler)
+ var p = create(handler);
+ assertEquals("onproxy", p.a);
+ assertEquals(undefined, p.b);
+ assertEquals(undefined, p.c);
+
with (p) {
- assertEquals("onproxy", a)
- assertEquals("local", b)
- assertEquals("global", c)
+ assertEquals("onproxy", a);
+ assertEquals("local", b);
+ assertEquals("global", c);
}
var o = Object.create(p, {d: {value: "own"}})
with (o) {
assertEquals("onproxy", a)
- assertEquals("local", b)
+ assertEquals("local", b);
assertEquals("global", c)
assertEquals("own", d)
}
}
TestWithGet({
- get: function(r, k) { key = k; return k === "a" ? "onproxy" : undefined },
- getPropertyDescriptor: function(k) {
+ get(target, k) {
key = k;
- return k === "a" ? {value: "onproxy", configurable: true} : undefined
- }
+ return k === "a" ? "onproxy" : undefined
+ },
+ has(target, k) { return k === 'a' }
})
TestWithGet({
get: function(r, k) { return this.get2(r, k) },
get2: function(r, k) { key = k; return k === "a" ? "onproxy" : undefined },
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: "onproxy", configurable: true} : undefined
- }
-})
-
-TestWithGet({
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: "onproxy", configurable: true} : undefined
- }
-})
-
-TestWithGet({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- key = k;
- return k === "a" ? {value: "onproxy", configurable: true} : undefined
- }
+ has(target, k) { return k === 'a' }
})
-TestWithGet({
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ?
- {get value() { return "onproxy" }, configurable: true} : undefined
- }
-})
-
-TestWithGet({
- get: undefined,
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: "onproxy", configurable: true} : undefined
- }
-})
@@ -151,49 +124,37 @@ function onproxy() { receiver = this; return "onproxy" }
TestWithGetCall({
get: function(r, k) { key = k; return k === "a" ? onproxy : undefined },
- getPropertyDescriptor: function(k) {
+ has: function(t, k) {
key = k;
- return k === "a" ? {value: onproxy, configurable: true} : undefined
+ return k === "a";
}
})
TestWithGetCall({
get: function(r, k) { return this.get2(r, k) },
get2: function(r, k) { key = k; return k === "a" ? onproxy : undefined },
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: onproxy, configurable: true} : undefined
- }
-})
-
-TestWithGetCall({
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: onproxy, configurable: true} : undefined
- }
-})
-
-TestWithGetCall({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
+ has: function(t, k) {
key = k;
- return k === "a" ? {value: onproxy, configurable: true} : undefined
+ return k === "a";
}
})
TestWithGetCall({
- getPropertyDescriptor: function(k) {
+ get: function(r, k) { key = k; return k === "a" ? onproxy : undefined },
+ has: function(t, k) {
+ return this.has2(k)
+ },
+ has2: function(k) {
key = k;
- return k === "a" ?
- {get value() { return onproxy }, configurable: true} : undefined
+ return k === "a";
}
})
TestWithGetCall({
- get: undefined,
- getPropertyDescriptor: function(k) {
+ get: function(r, k) { key = k; return k === "a" ? onproxy : undefined },
+ has: function(t, k) {
key = k;
- return k === "a" ? {value: onproxy, configurable: true} : undefined
+ return k === "a";
}
})
@@ -207,14 +168,14 @@ function TestWithGetCallThrow2(create, handler) {
var p = create(handler)
with (p) {
- assertThrows(function(){ a() }, "myexn")
+ assertThrowsEquals(function(){ a() }, "myexn")
assertEquals("local", b())
assertEquals("global", c())
}
var o = Object.create(p, {d: {value: function() { return "own" }}})
with (o) {
- assertThrows(function(){ a() }, "myexn")
+ assertThrowsEquals(function(){ a() }, "myexn")
assertEquals("local", b())
assertEquals("global", c())
assertEquals("own", d())
@@ -224,51 +185,14 @@ function TestWithGetCallThrow2(create, handler) {
function onproxythrow() { throw "myexn" }
TestWithGetCallThrow({
+ has: function(r, k) { return k === "a"; },
get: function(r, k) { key = k; return k === "a" ? onproxythrow : undefined },
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: onproxythrow, configurable: true} : undefined
- }
})
TestWithGetCallThrow({
+ has: function(r, k) { return k === "a"; },
get: function(r, k) { return this.get2(r, k) },
get2: function(r, k) { key = k; return k === "a" ? onproxythrow : undefined },
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: onproxythrow, configurable: true} : undefined
- }
-})
-
-TestWithGetCallThrow({
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: onproxythrow, configurable: true} : undefined
- }
-})
-
-TestWithGetCallThrow({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- key = k;
- return k === "a" ? {value: onproxythrow, configurable: true} : undefined
- }
-})
-
-TestWithGetCallThrow({
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ?
- {get value() { return onproxythrow }, configurable: true} : undefined
- }
-})
-
-TestWithGetCallThrow({
- get: undefined,
- getPropertyDescriptor: function(k) {
- key = k;
- return k === "a" ? {value: onproxythrow, configurable: true} : undefined
- }
})
@@ -322,79 +246,58 @@ function TestWithSet2(create, handler, hasSetter) {
TestWithSet({
set: function(r, k, v) { key = k; val = v; return true },
- getPropertyDescriptor: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
+ has: function(t, k) {
+ return k === "a"
}
})
TestWithSet({
set: function(r, k, v) { return this.set2(r, k, v) },
set2: function(r, k, v) { key = k; val = v; return true },
- getPropertyDescriptor: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
+ has: function(t, k) {
+ return k === "a"
}
})
TestWithSet({
- getPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor(k)
- },
- getOwnPropertyDescriptor: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
+ has: function(t, k) {
+ return k === "a"
},
- defineProperty: function(k, desc) { key = k; val = desc.value }
+ defineProperty: function(t, k, desc) { key = k; val = desc.value }
})
TestWithSet({
- getOwnPropertyDescriptor: function(k) {
- return this.getPropertyDescriptor2(k)
+ has: function(t, k) {
+ return this.has2(k)
},
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
+ has2: function(k) {
+ return k === "a"
},
- defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
+ defineProperty: function(t, k, desc) { this.defineProperty2(k, desc) },
defineProperty2: function(k, desc) { key = k; val = desc.value }
})
TestWithSet({
- getOwnPropertyDescriptor: function(k) {
- return this.getPropertyDescriptor(k)
- },
- getPropertyDescriptor: function(k) {
- return k === "a" ?
- {get writable() { return true }, configurable: true} : undefined
+ has: function(t, k) {
+ return k === "a"
},
- defineProperty: function(k, desc) { key = k; val = desc.value }
+ defineProperty: function(t, k, desc) { key = k; val = desc.value }
})
TestWithSet({
- getOwnPropertyDescriptor: function(k) {
- return this.getPropertyDescriptor(k)
+ has: function(t, k) {
+ return this.has2(k) },
+ has2: function(k) {
+ return k === "a"
},
- getPropertyDescriptor: function(k) {
- return k === "a" ?
- {set: function(v) { key = k; val = v }, configurable: true} : undefined
- }
+ set: function(t, k, v) { key = k; val = v; return true }
}, true)
TestWithSet({
- getOwnPropertyDescriptor: function(k) {
- return this.getPropertyDescriptor(k)
+ has: function(t, k) {
+ return k === "a"
},
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- return k === "a" ?
- {set: function(v) { key = k; val = v }, configurable: true} : undefined
- }
-}, true)
-
-TestWithSet({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
- },
- defineProperty: function(k, desc) { key = k; val = desc.value }
+ defineProperty: function(t, k, desc) { key = k; val = desc.value }
})
@@ -404,7 +307,7 @@ function TestWithSetThrow(handler, hasSetter) {
function TestWithSetThrow2(create, handler, hasSetter) {
var p = create(handler)
- assertThrows(function(){
+ assertThrowsEquals(function(){
with (p) {
a = 1
}
@@ -413,7 +316,7 @@ function TestWithSetThrow2(create, handler, hasSetter) {
if (!hasSetter) return
var o = Object.create(p, {})
- assertThrows(function(){
+ assertThrowsEquals(function(){
with (o) {
a = 1
}
@@ -421,26 +324,30 @@ function TestWithSetThrow2(create, handler, hasSetter) {
}
TestWithSetThrow({
- set: function(r, k, v) { throw "myexn" },
- getPropertyDescriptor: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
+ set: function() { throw "myexn" },
+ has: function(t, k) {
+ return k === "a"
}
})
TestWithSetThrow({
- getPropertyDescriptor: function(k) { throw "myexn" },
+ has: function() { throw "myexn" },
})
TestWithSetThrow({
- getPropertyDescriptor: function(k) {
- return k === "a" ? {writable: true, configurable: true} : undefined
+ has: function() { throw "myexn" },
+})
+
+TestWithSetThrow({
+ has: function(t, k) {
+ return k === "a"
},
- defineProperty: function(k, desc) { throw "myexn" }
+ defineProperty: function() { throw "myexn" }
})
TestWithSetThrow({
- getPropertyDescriptor: function(k) {
- return k === "a" ?
- {set: function() { throw "myexn" }, configurable: true} : undefined
- }
+ has: function(t, k) {
+ return k === "a"
+ },
+ set: function() { throw "myexn" }
}, true)
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index e49ea7fab8..8f24d4d9ad 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -35,12 +35,21 @@
// Helper.
function TestWithProxies(test, x, y, z) {
- test(Proxy.create, x, y, z)
- test(function(h) {return Proxy.createFunction(h, function() {})}, x, y, z)
+ // Separate function for nicer stack traces.
+ TestWithObjectProxy(test, x, y, z);
+ TestWithFunctionProxy(test, x, y, z);
}
+function TestWithObjectProxy(test, x, y, z) {
+ test((handler) => { return new Proxy({}, handler) }, x, y, z)
+}
+
+function TestWithFunctionProxy(test, x, y, z) {
+ test((handler) => { return new Proxy(() => {}, handler) }, x, y, z)
+}
+// ---------------------------------------------------------------------------
// Getting property descriptors (Object.getOwnPropertyDescriptor).
var key
@@ -58,44 +67,45 @@ function TestGetOwnProperty2(create, handler) {
}
TestGetOwnProperty({
- getOwnPropertyDescriptor: function(k) {
+ getOwnPropertyDescriptor(target, k) {
key = k
return {value: 42, configurable: true}
}
})
TestGetOwnProperty({
- getOwnPropertyDescriptor: function(k) {
+ getOwnPropertyDescriptor(target, k) {
return this.getOwnPropertyDescriptor2(k)
},
- getOwnPropertyDescriptor2: function(k) {
+ getOwnPropertyDescriptor2(k) {
key = k
return {value: 42, configurable: true}
}
})
TestGetOwnProperty({
- getOwnPropertyDescriptor: function(k) {
+ getOwnPropertyDescriptor(target, k) {
key = k
return {get value() { return 42 }, get configurable() { return true }}
}
})
-TestGetOwnProperty(Proxy.create({
- get: function(pr, pk) {
- return function(k) { key = k; return {value: 42, configurable: true} }
+TestGetOwnProperty(new Proxy({}, {
+ get(target, pk, receiver) {
+ return function(t, k) { key = k; return {value: 42, configurable: true} }
}
}))
+// ---------------------------------------------------------------------------
function TestGetOwnPropertyThrow(handler) {
TestWithProxies(TestGetOwnPropertyThrow2, handler)
}
function TestGetOwnPropertyThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
- assertThrows(function(){ Object.getOwnPropertyDescriptor(p, 77) }, "myexn")
+ assertThrowsEquals(() => Object.getOwnPropertyDescriptor(p, "a"), "myexn")
+ assertThrowsEquals(() => Object.getOwnPropertyDescriptor(p, 77), "myexn")
}
TestGetOwnPropertyThrow({
@@ -104,7 +114,7 @@ TestGetOwnPropertyThrow({
TestGetOwnPropertyThrow({
getOwnPropertyDescriptor: function(k) {
- return this.getPropertyDescriptor2(k)
+ return this.getOwnPropertyDescriptor2(k)
},
getOwnPropertyDescriptor2: function(k) { throw "myexn" }
})
@@ -115,14 +125,14 @@ TestGetOwnPropertyThrow({
}
})
-TestGetOwnPropertyThrow(Proxy.create({
+TestGetOwnPropertyThrow(new Proxy({}, {
get: function(pr, pk) {
return function(k) { throw "myexn" }
}
}))
-
+// ---------------------------------------------------------------------------
// Getters (dot, brackets).
var key
@@ -161,42 +171,22 @@ function TestGet2(create, handler) {
}
TestGet({
- get: function(r, k) { key = k; return 42 }
-})
-
-TestGet({
- get: function(r, k) { return this.get2(r, k) },
- get2: function(r, k) { key = k; return 42 }
-})
-
-TestGet({
- getPropertyDescriptor: function(k) { key = k; return {value: 42} }
+ get(t, k, r) { key = k; return 42 }
})
TestGet({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) { key = k; return {value: 42} }
+ get(t, k, r) { return this.get2(r, k) },
+ get2(r, k) { key = k; return 42 }
})
-TestGet({
- getPropertyDescriptor: function(k) {
- key = k;
- return {get value() { return 42 }}
- }
-})
-
-TestGet({
- get: undefined,
- getPropertyDescriptor: function(k) { key = k; return {value: 42} }
-})
-
-TestGet(Proxy.create({
- get: function(pr, pk) {
- return function(r, k) { key = k; return 42 }
+TestGet(new Proxy({}, {
+ get(pt, pk, pr) {
+ return function(t, k, r) { key = k; return 42 }
}
}))
+// ---------------------------------------------------------------------------
function TestGetCall(handler) {
TestWithProxies(TestGetCall2, handler)
}
@@ -241,120 +231,75 @@ function TestGetCall2(create, handler) {
}
TestGetCall({
- get: function(r, k) { return function() { return 55 } }
-})
-
-TestGetCall({
- get: function(r, k) { return this.get2(r, k) },
- get2: function(r, k) { return function() { return 55 } }
+ get(t, k, r) { return () => { return 55 } }
})
TestGetCall({
- getPropertyDescriptor: function(k) {
- return {value: function() { return 55 }}
- }
+ get(t, k, r) { return this.get2(t, k, r) },
+ get2(t, k, r) { return () => { return 55 } }
})
TestGetCall({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- return {value: function() { return 55 }}
- }
-})
-
-TestGetCall({
- getPropertyDescriptor: function(k) {
- return {get value() { return function() { return 55 } }}
- }
-})
-
-TestGetCall({
- get: undefined,
- getPropertyDescriptor: function(k) {
- return {value: function() { return 55 }}
- }
-})
-
-TestGetCall({
- get: function(r, k) {
+ get(t, k, r) {
if (k == "gg") {
- return function() { return 55 }
+ return () => { return 55 }
} else if (k == "withargs") {
- return function(n, m) { return n + m * 2 }
+ return (n, m) => { return n + m * 2 }
} else {
- return function() { return this.gg() }
+ return () => { return r.gg() }
}
}
})
-TestGetCall(Proxy.create({
- get: function(pr, pk) {
- return function(r, k) { return function() { return 55 } }
+TestGetCall(new Proxy({}, {
+ get(pt, pk, pr) {
+ return (t, k, r) => { return () => { return 55 } }
}
}))
+// ---------------------------------------------------------------------------
function TestGetThrow(handler) {
TestWithProxies(TestGetThrow2, handler)
}
function TestGetThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ p.a }, "myexn")
- assertThrows(function(){ p["b"] }, "myexn")
- assertThrows(function(){ p[3] }, "myexn")
- assertThrows(function(){ (function(n) { p[n] })("c") }, "myexn")
- assertThrows(function(){ (function(n) { p[n] })(99) }, "myexn")
+ assertThrowsEquals(function(){ p.a }, "myexn")
+ assertThrowsEquals(function(){ p["b"] }, "myexn")
+ assertThrowsEquals(function(){ p[3] }, "myexn")
+ assertThrowsEquals(function(){ (function(n) { p[n] })("c") }, "myexn")
+ assertThrowsEquals(function(){ (function(n) { p[n] })(99) }, "myexn")
var o = Object.create(p, {x: {value: 88}, '4': {value: 89}})
- assertThrows(function(){ o.a }, "myexn")
- assertThrows(function(){ o["b"] }, "myexn")
- assertThrows(function(){ o[3] }, "myexn")
- assertThrows(function(){ (function(n) { o[n] })("c") }, "myexn")
- assertThrows(function(){ (function(n) { o[n] })(99) }, "myexn")
+ assertThrowsEquals(function(){ o.a }, "myexn")
+ assertThrowsEquals(function(){ o["b"] }, "myexn")
+ assertThrowsEquals(function(){ o[3] }, "myexn")
+ assertThrowsEquals(function(){ (function(n) { o[n] })("c") }, "myexn")
+ assertThrowsEquals(function(){ (function(n) { o[n] })(99) }, "myexn")
}
TestGetThrow({
- get: function(r, k) { throw "myexn" }
-})
-
-TestGetThrow({
- get: function(r, k) { return this.get2(r, k) },
- get2: function(r, k) { throw "myexn" }
-})
-
-TestGetThrow({
- getPropertyDescriptor: function(k) { throw "myexn" }
-})
-
-TestGetThrow({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) { throw "myexn" }
-})
-
-TestGetThrow({
- getPropertyDescriptor: function(k) {
- return {get value() { throw "myexn" }}
- }
+ get(r, k) { throw "myexn" }
})
TestGetThrow({
- get: undefined,
- getPropertyDescriptor: function(k) { throw "myexn" }
+ get(r, k) { return this.get2(r, k) },
+ get2(r, k) { throw "myexn" }
})
-TestGetThrow(Proxy.create({
- get: function(pr, pk) { throw "myexn" }
+TestGetThrow(new Proxy({}, {
+ get(pr, pk) { throw "myexn" }
}))
-TestGetThrow(Proxy.create({
- get: function(pr, pk) {
+TestGetThrow(new Proxy({}, {
+ get(pr, pk) {
return function(r, k) { throw "myexn" }
}
}))
-
+// ---------------------------------------------------------------------------
// Setters.
var key
@@ -397,78 +342,25 @@ TestSet({
set2: function(r, k, v) { key = k; val = v; return true }
})
-TestSet({
- getOwnPropertyDescriptor: function(k) { return {writable: true} },
- defineProperty: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor2(k)
- },
- getOwnPropertyDescriptor2: function(k) { return {writable: true} },
- defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
- defineProperty2: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) {
- return {get writable() { return true }}
- },
- defineProperty: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) {
- return {set: function(v) { key = k; val = v }}
- }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) { return {writable: true} },
- defineProperty: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) {
- return {get writable() { return true }}
- },
- defineProperty: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) {
- return {set: function(v) { key = k; val = v }}
- }
-})
-
-TestSet({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) { return null },
- defineProperty: function(k, desc) { key = k, val = desc.value }
-})
-
-TestSet(Proxy.create({
- get: function(pr, pk) {
- return function(r, k, v) { key = k; val = v; return true }
+TestSet(new Proxy({}, {
+ get(pk, pr) {
+ return (r, k, v) => { key = k; val = v; return true }
}
}))
+// ---------------------------------------------------------------------------
function TestSetThrow(handler) {
TestWithProxies(TestSetThrow2, handler)
}
function TestSetThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ p.a = 42 }, "myexn")
- assertThrows(function(){ p["b"] = 42 }, "myexn")
- assertThrows(function(){ p[22] = 42 }, "myexn")
- assertThrows(function(){ (function(n) { p[n] = 45 })("c") }, "myexn")
- assertThrows(function(){ (function(n) { p[n] = 46 })(99) }, "myexn")
+ assertThrowsEquals(function(){ p.a = 42 }, "myexn")
+ assertThrowsEquals(function(){ p["b"] = 42 }, "myexn")
+ assertThrowsEquals(function(){ p[22] = 42 }, "myexn")
+ assertThrowsEquals(function(){ (function(n) { p[n] = 45 })("c") }, "myexn")
+ assertThrowsEquals(function(){ (function(n) { p[n] = 46 })(99) }, "myexn")
}
TestSetThrow({
@@ -486,7 +378,9 @@ TestSetThrow({
})
TestSetThrow({
- getOwnPropertyDescriptor: function(k) { return {writable: true} },
+ getOwnPropertyDescriptor: function(k) {
+ return {configurable: true, writable: true}
+ },
defineProperty: function(k, desc) { throw "myexn" }
})
@@ -503,7 +397,9 @@ TestSetThrow({
getOwnPropertyDescriptor: function(k) {
return this.getOwnPropertyDescriptor2(k)
},
- getOwnPropertyDescriptor2: function(k) { return {writable: true} },
+ getOwnPropertyDescriptor2: function(k) {
+ return {configurable: true, writable: true}
+ },
defineProperty: function(k, desc) { this.defineProperty2(k, desc) },
defineProperty2: function(k, desc) { throw "myexn" }
})
@@ -515,7 +411,10 @@ TestSetThrow({
TestSetThrow({
getOwnPropertyDescriptor: function(k) {
- return {get writable() { return true }}
+ return {
+ get configurable() { return true },
+ get writable() { return true }
+ }
},
defineProperty: function(k, desc) { throw "myexn" }
})
@@ -525,167 +424,23 @@ TestSetThrow({
})
TestSetThrow({
- getOwnPropertyDescriptor: function(k) {
- return {set: function(v) { throw "myexn" }}
- }
-})
-
-TestSetThrow({
getOwnPropertyDescriptor: function(k) { throw "myexn" },
- getPropertyDescriptor: function(k) { return {writable: true} },
- defineProperty: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSetThrow({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) { throw "myexn" },
- defineProperty: function(k, desc) { key = k; val = desc.value }
-})
-
-TestSetThrow({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) { return {writable: true} },
- defineProperty: function(k, desc) { throw "myexn" }
-})
-
-TestSetThrow({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) {
- return {get writable() { throw "myexn" }}
- },
defineProperty: function(k, desc) { key = k; val = desc.value }
})
-TestSetThrow({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) {
- return {set: function(v) { throw "myexn" }}
- }
-})
-
-TestSetThrow({
- getOwnPropertyDescriptor: function(k) { return null },
- getPropertyDescriptor: function(k) { return null },
- defineProperty: function(k, desc) { throw "myexn" }
-})
-
-TestSetThrow(Proxy.create({
+TestSetThrow(new Proxy({}, {
get: function(pr, pk) { throw "myexn" }
}))
-TestSetThrow(Proxy.create({
+TestSetThrow(new Proxy({}, {
get: function(pr, pk) {
return function(r, k, v) { throw "myexn" }
}
}))
-
-var rec
-var key
-var val
-
-function TestSetForDerived(trap) {
- TestWithProxies(TestSetForDerived2, trap)
-}
-
-function TestSetForDerived2(create, trap) {
- var p = create({getPropertyDescriptor: trap, getOwnPropertyDescriptor: trap})
- var o = Object.create(p, {x: {value: 88, writable: true},
- '1': {value: 89, writable: true}})
-
- key = ""
- assertEquals(48, o.x = 48)
- assertEquals("", key) // trap not invoked
- assertEquals(48, o.x)
-
- assertEquals(47, o[1] = 47)
- assertEquals("", key) // trap not invoked
- assertEquals(47, o[1])
-
- assertEquals(49, o.y = 49)
- assertEquals("y", key)
- assertEquals(49, o.y)
-
- assertEquals(50, o[2] = 50)
- assertEquals("2", key)
- assertEquals(50, o[2])
-
- assertEquals(44, o.p_writable = 44)
- assertEquals("p_writable", key)
- assertEquals(44, o.p_writable)
-
- assertEquals(45, o.p_nonwritable = 45)
- assertEquals("p_nonwritable", key)
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nonwritable"))
-
- assertThrows(function(){ "use strict"; o.p_nonwritable = 45 }, TypeError)
- assertEquals("p_nonwritable", key)
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nonwritable"))
-
- val = ""
- assertEquals(46, o.p_setter = 46)
- assertEquals("p_setter", key)
- assertSame(o, rec)
- assertEquals(46, val) // written to parent
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_setter"))
-
- val = ""
- assertEquals(47, o.p_nosetter = 47)
- assertEquals("p_nosetter", key)
- assertEquals("", val) // not written at all
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nosetter"));
-
- key = ""
- assertThrows(function(){ "use strict"; o.p_nosetter = 50 }, TypeError)
- assertEquals("p_nosetter", key)
- assertEquals("", val) // not written at all
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nosetter"));
-
- assertThrows(function(){ o.p_nonconf = 53 }, TypeError)
- assertEquals("p_nonconf", key)
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nonconf"));
-
- assertThrows(function(){ o.p_throw = 51 }, "myexn")
- assertEquals("p_throw", key)
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_throw"));
-
- assertThrows(function(){ o.p_setterthrow = 52 }, "myexn")
- assertEquals("p_setterthrow", key)
- assertFalse(Object.prototype.hasOwnProperty.call(o, "p_setterthrow"));
-}
-
-
-TestSetForDerived(
- function(k) {
- // TODO(yangguo): issue 2398 - throwing an error causes formatting of
- // the message string, which can be observable through this handler.
- // We ignore keys that occur when formatting the message string.
- if (k == "toString" || k == "valueOf") return;
-
- key = k;
- switch (k) {
- case "p_writable": return {writable: true, configurable: true}
- case "p_nonwritable": return {writable: false, configurable: true}
- case "p_setter": return {
- set: function(x) { rec = this; val = x },
- configurable: true
- }
- case "p_nosetter": return {
- get: function() { return 1 },
- configurable: true
- }
- case "p_nonconf": return {}
- case "p_throw": throw "myexn"
- case "p_setterthrow": return {set: function(x) { throw "myexn" }}
- default: return undefined
- }
- }
-)
-
+// ---------------------------------------------------------------------------
// Evil proxy-induced side-effects shouldn't crash.
-// TODO(rossberg): proper behaviour isn't really spec'ed yet, so ignore results.
-
TestWithProxies(function(create) {
var calls = 0
var handler = {
@@ -716,12 +471,7 @@ TestWithProxies(function(create) {
})
-
-// TODO(rossberg): TestSetReject, returning false
-// TODO(rossberg): TestGetProperty, TestSetProperty
-
-
-
+// ---------------------------------------------------------------------------
// Property definition (Object.defineProperty and Object.defineProperties).
var key
@@ -758,15 +508,15 @@ function TestDefine2(create, handler) {
var attributes = {configurable: true, mine: 66, minetoo: 23}
assertEquals(p, Object.defineProperty(p, "d", attributes))
- assertEquals("d", key)
+ assertEquals("d", key);
// Modifying the attributes object after the fact should have no effect.
attributes.configurable = false
attributes.mine = 77
- delete attributes.minetoo
- assertEquals(3, Object.getOwnPropertyNames(desc).length)
+ delete attributes.minetoo;
+ assertEquals(1, Object.getOwnPropertyNames(desc).length)
assertEquals(true, desc.configurable)
- assertEquals(66, desc.mine)
- assertEquals(23, desc.minetoo)
+ assertEquals(undefined, desc.mine)
+ assertEquals(undefined, desc.minetoo)
assertEquals(p, Object.defineProperty(p, "e", {get: function(){ return 5 }}))
assertEquals("e", key)
@@ -777,18 +527,6 @@ function TestDefine2(create, handler) {
assertEquals("zzz", key)
assertEquals(0, Object.getOwnPropertyNames(desc).length)
- var d = create({
- get: function(r, k) { return (k === "value") ? 77 : void 0 },
- getOwnPropertyNames: function() { return ["value"] },
- enumerate: function() { return ["value"] }
- })
- assertEquals(1, Object.getOwnPropertyNames(d).length)
- assertEquals(77, d.value)
- assertEquals(p, Object.defineProperty(p, "p", d))
- assertEquals("p", key)
- assertEquals(1, Object.getOwnPropertyNames(desc).length)
- assertEquals(77, desc.value)
-
var props = {
'11': {},
blub: {get: function() { return true }},
@@ -804,47 +542,42 @@ function TestDefine2(create, handler) {
assertEquals(undefined, desc.mine) // Arguably a bug in the spec...
var props = {bla: {get value() { throw "myexn" }}}
- assertThrows(function(){ Object.defineProperties(p, props) }, "myexn")
+ assertThrowsEquals(function(){ Object.defineProperties(p, props) }, "myexn")
}
TestDefine({
- defineProperty: function(k, d) { key = k; desc = d; return true }
+ defineProperty(t, k, d) { key = k; desc = d; return true }
})
TestDefine({
- defineProperty: function(k, d) { return this.defineProperty2(k, d) },
- defineProperty2: function(k, d) { key = k; desc = d; return true }
+ defineProperty(t, k, d) { return this.defineProperty2(k, d) },
+ defineProperty2(k, d) { key = k; desc = d; return true }
})
-TestDefine(Proxy.create({
- get: function(pr, pk) {
- return function(k, d) { key = k; desc = d; return true }
- }
-}))
-
+// ---------------------------------------------------------------------------
function TestDefineThrow(handler) {
TestWithProxies(TestDefineThrow2, handler)
}
function TestDefineThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ Object.defineProperty(p, "a", {value: 44})}, "myexn")
- assertThrows(function(){ Object.defineProperty(p, 0, {value: 44})}, "myexn")
+ assertThrowsEquals(() => Object.defineProperty(p, "a", {value: 44}), "myexn")
+ assertThrowsEquals(() => Object.defineProperty(p, 0, {value: 44}), "myexn")
var d1 = create({
get: function(r, k) { throw "myexn" },
getOwnPropertyNames: function() { return ["value"] }
})
- assertThrows(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
+ assertThrowsEquals(function(){ Object.defineProperty(p, "p", d1) }, "myexn")
var d2 = create({
get: function(r, k) { return 77 },
getOwnPropertyNames: function() { throw "myexn" }
})
- assertThrows(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
+ assertThrowsEquals(function(){ Object.defineProperty(p, "p", d2) }, "myexn")
var props = {bla: {get value() { throw "otherexn" }}}
- assertThrows(function(){ Object.defineProperties(p, props) }, "otherexn")
+ assertThrowsEquals(() => Object.defineProperties(p, props), "otherexn")
}
TestDefineThrow({
@@ -856,11 +589,11 @@ TestDefineThrow({
defineProperty2: function(k, d) { throw "myexn" }
})
-TestDefineThrow(Proxy.create({
+TestDefineThrow(new Proxy({}, {
get: function(pr, pk) { throw "myexn" }
}))
-TestDefineThrow(Proxy.create({
+TestDefineThrow(new Proxy({}, {
get: function(pr, pk) {
return function(k, d) { throw "myexn" }
}
@@ -868,6 +601,7 @@ TestDefineThrow(Proxy.create({
+// ---------------------------------------------------------------------------
// Property deletion (delete).
var key
@@ -907,60 +641,61 @@ function TestDelete2(create, handler) {
}
TestDelete({
- delete: function(k) { key = k; return k < "z" }
+ deleteProperty(target, k) { key = k; return k < "z" }
})
TestDelete({
- delete: function(k) { return this.delete2(k) },
+ deleteProperty(target, k) { return this.delete2(k) },
delete2: function(k) { key = k; return k < "z" }
})
-TestDelete(Proxy.create({
- get: function(pr, pk) {
- return function(k) { key = k; return k < "z" }
+TestDelete(new Proxy({}, {
+ get(pt, pk, pr) {
+ return (target, k) => { key = k; return k < "z" }
}
}))
+// ---------------------------------------------------------------------------
function TestDeleteThrow(handler) {
TestWithProxies(TestDeleteThrow2, handler)
}
function TestDeleteThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ delete p.a }, "myexn")
- assertThrows(function(){ delete p["b"] }, "myexn");
- assertThrows(function(){ delete p[3] }, "myexn");
+ assertThrowsEquals(function(){ delete p.a }, "myexn")
+ assertThrowsEquals(function(){ delete p["b"] }, "myexn");
+ assertThrowsEquals(function(){ delete p[3] }, "myexn");
(function() {
"use strict"
- assertThrows(function(){ delete p.c }, "myexn")
- assertThrows(function(){ delete p["d"] }, "myexn")
- assertThrows(function(){ delete p[4] }, "myexn");
+ assertThrowsEquals(function(){ delete p.c }, "myexn")
+ assertThrowsEquals(function(){ delete p["d"] }, "myexn")
+ assertThrowsEquals(function(){ delete p[4] }, "myexn");
})()
}
TestDeleteThrow({
- delete: function(k) { throw "myexn" }
+ deleteProperty(t, k) { throw "myexn" }
})
TestDeleteThrow({
- delete: function(k) { return this.delete2(k) },
- delete2: function(k) { throw "myexn" }
+ deleteProperty(t, k) { return this.delete2(k) },
+ delete2(k) { throw "myexn" }
})
-TestDeleteThrow(Proxy.create({
- get: function(pr, pk) { throw "myexn" }
+TestDeleteThrow(new Proxy({}, {
+ get(pt, pk, pr) { throw "myexn" }
}))
-TestDeleteThrow(Proxy.create({
- get: function(pr, pk) {
- return function(k) { throw "myexn" }
+TestDeleteThrow(new Proxy({}, {
+ get(pt, pk, pr) {
+ return (k) => { throw "myexn" }
}
}))
-
+// ---------------------------------------------------------------------------
// Property descriptors (Object.getOwnPropertyDescriptor).
function TestDescriptor(handler) {
@@ -989,26 +724,27 @@ function TestDescriptor2(create, handler) {
}
TestDescriptor({
- defineProperty: function(k, d) { this["__" + k] = d; return true },
- getOwnPropertyDescriptor: function(k) { return this["__" + k] }
+ defineProperty(t, k, d) { this["__" + k] = d; return true },
+ getOwnPropertyDescriptor(t, k) { return this["__" + k] }
})
TestDescriptor({
- defineProperty: function(k, d) { this["__" + k] = d; return true },
- getOwnPropertyDescriptor: function(k) {
+ defineProperty(t, k, d) { this["__" + k] = d; return true },
+ getOwnPropertyDescriptor(t, k) {
return this.getOwnPropertyDescriptor2(k)
},
getOwnPropertyDescriptor2: function(k) { return this["__" + k] }
})
+// ---------------------------------------------------------------------------
function TestDescriptorThrow(handler) {
TestWithProxies(TestDescriptorThrow2, handler)
}
function TestDescriptorThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
+ assertThrowsEquals(() => Object.getOwnPropertyDescriptor(p, "a"), "myexn")
}
TestDescriptorThrow({
@@ -1024,6 +760,7 @@ TestDescriptorThrow({
+// ---------------------------------------------------------------------------
// Comparison.
function TestComparison(eq) {
@@ -1052,19 +789,20 @@ TestComparison(function(o1, o2) { return !(o1 !== o2) })
// Type (typeof).
function TestTypeof() {
- assertEquals("object", typeof Proxy.create({}))
- assertTrue(typeof Proxy.create({}) == "object")
- assertTrue("object" == typeof Proxy.create({}))
+ assertEquals("object", typeof new Proxy({},{}))
+ assertTrue(typeof new Proxy({}, {}) == "object")
+ assertTrue("object" == typeof new Proxy({},{}))
- assertEquals("function", typeof Proxy.createFunction({}, function() {}))
- assertTrue(typeof Proxy.createFunction({}, function() {}) == "function")
- assertTrue("function" == typeof Proxy.createFunction({}, function() {}))
+ assertEquals("function", typeof new Proxy(function() {}, {}))
+ assertTrue(typeof new Proxy(function() {}, {}) == "function")
+ assertTrue("function" == typeof new Proxy(function() {},{}))
}
TestTypeof()
+// ---------------------------------------------------------------------------
// Membership test (in).
var key
@@ -1112,60 +850,35 @@ function TestIn2(create, handler) {
}
TestIn({
- has: function(k) { key = k; return k < "z" }
-})
-
-TestIn({
- has: function(k) { return this.has2(k) },
- has2: function(k) { key = k; return k < "z" }
+ has(t, k) { key = k; return k < "z" }
})
TestIn({
- getPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42} : void 0
- }
+ has(t, k) { return this.has2(k) },
+ has2(k) { key = k; return k < "z" }
})
-TestIn({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- key = k; return k < "z" ? {value: 42} : void 0
- }
-})
-
-TestIn({
- getPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {get value() { return 42 }} : void 0
- }
-})
-
-TestIn({
- has: undefined,
- getPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42} : void 0
- }
-})
-
-TestIn(Proxy.create({
- get: function(pr, pk) {
- return function(k) { key = k; return k < "z" }
+TestIn(new Proxy({},{
+ get(pt, pk, pr) {
+ return (t, k) => { key = k; return k < "z" }
}
}))
+// ---------------------------------------------------------------------------
function TestInThrow(handler) {
TestWithProxies(TestInThrow2, handler)
}
function TestInThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ return "a" in o }, "myexn")
- assertThrows(function(){ return 99 in o }, "myexn")
- assertThrows(function(){ return !("a" in o) }, "myexn")
- assertThrows(function(){ return ("a" in o) ? 2 : 3 }, "myexn")
- assertThrows(function(){ if ("b" in o) {} }, "myexn")
- assertThrows(function(){ if (!("b" in o)) {} }, "myexn")
- assertThrows(function(){ if ("zzz" in o) {} }, "myexn")
+ assertThrowsEquals(function(){ return "a" in p }, "myexn")
+ assertThrowsEquals(function(){ return 99 in p }, "myexn")
+ assertThrowsEquals(function(){ return !("a" in p) }, "myexn")
+ assertThrowsEquals(function(){ return ("a" in p) ? 2 : 3 }, "myexn")
+ assertThrowsEquals(function(){ if ("b" in p) {} }, "myexn")
+ assertThrowsEquals(function(){ if (!("b" in p)) {} }, "myexn")
+ assertThrowsEquals(function(){ if ("zzz" in p) {} }, "myexn")
}
TestInThrow({
@@ -1177,184 +890,19 @@ TestInThrow({
has2: function(k) { throw "myexn" }
})
-TestInThrow({
- getPropertyDescriptor: function(k) { throw "myexn" }
-})
-
-TestInThrow({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) { throw "myexn" }
-})
-
-TestInThrow({
- has: undefined,
- getPropertyDescriptor: function(k) { throw "myexn" }
-})
-
-TestInThrow(Proxy.create({
+TestInThrow(new Proxy({},{
get: function(pr, pk) { throw "myexn" }
}))
-TestInThrow(Proxy.create({
+TestInThrow(new Proxy({},{
get: function(pr, pk) {
return function(k) { throw "myexn" }
}
}))
-function TestInForDerived(handler) {
- TestWithProxies(TestInForDerived2, handler)
-}
-
-function TestInForDerived2(create, handler) {
- var p = create(handler)
- var o = Object.create(p)
-
- assertTrue("a" in o)
- assertEquals("a", key)
- assertTrue(99 in o)
- assertEquals("99", key)
- assertFalse("z" in o)
- assertEquals("z", key)
-
- assertEquals(2, ("a" in o) ? 2 : 0)
- assertEquals(0, !("a" in o) ? 2 : 0)
- assertEquals(0, ("zzz" in o) ? 2 : 0)
- assertEquals(2, !("zzz" in o) ? 2 : 0)
-
- if ("b" in o) {
- } else {
- assertTrue(false)
- }
- assertEquals("b", key)
-
- if ("zz" in o) {
- assertTrue(false)
- }
- assertEquals("zz", key)
-
- if (!("c" in o)) {
- assertTrue(false)
- }
- assertEquals("c", key)
-
- if (!("zzz" in o)) {
- } else {
- assertTrue(false)
- }
- assertEquals("zzz", key)
-}
-
-TestInForDerived({
- getPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
- getPropertyDescriptor2: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getPropertyDescriptor: function(k) {
- key = k;
- return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
- }
-})
-
-/* TODO(rossberg): this will work once we implement the newest proposal
- * regarding default traps for getPropertyDescriptor.
-TestInForDerived({
- getOwnPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getOwnPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor2(k)
- },
- getOwnPropertyDescriptor2: function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
-})
-
-TestInForDerived({
- getOwnPropertyDescriptor: function(k) {
- key = k;
- return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
- }
-})
-*/
-
-TestInForDerived(Proxy.create({
- get: function(pr, pk) {
- return function(k) {
- key = k; return k < "z" ? {value: 42, configurable: true} : void 0
- }
- }
-}))
-
-
-
-// Property descriptor conversion.
-
-var descget
-
-function TestDescriptorGetOrder(handler) {
- var p = Proxy.create(handler)
- var o = Object.create(p, {b: {value: 0}})
- TestDescriptorGetOrder2(function(n) { return p[n] }, "vV")
- TestDescriptorGetOrder2(function(n) { return n in p }, "")
- TestDescriptorGetOrder2(function(n) { return o[n] }, "vV")
- TestDescriptorGetOrder2(function(n) { return n in o }, "")
-}
-
-function TestDescriptorGetOrder2(f, access) {
- descget = ""
- assertTrue(f("a"))
- assertEquals(access, descget)
- descget = ""
- assertTrue(f(99))
- assertEquals(access, descget)
- descget = ""
- assertFalse(!!f("z"))
- assertEquals("", descget)
-}
-
-TestDescriptorGetOrder({
- getPropertyDescriptor: function(k) {
- if (k >= "z") return void 0
- // Return a proxy as property descriptor, so that we can log accesses.
- return Proxy.create({
- get: function(r, attr) {
- descget += attr[0].toUpperCase()
- return true
- },
- has: function(attr) {
- descget += attr[0]
- switch (attr) {
- case "writable":
- case "enumerable":
- case "configurable":
- case "value":
- return true
- case "get":
- case "set":
- return false
- default:
- assertUnreachable()
- }
- }
- })
- }
-})
-
-
+// ---------------------------------------------------------------------------
// Own Properties (Object.prototype.hasOwnProperty).
var key
@@ -1374,105 +922,52 @@ function TestHasOwn2(create, handler) {
}
TestHasOwn({
- hasOwn: function(k) { key = k; return k < "z" }
-})
-
-TestHasOwn({
- hasOwn: function(k) { return this.hasOwn2(k) },
- hasOwn2: function(k) { key = k; return k < "z" }
-})
-
-TestHasOwn({
- getOwnPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42} : void 0
- }
-})
-
-TestHasOwn({
- getOwnPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor2(k)
+ getOwnPropertyDescriptor(t, k) {
+ key = k; if (k < "z") return {configurable: true}
},
- getOwnPropertyDescriptor2: function(k) {
- key = k; return k < "z" ? {value: 42} : void 0
- }
+ has() { assertUnreachable() }
})
TestHasOwn({
- getOwnPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {get value() { return 42 }} : void 0
- }
-})
-
-TestHasOwn({
- hasOwn: undefined,
- getOwnPropertyDescriptor: function(k) {
- key = k; return k < "z" ? {value: 42} : void 0
+ getOwnPropertyDescriptor(t, k) { return this.getOwnPropertyDescriptor2(k) },
+ getOwnPropertyDescriptor2(k) {
+ key = k; if (k < "z") return {configurable: true}
}
})
-TestHasOwn(Proxy.create({
- get: function(pr, pk) {
- return function(k) { key = k; return k < "z" }
- }
-}))
+// ---------------------------------------------------------------------------
function TestHasOwnThrow(handler) {
TestWithProxies(TestHasOwnThrow2, handler)
}
function TestHasOwnThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, "a")},
+ assertThrowsEquals(function(){ Object.prototype.hasOwnProperty.call(p, "a")},
"myexn")
- assertThrows(function(){ Object.prototype.hasOwnProperty.call(p, 99)},
+ assertThrowsEquals(function(){ Object.prototype.hasOwnProperty.call(p, 99)},
"myexn")
}
TestHasOwnThrow({
- hasOwn: function(k) { throw "myexn" }
-})
-
-TestHasOwnThrow({
- hasOwn: function(k) { return this.hasOwn2(k) },
- hasOwn2: function(k) { throw "myexn" }
-})
-
-TestHasOwnThrow({
- getOwnPropertyDescriptor: function(k) { throw "myexn" }
+ getOwnPropertyDescriptor(t, k) { throw "myexn" }
})
TestHasOwnThrow({
- getOwnPropertyDescriptor: function(k) {
- return this.getOwnPropertyDescriptor2(k)
- },
- getOwnPropertyDescriptor2: function(k) { throw "myexn" }
-})
-
-TestHasOwnThrow({
- hasOwn: undefined,
- getOwnPropertyDescriptor: function(k) { throw "myexn" }
-})
-
-TestHasOwnThrow(Proxy.create({
- get: function(pr, pk) { throw "myexn" }
-}))
-
-TestHasOwnThrow(Proxy.create({
- get: function(pr, pk) {
- return function(k) { throw "myexn" }
- }
-}))
-
+ getOwnPropertyDescriptor(t, k) { return this.getOwnPropertyDescriptor2(k) },
+ getOwnPropertyDescriptor2(k) { throw "myexn" }
+});
+// ---------------------------------------------------------------------------
// Instanceof (instanceof)
-function TestProxyInstanceof() {
+(function TestProxyInstanceof() {
var o1 = {}
- var p1 = Proxy.create({})
- var p2 = Proxy.create({}, o1)
- var p3 = Proxy.create({}, p2)
+ var p1 = new Proxy({}, {})
+ var p2 = new Proxy(o1, {})
+ var p3 = new Proxy(p2, {})
var o2 = Object.create(p2)
var f0 = function() {}
@@ -1489,35 +984,33 @@ function TestProxyInstanceof() {
assertFalse(o1 instanceof f1)
assertFalse(o1 instanceof f2)
assertFalse(o1 instanceof f3)
- assertFalse(p1 instanceof Object)
+ assertTrue(p1 instanceof Object)
assertFalse(p1 instanceof f0)
assertFalse(p1 instanceof f1)
assertFalse(p1 instanceof f2)
assertFalse(p1 instanceof f3)
assertTrue(p2 instanceof Object)
- assertTrue(p2 instanceof f0)
+ assertFalse(p2 instanceof f0)
assertFalse(p2 instanceof f1)
assertFalse(p2 instanceof f2)
assertFalse(p2 instanceof f3)
assertTrue(p3 instanceof Object)
- assertTrue(p3 instanceof f0)
+ assertFalse(p3 instanceof f0)
assertFalse(p3 instanceof f1)
- assertTrue(p3 instanceof f2)
+ assertFalse(p3 instanceof f2)
assertFalse(p3 instanceof f3)
assertTrue(o2 instanceof Object)
- assertTrue(o2 instanceof f0)
+ assertFalse(o2 instanceof f0)
assertFalse(o2 instanceof f1)
assertTrue(o2 instanceof f2)
assertFalse(o2 instanceof f3)
- var f = Proxy.createFunction({}, function() {})
+ var f = new Proxy(function() {}, {})
assertTrue(f instanceof Function)
-}
+})();
-TestProxyInstanceof()
-
-function TestInstanceofProxy() {
+(function TestInstanceofProxy() {
var o0 = Object.create(null)
var o1 = {}
var o2 = Object.create(o0)
@@ -1526,12 +1019,12 @@ function TestInstanceofProxy() {
var o5 = Object.create(o3)
function handler(o) { return {get: function() { return o } } }
- var f0 = Proxy.createFunction(handler(o0), function() {})
- var f1 = Proxy.createFunction(handler(o1), function() {})
- var f2 = Proxy.createFunction(handler(o2), function() {})
- var f3 = Proxy.createFunction(handler(o3), function() {})
- var f4 = Proxy.createFunction(handler(o4), function() {})
- var f5 = Proxy.createFunction(handler(o4), function() {})
+ var f0 = new Proxy(function() {}, handler(o0))
+ var f1 = new Proxy(function() {}, handler(o1))
+ var f2 = new Proxy(function() {}, handler(o2))
+ var f3 = new Proxy(function() {}, handler(o3))
+ var f4 = new Proxy(function() {}, handler(o4))
+ var f5 = new Proxy(function() {}, handler(o4))
assertFalse(null instanceof f0)
assertFalse(o0 instanceof f0)
@@ -1571,532 +1064,203 @@ function TestInstanceofProxy() {
assertFalse(o5 instanceof f4)
assertFalse(o5 instanceof f5)
- var f = Proxy.createFunction({}, function() {})
- var ff = Proxy.createFunction(handler(Function), function() {})
+ var f = new Proxy(function() {}, {})
+ var ff = new Proxy(function() {}, handler(Function))
assertTrue(f instanceof Function)
assertFalse(f instanceof ff)
-}
-
-TestInstanceofProxy()
-
+})();
+// ---------------------------------------------------------------------------
// Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
-function TestPrototype() {
+(function TestPrototype() {
var o1 = {}
- var p1 = Proxy.create({})
- var p2 = Proxy.create({}, o1)
- var p3 = Proxy.create({}, p2)
- var p4 = Proxy.create({}, null)
+ var p1 = new Proxy({}, {})
+ var p2 = new Proxy(o1, {})
+ var p3 = new Proxy(p2, {})
var o2 = Object.create(p3)
assertSame(Object.getPrototypeOf(o1), Object.prototype)
- assertSame(Object.getPrototypeOf(p1), null)
- assertSame(Object.getPrototypeOf(p2), o1)
- assertSame(Object.getPrototypeOf(p3), p2)
- assertSame(Object.getPrototypeOf(p4), null)
+ assertSame(Object.getPrototypeOf(p1), Object.prototype)
+ assertSame(Object.getPrototypeOf(p2), Object.prototype)
+ assertSame(Object.getPrototypeOf(p3), Object.prototype)
assertSame(Object.getPrototypeOf(o2), p3)
assertTrue(Object.prototype.isPrototypeOf(o1))
- assertFalse(Object.prototype.isPrototypeOf(p1))
+ assertTrue(Object.prototype.isPrototypeOf(p1))
assertTrue(Object.prototype.isPrototypeOf(p2))
assertTrue(Object.prototype.isPrototypeOf(p3))
- assertFalse(Object.prototype.isPrototypeOf(p4))
assertTrue(Object.prototype.isPrototypeOf(o2))
assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o1))
- assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
+ assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o2))
assertFalse(Object.prototype.isPrototypeOf.call(o1, o1))
assertFalse(Object.prototype.isPrototypeOf.call(o1, p1))
- assertTrue(Object.prototype.isPrototypeOf.call(o1, p2))
- assertTrue(Object.prototype.isPrototypeOf.call(o1, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(o1, p4))
- assertTrue(Object.prototype.isPrototypeOf.call(o1, o2))
+ assertFalse(Object.prototype.isPrototypeOf.call(o1, p2))
+ assertFalse(Object.prototype.isPrototypeOf.call(o1, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(o1, o2))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
assertFalse(Object.prototype.isPrototypeOf.call(p1, o1))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
assertFalse(Object.prototype.isPrototypeOf.call(p1, o2))
assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
- assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
- assertTrue(Object.prototype.isPrototypeOf.call(p2, o2))
+ assertFalse(Object.prototype.isPrototypeOf.call(p2, p3))
+ assertFalse(Object.prototype.isPrototypeOf.call(p2, o2))
assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
assertTrue(Object.prototype.isPrototypeOf.call(p3, o2))
assertFalse(Object.prototype.isPrototypeOf.call(o2, o1))
assertFalse(Object.prototype.isPrototypeOf.call(o2, p1))
assertFalse(Object.prototype.isPrototypeOf.call(o2, p2))
assertFalse(Object.prototype.isPrototypeOf.call(o2, p3))
- assertFalse(Object.prototype.isPrototypeOf.call(o2, p4))
assertFalse(Object.prototype.isPrototypeOf.call(o2, o2))
- var f = Proxy.createFunction({}, function() {})
+ var f = new Proxy(function() {}, {})
assertSame(Object.getPrototypeOf(f), Function.prototype)
assertTrue(Object.prototype.isPrototypeOf(f))
assertTrue(Object.prototype.isPrototypeOf.call(Function.prototype, f))
-}
-
-TestPrototype()
-
-
-
-// Property names (Object.getOwnPropertyNames, Object.keys).
-
-function TestPropertyNames(names, handler) {
- TestWithProxies(TestPropertyNames2, handler, names)
-}
-
-function TestPropertyNames2(create, handler, names) {
- var p = create(handler)
- assertArrayEquals(names, Object.getOwnPropertyNames(p))
-}
-
-TestPropertyNames([], {
- getOwnPropertyNames: function() { return [] }
-})
-
-TestPropertyNames(["a", "zz", " ", "0", "toString"], {
- getOwnPropertyNames: function() { return ["a", "zz", " ", 0, "toString"] }
-})
-
-TestPropertyNames(["throw", "function "], {
- getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
- getOwnPropertyNames2: function() { return ["throw", "function "] }
-})
-
-TestPropertyNames(["[object Object]"], {
- get getOwnPropertyNames() {
- return function() { return [{}] }
- }
-})
+})();
+// ---------------------------------------------------------------------------
function TestPropertyNamesThrow(handler) {
TestWithProxies(TestPropertyNamesThrow2, handler)
}
function TestPropertyNamesThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ Object.getOwnPropertyNames(p) }, "myexn")
+ assertThrowsEquals(function(){ Object.getOwnPropertyNames(p) }, "myexn")
}
TestPropertyNamesThrow({
- getOwnPropertyNames: function() { throw "myexn" }
+ ownKeys() { throw "myexn" }
})
TestPropertyNamesThrow({
- getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
- getOwnPropertyNames2: function() { throw "myexn" }
+ ownKeys() { return this.getOwnPropertyNames2() },
+ getOwnPropertyNames2() { throw "myexn" }
})
+// ---------------------------------------------------------------------------
function TestKeys(names, handler) {
- TestWithProxies(TestKeys2, handler, names)
-}
-
-function TestKeys2(create, handler, names) {
- var p = create(handler)
+ var p = new Proxy({}, handler);
assertArrayEquals(names, Object.keys(p))
}
TestKeys([], {
- keys: function() { return [] }
+ ownKeys() { return [] }
+})
+
+TestKeys([], {
+ ownKeys() { return ["a", "zz", " ", "0", "toString"] }
})
TestKeys(["a", "zz", " ", "0", "toString"], {
- keys: function() { return ["a", "zz", " ", 0, "toString"] }
+ ownKeys() { return ["a", "zz", " ", "0", "toString"] },
+ getOwnPropertyDescriptor(t, p) {
+ return {configurable: true, enumerable: true}
+ }
})
-TestKeys(["throw", "function "], {
- keys: function() { return this.keys2() },
- keys2: function() { return ["throw", "function "] }
+TestKeys([], {
+ ownKeys() { return this.keys2() },
+ keys2() { return ["throw", "function "] }
})
-TestKeys(["[object Object]"], {
- get keys() {
- return function() { return [{}] }
+TestKeys(["throw", "function "], {
+ ownKeys() { return this.keys2() },
+ keys2() { return ["throw", "function "] },
+ getOwnPropertyDescriptor(t, p) {
+ return {configurable: true, enumerable: true}
}
})
TestKeys(["a", "0"], {
- getOwnPropertyNames: function() { return ["a", 23, "zz", "", 0] },
- getOwnPropertyDescriptor: function(k) {
- return k == "" ? undefined : {enumerable: k.length == 1}
+ ownKeys() { return ["a", "23", "zz", "", "0"] },
+ getOwnPropertyDescriptor(t, k) {
+ return k == "" ?
+ undefined :
+ { configurable: true, enumerable: k.length == 1}
}
})
TestKeys(["23", "zz", ""], {
- getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
- getOwnPropertyNames2: function() { return ["a", 23, "zz", "", 0] },
- getOwnPropertyDescriptor: function(k) {
+ ownKeys() { return this.getOwnPropertyNames2() },
+ getOwnPropertyNames2() { return ["a", "23", "zz", "", "0"] },
+ getOwnPropertyDescriptor(t, k) {
return this.getOwnPropertyDescriptor2(k)
},
- getOwnPropertyDescriptor2: function(k) { return {enumerable: k.length != 1} }
-})
-
-TestKeys(["a", "b", "c", "5"], {
- get getOwnPropertyNames() {
- return function() { return ["0", 4, "a", "b", "c", 5, "ety"] }
- },
- get getOwnPropertyDescriptor() {
- return function(k) {
- return k == "ety" ? undefined : {enumerable: k >= "44"}
- }
+ getOwnPropertyDescriptor2(k) {
+ return {configurable: true, enumerable: k.length != 1 }
}
})
TestKeys([], {
- get getOwnPropertyNames() {
+ get ownKeys() {
return function() { return ["a", "b", "c"] }
},
- getOwnPropertyDescriptor: function(k) { return {} }
+ getOwnPropertyDescriptor: function(k) { return {configurable: true} }
})
+// ---------------------------------------------------------------------------
function TestKeysThrow(handler) {
TestWithProxies(TestKeysThrow2, handler)
}
function TestKeysThrow2(create, handler) {
- var p = create(handler)
- assertThrows(function(){ Object.keys(p) }, "myexn")
+ var p = create(handler);
+ assertThrowsEquals(function(){ Object.keys(p) }, "myexn");
}
TestKeysThrow({
- keys: function() { throw "myexn" }
-})
-
-TestKeysThrow({
- keys: function() { return this.keys2() },
- keys2: function() { throw "myexn" }
-})
-
-TestKeysThrow({
- getOwnPropertyNames: function() { throw "myexn" },
- getOwnPropertyDescriptor: function(k) { return true }
+ ownKeys() { throw "myexn" }
})
TestKeysThrow({
- getOwnPropertyNames: function() { return [1, 2] },
- getOwnPropertyDescriptor: function(k) { throw "myexn" }
+ ownKeys() { return this.keys2() },
+ keys2() { throw "myexn" }
})
TestKeysThrow({
- getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
- getOwnPropertyNames2: function() { throw "myexn" },
+ ownKeys() { return ['1'] },
+ getOwnPropertyDescriptor: function() { throw "myexn" },
})
TestKeysThrow({
- getOwnPropertyNames: function() { return this.getOwnPropertyNames2() },
- getOwnPropertyNames2: function() { return [1, 2] },
- getOwnPropertyDescriptor: function(k) {
+ ownKeys() { return this.getOwnPropertyNames2() },
+ getOwnPropertyNames2() { return ['1', '2'] },
+ getOwnPropertyDescriptor(k) {
return this.getOwnPropertyDescriptor2(k)
},
- getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+ getOwnPropertyDescriptor2(k) { throw "myexn" }
})
TestKeysThrow({
- get getOwnPropertyNames() { throw "myexn" }
+ get ownKeys() { throw "myexn" }
})
TestKeysThrow({
- get getOwnPropertyNames() {
+ get ownKeys() {
return function() { throw "myexn" }
},
})
TestKeysThrow({
- get getOwnPropertyNames() {
- return function() { return [1, 2] }
+ get ownKeys() {
+ return function() { return ['1', '2'] }
},
- getOwnPropertyDescriptor: function(k) { throw "myexn" }
-})
-
-
-
-// Fixing (Object.freeze, Object.seal, Object.preventExtensions,
-// Object.isFrozen, Object.isSealed, Object.isExtensible)
-
-function TestFix(names, handler) {
- var proto = {p: 77}
- var assertFixing = function(o, s, f, e) {
- assertEquals(s, Object.isSealed(o))
- assertEquals(f, Object.isFrozen(o))
- assertEquals(e, Object.isExtensible(o))
- }
-
- var p1 = Proxy.create(handler, proto)
- assertFixing(p1, false, false, true)
- Object.seal(p1)
- assertFixing(p1, true, names.length === 0, false)
- assertArrayEquals(names.sort(), Object.getOwnPropertyNames(p1).sort())
- assertArrayEquals(names.filter(function(x) {return x < "z"}).sort(),
- Object.keys(p1).sort())
- assertEquals(proto, Object.getPrototypeOf(p1))
- assertEquals(77, p1.p)
- for (var n in p1) {
- var desc = Object.getOwnPropertyDescriptor(p1, n)
- if (desc !== undefined) assertFalse(desc.configurable)
- }
-
- var p2 = Proxy.create(handler, proto)
- assertFixing(p2, false, false, true)
- Object.freeze(p2)
- assertFixing(p2, true, true, false)
- assertArrayEquals(names.sort(), Object.getOwnPropertyNames(p2).sort())
- assertArrayEquals(names.filter(function(x) {return x < "z"}).sort(),
- Object.keys(p2).sort())
- assertEquals(proto, Object.getPrototypeOf(p2))
- assertEquals(77, p2.p)
- for (var n in p2) {
- var desc = Object.getOwnPropertyDescriptor(p2, n)
- if (desc !== undefined) assertFalse(desc.writable)
- if (desc !== undefined) assertFalse(desc.configurable)
- }
-
- var p3 = Proxy.create(handler, proto)
- assertFixing(p3, false, false, true)
- Object.preventExtensions(p3)
- assertFixing(p3, names.length === 0, names.length === 0, false)
- assertArrayEquals(names.sort(), Object.getOwnPropertyNames(p3).sort())
- assertArrayEquals(names.filter(function(x) {return x < "z"}).sort(),
- Object.keys(p3).sort())
- assertEquals(proto, Object.getPrototypeOf(p3))
- assertEquals(77, p3.p)
-
- var p = Proxy.create(handler, proto)
- var o = Object.create(p)
- assertFixing(p, false, false, true)
- assertFixing(o, false, false, true)
- Object.freeze(o)
- assertFixing(p, false, false, true)
- assertFixing(o, true, true, false)
-}
-
-TestFix([], {
- fix: function() { return {} }
-})
-
-TestFix(["a", "b", "c", "3", "zz"], {
- fix: function() {
- return {
- a: {value: "a", writable: true, configurable: false, enumerable: true},
- b: {value: 33, writable: false, configurable: false, enumerable: true},
- c: {value: 0, writable: true, configurable: true, enumerable: true},
- '3': {value: true, writable: false, configurable: true, enumerable: true},
- zz: {value: 0, enumerable: false}
- }
- }
-})
-
-TestFix(["a"], {
- fix: function() { return this.fix2() },
- fix2: function() {
- return {a: {value: 4, writable: true, configurable: true, enumerable: true}}
- }
-})
-
-TestFix(["b"], {
- get fix() {
- return function() {
- return {b: {configurable: true, writable: true, enumerable: true}}
- }
- }
-})
-
-
-function TestFixFunction(fix) {
- var f1 = Proxy.createFunction({
- fix: function() { return {} }
- }, function() {})
- fix(f1)
- assertEquals(0, f1.length)
-
- var f2 = Proxy.createFunction({
- fix: function() { return {length: {value: 3}} }
- }, function() {})
- fix(f2)
- assertEquals(3, f2.length)
-
- var f3 = Proxy.createFunction({
- fix: function() { return {length: {value: "huh"}} }
- }, function() {})
- fix(f3)
- assertEquals(0, f1.length)
-}
-
-TestFixFunction(Object.seal)
-TestFixFunction(Object.freeze)
-TestFixFunction(Object.preventExtensions)
-
-
-function TestFixThrow(handler) {
- TestWithProxies(TestFixThrow2, handler)
-}
-
-function TestFixThrow2(create, handler) {
- var p = create(handler, {})
- assertThrows(function(){ Object.seal(p) }, "myexn")
- assertThrows(function(){ Object.freeze(p) }, "myexn")
- assertThrows(function(){ Object.preventExtensions(p) }, "myexn")
-}
-
-TestFixThrow({
- fix: function() { throw "myexn" }
-})
-
-TestFixThrow({
- fix: function() { return this.fix2() },
- fix2: function() { throw "myexn" }
-})
-
-TestFixThrow({
- get fix() { throw "myexn" }
-})
-
-TestFixThrow({
- get fix() {
- return function() { throw "myexn" }
- }
-})
-
-
-// Freeze a proxy in the middle of operations on it.
-// TODO(rossberg): actual behaviour not specified consistently at the moment,
-// just make sure that we do not crash.
-function TestReentrantFix(f) {
- TestWithProxies(f, Object.freeze)
- TestWithProxies(f, Object.seal)
- TestWithProxies(f, Object.preventExtensions)
-}
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get get() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while getting get trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get: function() { freeze(p); return 3 },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing get trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing default get trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return {get: function(){}} },
- fix: function() { return {} }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Freeze while getting a property from prototype.
- try { o.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get set() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while getting set trap.
- try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- set: function() { freeze(p); return true },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing set trap.
- try { p.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getOwnPropertyDescriptor: function() { freeze(p); return undefined },
- fix: function() { return {} }
- }
- var p = create(handler)
- // Freeze while executing default set trap.
- try { p.x } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
- fix: function() { return {} }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Freeze while setting a property in prototype, dropping the property!
- try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- getPropertyDescriptor: function() { freeze(p); return {set: function(){}} },
- fix: function() { return {x: {get: function(){}}} }
- }
- var p = create(handler)
- var o = Object.create(p)
- // Freeze while setting a property in prototype, making it read-only!
- try { o.x = 4 } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- get fix() { freeze(p); return function(){} }
- }
- var p = create(handler)
- // Freeze while getting fix trap.
- try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
-})
-
-TestReentrantFix(function(create, freeze) {
- var handler = {
- fix: function() { freeze(p); return {} }
- }
- var p = create(handler)
- // Freeze while executing fix trap.
- try { Object.freeze(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.seal(p) } catch (e) { assertInstanceof(e, Error) }
- p = create(handler)
- try { Object.preventExtensions(p) } catch (e) { assertInstanceof(e, Error) }
+ getOwnPropertyDescriptor(k) { throw "myexn" }
})
+// ---------------------------------------------------------------------------
// String conversion (Object.prototype.toString,
// Object.prototype.toLocaleString,
// Function.prototype.toString)
@@ -2104,25 +1268,25 @@ TestReentrantFix(function(create, freeze) {
var key
function TestToString(handler) {
- var p = Proxy.create(handler)
+ var p = new Proxy({}, handler)
key = ""
assertEquals("[object Object]", Object.prototype.toString.call(p))
- assertEquals("", key)
+ assertEquals(Symbol.toStringTag, key)
assertEquals("my_proxy", Object.prototype.toLocaleString.call(p))
assertEquals("toString", key)
- var f = Proxy.createFunction(handler, function() {})
+ var f = new Proxy(function() {}, handler)
key = ""
assertEquals("[object Function]", Object.prototype.toString.call(f))
- assertEquals("", key)
+ assertEquals(Symbol.toStringTag, key)
assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
assertEquals("toString", key)
- assertDoesNotThrow(function(){ Function.prototype.toString.call(f) })
+ assertThrows(function(){ Function.prototype.toString.call(f) })
var o = Object.create(p)
key = ""
assertEquals("[object Object]", Object.prototype.toString.call(o))
- assertEquals("", key)
+ assertEquals(Symbol.toStringTag, key)
assertEquals("my_proxy", Object.prototype.toLocaleString.call(o))
assertEquals("toString", key)
}
@@ -2136,7 +1300,7 @@ TestToString({
get2: function(r, k) { key = k; return function() { return "my_proxy" } }
})
-TestToString(Proxy.create({
+TestToString(new Proxy({}, {
get: function(pr, pk) {
return function(r, k) { key = k; return function() { return "my_proxy" } }
}
@@ -2144,17 +1308,17 @@ TestToString(Proxy.create({
function TestToStringThrow(handler) {
- var p = Proxy.create(handler)
- assertEquals("[object Object]", Object.prototype.toString.call(p))
- assertThrows(function(){ Object.prototype.toLocaleString.call(p) }, "myexn")
+ var p = new Proxy({}, handler)
+ assertThrowsEquals(() => Object.prototype.toString.call(p), "myexn")
+ assertThrowsEquals(() => Object.prototype.toLocaleString.call(p), "myexn")
- var f = Proxy.createFunction(handler, function() {})
- assertEquals("[object Function]", Object.prototype.toString.call(f))
- assertThrows(function(){ Object.prototype.toLocaleString.call(f) }, "myexn")
+ var f = new Proxy(function(){}, handler)
+ assertThrowsEquals(() => Object.prototype.toString.call(f), "myexn")
+ assertThrowsEquals(() => Object.prototype.toLocaleString.call(f), "myexn")
var o = Object.create(p)
- assertEquals("[object Object]", Object.prototype.toString.call(o))
- assertThrows(function(){ Object.prototype.toLocaleString.call(o) }, "myexn")
+ assertThrowsEquals(() => Object.prototype.toString.call(o), "myexn")
+ assertThrowsEquals(() => Object.prototype.toLocaleString.call(o), "myexn")
}
TestToStringThrow({
@@ -2162,26 +1326,22 @@ TestToStringThrow({
})
TestToStringThrow({
- get: function(r, k) { return function() { throw "myexn" } }
-})
-
-TestToStringThrow({
get: function(r, k) { return this.get2(r, k) },
get2: function(r, k) { throw "myexn" }
})
-TestToStringThrow(Proxy.create({
+TestToStringThrow(new Proxy({}, {
get: function(pr, pk) { throw "myexn" }
}))
-TestToStringThrow(Proxy.create({
+TestToStringThrow(new Proxy({}, {
get: function(pr, pk) {
return function(r, k) { throw "myexn" }
}
}))
-
+// ---------------------------------------------------------------------------
// Value conversion (Object.prototype.toValue)
function TestValueOf(handler) {
@@ -2197,6 +1357,7 @@ TestValueOf({})
+// ---------------------------------------------------------------------------
// Enumerability (Object.prototype.propertyIsEnumerable)
var key
@@ -2221,45 +1382,50 @@ function TestIsEnumerable2(create, handler) {
}
TestIsEnumerable({
- getOwnPropertyDescriptor: function(k) {
- key = k; return {enumerable: k < "z", configurable: true}
+ getOwnPropertyDescriptor(t, k) {
+ key = k;
+ return {enumerable: k < "z", configurable: true}
},
})
TestIsEnumerable({
- getOwnPropertyDescriptor: function(k) {
+ getOwnPropertyDescriptor: function(t, k) {
return this.getOwnPropertyDescriptor2(k)
},
getOwnPropertyDescriptor2: function(k) {
- key = k; return {enumerable: k < "z", configurable: true}
+ key = k;
+ return {enumerable: k < "z", configurable: true}
},
})
TestIsEnumerable({
- getOwnPropertyDescriptor: function(k) {
- key = k; return {get enumerable() { return k < "z" }, configurable: true}
+ getOwnPropertyDescriptor: function(t, k) {
+ key = k;
+ return {get enumerable() { return k < "z" }, configurable: true}
},
})
-TestIsEnumerable(Proxy.create({
- get: function(pr, pk) {
- return function(k) {
- key = k; return {enumerable: k < "z", configurable: true}
+TestIsEnumerable(new Proxy({}, {
+ get: function(pt, pk, pr) {
+ return function(t, k) {
+ key = k;
+ return {enumerable: k < "z", configurable: true}
}
}
}))
+// ---------------------------------------------------------------------------
function TestIsEnumerableThrow(handler) {
TestWithProxies(TestIsEnumerableThrow2, handler)
}
function TestIsEnumerableThrow2(create, handler) {
var p = create(handler)
- assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, "a") },
- "myexn")
- assertThrows(function(){ Object.prototype.propertyIsEnumerable.call(p, 11) },
- "myexn")
+ assertThrowsEquals(() => Object.prototype.propertyIsEnumerable.call(p, "a"),
+ "myexn")
+ assertThrowsEquals(() => Object.prototype.propertyIsEnumerable.call(p, 11),
+ "myexn")
}
TestIsEnumerableThrow({
@@ -2279,23 +1445,24 @@ TestIsEnumerableThrow({
},
})
-TestIsEnumerableThrow(Proxy.create({
+TestIsEnumerableThrow(new Proxy({}, {
get: function(pr, pk) { throw "myexn" }
}))
-TestIsEnumerableThrow(Proxy.create({
+TestIsEnumerableThrow(new Proxy({}, {
get: function(pr, pk) {
return function(k) { throw "myexn" }
}
-}))
+}));
+// ---------------------------------------------------------------------------
// Constructor functions with proxy prototypes.
-function TestConstructorWithProxyPrototype() {
+(function TestConstructorWithProxyPrototype() {
TestWithProxies(TestConstructorWithProxyPrototype2, {})
-}
+})();
function TestConstructorWithProxyPrototype2(create, handler) {
function C() {};
@@ -2303,19 +1470,18 @@ function TestConstructorWithProxyPrototype2(create, handler) {
var o = new C;
assertSame(C.prototype, Object.getPrototypeOf(o));
-}
+};
-TestConstructorWithProxyPrototype();
-function TestOptWithProxyPrototype() {
+(function TestOptWithProxyPrototype() {
var handler = {
- getPropertyDescriptor: function(k) {
- return {value: 10, configurable: true, enumerable: true, writable: true};
+ get(t, k) {
+ return 10;
}
};
function C() {};
- C.prototype = Proxy.create(handler);
+ C.prototype = new Proxy({}, handler);
var o = new C();
function f() {
@@ -2325,6 +1491,4 @@ function TestOptWithProxyPrototype() {
assertEquals(10, f());
%OptimizeFunctionOnNextCall(f);
assertEquals(10, f());
-}
-
-TestOptWithProxyPrototype();
+})();
diff --git a/deps/v8/test/mjsunit/harmony/reflect-construct.js b/deps/v8/test/mjsunit/harmony/reflect-construct.js
index 2211e3f783..f2dfc15366 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-construct.js
+++ b/deps/v8/test/mjsunit/harmony/reflect-construct.js
@@ -275,3 +275,107 @@
assertEquals(10, Reflect.construct(sumSloppy,
{ 0: 1, 1: 2, 2: 3, 3: 4, length: 4 }).a);
})();
+
+(function() {
+ function* f() { yield 1; yield 2; }
+ function* g() { yield 3; yield 4; }
+ var o = Reflect.construct(f, [], g);
+ assertEquals([1, 2], [...o]);
+ assertTrue(o.__proto__ === g.prototype);
+ assertTrue(o.__proto__ !== f.prototype);
+})();
+
+(function () {
+ var realm1 = Realm.create();
+ var realm2 = Realm.create();
+
+ var well_known_intrinsic_constructors = [
+ "Array",
+ "ArrayBuffer",
+ "Boolean",
+ ["DataView", [new ArrayBuffer()]],
+ "Date",
+ "Error",
+ "EvalError",
+ "Float32Array",
+ "Float64Array",
+ ["Function", ["return 153;"]],
+ ["Function", ["'use strict'; return 153;"]],
+ ["Function", ["'use strong'; return 153;"]],
+ ["((function*(){}).constructor)", ["yield 153;"]], // GeneratorFunction
+ ["((function*(){}).constructor)", ["'use strict'; yield 153;"]],
+ ["((function*(){}).constructor)", ["'use strong'; yield 153;"]],
+ "Int8Array",
+ "Int16Array",
+ "Int32Array",
+ "Map",
+ "Number",
+ "Object",
+ ["Promise", [(resolve, reject)=>{}]],
+ "RangeError",
+ "ReferenceError",
+ "RegExp",
+ "Set",
+ "String",
+ "SyntaxError",
+ // %TypedArray%?
+ "TypeError",
+ "Uint8Array",
+ "Uint8ClampedArray",
+ "Uint16Array",
+ "Uint32Array",
+ "URIError",
+ "WeakMap",
+ "WeakSet"
+ ];
+
+ function getname(v) {
+ return typeof v === "string" ? v : v[0];
+ }
+
+ function getargs(v) {
+ return typeof v === "string" ? [] : v[1];
+ }
+
+ function test_intrinsic_prototype(name) {
+ var own = Realm.eval(realm1, name);
+
+ // Ensure that constructor.prototype is non-writable, non-configurable.
+ var desc = Object.getOwnPropertyDescriptor(own, "prototype");
+ assertFalse(desc.configurable, name);
+ assertFalse(desc.writable, name);
+ }
+
+ for (var intrinsic of well_known_intrinsic_constructors) {
+ test_intrinsic_prototype(getname(intrinsic));
+ }
+
+ function function_with_non_instance_prototype(realm) {
+ var f = Realm.eval(realm, "(function(){})");
+ f.prototype = 1;
+ return f;
+ }
+
+ function test_intrinsic_default(realm, name, args, convert) {
+ var own = Realm.eval(realm1, name);
+ var other = Realm.eval(realm, name);
+ var o = Reflect.construct(
+ convert(own), args, function_with_non_instance_prototype(realm));
+
+ // Ensure the intrisicDefaultProto is fetched from the correct realm.
+ assertTrue(realm == realm1 || o.__proto__ !== own.prototype, [...arguments]);
+ assertTrue(o.__proto__ === other.prototype, [...arguments]);
+ }
+
+ function test_all(test, convert) {
+ for (var intrinsic of well_known_intrinsic_constructors) {
+ for (var realm of [realm1, realm2]) {
+ test(realm, getname(intrinsic), getargs(intrinsic), convert);
+ }
+ }
+ }
+
+ test_all(test_intrinsic_default, (v)=>v);
+ test_all(test_intrinsic_default,
+ (v)=>{ "use strict"; return class extends v {}});
+})();
diff --git a/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js b/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js
index 34cd660c8f..ccd1845c78 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js
+++ b/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js
@@ -47,31 +47,32 @@ var handler = {
}
};
-
-var proxy = Proxy.create(handler);
-var o = {__proto__: proxy};
-
-function f2(o) {
- var result = [];
- for (var i of Reflect.enumerate(o)) {
- result.push(i);
- }
- return result;
-}
-
-function check_f2() {
- assertEquals(keys, f2(o));
- assertEquals(keys, has_keys);
- has_keys.length = 0;
-}
-
-check_f2();
-check_f2();
+// TODO(neis,cbruni): Enable once the enumerate proxy trap is properly
+// implemented.
+// var proxy = new Proxy({}, handler);
+// var o = {__proto__: proxy};
+//
+// function f2(o) {
+// var result = [];
+// for (var i of Reflect.enumerate(o)) {
+// result.push(i);
+// }
+// return result;
+// }
+//
+// function check_f2() {
+// assertEquals(keys, f2(o));
+// assertEquals(keys, has_keys);
+// has_keys.length = 0;
+// }
+//
+// check_f2();
+// check_f2();
// Test lazy deopt after GetPropertyNamesFast
-%OptimizeFunctionOnNextCall(f2);
-deopt_enum = true;
-check_f2();
+// %OptimizeFunctionOnNextCall(f2);
+// deopt_enum = true;
+// check_f2();
// Test lazy deopt after FILTER_KEY
-%OptimizeFunctionOnNextCall(f2);
-deopt_has = true;
-check_f2();
+// %OptimizeFunctionOnNextCall(f2);
+// deopt_has = true;
+// check_f2();
diff --git a/deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js b/deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js
index 1ce86347df..4dee91b61e 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js
+++ b/deps/v8/test/mjsunit/harmony/reflect-get-prototype-of.js
@@ -105,12 +105,12 @@ var functions = [
// DataView,
Date,
Error,
- Float32Array,
- Float64Array,
+ // Float32Array, prototype is %TypedArray%
+ // Float64Array,
Function,
- Int16Array,
- Int32Array,
- Int8Array,
+ // Int16Array,
+ // Int32Array,
+ // Int8Array,
Map,
Number,
Object,
@@ -119,10 +119,10 @@ var functions = [
Set,
String,
// Symbol, not constructible
- Uint16Array,
- Uint32Array,
- Uint8Array,
- Uint8ClampedArray,
+ // Uint16Array,
+ // Uint32Array,
+ // Uint8Array,
+ // Uint8ClampedArray,
WeakMap,
WeakSet,
];
diff --git a/deps/v8/test/mjsunit/harmony/reflect-own-keys.js b/deps/v8/test/mjsunit/harmony/reflect-own-keys.js
new file mode 100644
index 0000000000..6f5dacf1c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/reflect-own-keys.js
@@ -0,0 +1,93 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is adapted from mjsunit/object-get-own-property-names.js.
+
+// Flags: --harmony-reflect
+
+
+// Check simple cases.
+var obj = { a: 1, b: 2};
+var keys = Reflect.ownKeys(obj);
+assertEquals(2, keys.length);
+assertEquals("a", keys[0]);
+assertEquals("b", keys[1]);
+
+var obj = { a: function(){}, b: function(){} };
+var keys = Reflect.ownKeys(obj);
+assertEquals(2, keys.length);
+assertEquals("a", keys[0]);
+assertEquals("b", keys[1]);
+
+// Check slow case
+var obj = { a: 1, b: 2, c: 3 };
+delete obj.b;
+var keys = Reflect.ownKeys(obj)
+assertEquals(2, keys.length);
+assertEquals("a", keys[0]);
+assertEquals("c", keys[1]);
+
+// Check that non-enumerable properties are being returned.
+var keys = Reflect.ownKeys([1, 2]);
+assertEquals(3, keys.length);
+assertEquals("0", keys[0]);
+assertEquals("1", keys[1]);
+assertEquals("string", typeof keys[0]);
+assertEquals("string", typeof keys[1]);
+assertEquals("length", keys[2]);
+
+// Check that no proto properties are returned.
+var obj = { foo: "foo" };
+obj.__proto__ = { bar: "bar" };
+keys = Reflect.ownKeys(obj);
+assertEquals(1, keys.length);
+assertEquals("foo", keys[0]);
+
+// Check that getter properties are returned.
+var obj = {};
+obj.__defineGetter__("getter", function() {});
+keys = Reflect.ownKeys(obj);
+assertEquals(1, keys.length);
+assertEquals("getter", keys[0]);
+
+// Check that implementation does not access Array.prototype.
+var savedConcat = Array.prototype.concat;
+Array.prototype.concat = function() { return []; }
+keys = Reflect.ownKeys({0: 'foo', bar: 'baz'});
+assertEquals(2, keys.length);
+assertEquals('0', keys[0]);
+assertEquals('bar', keys[1]);
+assertSame(Array.prototype, keys.__proto__);
+Array.prototype.concat = savedConcat;
+
+assertThrows(function() { Reflect.ownKeys(4) }, TypeError);
+assertThrows(function() { Reflect.ownKeys("foo") }, TypeError);
+assertThrows(function() { Reflect.ownKeys(true) }, TypeError);
+
+assertEquals(Reflect.ownKeys(Object(4)), []);
+assertEquals(Reflect.ownKeys(Object("foo")), ["0", "1", "2", "length"]);
+assertEquals(Reflect.ownKeys(Object(true)), []);
diff --git a/deps/v8/test/mjsunit/harmony/reflect.js b/deps/v8/test/mjsunit/harmony/reflect.js
index a3d44b8916..8ee1227a44 100644
--- a/deps/v8/test/mjsunit/harmony/reflect.js
+++ b/deps/v8/test/mjsunit/harmony/reflect.js
@@ -71,7 +71,7 @@ function prepare(target) {
(function testReflectGetArity() {
- assertEquals(3, Reflect.get.length);
+ assertEquals(2, Reflect.get.length);
})();
@@ -87,7 +87,7 @@ function prepare(target) {
var a = { [Symbol.toPrimitive]: function() { return "bla" } };
var b = { [Symbol.toPrimitive]: function() { throw "gaga" } };
assertEquals(42, Reflect.get(target, a));
- assertThrows(function() { Reflect.get(target, b); }, "gaga");
+ assertThrowsEquals(function() { Reflect.get(target, b); }, "gaga");
})();
@@ -156,7 +156,7 @@ function prepare(target) {
var b = { [Symbol.toPrimitive]: function() { throw "gaga" } };
assertTrue(Reflect.set(target, a, 42));
assertEquals(42, target.bla);
- assertThrows(function() { Reflect.set(target, b, 42); }, "gaga");
+ assertThrowsEquals(function() { Reflect.set(target, b, 42); }, "gaga");
})();
@@ -297,7 +297,7 @@ function prepare(target) {
var a = { [Symbol.toPrimitive]: function() { return "bla" } };
var b = { [Symbol.toPrimitive]: function() { throw "gaga" } };
assertTrue(Reflect.has(target, a));
- assertThrows(function() { Reflect.has(target, b); }, "gaga");
+ assertThrowsEquals(function() { Reflect.has(target, b); }, "gaga");
})();
@@ -350,7 +350,7 @@ function prepare(target) {
var b = { [Symbol.toPrimitive]: function() { throw "gaga" } };
assertTrue(Reflect.defineProperty(target, a, {value: 42}));
assertEquals(target.bla, 42);
- assertThrows(function() { Reflect.defineProperty(target, b); }, "gaga");
+ assertThrowsEquals(function() { Reflect.defineProperty(target, b); }, "gaga");
})();
@@ -379,7 +379,7 @@ function prepare(target) {
var a = { [Symbol.toPrimitive]: function() { return "bla" } };
var b = { [Symbol.toPrimitive]: function() { throw "gaga" } };
assertTrue(Reflect.deleteProperty(target, a));
- assertThrows(function() { Reflect.deleteProperty(target, b); }, "gaga");
+ assertThrowsEquals(function() { Reflect.deleteProperty(target, b); }, "gaga");
})();
@@ -530,8 +530,7 @@ function prepare(target) {
var a = { [Symbol.toPrimitive]: function() { return "bla" } };
var b = { [Symbol.toPrimitive]: function() { throw "gaga" } };
assertEquals(42, Reflect.getOwnPropertyDescriptor(target, a).value);
- assertThrows(function() { Reflect.getOwnPropertyDescriptor(target, b); },
- "gaga");
+ assertThrowsEquals(() => Reflect.getOwnPropertyDescriptor(target, b), "gaga");
})();
@@ -540,6 +539,39 @@ function prepare(target) {
////////////////////////////////////////////////////////////////////////////////
+// Reflect.ownKeys
+
+
+(function testReflectOwnKeysArity() {
+ assertEquals(1, Reflect.ownKeys.length);
+})();
+
+
+(function testReflectOwnKeysOnNonObject() {
+ assertThrows(function() { Reflect.ownKeys(); }, TypeError);
+ assertThrows(function() { Reflect.ownKeys(42); }, TypeError);
+ assertThrows(function() { Reflect.ownKeys(null); }, TypeError);
+})();
+
+
+(function testReflectOwnKeysOnObject(){
+ assertEquals(["z", "y", "x"], Reflect.ownKeys({z: 3, y: 2, x: 1}));
+ assertEquals(["length"], Reflect.ownKeys([]));
+
+ var s1 = Symbol("foo");
+ var s2 = Symbol("bar");
+ var obj = { [s1]: 0, "bla": 0, 42: 0, "0": 0,
+ [s2]: 0, "-1": 0, "88": 0, "aaa": 0 };
+ assertEquals(["0", "42", "88", "bla", "-1", "aaa", s1, s2],
+ Reflect.ownKeys(obj));
+})();
+
+
+// See reflect-own-keys.js for further tests.
+
+
+
+////////////////////////////////////////////////////////////////////////////////
// Reflect.preventExtensions
diff --git a/deps/v8/test/mjsunit/harmony/regexp-lookbehind.js b/deps/v8/test/mjsunit/harmony/regexp-lookbehind.js
new file mode 100644
index 0000000000..5155929892
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-lookbehind.js
@@ -0,0 +1,165 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-lookbehind
+
+// Simple fixed-length matches.
+assertEquals(["a"], "a".match(/^.(?<=a)/));
+assertNull("b".match(/^.(?<=a)/));
+assertEquals(["foo"], "foo1".match(/^f..(?<=.oo)/));
+assertEquals(["foo"], "foo2".match(/^f\w\w(?<=\woo)/));
+assertNull("boo".match(/^f\w\w(?<=\woo)/));
+assertNull("fao".match(/^f\w\w(?<=\woo)/));
+assertNull("foa".match(/^f\w\w(?<=\woo)/));
+assertEquals(["def"], "abcdef".match(/(?<=abc)\w\w\w/));
+assertEquals(["def"], "abcdef".match(/(?<=a.c)\w\w\w/));
+assertEquals(["def"], "abcdef".match(/(?<=a\wc)\w\w\w/));
+assertEquals(["cde"], "abcdef".match(/(?<=a[a-z])\w\w\w/));
+assertEquals(["def"], "abcdef".match(/(?<=a[a-z][a-z])\w\w\w/));
+assertEquals(["def"], "abcdef".match(/(?<=a[a-z]{2})\w\w\w/));
+assertEquals(["bcd"], "abcdef".match(/(?<=a{1})\w\w\w/));
+assertEquals(["cde"], "abcdef".match(/(?<=a{1}b{1})\w\w\w/));
+assertEquals(["def"], "abcdef".match(/(?<=a{1}[a-z]{2})\w\w\w/));
+
+// Variable-length matches.
+assertEquals(["def"], "abcdef".match(/(?<=[a|b|c]*)[^a|b|c]{3}/));
+assertEquals(["def"], "abcdef".match(/(?<=\w*)[^a|b|c]{3}/));
+
+// Start of line matches.
+assertEquals(["def"], "abcdef".match(/(?<=^abc)def/));
+assertEquals(["def"], "abcdef".match(/(?<=^[a-c]{3})def/));
+assertEquals(["def"], "xyz\nabcdef".match(/(?<=^[a-c]{3})def/m));
+assertEquals(["ab", "cd", "efg"], "ab\ncd\nefg".match(/(?<=^)\w+/gm));
+assertEquals(["ab", "cd", "efg"], "ab\ncd\nefg".match(/\w+(?<=$)/gm));
+assertEquals(["ab", "cd", "efg"], "ab\ncd\nefg".match(/(?<=^)\w+(?<=$)/gm));
+assertNull("abcdef".match(/(?<=^[^a-c]{3})def/));
+assertNull("foooo".match(/"^foooo(?<=^o+)$/));
+assertNull("foooo".match(/"^foooo(?<=^o*)$/));
+assertEquals(["foo"], "foo".match(/^foo(?<=^fo+)$/));
+assertEquals(["foooo"], "foooo".match(/^foooo(?<=^fo*)/));
+assertEquals(["foo", "f"], "foo".match(/^(f)oo(?<=^\1o+)$/));
+assertEquals(["foo", "f"], "foo".match(/^(f)oo(?<=^\1o+)$/i));
+assertEquals(["foo\u1234", "f"], "foo\u1234".match(/^(f)oo(?<=^\1o+).$/i));
+assertEquals(["def"], "abcdefdef".match(/(?<=^\w+)def/));
+assertEquals(["def", "def"], "abcdefdef".match(/(?<=^\w+)def/g));
+
+// Word boundary matches.
+assertEquals(["def"], "abc def".match(/(?<=\b)[d-f]{3}/));
+assertEquals(["def"], "ab cdef".match(/(?<=\B)\w{3}/));
+assertEquals(["def"], "ab cdef".match(/(?<=\B)(?<=c(?<=\w))\w{3}/));
+assertNull("abcdef".match(/(?<=\b)[d-f]{3}/));
+
+// Negative lookbehind.
+assertEquals(["abc"], "abcdef".match(/(?<!abc)\w\w\w/));
+assertEquals(["abc"], "abcdef".match(/(?<!a.c)\w\w\w/));
+assertEquals(["abc"], "abcdef".match(/(?<!a\wc)\w\w\w/));
+assertEquals(["abc"], "abcdef".match(/(?<!a[a-z])\w\w\w/));
+assertEquals(["abc"], "abcdef".match(/(?<!a[a-z]{2})\w\w\w/));
+assertNull("abcdef".match(/(?<!abc)def/));
+assertNull("abcdef".match(/(?<!a.c)def/));
+assertNull("abcdef".match(/(?<!a\wc)def/));
+assertNull("abcdef".match(/(?<!a[a-z][a-z])def/));
+assertNull("abcdef".match(/(?<!a[a-z]{2})def/));
+assertNull("abcdef".match(/(?<!a{1}b{1})cde/));
+assertNull("abcdef".match(/(?<!a{1}[a-z]{2})def/));
+
+// Capturing matches.
+assertEquals(["def", "c"], "abcdef".match(/(?<=(c))def/));
+assertEquals(["def", "bc"], "abcdef".match(/(?<=(\w{2}))def/));
+assertEquals(["def", "bc", "c"], "abcdef".match(/(?<=(\w(\w)))def/));
+assertEquals(["def", "a"], "abcdef".match(/(?<=(\w){3})def/));
+assertEquals(["d", "bc", undefined], "abcdef".match(/(?<=(bc)|(cd))./));
+assertEquals(["c", "a", undefined],
+ "abcdef".match(/(?<=([ab]{1,2})\D|(abc))\w/));
+assertEquals(["ab", "a", "b"], "abcdef".match(/\D(?<=([ab]+))(\w)/));
+assertEquals(["c", "d"], "abcdef".match(/(?<=b|c)\w/g));
+assertEquals(["cd", "ef"], "abcdef".match(/(?<=[b-e])\w{2}/g));
+
+// Captures inside negative lookbehind. (They never capture.)
+assertEquals(["de", undefined], "abcdef".match(/(?<!(^|[ab]))\w{2}/));
+
+// Nested lookaround.
+assertEquals(["ef"], "abcdef".match(/(?<=ab(?=c)\wd)\w\w/));
+assertEquals(["ef", "bc"], "abcdef".match(/(?<=a(?=([^a]{2})d)\w{3})\w\w/));
+assertEquals(["ef", "bc"],
+ "abcdef".match(/(?<=a(?=([bc]{2}(?<!a{2}))d)\w{3})\w\w/));
+assertNull("abcdef".match(/(?<=a(?=([bc]{2}(?<!a*))d)\w{3})\w\w/));
+assertEquals(["faaa"], "faaao".match(/^faaao?(?<=^f[oa]+(?=o))/));
+
+// Back references.
+assertEquals(["b", "b", "bb"], "abb".match(/(.)(?<=(\1\1))/));
+assertEquals(["B", "B", "bB"], "abB".match(/(.)(?<=(\1\1))/i));
+assertEquals(["aB", "aB", "a"], "aabAaBa".match(/((\w)\w)(?<=\1\2\1)/i));
+assertEquals(["Ba", "Ba", "a"], "aabAaBa".match(/(\w(\w))(?<=\1\2\1)/i));
+assertEquals(["b", "b", "B"], "abaBbAa".match(/(?=(\w))(?<=(\1))./i));
+assertEquals(["foo", "'", "foo"], " 'foo' ".match(/(?<=(.))(\w+)(?=\1)/));
+assertEquals(["foo", "\"", "foo"], " \"foo\" ".match(/(?<=(.))(\w+)(?=\1)/));
+assertNull(" .foo\" ".match(/(?<=(.))(\w+)(?=\1)/));
+assertNull("ab".match(/(.)(?<=\1\1\1)/));
+assertNull("abb".match(/(.)(?<=\1\1\1)/));
+assertEquals(["b", "b"], "abbb".match(/(.)(?<=\1\1\1)/));
+assertNull("ab".match(/(..)(?<=\1\1\1)/));
+assertNull("abb".match(/(..)(?<=\1\1\1)/));
+assertNull("aabb".match(/(..)(?<=\1\1\1)/));
+assertNull("abab".match(/(..)(?<=\1\1\1)/));
+assertNull("fabxbab".match(/(..)(?<=\1\1\1)/));
+assertNull("faxabab".match(/(..)(?<=\1\1\1)/));
+assertEquals(["ab", "ab"], "fababab".match(/(..)(?<=\1\1\1)/));
+
+// Back references to captures inside the lookbehind.
+assertEquals(["d", "C"], "abcCd".match(/(?<=\1(\w))d/i));
+assertEquals(["d", "x"], "abxxd".match(/(?<=\1([abx]))d/));
+assertEquals(["c", "ab"], "ababc".match(/(?<=\1(\w+))c/));
+assertEquals(["c", "b"], "ababbc".match(/(?<=\1(\w+))c/));
+assertNull("ababdc".match(/(?<=\1(\w+))c/));
+assertEquals(["c", "abab"], "ababc".match(/(?<=(\w+)\1)c/));
+
+// Alternations are tried left to right,
+// and we do not backtrack into a lookbehind.
+assertEquals(["xabcd", "cd", ""], "xabcd".match(/.*(?<=(..|...|....))(.*)/));
+assertEquals(["xabcd", "bcd", ""], "xabcd".match(/.*(?<=(xx|...|....))(.*)/));
+assertEquals(["xxabcd", "bcd", ""], "xxabcd".match(/.*(?<=(xx|...))(.*)/));
+assertEquals(["xxabcd", "xx", "abcd"], "xxabcd".match(/.*(?<=(xx|xxx))(.*)/));
+
+// We do not backtrack into a lookbehind.
+// The lookbehind captures "abc" so that \1 does not match. We do not backtrack
+// to capture only "bc" in the lookbehind.
+assertNull("abcdbc".match(/(?<=([abc]+)).\1/));
+
+// Greedy loop.
+assertEquals(["c", "bbbbbb"], "abbbbbbc".match(/(?<=(b+))c/));
+assertEquals(["c", "b1234"], "ab1234c".match(/(?<=(b\d+))c/));
+assertEquals(["c", "b12b23b34"], "ab12b23b34c".match(/(?<=((?:b\d{2})+))c/));
+
+// Sticky
+var re1 = /(?<=^(\w+))def/g;
+assertEquals(["def", "abc"], re1.exec("abcdefdef"));
+assertEquals(["def", "abcdef"], re1.exec("abcdefdef"));
+var re2 = /\Bdef/g;
+assertEquals(["def"], re2.exec("abcdefdef"));
+assertEquals(["def"], re2.exec("abcdefdef"));
+
+// Misc
+assertNull("abcdef".match(/(?<=$abc)def/));
+assertEquals(["foo"], "foo".match(/^foo(?<=foo)$/));
+assertEquals(["foo"], "foo".match(/^f.o(?<=foo)$/));
+assertNull("fno".match(/^f.o(?<=foo)$/));
+assertNull("foo".match(/^foo(?<!foo)$/));
+assertNull("foo".match(/^f.o(?<!foo)$/));
+assertEquals(["fno"], "fno".match(/^f.o(?<!foo)$/));
+assertEquals(["foooo"], "foooo".match(/^foooo(?<=fo+)$/));
+assertEquals(["foooo"], "foooo".match(/^foooo(?<=fo*)$/));
+assertEquals(["abc", "abc"], /(abc\1)/.exec("abc"));
+assertEquals(["abc", "abc"], /(abc\1)/.exec("abc\u1234"));
+assertEquals(["abc", "abc"], /(abc\1)/i.exec("abc"));
+assertEquals(["abc", "abc"], /(abc\1)/i.exec("abc\u1234"));
+var oob_subject = "abcdefghijklmnabcdefghijklmn".substr(14);
+assertNull(oob_subject.match(/(?=(abcdefghijklmn))(?<=\1)a/i));
+assertNull(oob_subject.match(/(?=(abcdefghijklmn))(?<=\1)a/));
+
+// Mutual recursive capture/back references
+assertEquals(["cacb", "a", ""], /(?<=a(.\2)b(\1)).{4}/.exec("aabcacbc"));
+assertEquals(["b", "ac", "ac"], /(?<=a(\2)b(..\1))b/.exec("aacbacb"));
+assertEquals(["x", "aa"], /(?<=(?:\1b)(aa))./.exec("aabaax"));
+assertEquals(["x", "aa"], /(?<=(?:\1|b)(aa))./.exec("aaaax"));
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2219.js b/deps/v8/test/mjsunit/harmony/regress/regress-2219.js
index 946c75bd80..29e08603b5 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2219.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2219.js
@@ -27,6 +27,6 @@
// Flags: --harmony-proxies --expose-gc
-var p = Proxy.create({getPropertyDescriptor: function() { gc() }});
+var p = new Proxy({}, {getOwnPropertyDescriptor: function() { gc() }});
var o = Object.create(p);
assertSame(23, o.x = 23);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2225.js b/deps/v8/test/mjsunit/harmony/regress/regress-2225.js
index 9957d8d463..75778753b2 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2225.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-2225.js
@@ -28,12 +28,13 @@
// Flags: --harmony-proxies
var proxy_has_x = false;
-var proxy = Proxy.create({ getPropertyDescriptor:function(key) {
- assertSame('x', key);
- if (proxy_has_x) {
- return { configurable:true, writable:false, value:19 };
+var proxy = new Proxy({}, {
+ get(t, key, receiver) {
+ assertSame('x', key);
+ if (proxy_has_x) { return 19 }
+ return 8;
}
-}});
+});
// Test __lookupGetter__/__lookupSetter__ with proxy.
assertSame(undefined, Object.prototype.__lookupGetter__.call(proxy, 'foo'));
@@ -49,17 +50,27 @@ assertSame(undefined, Object.prototype.__lookupGetter__.call(object, '123'));
assertSame(undefined, Object.prototype.__lookupSetter__.call(object, '456'));
// Test inline constructors with proxy as prototype.
-function f() { this.x = 23; }
-f.prototype = proxy;
+function F() { this.x = 42 }
+F.prototype = proxy;
+var instance = new F();
+
proxy_has_x = false;
-assertSame(23, new f().x);
+assertSame(42, instance.x);
+delete instance.x;
+assertSame(8, instance.x);
+
proxy_has_x = true;
-assertSame(19, new f().x);
+assertSame(19, instance.x);
// Test inline constructors with proxy in prototype chain.
-function g() { this.x = 42; }
-g.prototype.__proto__ = proxy;
+function G() { this.x = 42; }
+G.prototype.__proto__ = proxy;
+instance = new G();
+
proxy_has_x = false;
-assertSame(42, new g().x);
+assertSame(42, instance.x);
+delete instance.x;
+assertSame(8, instance.x);
+
proxy_has_x = true;
-assertSame(19, new g().x);
+assertSame(19, instance.x);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-405844.js b/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
deleted file mode 100644
index 3d3561f7a5..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-405844.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-proxies --harmony-object-observe
-
-var proxy = Proxy.create({ fix: function() { return {}; } });
-Object.preventExtensions(proxy);
-Object.observe(proxy, function(){});
-
-var functionProxy = Proxy.createFunction({ fix: function() { return {}; } }, function(){});
-Object.preventExtensions(functionProxy);
-Object.observe(functionProxy, function(){});
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js b/deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js
index 6cc6f0e747..b4579b141f 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4395-global-eval.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-default-parameters --harmony-destructuring
+// Flags: --harmony-default-parameters --harmony-destructuring-bind
((x, y = eval('x')) => assertEquals(42, y))(42);
((x, {y = eval('x')}) => assertEquals(42, y))(42, {});
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4395.js b/deps/v8/test/mjsunit/harmony/regress/regress-4395.js
index a003856715..fcc6784428 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4395.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4395.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring --harmony-default-parameters
+// Flags: --harmony-destructuring-bind --harmony-default-parameters
(function testExpressionTypes() {
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4585.js b/deps/v8/test/mjsunit/harmony/regress/regress-4585.js
new file mode 100644
index 0000000000..ada91c67ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4585.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-destructuring-bind
+
+assertThrows(`for(const { method() {} } = this) {}`, SyntaxError);
+assertThrows(`var { method() {} } = this;`, SyntaxError);
+assertThrows(`for(const { *method() {} } = this) {}`, SyntaxError);
+assertThrows(`var { *method() {} } = this;`, SyntaxError);
+assertThrows(`for(var { get foo() {} } = this) {}`, SyntaxError);
+assertThrows(`for(var { set foo() {} } = this) {}`, SyntaxError);
+
+// Still OK in other objects
+for (var { name = "" + { toString() { return "test" } } } in { a: 1}) break;
+assertEquals(name, "test");
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4658.js b/deps/v8/test/mjsunit/harmony/regress/regress-4658.js
new file mode 100644
index 0000000000..35bea12adc
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4658.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-do-expressions
+
+(function testWithSimpleLoopVariable() {
+ var f = (x, y = (do { var s=0; for (var e of x) s += e; s; })) => y*(y+1);
+ var result = f([1,2,3]); // core dump here, if not fixed.
+ assertEquals(result, 42);
+})();
+
+(function testWithComplexLoopVariable() {
+ var f = (x, i=x[0]-1, a=[],
+ y = (do { var s=0;
+ for (a[i] of x) s += a[i++];
+ s;
+ })) => y*(a[0]+a[1]*a[2]);
+ var result = f([1,2,3]); // core dump here, if not fixed.
+ assertEquals(result, 42);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-517455.js b/deps/v8/test/mjsunit/harmony/regress/regress-517455.js
index a59fa181b7..f07e8fe63b 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-517455.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-517455.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
function f({x = ""}) { eval(x) }
f({})
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-576662.js b/deps/v8/test/mjsunit/harmony/regress/regress-576662.js
new file mode 100644
index 0000000000..5541b79b5d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-576662.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+// https://code.google.com/p/chromium/issues/detail?id=576662 (simplified)
+
+Realm.create();
+this.__proto__ = new Proxy({},{});
+assertThrows(() => Realm.eval(1, "Realm.global(0).bla = 1"));
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js
index 31d276aa83..cf26127643 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js
@@ -5,7 +5,7 @@
// Flags: --allow-natives-syntax --harmony-proxies
function bar() {}
-bar({ a: Proxy.create({}) });
+bar({ a: new Proxy({}, {}) });
function foo(x) { x.a.b == ""; }
var x = {a: {b: "" }};
foo(x);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js
index c30260db72..7ef9e20520 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js
@@ -5,14 +5,16 @@
// Flags: --harmony-proxies
var fuse = 1;
+
var handler = {
get: function() { return function() {} },
- getPropertyDescriptor: function() {
+ has() { return true },
+ getOwnPropertyDescriptor: function() {
if (fuse-- == 0) throw "please die";
return {value: function() {}, configurable: true};
}
};
-var p = Proxy.create(handler);
+var p = new Proxy({}, handler);
var o = Object.create(p);
with (o) { f() }
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-571149.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-571149.js
new file mode 100644
index 0000000000..b3325b7fdf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-571149.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+(function(a = 0){
+ var x; // allocated in a var block, due to use of default parameter
+ (function() { return !x })();
+})();
+
+(function({a}){
+ var x; // allocated in a var block, due to use of parameter destructuring
+ (function() { return !x })();
+})({});
+
+(function(...a){
+ var x; // allocated in a var block, due to use of rest parameter
+ (function() { return !x })();
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js b/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js
index 9b32939306..9a440b6ab3 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-lookup-transition.js
@@ -4,7 +4,7 @@
// Flags: --harmony-proxies --expose-gc
-var proxy = Proxy.create({ getPropertyDescriptor:function(key) {
+var proxy = new Proxy({}, { getOwnPropertyDescriptor:function() {
gc();
}});
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
index bac42681ab..7f7f8fb2d5 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
@@ -204,7 +204,7 @@ function TestTypedArray(constr, elementSize, typicalElement) {
assertEquals("[object " + constr.name + "]",
Object.prototype.toString.call(a));
var desc = Object.getOwnPropertyDescriptor(
- constr.prototype, Symbol.toStringTag);
+ constr.prototype.__proto__, Symbol.toStringTag);
assertTrue(desc.configurable);
assertFalse(desc.enumerable);
assertFalse(!!desc.writable);
@@ -310,17 +310,14 @@ var typedArrayConstructors = [
function TestPropertyTypeChecks(constructor) {
function CheckProperty(name) {
- var d = Object.getOwnPropertyDescriptor(constructor.prototype, name);
+ var d = Object.getOwnPropertyDescriptor(constructor.prototype.__proto__,
+ name);
var o = {};
assertThrows(function() {d.get.call(o);}, TypeError);
for (var i = 0; i < typedArrayConstructors.length; i++) {
var ctor = typedArrayConstructors[i];
var a = MakeSharedTypedArray(ctor, 10);
- if (ctor === constructor) {
- d.get.call(a); // shouldn't throw
- } else {
- assertThrows(function() {d.get.call(a);}, TypeError);
- }
+ d.get.call(a); // shouldn't throw
}
}
diff --git a/deps/v8/test/mjsunit/harmony/simd.js b/deps/v8/test/mjsunit/harmony/simd.js
index 2c07eefb59..6330ac8338 100644
--- a/deps/v8/test/mjsunit/harmony/simd.js
+++ b/deps/v8/test/mjsunit/harmony/simd.js
@@ -618,3 +618,17 @@ function TestSIMDObject() {
assertSame(SIMD.Bool8x16, undefined);
}
TestSIMDObject()
+
+
+function TestStringify(expected, input) {
+ assertEquals(expected, JSON.stringify(input));
+ assertEquals(expected, JSON.stringify(input, null, 0));
+}
+
+TestStringify(undefined, SIMD.Float32x4(1, 2, 3, 4));
+TestStringify('[null]', [SIMD.Float32x4(1, 2, 3, 4)]);
+TestStringify('[{}]', [Object(SIMD.Float32x4(1, 2, 3, 4))]);
+var simd_wrapper = Object(SIMD.Float32x4(1, 2, 3, 4));
+TestStringify('{}', simd_wrapper);
+simd_wrapper.a = 1;
+TestStringify('{"a":1}', simd_wrapper);
diff --git a/deps/v8/test/mjsunit/harmony/species.js b/deps/v8/test/mjsunit/harmony/species.js
new file mode 100644
index 0000000000..da1df4331f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/species.js
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Test the ES2015 @@species feature
+
+'use strict';
+
+let TypedArray = Uint8Array.__proto__;
+
+// The @@species property exists on the right objects and has the right values
+
+let classesWithSpecies = [RegExp, Array, TypedArray, ArrayBuffer, Map, Set, Promise];
+let classesWithoutSpecies = [Object, Function, String, Number, Symbol, WeakMap, WeakSet];
+
+for (let constructor of classesWithSpecies) {
+ assertEquals(constructor, constructor[Symbol.species]);
+ assertThrows(function() { constructor[Symbol.species] = undefined }, TypeError);
+ let descriptor = Object.getOwnPropertyDescriptor(constructor, Symbol.species);
+ assertTrue(descriptor.configurable);
+ assertFalse(descriptor.enumerable);
+ assertEquals(undefined, descriptor.writable);
+ assertEquals(undefined, descriptor.set);
+ assertEquals('function', typeof descriptor.get);
+}
+
+// @@species is defined with distinct getters
+assertEquals(classesWithSpecies.length,
+ new Set(classesWithSpecies.map(constructor =>
+ Object.getOwnPropertyDescriptor(
+ constructor, Symbol.species).get)
+ ).size);
+
+for (let constructor of classesWithoutSpecies)
+ assertEquals(undefined, constructor[Symbol.species]);
diff --git a/deps/v8/test/mjsunit/harmony/string-match.js b/deps/v8/test/mjsunit/harmony/string-match.js
new file mode 100644
index 0000000000..25a3ca2fd1
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/string-match.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-subclass
+
+var pattern = {};
+pattern[Symbol.match] = function(string) {
+ return string.length;
+};
+// Check object coercible fails.
+assertThrows(() => String.prototype.match.call(null, pattern),
+ TypeError);
+// Override is called.
+assertEquals(5, "abcde".match(pattern));
+// Non-callable override.
+pattern[Symbol.match] = "dumdidum";
+assertThrows(() => "abcde".match(pattern), TypeError);
+
+assertEquals("[Symbol.match]", RegExp.prototype[Symbol.match].name);
diff --git a/deps/v8/test/mjsunit/harmony/typedarray-species.js b/deps/v8/test/mjsunit/harmony/typedarray-species.js
new file mode 100644
index 0000000000..35a9ea1de7
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/typedarray-species.js
@@ -0,0 +1,86 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Subclasses of %TypedArray% construct themselves under map, etc
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+
+for (let constructor of typedArrayConstructors) {
+ class MyTypedArray extends constructor { }
+ assertEquals(MyTypedArray, new MyTypedArray().map(()=>0).constructor);
+ assertEquals(MyTypedArray, new MyTypedArray().filter(()=>{}).constructor);
+ assertEquals(MyTypedArray, new MyTypedArray().slice().constructor);
+}
+
+// Subclasses can override @@species to return the another class
+
+for (let constructor of typedArrayConstructors) {
+ class MyTypedArray extends constructor { }
+ class MyOtherTypedArray extends constructor {
+ static get [Symbol.species]() { return MyTypedArray; }
+ }
+ assertEquals(MyTypedArray, new MyOtherTypedArray().map(()=>0).constructor);
+ assertEquals(MyTypedArray, new MyOtherTypedArray().filter(()=>{}).constructor);
+ assertEquals(MyTypedArray, new MyOtherTypedArray().slice().constructor);
+}
+
+// TypedArray too-short and non-TypedArray error checking
+
+for (let constructor of typedArrayConstructors) {
+ class MyShortTypedArray extends constructor {
+ constructor(length) { super(length - 1); }
+ }
+ assertThrows(() => new MyShortTypedArray(5).map(()=>0), TypeError);
+ assertThrows(() => new MyShortTypedArray(5).filter(()=>true), TypeError);
+ assertThrows(() => new MyShortTypedArray(5).slice(), TypeError);
+
+ class MyNonTypedArray extends constructor {
+ static get [Symbol.species]() { return Array; }
+ }
+ assertThrows(() => new MyNonTypedArray().map(()=>0), TypeError);
+ assertThrows(() => new MyNonTypedArray().filter(()=>{}), TypeError);
+ assertThrows(() => new MyNonTypedArray().slice(), TypeError);
+}
+
+// Defaults when constructor or @@species is missing or non-constructor
+
+for (let constructor of typedArrayConstructors) {
+ class MyDefaultTypedArray extends constructor {
+ static get [Symbol.species]() { return undefined; }
+ }
+ assertEquals(constructor, new MyDefaultTypedArray().map(()=>0).constructor);
+
+ class MyOtherDefaultTypedArray extends constructor { }
+ assertEquals(MyOtherDefaultTypedArray, new MyOtherDefaultTypedArray().map(()=>0).constructor);
+ MyOtherDefaultTypedArray.prototype.constructor = undefined;
+ assertEquals(constructor, new MyOtherDefaultTypedArray().map(()=>0).constructor);
+}
+
+// Exceptions propagated when getting constructor @@species throws
+
+class SpeciesError extends Error { }
+class ConstructorError extends Error { }
+
+for (let constructor of typedArrayConstructors) {
+ class MyThrowingArray extends constructor {
+ static get [Symbol.species]() { throw new SpeciesError; }
+ }
+ assertThrows(() => new MyThrowingArray().map(()=>{}), SpeciesError);
+ Object.defineProperty(MyThrowingArray.prototype, 'constructor', {
+ get() { throw new ConstructorError; }
+ });
+ assertThrows(() => new MyThrowingArray().map(()=>{}), ConstructorError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
index 67493351a4..f591dac930 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
+++ b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
@@ -210,3 +210,48 @@ function testRegexpHelper(r) {
helper(/foo/u);
helper(new RegExp("foo", "u"));
})();
+
+// Non-BMP patterns.
+// Single character atom.
+assertTrue(new RegExp("\u{12345}", "u").test("\u{12345}"));
+assertTrue(/\u{12345}/u.test("\u{12345}"));
+assertTrue(new RegExp("\u{12345}", "u").test("\ud808\udf45"));
+assertTrue(/\u{12345}/u.test("\ud808\udf45"));
+assertFalse(new RegExp("\u{12345}", "u").test("\udf45"));
+assertFalse(/\u{12345}/u.test("\udf45"));
+
+// Multi-character atom.
+assertTrue(new RegExp("\u{12345}\u{23456}", "u").test("a\u{12345}\u{23456}b"));
+assertTrue(/\u{12345}\u{23456}/u.test("b\u{12345}\u{23456}c"));
+assertFalse(new RegExp("\u{12345}\u{23456}", "u").test("a\udf45\u{23456}b"));
+assertFalse(/\u{12345}\u{23456}/u.test("b\udf45\u{23456}c"));
+
+// Disjunction.
+assertTrue(new RegExp("\u{12345}(?:\u{23456})", "u").test(
+ "a\u{12345}\u{23456}b"));
+assertTrue(/\u{12345}(?:\u{23456})/u.test("b\u{12345}\u{23456}c"));
+assertFalse(new RegExp("\u{12345}(?:\u{23456})", "u").test(
+ "a\udf45\u{23456}b"));
+assertFalse(/\u{12345}(?:\u{23456})/u.test("b\udf45\u{23456}c"));
+
+// Alternative.
+assertTrue(new RegExp("\u{12345}|\u{23456}", "u").test("a\u{12345}b"));
+assertTrue(/\u{12345}|\u{23456}/u.test("b\u{23456}c"));
+assertFalse(new RegExp("\u{12345}|\u{23456}", "u").test("a\udf45\ud84db"));
+assertFalse(/\u{12345}|\u{23456}/u.test("b\udf45\ud808c"));
+
+// Capture.
+assertTrue(new RegExp("(\u{12345}|\u{23456}).\\1", "u").test(
+ "\u{12345}b\u{12345}"));
+assertTrue(/(\u{12345}|\u{23456}).\1/u.test("\u{12345}b\u{12345}"));
+assertFalse(new RegExp("(\u{12345}|\u{23456}).\\1", "u").test(
+ "\u{12345}b\u{23456}"));
+assertFalse(/(\u{12345}|\u{23456}).\1/u.test("\u{12345}b\u{23456}"));
+
+// Quantifier.
+assertTrue(new RegExp("\u{12345}{3}", "u").test("\u{12345}\u{12345}\u{12345}"));
+assertTrue(/\u{12345}{3}/u.test("\u{12345}\u{12345}\u{12345}"));
+assertTrue(new RegExp("\u{12345}{3}").test("\u{12345}\udf45\udf45"));
+assertTrue(/\ud808\udf45{3}/u.test("\u{12345}\udf45\udf45"));
+assertFalse(new RegExp("\u{12345}{3}", "u").test("\u{12345}\udf45\udf45"));
+assertFalse(/\u{12345}{3}/u.test("\u{12345}\udf45\udf45"));
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index c72c153688..84f2056856 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -140,9 +140,16 @@ var pointJson = '{"x": 1, "y": 2}';
assertEquals({'x': 1, 'y': 2}, JSON.parse(pointJson));
assertEquals({'x': 1}, JSON.parse(pointJson, GetFilter('y')));
assertEquals({'y': 2}, JSON.parse(pointJson, GetFilter('x')));
+
assertEquals([1, 2, 3], JSON.parse("[1, 2, 3]"));
-assertEquals([1, undefined, 3], JSON.parse("[1, 2, 3]", GetFilter(1)));
-assertEquals([1, 2, undefined], JSON.parse("[1, 2, 3]", GetFilter(2)));
+
+var array1 = JSON.parse("[1, 2, 3]", GetFilter(1));
+assertEquals([1, , 3], array1);
+assertFalse(array1.hasOwnProperty(1)); // assertEquals above is not enough
+
+var array2 = JSON.parse("[1, 2, 3]", GetFilter(2));
+assertEquals([1, 2, ,], array2);
+assertFalse(array2.hasOwnProperty(2));
function DoubleNumbers(key, value) {
return (typeof value == 'number') ? 2 * value : value;
@@ -482,3 +489,32 @@ assertTrue(Object.prototype.isPrototypeOf(o2));
var json = '{"stuff before slash\\\\stuff after slash":"whatever"}';
TestStringify(json, JSON.parse(json));
+
+
+// https://bugs.chromium.org/p/v8/issues/detail?id=3139
+
+reviver = function(p, v) {
+ if (p == "a") {
+ this.b = { get x() {return null}, set x(_){throw 666} }
+ }
+ return v;
+}
+assertEquals({a: 0, b: {x: null}}, JSON.parse('{"a":0,"b":1}', reviver));
+
+
+// Make sure a failed [[Delete]] doesn't throw
+
+reviver = function(p, v) {
+ Object.freeze(this);
+ return p === "" ? v : undefined;
+}
+assertEquals({a: 0, b: 1}, JSON.parse('{"a":0,"b":1}', reviver));
+
+
+// Make sure a failed [[DefineProperty]] doesn't throw
+
+reviver = function(p, v) {
+ Object.freeze(this);
+ return p === "" ? v : 42;
+}
+assertEquals({a: 0, b: 1}, JSON.parse('{"a":0,"b":1}', reviver));
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index f55bad3dbb..8da7e6bd7b 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -57,6 +57,18 @@ test(function() {
Object.defineProperty(1, "x", {});
}, "Object.defineProperty called on non-object", TypeError);
+test(function() {
+ (function() {}).apply({}, 1);
+}, "CreateListFromArrayLike called on non-object", TypeError);
+
+test(function() {
+ Reflect.apply(function() {}, {}, 1);
+}, "CreateListFromArrayLike called on non-object", TypeError);
+
+test(function() {
+ Reflect.construct(function() {}, 1);
+}, "CreateListFromArrayLike called on non-object", TypeError);
+
// kCalledOnNullOrUndefined
test(function() {
Array.prototype.shift.call(null);
@@ -97,11 +109,6 @@ test(function() {
new DataView(1);
}, "First argument to DataView constructor must be an ArrayBuffer", TypeError);
-// kDateType
-test(function() {
- Date.prototype.setYear.call({}, 1);
-}, "this is not a Date object.", TypeError);
-
// kDefineDisallowed
test(function() {
"use strict";
@@ -170,11 +177,6 @@ test(function() {
new Map([1]);
}, "Iterator value 1 is not an entry object", TypeError);
-// kNotAPromise
-test(function() {
- Promise.prototype.chain.call(1);
-}, "1 is not a promise", TypeError);
-
// kNotConstructor
test(function() {
new Symbol();
@@ -182,7 +184,7 @@ test(function() {
// kNotDateObject
test(function() {
- Date.prototype.setHours.call(1);
+ Date.prototype.getHours.call(1);
}, "this is not a Date object.", TypeError);
// kNotGeneric
@@ -256,7 +258,7 @@ test(function() {
test(function() {
Set.prototype.add = 0;
new Set(1);
-}, "Property 'add' of object #<Set> is not a function", TypeError);
+}, "'0' returned for property 'add' of object '#<Set>' is not a function", TypeError);
// kProtoObjectOrNull
test(function() {
@@ -335,31 +337,13 @@ test(function() {
}, "Invalid property descriptor. Cannot both specify accessors " +
"and a value or writable attribute, #<Object>", TypeError);
-// kWithExpression
-test(function() {
- with (null) {}
-}, "null has no properties", TypeError);
-
-// kWrongArgs
-test(function() {
- (function() {}).apply({}, 1);
-}, "Function.prototype.apply: Arguments list has wrong type", TypeError);
-
-test(function() {
- Reflect.apply(function() {}, {}, 1);
-}, "Reflect.apply: Arguments list has wrong type", TypeError);
-
-test(function() {
- Reflect.construct(function() {}, 1);
-}, "Reflect.construct: Arguments list has wrong type", TypeError);
-
// === SyntaxError ===
// kInvalidRegExpFlags
test(function() {
- /a/x.test("a");
-}, "Invalid flags supplied to RegExp constructor 'x'", SyntaxError);
+ eval("/a/x.test(\"a\");");
+}, "Invalid regular expression flags", SyntaxError);
// kMalformedRegExp
test(function() {
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index e545a61637..7642839d53 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -83,7 +83,7 @@ function testScriptMirror(f, file_name, file_lines, type, compilation_type,
// Test the script mirror for different functions.
-testScriptMirror(function(){}, 'mirror-script.js', 98, 2, 0);
+testScriptMirror(function(){}, 'mirror-script.js', 99, 2, 0);
testScriptMirror(Math.round, 'native math.js', -1, 0, 0);
testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index f08c29339a..9b07953c8a 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -93,6 +93,10 @@ var assertNotNull;
// to the type property on the thrown exception.
var assertThrows;
+// Assert that the passed function throws an exception.
+// The exception is checked against the second argument using assertEquals.
+var assertThrowsEquals;
+
// Assert that the passed function or eval code does not throw an exception.
var assertDoesNotThrow;
@@ -113,14 +117,39 @@ var assertUnoptimized;
(function () { // Scope for utility functions.
+ var ObjectPrototypeToString = Object.prototype.toString;
+ var NumberPrototypeValueOf = Number.prototype.valueOf;
+ var BooleanPrototypeValueOf = Boolean.prototype.valueOf;
+ var StringPrototypeValueOf = String.prototype.valueOf;
+ var DatePrototypeValueOf = Date.prototype.valueOf;
+ var RegExpPrototypeToString = RegExp.prototype.toString;
+ var ArrayPrototypeMap = Array.prototype.map;
+ var ArrayPrototypeJoin = Array.prototype.join;
+
function classOf(object) {
// Argument must not be null or undefined.
- var string = Object.prototype.toString.call(object);
+ var string = ObjectPrototypeToString.call(object);
// String has format [object <ClassName>].
return string.substring(8, string.length - 1);
}
+ function ValueOf(value) {
+ switch (classOf(value)) {
+ case "Number":
+ return NumberPrototypeValueOf.call(value);
+ case "String":
+ return StringPrototypeValueOf.call(value);
+ case "Boolean":
+ return BooleanPrototypeValueOf.call(value);
+ case "Date":
+ return DatePrototypeValueOf.call(value);
+ default:
+ return value;
+ }
+ }
+
+
function PrettyPrint(value) {
switch (typeof value) {
case "string":
@@ -137,19 +166,21 @@ var assertUnoptimized;
if (value === null) return "null";
var objectClass = classOf(value);
switch (objectClass) {
- case "Number":
- case "String":
- case "Boolean":
- case "Date":
- return objectClass + "(" + PrettyPrint(value.valueOf()) + ")";
- case "RegExp":
- return value.toString();
- case "Array":
- return "[" + value.map(PrettyPrintArrayElement).join(",") + "]";
- case "Object":
- break;
- default:
- return objectClass + "()";
+ case "Number":
+ case "String":
+ case "Boolean":
+ case "Date":
+ return objectClass + "(" + PrettyPrint(ValueOf(value)) + ")";
+ case "RegExp":
+ return RegExpPrototypeToString.call(value);
+ case "Array":
+ var mapped = ArrayPrototypeMap.call(value, PrettyPrintArrayElement);
+ var joined = ArrayPrototypeJoin.call(mapped, ",");
+ return "[" + joined + "]";
+ case "Object":
+ break;
+ default:
+ return objectClass + "()";
}
// [[Class]] is "Object".
var name = value.constructor.name;
@@ -211,7 +242,8 @@ var assertUnoptimized;
if (objectClass !== classOf(b)) return false;
if (objectClass === "RegExp") {
// For RegExp, just compare pattern and flags using its toString.
- return (a.toString() === b.toString());
+ return RegExpPrototypeToString.call(a) ===
+ RegExpPrototypeToString.call(b);
}
// Functions are only identical to themselves.
if (objectClass === "Function") return false;
@@ -227,7 +259,7 @@ var assertUnoptimized;
}
if (objectClass === "String" || objectClass === "Number" ||
objectClass === "Boolean" || objectClass === "Date") {
- if (a.valueOf() !== b.valueOf()) return false;
+ if (ValueOf(a) !== ValueOf(b)) return false;
}
return deepObjectEquals(a, b);
}
@@ -325,6 +357,8 @@ var assertUnoptimized;
} catch (e) {
if (typeof type_opt === 'function') {
assertInstanceof(e, type_opt);
+ } else if (type_opt !== void 0) {
+ fail("invalid use of assertThrows, maybe you want assertThrowsEquals");
}
if (arguments.length >= 3) {
assertEquals(e.type, cause_opt);
@@ -336,6 +370,17 @@ var assertUnoptimized;
};
+ assertThrowsEquals = function assertThrowsEquals(fun, val) {
+ try {
+ fun();
+ } catch(e) {
+ assertEquals(val, e);
+ return;
+ }
+ throw new MjsUnitAssertionError("Did not throw exception");
+ };
+
+
assertInstanceof = function assertInstanceof(obj, type) {
if (!(obj instanceof type)) {
var actualTypeName = null;
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 47aa91f808..95e8da1cb2 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -31,11 +31,6 @@
'bugs/*': [FAIL],
##############################################################################
- # Flaky tests.
- # BUG(v8:2921).
- 'debug-step-4-in-frame': [PASS, FAIL, SLOW],
-
- ##############################################################################
# Fails.
'regress/regress-1119': [FAIL],
@@ -54,6 +49,12 @@
# Issue 3784: setters-on-elements is flaky
'setters-on-elements': [PASS, FAIL],
+ # Issue 3641: The new 'then' semantics suppress some exceptions.
+ # These tests may be changed or removed when 'chain' is deprecated.
+ 'es6/debug-promises/reject-with-throw-in-reject': [FAIL],
+ 'es6/debug-promises/reject-with-undefined-reject': [FAIL],
+ 'es6/debug-promises/reject-with-invalid-reject': [FAIL],
+
##############################################################################
# TurboFan compiler failures.
@@ -85,6 +86,11 @@
'harmony/module-resolution': [SKIP],
'harmony/regress/regress-343928': [SKIP],
+ # Proxy tests rely on non ES6 version of Proxies
+ # TODO(neis,cbruni): figure out which Proxy tests can be reused
+ 'harmony/proxies-example-membrane': [SKIP],
+ 'strong/load-proxy': [SKIP],
+
# Issue 3660: Replacing activated TurboFan frames by unoptimized code does
# not work, but we expect it to not crash.
'debug-step-turbofan': [PASS, FAIL],
@@ -112,6 +118,9 @@
'debug-listbreakpoints': [PASS, NO_VARIANTS], # arm64 nosnap with turbofan
'debug-enable-disable-breakpoints': [PASS, NO_VARIANTS], #arm64 nosnap with turbofan.
+ # TODO(rossberg)
+ 'strong/literals': [SKIP], # Rest arguments do not respect strongness in Turbofan.
+
# Issue 4035: unexpected frame->context() in debugger
'regress/regress-crbug-107996': [PASS, NO_VARIANTS],
'regress/regress-crbug-171715': [PASS, NO_VARIANTS],
@@ -138,7 +147,7 @@
'debug-stepout-scope-part2': [PASS, NO_VARIANTS],
'debug-stepout-scope-part3': [PASS, NO_VARIANTS],
'es6/debug-evaluate-blockscopes': [PASS, NO_VARIANTS],
- # issue 4055:
+ # Issue 4055: Scope chain length observed by debugger is off.
'es6/generators-debug-scopes': [PASS, NO_VARIANTS],
# TODO(titzer): --always-opt incorrectly disables CrankShaft soft deopt points
@@ -149,6 +158,7 @@
# Assumptions about optimization need investigation in TurboFan.
'regress-sync-optimized-lists': [PASS, NO_VARIANTS],
+ 'regress/regress-store-uncacheable': [PASS, NO_VARIANTS],
# issue 4078:
'allocation-site-info': [PASS, NO_VARIANTS],
@@ -185,6 +195,11 @@
# Issue 488: this test sometimes times out.
'array-constructor': [PASS, TIMEOUT],
+ # Issue 4413: this test sometimes times out with TSAN because we trigger
+ # the slow path in C++ with holey arrays in Function.prototype.apply.
+ # TODO(bmeurer): Add fast support for holey arrays in apply.
+ 'apply': [PASS, TIMEOUT],
+
# Very slow on ARM and MIPS, contains no architecture dependent code.
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', TIMEOUT]],
'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
@@ -268,6 +283,9 @@
'regress/regress-crbug-474297': [PASS, ['mode == debug', SLOW]],
'strong/implicit-conversions': [PASS, SLOW],
'strong/load-element-mutate-backing-store': [PASS, SLOW],
+
+ # TODO(bradnelson): Enable tests in a separate change.
+ 'wasm/*': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -285,7 +303,6 @@
'array-feedback': [SKIP],
'array-literal-feedback': [SKIP],
'd8-performance-now': [SKIP],
- 'debug-stepout-scope-part8': [PASS, ['arch == arm ', FAIL]],
'elements-kind': [SKIP],
'elements-transition-hoisting': [SKIP],
'fast-prototype': [SKIP],
@@ -394,11 +411,6 @@
'regress/regress-91013': [SKIP],
'regress/regress-99167': [SKIP],
- # Long running tests.
- 'regress/regress-2185': [PASS, ['mode == debug', PASS, TIMEOUT]],
- 'regress/regress-2185-2': [PASS, TIMEOUT],
- 'whitespaces': [PASS, TIMEOUT, SLOW],
-
# BUG(v8:3457).
'deserialize-reference': [PASS, FAIL],
@@ -436,6 +448,7 @@
'unicodelctest-no-optimization': [PASS, SLOW],
'unicodelctest': [PASS, SLOW],
'unicode-test': [PASS, SLOW],
+ 'whitespaces': [PASS, TIMEOUT, SLOW],
}], # 'arch == arm64'
['arch == arm64 and mode == debug and simulator_run == True', {
@@ -484,7 +497,7 @@
'try': [PASS, ['mode == debug', SKIP]],
'debug-scripts-request': [PASS, ['mode == debug', SKIP]],
'array-constructor': [PASS, ['mode == debug', SKIP]],
- 'regress/regress-1122': [PASS, ['mode == debug and arch == android_arm', SKIP]],
+ 'regress/regress-1122': [PASS, SLOW, ['mode == debug and arch == android_arm', SKIP]],
# Flaky test that can hit compilation-time stack overflow in debug mode.
'unicode-test': [PASS, ['mode == debug', PASS, FAIL]],
@@ -493,10 +506,6 @@
'compiler/regress-stacktrace-methods': [PASS, ['mode == release', TIMEOUT]],
'array-splice': [PASS, TIMEOUT],
- # Long running test.
- 'string-indexof-2': [PASS, TIMEOUT],
- 'mirror-object': [PASS, TIMEOUT],
-
# Long running tests. Skipping because having them timeout takes too long on
# the buildbot.
'big-object-literal': [SKIP],
@@ -514,17 +523,16 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
- ############################################################################
# Slow tests.
- 'regress/regress-2185-2': [PASS, SLOW],
- 'mirror-object': [PASS, SLOW],
- 'compiler/osr-with-args': [PASS, SLOW],
'array-sort': [PASS, SLOW],
+ 'compiler/osr-with-args': [PASS, SLOW],
+ 'mirror-object': [PASS, SLOW],
'packed-elements': [PASS, SLOW],
- 'regress/regress-91008': [PASS, SLOW],
+ 'regress/regress-2185-2': [PASS, SLOW],
'regress/regress-2790': [PASS, SLOW],
+ 'regress/regress-91008': [PASS, SLOW],
'regress/regress-json-stringify-gc': [PASS, SLOW],
- 'regress/regress-1122': [PASS, SLOW],
+ 'string-indexof-2': [PASS, TIMEOUT],
}], # 'arch == arm or arch == android_arm'
##############################################################################
@@ -729,8 +737,6 @@
}], # 'arch == ppc and simulator_run == True'
['ignition == True', {
- 'asm/*': [SKIP],
- 'compiler/*': [SKIP],
'const*': [SKIP],
'debug-*': [SKIP],
'es6/*': [SKIP],
@@ -740,78 +746,109 @@
'regress/debug*': [SKIP],
'regress/regress-debug*': [SKIP],
+ # TODO(bradnelson): Figure out why these tests fail with ignition.
+ 'wasm/*': [SKIP],
+
'allocation-folding': [SKIP],
'api-call-after-bypassed-exception': [SKIP],
'apply-arguments-gc-safepoint': [SKIP],
'arguments-load-across-eval': [SKIP],
'arguments-read-and-assignment': [SKIP],
'array-bounds-check-removal': [SKIP],
- 'array-constructor': [SKIP],
'array-elements-from-array-prototype-chain': [SKIP],
'array-functions-prototype-misc': [SKIP],
'array-join': [SKIP],
- 'array-length-number-conversion': [SKIP],
'array-literal-feedback': [SKIP],
'array-literal-transitions': [SKIP],
- 'array-reduce': [SKIP],
'array-tostring': [SKIP],
- 'assert-opt-and-deopt': [SKIP],
- 'big-array-literal': [SKIP],
'break': [SKIP],
'call-runtime-tail': [SKIP],
+ 'compiler/compare-map-elim2': [SKIP],
+ 'compiler/deopt-inlined-smi': [SKIP],
+ 'compiler/deopt-tonumber-compare': [SKIP],
+ 'compiler/escape-analysis-arguments': [SKIP],
+ 'compiler/escape-analysis': [SKIP],
+ 'compiler/expression-trees': [SKIP],
+ 'compiler/inline-arguments': [SKIP],
+ 'compiler/inline-arity-mismatch': [SKIP],
+ 'compiler/inline-construct': [SKIP],
+ 'compiler/lazy-deopt-in-literal': [SKIP],
+ 'compiler/manual-concurrent-recompile': [SKIP],
+ 'compiler/optimized-for-in': [SKIP],
+ 'compiler/optimized-function-calls': [SKIP],
+ 'compiler/optimize_max': [SKIP],
+ 'compiler/optimize_min': [SKIP],
+ 'compiler/opt-next-call-turbo': [SKIP],
+ 'compiler/osr-forof': [SKIP],
+ 'compiler/property-refs': [SKIP],
+ 'compiler/regress-3786': [SKIP],
+ 'compiler/regress-446647': [SKIP],
+ 'compiler/regress-447567': [SKIP],
+ 'compiler/regress-469089': [SKIP],
+ 'compiler/regress-96989': [SKIP],
+ 'compiler/regress-const': [SKIP],
+ 'compiler/regress-funarguments': [SKIP],
+ 'compiler/regress-stacktrace-methods': [SKIP],
+ 'compiler/regress-variable-liveness': [SKIP],
+ 'compiler/rotate': [SKIP],
+ 'compiler/safepoint': [SKIP],
+ 'compiler/try-deopt': [SKIP],
+ 'compiler/try-osr': [SKIP],
+ 'compiler/uint32': [SKIP],
+ 'compiler/variables': [SKIP],
'context-calls-maintained': [SKIP],
'contextual-calls': [SKIP],
'cross-realm-filtering': [SKIP],
'cyclic-array-to-string': [SKIP],
'd8-worker-sharedarraybuffer': [SKIP],
+ 'delete-in-with': [SKIP],
'deopt-minus-zero': [SKIP],
- 'deopt-with-fp-regs': [SKIP],
- 'deserialize-script-id': [SKIP],
- 'div-mul-minus-one': [SKIP],
+ 'deserialize-optimize-inner': [SKIP],
'double-equals': [SKIP],
- 'double-intrinsics': [SKIP],
- 'elements-transition-hoisting': [SKIP],
- 'error-constructors': [SKIP],
'eval-enclosing-function-name': [SKIP],
'eval-stack-trace': [SKIP],
'fast-prototype': [SKIP],
+ 'field-type-tracking': [SKIP],
+ 'for-in-opt': [SKIP],
'for-in-special-cases': [SKIP],
'function-call': [SKIP],
'get-caller-js-function': [SKIP],
'get-prototype-of': [SKIP],
'getter-in-prototype': [SKIP],
- 'getters-on-elements': [SKIP],
'global-hash': [SKIP],
'global-load-from-eval-in-with': [SKIP],
'global-vars-with': [SKIP],
'instanceof-2': [SKIP],
- 'math-floor-of-div-minus-zero': [SKIP],
+ 'json-replacer-number-wrapper-tostring': [SKIP],
+ 'json-replacer-order': [SKIP],
+ 'json': [SKIP],
+ 'keyed-load-with-symbol-key': [SKIP],
+ 'local-load-from-eval': [SKIP],
'math-min-max': [SKIP],
'messages': [SKIP],
'mirror-object': [SKIP],
- 'numops-fuzz-part1': [SKIP],
- 'numops-fuzz-part2': [SKIP],
- 'numops-fuzz-part3': [SKIP],
- 'numops-fuzz-part4': [SKIP],
'object-literal-gc': [SKIP],
'osr-elements-kind': [SKIP],
'property-load-across-eval': [SKIP],
'proto-accessor': [SKIP],
'readonly': [SKIP],
+ 'receiver-in-with-calls': [SKIP],
'regress-3225': [SKIP],
'regress/clear-keyed-call': [SKIP],
+ 'regress/poly_count_operation': [SKIP],
'regress/regress-102153': [SKIP],
+ 'regress/regress-1030466': [SKIP],
'regress/regress-1079': [SKIP],
'regress/regress-109195': [SKIP],
'regress/regress-1114040': [SKIP],
'regress/regress-1125': [SKIP],
'regress/regress-1129': [SKIP],
+ 'regress/regress-1170187': [SKIP],
'regress/regress-117409': [SKIP],
'regress/regress-1177809': [SKIP],
'regress/regress-119609': [SKIP],
- 'regress/regress-1209': [SKIP],
+ 'regress/regress-123919': [SKIP],
'regress/regress-124594': [SKIP],
- 'regress/regress-124': [SKIP],
'regress/regress-125515': [SKIP],
'regress/regress-128018': [SKIP],
'regress/regress-131994': [SKIP],
@@ -820,7 +857,6 @@
'regress/regress-1369': [SKIP],
'regress/regress-1403': [SKIP],
'regress/regress-1412': [SKIP],
- 'regress/regress-1415': [SKIP],
'regress/regress-1436': [SKIP],
'regress/regress-1493017': [SKIP],
'regress/regress-1523': [SKIP],
@@ -832,48 +868,49 @@
'regress/regress-1708': [SKIP],
'regress/regress-1757': [SKIP],
'regress/regress-1790': [SKIP],
+ 'regress/regress-1853': [SKIP],
'regress/regress-1980': [SKIP],
'regress/regress-2054': [SKIP],
'regress/regress-2071': [SKIP],
- 'regress/regress-2132': [SKIP],
'regress/regress-2163': [SKIP],
+ 'regress/regress-220': [SKIP],
'regress/regress-2318': [SKIP],
'regress/regress-2339': [SKIP],
- 'regress/regress-2444': [SKIP],
- 'regress/regress-244': [SKIP],
+ 'regress/regress-2374': [SKIP],
'regress/regress-2593': [SKIP],
- 'regress/regress-2594': [SKIP],
'regress/regress-2618': [SKIP],
'regress/regress-263': [SKIP],
'regress/regress-265': [SKIP],
'regress/regress-269': [SKIP],
'regress/regress-2790': [SKIP],
'regress/regress-2825': [SKIP],
- 'regress/regress-286': [SKIP],
- 'regress/regress-298269': [SKIP],
- 'regress/regress-3176': [SKIP],
+ 'regress/regress-3135': [SKIP],
+ 'regress/regress-3138': [SKIP],
'regress/regress-318420': [SKIP],
'regress/regress-320532': [SKIP],
'regress/regress-3281': [SKIP],
'regress/regress-331444': [SKIP],
'regress/regress-343609': [SKIP],
'regress/regress-347530': [SKIP],
+ 'regress/regress-347914': [SKIP],
+ 'regress/regress-351261': [SKIP],
'regress/regress-352982': [SKIP],
+ 'regress/regress-353551': [SKIP],
'regress/regress-354357': [SKIP],
'regress/regress-356053': [SKIP],
'regress/regress-357105': [SKIP],
+ 'regress/regress-359441': [SKIP],
'regress/regress-361025': [SKIP],
'regress/regress-3621': [SKIP],
'regress/regress-365172-3': [SKIP],
'regress/regress-370827': [SKIP],
- 'regress/regress-3709': [SKIP],
'regress/regress-377290': [SKIP],
- 'regress/regress-385565': [SKIP],
'regress/regress-3859': [SKIP],
'regress/regress-3884': [SKIP],
'regress/regress-3926': [SKIP],
'regress/regress-3960': [SKIP],
'regress/regress-3969': [SKIP],
+ 'regress/regress-3985': [SKIP],
'regress/regress-4023': [SKIP],
'regress/regress-4027': [SKIP],
'regress/regress-403292': [SKIP],
@@ -881,8 +918,8 @@
'regress/regress-4121': [SKIP],
'regress/regress-419663': [SKIP],
'regress/regress-4255-4': [SKIP],
- 'regress/regress-430201': [SKIP],
'regress/regress-430201b': [SKIP],
+ 'regress/regress-430201': [SKIP],
'regress/regress-4309-3': [SKIP],
'regress/regress-4320': [SKIP],
'regress/regress-4325': [SKIP],
@@ -891,25 +928,30 @@
'regress/regress-4388': [SKIP],
'regress/regress-444805': [SKIP],
'regress/regress-446389': [SKIP],
+ 'regress/regress-447756': [SKIP],
'regress/regress-4515': [SKIP],
'regress/regress-4521': [SKIP],
'regress/regress-4525': [SKIP],
'regress/regress-453481': [SKIP],
'regress/regress-4534': [SKIP],
'regress/regress-454725': [SKIP],
+ 'regress/regress-457935': [SKIP],
'regress/regress-470804': [SKIP],
'regress/regress-476488': [SKIP],
- 'regress/regress-491536': [SKIP],
+ 'regress/regress-503565': [SKIP],
'regress/regress-514362': [SKIP],
'regress/regress-520029': [SKIP],
- 'regress/regress-542099': [SKIP],
'regress/regress-542100': [SKIP],
+ 'regress/regress-544991': [SKIP],
+ 'regress/regress-568765': [SKIP],
+ 'regress/regress-572589': [SKIP],
'regress/regress-580': [SKIP],
'regress/regress-618': [SKIP],
- 'regress/regress-643': [SKIP],
'regress/regress-69': [SKIP],
'regress/regress-70066': [SKIP],
'regress/regress-747': [SKIP],
+ 'regress/regress-753': [SKIP],
+ 'regress/regress-799761': [SKIP],
'regress/regress-806473': [SKIP],
'regress/regress-842017': [SKIP],
'regress/regress-84234': [SKIP],
@@ -922,34 +964,40 @@
'regress/regress-974': [SKIP],
'regress/regress-99167': [SKIP],
'regress/regress-998565': [SKIP],
+ 'regress/regress-arg-materialize-store': [SKIP],
'regress/regress-arguments-gc': [SKIP],
'regress/regress-assignment-in-test-context': [SKIP],
+ 'regress/regress-bce-underflow': [SKIP],
'regress/regress-cnlt-elements': [SKIP],
'regress/regress-cnlt-enum-indices': [SKIP],
'regress/regress-cntl-descriptors-enum': [SKIP],
'regress/regress-conditional-position': [SKIP],
'regress/regress-convert-enum': [SKIP],
+ 'regress/regress-crbug-109362': [SKIP],
'regress/regress-crbug-119800': [SKIP],
- 'regress/regress-crbug-135008': [SKIP],
+ 'regress/regress-crbug-163530': [SKIP],
'regress/regress-crbug-229923': [SKIP],
'regress/regress-crbug-242502': [SKIP],
'regress/regress-crbug-242924': [SKIP],
'regress/regress-crbug-245480': [SKIP],
- 'regress/regress-crbug-349079': [SKIP],
'regress/regress-crbug-350864': [SKIP],
+ 'regress/regress-crbug-351262': [SKIP],
'regress/regress-crbug-352058': [SKIP],
'regress/regress-crbug-357137': [SKIP],
'regress/regress-crbug-385002': [SKIP],
'regress/regress-crbug-387599': [SKIP],
+ 'regress/regress-crbug-405517': [SKIP],
'regress/regress-crbug-405922': [SKIP],
'regress/regress-crbug-409614': [SKIP],
'regress/regress-crbug-410033': [SKIP],
'regress/regress-crbug-412208': [SKIP],
'regress/regress-crbug-416558': [SKIP],
'regress/regress-crbug-424142': [SKIP],
+ 'regress/regress-crbug-429159': [SKIP],
'regress/regress-crbug-431602': [SKIP],
'regress/regress-crbug-432493': [SKIP],
'regress/regress-crbug-450642': [SKIP],
+ 'regress/regress-crbug-455644': [SKIP],
'regress/regress-crbug-465298': [SKIP],
'regress/regress-crbug-467180': [SKIP],
'regress/regress-crbug-467531': [SKIP],
@@ -962,38 +1010,45 @@
'regress/regress-crbug-489293': [SKIP],
'regress/regress-crbug-489597': [SKIP],
'regress/regress-crbug-498142': [SKIP],
- 'regress/regress-crbug-500824': [SKIP],
'regress/regress-crbug-501809': [SKIP],
'regress/regress-crbug-506443': [SKIP],
'regress/regress-crbug-507070': [SKIP],
+ 'regress/regress-crbug-517592': [SKIP],
+ 'regress/regress-crbug-522895': [SKIP],
'regress/regress-crbug-527364': [SKIP],
- 'regress/regress-crbug-530598': [SKIP],
'regress/regress-crbug-546968': [SKIP],
+ 'regress/regress-crbug-568477-1': [SKIP],
+ 'regress/regress-crbug-568477-2': [SKIP],
+ 'regress/regress-crbug-568477-3': [SKIP],
+ 'regress/regress-crbug-568477-4': [SKIP],
+ 'regress/regress-crbug-572590': [SKIP],
+ 'regress/regress-crbug-573857': [SKIP],
+ 'regress/regress-crbug-575080': [SKIP],
'regress/regress-deopt-gcb': [SKIP],
'regress/regress-deopt-gc': [SKIP],
+ 'regress/regress-deopt-in-array-literal-spread': [SKIP],
'regress/regress-embedded-cons-string': [SKIP],
'regress/regress-existing-shared-function-info': [SKIP],
'regress/regress-fast-literal-transition': [SKIP],
- 'regress/regress-force-representation': [SKIP],
'regress/regress-function-constructor-receiver': [SKIP],
'regress/regress-handle-illegal-redeclaration': [SKIP],
'regress/regress-inline-class-constructor': [SKIP],
'regress/regress-inlining-function-literal-context': [SKIP],
+ 'regress/regress-latin-1': [SKIP],
'regress/regress-lazy-deopt-reloc': [SKIP],
- 'regress/regress-map-invalidation-2': [SKIP],
'regress/regress-opt-after-debug-deopt': [SKIP],
- 'regress/regress-param-local-type': [SKIP],
+ 'regress/regress-osr-in-case-label': [SKIP],
+ 'regress/regress-osr-in-literal': [SKIP],
'regress/regress-prepare-break-while-recompile': [SKIP],
'regress/regress-put-prototype-transition': [SKIP],
'regress/regress-sliced-external-cons-regexp': [SKIP],
+ 'regress/regress-store-heapobject': [SKIP],
'regress/regress-transcendental': [SKIP],
'regress/regress-typedarray-length': [SKIP],
'regress/splice-missing-wb': [SKIP],
+ 'setter-on-constructor-prototype': [SKIP],
'shift-for-integer-div': [SKIP],
'simple-constructor': [SKIP],
- 'sin-cos': [SKIP],
- 'smi-mul-const': [SKIP],
- 'smi-mul': [SKIP],
'sparse-array-reverse': [SKIP],
'stack-traces': [SKIP],
'strict-mode': [SKIP],
@@ -1003,42 +1058,66 @@
'string-natives': [SKIP],
'string-replace-with-empty': [SKIP],
'string-slices': [SKIP],
- 'switch-opt': [SKIP],
'tools/profile': [SKIP],
'tools/profviz': [SKIP],
'try-finally-continue': [SKIP],
'try': [SKIP],
- 'unary-minus-deopt': [SKIP],
'undetectable-compare': [SKIP],
'unused-context-in-with': [SKIP],
- 'uri': [SKIP],
'value-wrapper': [SKIP],
+ 'with-function-expression': [SKIP],
'with-parameter-access': [SKIP],
'with-prototype': [SKIP],
'with-readonly': [SKIP],
+ 'with-value': [SKIP],
+ 'regress/regress-builtinbust-7': [SKIP],
+ 'regress/regress-crbug-451770': [SKIP],
+ 'regress/regress-crbug-503968': [SKIP],
+ 'regress/regress-crbug-504729': [SKIP],
}], # ignition == True
['ignition == True and (arch == arm or arch == arm64)', {
+ 'array-constructor': [SKIP],
'array-sort': [SKIP],
- 'date-parse': [SKIP],
- 'math-floor-part1': [SKIP],
- 'math-floor-part2': [SKIP],
- 'math-floor-part3': [SKIP],
+ 'array-store-and-grow': [SKIP],
+ 'compiler/division-by-constant': [SKIP],
+ 'compiler/osr-big': [SKIP],
+ 'compiler/osr-nested': [SKIP],
+ 'compiler/osr-one': [SKIP],
+ 'compiler/osr-two': [SKIP],
'mul-exhaustive-part*': [SKIP],
- 'regress/regress-1167': [SKIP],
+ 'regress/regress-1257': [SKIP],
'regress/regress-165637': [SKIP],
- 'regress/regress-2249': [SKIP],
'regress/regress-319722-ArrayBuffer': [SKIP],
- 'regress/regress-542823': [SKIP],
- 'regress/regress-634-debug': [SKIP],
- 'regress/regress-78270': [SKIP],
+ 'regress/regress-411210': [SKIP],
+ 'regress/regress-91008': [SKIP],
'regress/regress-crbug-347903': [SKIP],
+ 'regress/regress-crbug-500497': [SKIP],
'regress/regress-crbug-505007-1': [SKIP],
'regress/regress-crbug-505007-2': [SKIP],
- 'regress/short-circuit': [SKIP],
+ 'regress/regress-2193': [SKIP],
+ 'regress/regress-3158': [SKIP],
+ 'regress/regress-347904': [SKIP],
+ 'regress/regress-380092': [SKIP],
+ 'regress/regress-4173': [SKIP],
+ 'regress/regress-copy-hole-to-field': [SKIP],
+ 'regress/regress-crbug-315252': [SKIP],
+ 'regress/regress-crbug-412215': [SKIP],
+ 'regress/regress-crbug-513507': [SKIP],
+ 'regress/regress-deep-proto': [SKIP],
+ 'regress/regress-deopt-store-effect': [SKIP],
+ 'regress/regress-undefined-store-keyed-fast-element': [SKIP],
'stack-traces-overflow': [SKIP],
'unicodelctest': [SKIP],
'unicodelctest-no-optimization': [SKIP],
}], # ignition == True and (arch == arm or arch == arm64)
+##############################################################################
+['gcov_coverage', {
+ # Tests taking too long.
+ 'array-functions-prototype-misc': [SKIP],
+ 'strong/implicit-conversions': [SKIP],
+ 'strong/load-element-mutate-backing-store': [SKIP],
+}], # 'gcov_coverage'
+
]
diff --git a/deps/v8/test/mjsunit/parallel-optimize-disabled.js b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
index c144e649cc..1dbce06fdf 100644
--- a/deps/v8/test/mjsunit/parallel-optimize-disabled.js
+++ b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --nodead-code-elimination --concurrent-recompilation
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --legacy-const
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/property-load-across-eval.js b/deps/v8/test/mjsunit/property-load-across-eval.js
index 98b621e792..222c0e965e 100644
--- a/deps/v8/test/mjsunit/property-load-across-eval.js
+++ b/deps/v8/test/mjsunit/property-load-across-eval.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Tests loading of properties across eval calls.
var x = 1;
diff --git a/deps/v8/test/mjsunit/random-bit-correlations.js b/deps/v8/test/mjsunit/random-bit-correlations.js
new file mode 100644
index 0000000000..8322cfac4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/random-bit-correlations.js
@@ -0,0 +1,69 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --random-seed=12 --nostress-opt --noalways-opt --predictable
+
+(function() {
+ var kHistory = 2;
+ var kRepeats = 100;
+ var history = new Uint32Array(kHistory);
+
+ function random() {
+ return (Math.random() * Math.pow(2, 32)) >>> 0;
+ }
+
+ function ChiSquared(m, n) {
+ var ys_minus_np1 = (m - n / 2.0);
+ var chi_squared_1 = ys_minus_np1 * ys_minus_np1 * 2.0 / n;
+ var ys_minus_np2 = ((n - m) - n / 2.0);
+ var chi_squared_2 = ys_minus_np2 * ys_minus_np2 * 2.0 / n;
+ return chi_squared_1 + chi_squared_2;
+ }
+ for (var predictor_bit = -2; predictor_bit < 32; predictor_bit++) {
+ // The predicted bit is one of the bits from the PRNG.
+ for (var random_bit = 0; random_bit < 32; random_bit++) {
+ for (var ago = 0; ago < kHistory; ago++) {
+ // We don't want to check whether each bit predicts itself.
+ if (ago == 0 && predictor_bit == random_bit) continue;
+ // Enter the new random value into the history
+ for (var i = ago; i >= 0; i--) {
+ history[i] = random();
+ }
+ // Find out how many of the bits are the same as the prediction bit.
+ var m = 0;
+ for (var i = 0; i < kRepeats; i++) {
+ for (var j = ago - 1; j >= 0; j--) history[j + 1] = history[j];
+ history[0] = random();
+ var predicted;
+ if (predictor_bit >= 0) {
+ predicted = (history[ago] >> predictor_bit) & 1;
+ } else {
+ predicted = predictor_bit == -2 ? 0 : 1;
+ }
+ var bit = (history[0] >> random_bit) & 1;
+ if (bit == predicted) m++;
+ }
+ // Chi squared analysis for k = 2 (2, states: same/not-same) and one
+ // degree of freedom (k - 1).
+ var chi_squared = ChiSquared(m, kRepeats);
+ if (chi_squared > 24) {
+ var percent = Math.floor(m * 100.0 / kRepeats);
+ if (predictor_bit < 0) {
+ var bit_value = predictor_bit == -2 ? 0 : 1;
+ print(`Bit ${random_bit} is ${bit_value} ${percent}% of the time`);
+ } else {
+ print(`Bit ${random_bit} is the same as bit ${predictor_bit} ` +
+ `${ago} ago ${percent}% of the time`);
+ }
+ }
+ // For 1 degree of freedom this corresponds to 1 in a million. We are
+ // running ~8000 tests, so that would be surprising.
+ assertTrue(chi_squared <= 24);
+ // If the predictor bit is a fixed 0 or 1 then it makes no sense to
+ // repeat the test with a different age.
+ if (predictor_bit < 0) break;
+ }
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/readonly.js b/deps/v8/test/mjsunit/readonly.js
index 084e9ffe23..3b090cebc1 100644
--- a/deps/v8/test/mjsunit/readonly.js
+++ b/deps/v8/test/mjsunit/readonly.js
@@ -120,12 +120,13 @@ function ReadonlyByProto(o, name) {
o.__proto__ = p;
}
+// TODO(neis,cbruni): Enable once the necessary traps work again.
// Allow Proxy to be undefined, so test can run in non-Harmony mode as well.
var global = this;
function ReadonlyByProxy(o, name) {
if (!global.Proxy) return ReadonlyByFreeze(o, name); // Dummy.
- var p = global.Proxy.create({
+ var p = new global.Proxy({}, {
getPropertyDescriptor: function() {
return {value: -46, writable: false, configurable: true};
}
@@ -135,7 +136,7 @@ function ReadonlyByProxy(o, name) {
var readonlys = [
ReadonlyByNonwritableDataProperty, ReadonlyByAccessorPropertyWithoutSetter,
- ReadonlyByGetter, ReadonlyByFreeze, ReadonlyByProto, ReadonlyByProxy
+ ReadonlyByGetter, ReadonlyByFreeze, ReadonlyByProto // ReadonlyByProxy
]
function TestAllReadonlys(f) {
diff --git a/deps/v8/test/mjsunit/regexp-not-sticky-yet.js b/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
index 4186a63fef..2002509d1d 100644
--- a/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
+++ b/deps/v8/test/mjsunit/regexp-not-sticky-yet.js
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --no-harmony-regexps
// Test that sticky regexp support is not affecting V8 when the
// --harmony-regexps flag is not on.
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index 6374296210..b6f019ea26 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -275,7 +275,7 @@ assertEquals('/(?:)/', re.toString());
re.compile();
assertEquals('/(?:)/', re.toString());
re.compile(void 0);
-assertEquals('/undefined/', re.toString());
+assertEquals('/(?:)/', re.toString());
// Check for lazy RegExp literal creation
@@ -722,3 +722,10 @@ assertThrows("RegExp.prototype.toString.call(true)", TypeError);
assertThrows("RegExp.prototype.toString.call([])", TypeError);
assertThrows("RegExp.prototype.toString.call({})", TypeError);
assertThrows("RegExp.prototype.toString.call(function(){})", TypeError);
+
+// Test mutually recursive capture and backreferences.
+assertEquals(["b", "", ""], /(\2)b(\1)/.exec("aba"));
+assertEquals(["a", "", ""], /(\2).(\1)/.exec("aba"));
+assertEquals(["aba", "a", "a"], /(.\2).(\1)/.exec("aba"));
+assertEquals(["acbc", "c", "c"], /a(.\2)b(\1)$/.exec("acbc"));
+assertEquals(["acbc", "c", "c"], /a(.\2)b(\1)/.exec("aabcacbc"));
diff --git a/deps/v8/test/mjsunit/regress-3225.js b/deps/v8/test/mjsunit/regress-3225.js
index fe44b85110..97165a80dd 100644
--- a/deps/v8/test/mjsunit/regress-3225.js
+++ b/deps/v8/test/mjsunit/regress-3225.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals
Debug = debug.Debug
@@ -15,12 +15,13 @@ function listener(event, exec_state, event_data, data) {
if (debug_step == 0) {
assertEquals(1, exec_state.frame(0).evaluate('a').value());
assertEquals(3, exec_state.frame(0).evaluate('b').value());
- exec_state.frame(0).evaluate("a = 4").value();
+ exec_state.frame(0).evaluate("a = 4").value(); // no effect.
debug_step++;
} else {
- assertEquals(4, exec_state.frame(0).evaluate('a').value());
+ assertEquals(1, exec_state.frame(0).evaluate('a').value());
assertEquals(3, exec_state.frame(0).evaluate('b').value());
- exec_state.frame(0).evaluate("b = 5").value();
+ exec_state.frame(0).evaluate("set_a_to_5()");
+ exec_state.frame(0).evaluate("b = 5").value(); // no effect.
}
} catch (e) {
failure = e;
@@ -30,19 +31,22 @@ function listener(event, exec_state, event_data, data) {
Debug.setListener(listener);
function* generator(a, b) {
+ function set_a_to_5() { a = 5 }
var b = 3; // Shadows a parameter.
debugger;
yield a;
yield b;
debugger;
+ yield a;
return b;
}
var foo = generator(1, 2);
-assertEquals(4, foo.next().value);
+assertEquals(1, foo.next().value);
assertEquals(3, foo.next().value);
assertEquals(5, foo.next().value);
+assertEquals(3, foo.next().value);
assertNull(failure);
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/debug-prepare-step-in.js b/deps/v8/test/mjsunit/regress/debug-prepare-step-in.js
index 60b47f7a5d..93474da695 100644
--- a/deps/v8/test/mjsunit/regress/debug-prepare-step-in.js
+++ b/deps/v8/test/mjsunit/regress/debug-prepare-step-in.js
@@ -30,7 +30,7 @@
Debug = debug.Debug
function breakListener(event, exec_state, event_data, data) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
Debug.setListener(breakListener);
diff --git a/deps/v8/test/mjsunit/regress/property-descriptor-to-object.js b/deps/v8/test/mjsunit/regress/property-descriptor-to-object.js
new file mode 100644
index 0000000000..e47d5a5b4d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/property-descriptor-to-object.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = { prop: 1 };
+Object.prototype.value = 0;
+var d = Object.getOwnPropertyDescriptor(o, "prop");
+assertEquals(1, d.value);
diff --git a/deps/v8/test/mjsunit/regress/regress-109195.js b/deps/v8/test/mjsunit/regress/regress-109195.js
index 97538aa167..e4a2bbf229 100644
--- a/deps/v8/test/mjsunit/regress/regress-109195.js
+++ b/deps/v8/test/mjsunit/regress/regress-109195.js
@@ -32,7 +32,7 @@ function listener(event, exec_state, event_data, data) {
for (var i = 0, n = exec_state.frameCount(); i < n; i++) {
exec_state.frame().scopeCount(i);
}
- exec_state.prepareStep(Debug.StepAction.Continue, 1);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
}
Debug.setListener(listener);
diff --git a/deps/v8/test/mjsunit/regress/regress-1178598.js b/deps/v8/test/mjsunit/regress/regress-1178598.js
index 135c596872..2056a9d8da 100644
--- a/deps/v8/test/mjsunit/regress/regress-1178598.js
+++ b/deps/v8/test/mjsunit/regress/regress-1178598.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Regression test cases for issue 1178598.
// Make sure const-initialization doesn't conflict
diff --git a/deps/v8/test/mjsunit/regress/regress-1182832.js b/deps/v8/test/mjsunit/regress/regress-1182832.js
index 6c4fcb413e..4d214695b5 100644
--- a/deps/v8/test/mjsunit/regress/regress-1182832.js
+++ b/deps/v8/test/mjsunit/regress/regress-1182832.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
var caught = false;
try {
(function () {
diff --git a/deps/v8/test/mjsunit/regress/regress-1199637.js b/deps/v8/test/mjsunit/regress/regress-1199637.js
index f77eead315..34ab5144a2 100644
--- a/deps/v8/test/mjsunit/regress/regress-1199637.js
+++ b/deps/v8/test/mjsunit/regress/regress-1199637.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --es52-globals
+// Flags: --allow-natives-syntax --legacy-const
// Make sure that we can introduce global variables (using
// both var and const) that shadow even READ_ONLY variables
diff --git a/deps/v8/test/mjsunit/regress/regress-1201933.js b/deps/v8/test/mjsunit/regress/regress-1201933.js
index d4827e41cf..4a7c65a5c1 100644
--- a/deps/v8/test/mjsunit/regress/regress-1201933.js
+++ b/deps/v8/test/mjsunit/regress/regress-1201933.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Make sure this doesn't fail with an assertion
// failure during lazy compilation.
diff --git a/deps/v8/test/mjsunit/regress/regress-1207276.js b/deps/v8/test/mjsunit/regress/regress-1207276.js
index ce7efe9880..b5d01815e2 100644
--- a/deps/v8/test/mjsunit/regress/regress-1207276.js
+++ b/deps/v8/test/mjsunit/regress/regress-1207276.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
try {
const x=n,Glo0al;
} catch(e){}
diff --git a/deps/v8/test/mjsunit/regress/regress-1213575.js b/deps/v8/test/mjsunit/regress/regress-1213575.js
index 8c197bcf83..fc35b88103 100644
--- a/deps/v8/test/mjsunit/regress/regress-1213575.js
+++ b/deps/v8/test/mjsunit/regress/regress-1213575.js
@@ -28,6 +28,8 @@
// Make sure that a const definition does not try
// to pass 'the hole' to a defined setter.
+// Flags: --legacy-const
+
this.__defineSetter__('x', function(value) { assertTrue(value === 1); });
var caught = false;
diff --git a/deps/v8/test/mjsunit/regress/regress-1229.js b/deps/v8/test/mjsunit/regress/regress-1229.js
deleted file mode 100644
index 5447f3f7d2..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1229.js
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-// Check that %NewObjectFromBound works correctly when called from optimized
-// frame.
-function foo1(x, y, z) {
- assertEquals(1, x);
- assertEquals(2, y);
- assertEquals(3, z);
-}
-
-function foo2(x, y, z) {
- assertEquals(1, x);
- assertEquals(2, y);
- assertEquals(undefined, z);
-}
-
-function foo3(x, y, z) {
- assertEquals(1, x);
- assertEquals(2, y);
- assertEquals(3, z);
-}
-
-
-var foob1 = foo1.bind({}, 1);
-var foob2 = foo2.bind({}, 1);
-var foob3 = foo3.bind({}, 1);
-
-
-function f1(y, z) {
- return %NewObjectFromBound(foob1);
-}
-
-function f2(y, z) {
- return %NewObjectFromBound(foob2);
-}
-
-function f3(y, z) {
- return %NewObjectFromBound(foob3);
-}
-
-// Check that %NewObjectFromBound looks at correct frame for inlined function.
-function g1(z, y) {
- return f1(y, z); /* f should be inlined into g, note rotated arguments */
-}
-
-function g2(z, y, x) {
- return f2(y); /* f should be inlined into g, note argument count mismatch */
-}
-
-function g3(z, y, x) {
- return f3(x, y, z); /* f should be inlined into g, note argument count mismatch */
-}
-
-// Check that %NewObjectFromBound looks at correct frame for inlined function.
-function ff(x) { }
-function h1(z2, y2) {
- var local_z = z2 >> 1;
- ff(local_z);
- var local_y = y2 >> 1;
- ff(local_y);
- return f1(local_y, local_z); /* f should be inlined into h */
-}
-
-function h2(z2, y2, x2) {
- var local_z = z2 >> 1;
- ff(local_z);
- var local_y = y2 >> 1;
- ff(local_y);
- return f2(local_y); /* f should be inlined into h */
-}
-
-function h3(z2, y2, x2) {
- var local_z = z2 >> 1;
- ff(local_z);
- var local_y = y2 >> 1;
- ff(local_y);
- var local_x = x2 >> 1;
- ff(local_x);
- return f3(local_x, local_y, local_z); /* f should be inlined into h */
-}
-
-
-function invoke(f, args) {
- for (var i = 0; i < 5; i++) f.apply(this, args);
- %OptimizeFunctionOnNextCall(f);
- f.apply(this, args);
-}
-
-invoke(f1, [2, 3]);
-invoke(f2, [2]);
-invoke(f3, [2, 3, 4]);
-invoke(g1, [3, 2]);
-invoke(g2, [3, 2, 4]);
-invoke(g3, [4, 3, 2]);
-invoke(h1, [6, 4]);
-invoke(h2, [6, 4, 8]);
-invoke(h3, [8, 6, 4]);
-
-// Check that %_IsConstructCall returns correct value when inlined
-var NON_CONSTRUCT_MARKER = {};
-var CONSTRUCT_MARKER = {};
-function baz(x) {
- return (!%_IsConstructCall()) ? NON_CONSTRUCT_MARKER : CONSTRUCT_MARKER;
-}
-
-function bar(x, y, z) {
- var non_construct = baz(0); /* baz should be inlined */
- assertSame(non_construct, NON_CONSTRUCT_MARKER);
- var non_construct = baz(); /* baz should be inlined */
- assertSame(non_construct, NON_CONSTRUCT_MARKER);
- var non_construct = baz(0, 0); /* baz should be inlined */
- assertSame(non_construct, NON_CONSTRUCT_MARKER);
- var construct = new baz(0);
- assertSame(construct, CONSTRUCT_MARKER);
- var construct = new baz(0, 0);
- assertSame(construct, CONSTRUCT_MARKER);
-}
-
-invoke(bar, [1, 2, 3]);
diff --git a/deps/v8/test/mjsunit/regress/regress-147497.js b/deps/v8/test/mjsunit/regress/regress-147497.js
index 92e29d1258..f61d0c664d 100644
--- a/deps/v8/test/mjsunit/regress/regress-147497.js
+++ b/deps/v8/test/mjsunit/regress/regress-147497.js
@@ -31,7 +31,7 @@ Debug = debug.Debug;
function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break) {
- exec_state.prepareStep(Debug.StepAction.StepNext, 10);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
}
};
diff --git a/deps/v8/test/mjsunit/regress/regress-186.js b/deps/v8/test/mjsunit/regress/regress-186.js
index 0212855896..e10ed8f25d 100644
--- a/deps/v8/test/mjsunit/regress/regress-186.js
+++ b/deps/v8/test/mjsunit/regress/regress-186.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Make sure that eval can introduce a local variable called __proto__.
// See http://code.google.com/p/v8/issues/detail?id=186
diff --git a/deps/v8/test/mjsunit/regress/regress-2596.js b/deps/v8/test/mjsunit/regress/regress-2596.js
index e7006085a1..a1a0af3b00 100644
--- a/deps/v8/test/mjsunit/regress/regress-2596.js
+++ b/deps/v8/test/mjsunit/regress/regress-2596.js
@@ -27,9 +27,11 @@
// Flags: --allow-natives-syntax
-var bytes = new Uint8Array([
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]); // kHoleNaN
-var doubles = new Float64Array(bytes.buffer);
+var ab = new ArrayBuffer(8);
+var i_view = new Int32Array(ab);
+i_view[0] = %GetHoleNaNUpper()
+i_view[1] = %GetHoleNaNLower();
+var doubles = new Float64Array(ab); // kHoleNaN
assertTrue(isNaN(doubles[0]));
var prototype = [2.5, 2.5];
diff --git a/deps/v8/test/mjsunit/regress/regress-3138.js b/deps/v8/test/mjsunit/regress/regress-3138.js
index acb121d2bd..6f0430c855 100644
--- a/deps/v8/test/mjsunit/regress/regress-3138.js
+++ b/deps/v8/test/mjsunit/regress/regress-3138.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --legacy-const
+
(function f(){
assertEquals("function", typeof f);
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-325676.js b/deps/v8/test/mjsunit/regress/regress-325676.js
index 427bbc38dc..7aae0cdaab 100644
--- a/deps/v8/test/mjsunit/regress/regress-325676.js
+++ b/deps/v8/test/mjsunit/regress/regress-325676.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals
// If a function parameter is forced to be context allocated,
// debug evaluate need to resolve it to a context slot instead of
@@ -40,7 +40,7 @@ function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
try {
assertEquals(expected, exec_state.frame(0).evaluate('arg').value());
- exec_state.frame(0).evaluate('arg = "evaluated";');
+ exec_state.frame(0).evaluate('arg = "evaluated";'); // no effect
} catch (e) {
exception = e;
}
@@ -51,12 +51,12 @@ Debug.setListener(listener);
function f(arg) {
expected = arg;
debugger;
- assertEquals("evaluated", arg);
+ assertEquals(expected, arg);
arg = "value";
expected = arg;
debugger;
- assertEquals("evaluated", arg);
+ assertEquals(expected, arg);
// Forces arg to be context allocated even though a parameter.
function g() { arg; }
diff --git a/deps/v8/test/mjsunit/regress/regress-3641.js b/deps/v8/test/mjsunit/regress/regress-3641.js
new file mode 100644
index 0000000000..9aff8c8f7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3641.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// If a Promise's then method is overridden, that should be respected
+// even if the promise is already resolved. x's resolution function is
+// only called by Promise.resolve(); there shouldn't be a resolution
+// check before when calling x.then.
+
+
+// Async assert framework copied from mjsunit/es6/promises.js
+
+var asyncAssertsExpected = 0;
+
+function assertAsyncRan() { ++asyncAssertsExpected }
+
+function assertLater(f, name) {
+ assertFalse(f()); // should not be true synchronously
+ ++asyncAssertsExpected;
+ var iterations = 0;
+ function runAssertion() {
+ if (f()) {
+ print(name, "succeeded");
+ --asyncAssertsExpected;
+ } else if (iterations++ < 10) {
+ %EnqueueMicrotask(runAssertion);
+ } else {
+ %AbortJS(name + " FAILED!");
+ }
+ }
+ %EnqueueMicrotask(runAssertion);
+}
+
+function assertAsyncDone(iteration) {
+ var iteration = iteration || 0;
+ %EnqueueMicrotask(function() {
+ if (asyncAssertsExpected === 0)
+ assertAsync(true, "all")
+ else if (iteration > 10) // Shouldn't take more.
+ assertAsync(false, "all... " + asyncAssertsExpected)
+ else
+ assertAsyncDone(iteration + 1)
+ });
+}
+
+// End async assert framework
+
+var y;
+var x = Promise.resolve();
+x.then = () => { y = true; }
+Promise.resolve().then(() => x);
+assertLater(() => y === true, "y === true");
+
+assertAsyncDone();
diff --git a/deps/v8/test/mjsunit/regress/regress-380049.js b/deps/v8/test/mjsunit/regress/regress-380049.js
deleted file mode 100644
index a78626cc54..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-380049.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-function foo(a,b,c) { return arguments; }
-var f = foo(false, null, 40);
-assertThrows(function() { %ObjectFreeze(f); });
diff --git a/deps/v8/test/mjsunit/regress/regress-417709b.js b/deps/v8/test/mjsunit/regress/regress-417709b.js
index 76805435d3..4d9572e7d7 100644
--- a/deps/v8/test/mjsunit/regress/regress-417709b.js
+++ b/deps/v8/test/mjsunit/regress/regress-417709b.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --stack-size=100
+// Flags: --harmony-object-observe --stack-size=100
var a = [];
diff --git a/deps/v8/test/mjsunit/regress/regress-436896.js b/deps/v8/test/mjsunit/regress/regress-436896.js
index 0ea70523fe..fee44dee8c 100644
--- a/deps/v8/test/mjsunit/regress/regress-436896.js
+++ b/deps/v8/test/mjsunit/regress/regress-436896.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --legacy-const
function f(x) {
const x = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-4576.js b/deps/v8/test/mjsunit/regress/regress-4576.js
new file mode 100644
index 0000000000..c55c69580a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4576.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-sloppy --legacy-const
+
+// Should trigger a runtime error, not an early error.
+function f() {
+ const x;
+ var x;
+}
+assertThrows(f, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4640.js b/deps/v8/test/mjsunit/regress/regress-4640.js
new file mode 100644
index 0000000000..ed609bbb1f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4640.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Some surrounding cases which already worked, for good measure
+assertTrue(new Date('275760-10-14') == 'Invalid Date');
+assertTrue(new Date('275760-09-23') == 'Invalid Date');
+assertTrue(new Date('+275760-09-24') == 'Invalid Date');
+assertTrue(new Date('+275760-10-13') == 'Invalid Date');
+
+// The following cases used to throw "illegal access"
+assertTrue(new Date('275760-09-24') == 'Invalid Date');
+assertTrue(new Date('275760-10-13') == 'Invalid Date');
+assertTrue(new Date('+275760-10-13 ') == 'Invalid Date');
+
+// However, dates within the range or valid
+assertTrue(new Date('100000-10-13') != 'Invalid Date');
+assertTrue(new Date('+100000-10-13') != 'Invalid Date');
+assertTrue(new Date('+100000-10-13 ') != 'Invalid Date');
diff --git a/deps/v8/test/mjsunit/regress/regress-4665.js b/deps/v8/test/mjsunit/regress/regress-4665.js
new file mode 100644
index 0000000000..9d7307acc7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4665.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noharmony-species
+
+// First test case
+
+function FirstBuffer () {}
+FirstBuffer.prototype.__proto__ = Uint8Array.prototype
+FirstBuffer.__proto__ = Uint8Array
+
+var buf = new Uint8Array(10)
+buf.__proto__ = FirstBuffer.prototype
+
+var buf2 = buf.subarray(2)
+assertEquals(8, buf2.length);
+
+// Second test case
+
+function SecondBuffer (arg) {
+ var arr = new Uint8Array(arg)
+ arr.__proto__ = SecondBuffer.prototype
+ return arr
+}
+SecondBuffer.prototype.__proto__ = Uint8Array.prototype
+SecondBuffer.__proto__ = Uint8Array
+
+var buf3 = new SecondBuffer(10)
+
+var buf4 = buf3.subarray(2)
+
+assertEquals(8, buf4.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-4693.js b/deps/v8/test/mjsunit/regress/regress-4693.js
new file mode 100644
index 0000000000..ed832e65da
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4693.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-sloppy-function
+
+// In sloppy mode we allow function redeclarations within blocks for webcompat.
+(function() {
+ assertEquals(undefined, f); // Annex B
+ if (true) {
+ assertEquals(2, f());
+ function f() { return 1 }
+ assertEquals(2, f());
+ function f() { return 2 }
+ assertEquals(2, f());
+ }
+ assertEquals(2, f()); // Annex B
+})();
+
+// Should still fail in strict mode
+assertThrows(`
+ (function() {
+ "use strict";
+ if (true) {
+ function f() { return 1 }
+ function f() { return 2 }
+ }
+ })();
+`, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-544991.js b/deps/v8/test/mjsunit/regress/regress-544991.js
index dc09fae6a4..911d8acc89 100644
--- a/deps/v8/test/mjsunit/regress/regress-544991.js
+++ b/deps/v8/test/mjsunit/regress/regress-544991.js
@@ -2,14 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-species
+
+'use strict';
+
var typedArray = new Int8Array(1);
var saved;
var called;
-typedArray.constructor = function(x) { called = true; saved = x };
-typedArray.constructor.prototype = Int8Array.prototype;
+class TypedArraySubclass extends Int8Array {
+ constructor(x) {
+ super(x);
+ called = true;
+ saved = x;
+ }
+}
+typedArray.constructor = TypedArraySubclass
typedArray.map(function(){});
-// To meet the spec, constructor shouldn't be called directly, but
-// if it is called for now, the argument should be an Array
-assertTrue(called); // Will fail later; when so, delete this test
-assertEquals("Array", saved.constructor.name);
+assertTrue(called);
+assertEquals(saved, 1);
diff --git a/deps/v8/test/mjsunit/regress/regress-552302.js b/deps/v8/test/mjsunit/regress/regress-552302.js
index d4a1f2448e..b9f712a619 100644
--- a/deps/v8/test/mjsunit/regress/regress-552302.js
+++ b/deps/v8/test/mjsunit/regress/regress-552302.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring --allow-natives-syntax
+// Flags: --harmony-destructuring-bind --allow-natives-syntax
assertThrows('var %OptimizeFunctionOnNextCall()', SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-554865.js b/deps/v8/test/mjsunit/regress/regress-554865.js
new file mode 100644
index 0000000000..9b66d79b35
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-554865.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-default-parameters
+
+(function() {
+ var x = {};
+ ((y = [42]) => assertEquals(42, y[0]))();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-556543.js b/deps/v8/test/mjsunit/regress/regress-556543.js
new file mode 100644
index 0000000000..9e9bedd6f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-556543.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ for (var __v_2 = 0; __v_2 < __v_5; ++__v_2) {
+ for (var __v_5 = 0; __v_3 < 1; ++__v_8) {
+ if (true || 0 > -6) continue;
+ for (var __v_3 = 0; __v_3 < 1; ++__v_3) {
+ }
+ }
+ }
+}
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-568765.js b/deps/v8/test/mjsunit/regress/regress-568765.js
new file mode 100644
index 0000000000..9efd8599a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-568765.js
@@ -0,0 +1,93 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --gc-interval=216
+// Flags: --nonative-context-specialization
+
+function PrettyPrint() { return ""; }
+function fail() { }
+assertSame = function assertSame() { if (found === expected) { if (1 / found) return; } else if ((expected !== expected) && (found !== found)) { return; }; }; assertEquals = function assertEquals(expected, found, name_opt) { if ( expected) { fail(PrettyPrint()); } };
+assertTrue = function assertTrue() { assertEquals(); };
+assertThrows = function assertThrows(code, type_opt, cause_opt) { var threwException = true; try { if (typeof code == 'function') { code(); } else {; } threwException = false; } catch (e) { if (typeof type_opt == 'function') {; } if (arguments.length >= 3) {; } return; } };
+assertInstanceof = function assertInstanceof() { if (obj instanceof type) { var actualTypeName = null; var actualConstructor = Object.getPrototypeOf().constructor; if (typeof actualConstructor == "function") {; }; } };
+function modifyPropertyOrValue() { var names; try {; } catch(e) {; return; } if(!names) return; name = names[rand_value % names.length]; if (isNaN()); }
+function nop() {}
+var __v_5 = {};
+var __v_12 = {};
+var __v_13 = {};
+var __v_16 = {};
+function __f_0() {
+}
+(function (){
+ function __f_6() {
+ }
+ a = __f_6();
+ b = __f_6();
+ name = "Array";
+})();
+(function (){
+ function __f_1() {
+ assertTrue();
+ }
+ __f_1();
+})();
+__v_10 = {
+}
+__v_11 = new Object();
+tailee1 = function() {
+ "use strict";
+ if (__v_12-- == 0) {
+ }
+ return nop();
+};
+%OptimizeFunctionOnNextCall(tailee1);
+assertEquals(__v_10, tailee1.call());
+__v_14 = 100000;
+gc();
+tailee2 = function() {
+ "use strict";
+ __v_14 = ((__v_14 | 0) - 1) | 0;
+ if ((__v_14 | 0) === 0) {
+ }
+};
+%OptimizeFunctionOnNextCall(tailee2);
+assertEquals(__v_11, tailee2.call());
+__v_13 = 999999;
+tailee3 = function() {
+ "use strict";
+ if (__v_13-- == 0) {
+ }
+};
+%OptimizeFunctionOnNextCall(tailee3);
+assertEquals(__v_9, tailee3.call(__v_11, __v_9));
+tailee4 = function(px) {
+ return nop(tailee4, this, px, undefined);
+};
+%OptimizeFunctionOnNextCall(tailee4);
+assertThrows(function() { tailee4.call(); });
+tailee5 = function() {
+ return nop();
+};
+%OptimizeFunctionOnNextCall(tailee5);
+assertThrows(function() { tailee5.call(); });
+tailee6 = function() {
+}
+tailee7 = function( py, pz, pa, pb, pc) {
+ return nop();
+};
+%OptimizeFunctionOnNextCall(tailee7);
+ tailee7.call();
+
+(function() {
+ Number.prototype[0] = "a";
+ Number.prototype[1] = "b";
+ Object.defineProperty(Number.prototype, 2, {
+ get: function() {
+ }
+ });
+ Number.prototype.length = 3;
+Array.prototype.includes.call(5);
+})();
+var __v_9 = -8;
+var __v_20 = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-572589.js b/deps/v8/test/mjsunit/regress/regress-572589.js
new file mode 100644
index 0000000000..36092a2bf4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-572589.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-lazy
+// Flags: --harmony-destructuring-bind
+
+"use strict";
+eval();
+var f = ({x}) => { };
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-493568.js b/deps/v8/test/mjsunit/regress/regress-575364.js
index 081f4937fe..f1dc49e073 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-493568.js
+++ b/deps/v8/test/mjsunit/regress/regress-575364.js
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-proxies
+// Flags: --expose-wasm
-var p = Proxy.create({ fix: function() { return {}; } });
+function f() {
+ "use asm";
-var obj = {};
-obj.x = p;
-
-Object.preventExtensions(p);
+}
+assertFalse(_WASMEXP_ == undefined);
+assertThrows(function() { _WASMEXP_.asmCompileRun(f.toString()); });
diff --git a/deps/v8/test/mjsunit/regress/regress-578775.js b/deps/v8/test/mjsunit/regress/regress-578775.js
new file mode 100644
index 0000000000..afeaf3d914
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-578775.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// https://code.google.com/p/chromium/issues/detail?id=578775
+
+var __v_9 = {};
+for (var __v_0 = 0; __v_0 < 1000; __v_0++) {
+}
+__v_2 = { __v_2: 1 };
+__v_12 = new Proxy({}, {});
+function f() {
+ var __v_10 = new Proxy({}, __v_2);
+ __v_9.__proto__ = __v_10;
+ __v_2.getPrototypeOf = function () { return __v_9 };
+ Object.prototype.isPrototypeOf.call(__v_0, __v_10);
+};
+assertThrows(f, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-583260.js b/deps/v8/test/mjsunit/regress/regress-583260.js
new file mode 100644
index 0000000000..b0c01f6e9d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-583260.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__v_1 = {
+ has() { return true }
+};
+__v_2 = new Proxy({}, __v_1);
+function __f_5(object) {
+ with (object) { return delete __v_3; }
+}
+ __f_5(__v_2)
diff --git a/deps/v8/test/mjsunit/regress/regress-641.js b/deps/v8/test/mjsunit/regress/regress-641.js
index 957caa8ccd..c29b2af242 100644
--- a/deps/v8/test/mjsunit/regress/regress-641.js
+++ b/deps/v8/test/mjsunit/regress/regress-641.js
@@ -27,6 +27,8 @@
// Regression test for http://code.google.com/p/v8/issues/detail?id=641.
+// Flags: --legacy-const
+
function f(){
while (window + 1) {
const window=[,];
diff --git a/deps/v8/test/mjsunit/regress/regress-70066.js b/deps/v8/test/mjsunit/regress/regress-70066.js
index 01c2f4f3a3..8787b76646 100644
--- a/deps/v8/test/mjsunit/regress/regress-70066.js
+++ b/deps/v8/test/mjsunit/regress/regress-70066.js
@@ -120,7 +120,7 @@ function test8() {
}
assertEquals(true, test8(), "test8");
-assertThrows("x", "test8"); // Global x should be deleted.
+assertThrows("x"); // Global x should be deleted.
// Delete on a property that is not found anywhere.
@@ -128,7 +128,7 @@ function test9() {
with ({}) { return delete x; }
}
-assertThrows("x", "test9"); // Make sure it's not there.
+assertThrows("x"); // Make sure it's not there.
assertEquals(true, test9(), "test9");
diff --git a/deps/v8/test/mjsunit/regress/regress-799761.js b/deps/v8/test/mjsunit/regress/regress-799761.js
index d3be1bdadf..7d09da56fa 100644
--- a/deps/v8/test/mjsunit/regress/regress-799761.js
+++ b/deps/v8/test/mjsunit/regress/regress-799761.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// const variables should be read-only
const c = 42;
c = 87;
diff --git a/deps/v8/test/mjsunit/regress/regress-88591.js b/deps/v8/test/mjsunit/regress/regress-88591.js
index e42570a95b..e7f410d7b0 100644
--- a/deps/v8/test/mjsunit/regress/regress-88591.js
+++ b/deps/v8/test/mjsunit/regress/regress-88591.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
// Regression test for a crash. A data property in the global object's
// prototype shadowed by a setter in the global object's prototype's
// prototype would crash or assert when seen by Runtime_DeclareContextSlot.
diff --git a/deps/v8/test/mjsunit/regress/regress-91120.js b/deps/v8/test/mjsunit/regress/regress-91120.js
index 117acac6cd..73f545648a 100644
--- a/deps/v8/test/mjsunit/regress/regress-91120.js
+++ b/deps/v8/test/mjsunit/regress/regress-91120.js
@@ -25,24 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// We intend that the function declaration for g inside catch is hoisted to
-// function f's scope. Invoke it before try/catch, in the try block, in the
-// catch block, after try/catch, and outside f, and verify that it has
-// access to the proper binding of x.
+// With ES2015 function hoisting semantics, functions are only "hoisted" out
+// of blocks by writing their values into var-scoped declarations. Therefore,
+// they access the catch binding when it syntactically appears so.
+// This is a potentially breaking change vs the old semantics, which would
+// return 'function' from g() everywhere.
+
var x = 'global';
function f() {
var x = 'function';
- assertEquals('function', g());
+ assertEquals(undefined, g);
try {
- assertEquals('function', g());
+ assertEquals(undefined, g);
throw 'catch';
} catch (x) {
function g() { return x; }
- assertEquals('function', g());
+ assertEquals('catch', g());
}
- assertEquals('function', g());
+ assertEquals('catch', g());
return g;
}
-assertEquals('function', f()());
+assertEquals('catch', f()());
diff --git a/deps/v8/test/mjsunit/regress/regress-995.js b/deps/v8/test/mjsunit/regress/regress-995.js
index 6f3dac1330..3f99179104 100644
--- a/deps/v8/test/mjsunit/regress/regress-995.js
+++ b/deps/v8/test/mjsunit/regress/regress-995.js
@@ -33,7 +33,7 @@
// HHasInstance.
function f(value) {
- if (%_IsSpecObject(value)) {
+ if (%_IsJSReceiver(value)) {
if ((%_IsArray(value))) assertTrue(false);
}
}
diff --git a/deps/v8/test/mjsunit/regress/regress-conditional-position.js b/deps/v8/test/mjsunit/regress/regress-conditional-position.js
index ae5a3acb58..7f9d3034ff 100644
--- a/deps/v8/test/mjsunit/regress/regress-conditional-position.js
+++ b/deps/v8/test/mjsunit/regress/regress-conditional-position.js
@@ -86,9 +86,9 @@ test(test1, 58);
test(test2, 65);
test(test3, 72);
-eval(test1.toString() + "//@ sourceUrl=foo");
-eval(test2.toString() + "//@ sourceUrl=foo");
-eval(test3.toString() + "//@ sourceUrl=foo");
+eval(test1.toString() + "//# sourceUrl=foo");
+eval(test2.toString() + "//# sourceUrl=foo");
+eval(test3.toString() + "//# sourceUrl=foo");
test(test1, 2);
test(test2, 3);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
index 20285f614d..0a4153f04e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
@@ -15,20 +15,20 @@ function test(expectation, f) {
/*
(function() {
-1 + reference_error //@ sourceURL=evaltest
+1 + reference_error //# sourceURL=evaltest
})
*/
test("2:5", new Function(
- '1 + reference_error //@ sourceURL=evaltest'));
+ '1 + reference_error //# sourceURL=evaltest'));
/*
(function(x
/\**\/) {
- 1 + reference_error //@ sourceURL=evaltest
+ 1 + reference_error //# sourceURL=evaltest
})
*/
test("4:6", new Function(
- 'x', '\n 1 + reference_error //@ sourceURL=evaltest'));
+ 'x', '\n 1 + reference_error //# sourceURL=evaltest'));
/*
(function(x
@@ -36,24 +36,24 @@ test("4:6", new Function(
,y
/\**\/) {
- 1 + reference_error //@ sourceURL=evaltest
+ 1 + reference_error //# sourceURL=evaltest
})
*/
test("7:6", new Function(
- 'x\n\n', "z//\n", "y", '\n 1 + reference_error //@ sourceURL=evaltest'));
+ 'x\n\n', "z//\n", "y", '\n 1 + reference_error //# sourceURL=evaltest'));
/*
(function(x/\*,z//
,y*\/
/\**\/) {
-1 + reference_error //@ sourceURL=evaltest
+1 + reference_error //# sourceURL=evaltest
})
*/
test("4:5", new Function(
- 'x/*', "z//\n", "y*/", '1 + reference_error //@ sourceURL=evaltest'));
+ 'x/*', "z//\n", "y*/", '1 + reference_error //# sourceURL=evaltest'));
/*
(function () {
- 1 + reference_error //@ sourceURL=evaltest5
+ 1 + reference_error //# sourceURL=evaltest5
})
*/
test("2:6", eval(
- '(function () {\n 1 + reference_error //@ sourceURL=evaltest\n})'));
+ '(function () {\n 1 + reference_error //# sourceURL=evaltest\n})'));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-119800.js b/deps/v8/test/mjsunit/regress/regress-crbug-119800.js
index 1641cac686..3946fbb71d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-119800.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-119800.js
@@ -19,7 +19,7 @@ function listener(event, exec_state, event_data, data) {
try {
Debug.debuggerFlags().breakPointsActive.setValue(false);
breaks.push(exec_state.frame().sourceLineText().trimLeft());
- exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
} catch (e) {
exception = e;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js b/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
index d896eadcc4..6e75729c18 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --debug-eval-readonly-locals
Debug = debug.Debug;
@@ -14,11 +14,11 @@ function listener(event, exec_state, event_data, data) {
try {
if (step == 0) {
assertEquals("error", exec_state.frame(0).evaluate("e").value());
- exec_state.frame(0).evaluate("e = 'foo'");
- exec_state.frame(0).evaluate("x = 'modified'");
+ exec_state.frame(0).evaluate("write_0('foo')");
+ exec_state.frame(0).evaluate("write_1('modified')");
} else {
assertEquals("argument", exec_state.frame(0).evaluate("e").value());
- exec_state.frame(0).evaluate("e = 'bar'");
+ exec_state.frame(0).evaluate("write_2('bar')");
}
step++;
} catch (e) {
@@ -33,9 +33,15 @@ function f(e, x) {
try {
throw "error";
} catch(e) {
+ // In ES2015 hoisting semantics, 'x' binds to the argument
+ // and 'e' binds to the exception.
+ function write_0(v) { e = v }
+ function write_1(v) { x = v }
debugger;
- assertEquals("foo", e);
+ assertEquals("foo", e); // overwritten by the debugger
}
+ assertEquals("argument", e); // debugger did not overwrite
+ function write_2(v) { e = v }
debugger;
assertEquals("bar", e);
assertEquals("modified", x);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-352586.js b/deps/v8/test/mjsunit/regress/regress-crbug-352586.js
index 2210480990..18b5390c58 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-352586.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-352586.js
@@ -12,4 +12,4 @@ function getter() {
a.__proto__ = Error("");
a.__defineGetter__('message', getter);
-a.message;
+assertThrows(()=>a.message, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-364374.js b/deps/v8/test/mjsunit/regress/regress-crbug-364374.js
new file mode 100644
index 0000000000..d8ae91fffe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-364374.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Intl) {
+ // chromium:364374
+
+ // Locations with 2 underscores are accepted and normalized.
+ // 'of' and 'es' are always lowercased.
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'eUrope/isLe_OF_man'})
+ assertEquals('Europe/Isle_of_Man', df.resolvedOptions().timeZone);
+
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'africa/Dar_eS_salaam'})
+ assertEquals('Africa/Dar_es_Salaam', df.resolvedOptions().timeZone);
+
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/port_of_spain'})
+ assertEquals('America/Port_of_Spain', df.resolvedOptions().timeZone);
+
+ // Zone ids with more than 2 parts are accepted and normalized.
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/north_Dakota/new_salem'})
+ assertEquals('America/North_Dakota/New_Salem', df.resolvedOptions().timeZone);
+
+ // 3-part zone IDs are accepted and normalized.
+ // Two Buenose Aires aliases are identical.
+ df1 = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/aRgentina/buenos_aIres'})
+ df2 = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/Argentina/Buenos_Aires'})
+ assertEquals(df1.resolvedOptions().timeZone, df2.resolvedOptions().timeZone);
+
+ df2 = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/Buenos_Aires'})
+ assertEquals(df1.resolvedOptions().timeZone, df2.resolvedOptions().timeZone);
+
+ df1 = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/Indiana/Indianapolis'})
+ df2 = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/Indianapolis'})
+ assertEquals(df1.resolvedOptions().timeZone, df2.resolvedOptions().timeZone);
+
+ // ICU does not recognize East-Indiana. Add later when it does.
+ // df2 = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/East-Indiana'})
+ // assertEquals(df1.resolvedOptions().timeZone, df2.resolvedOptions().timeZone);
+
+
+ // Zone IDs with hyphens. 'au' has to be in lowercase.
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'America/port-aU-pRince'})
+ assertEquals('America/Port-au-Prince', df.resolvedOptions().timeZone);
+
+ // Accepts Ho_Chi_Minh and treats it as identical to Saigon
+ df1 = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Ho_Chi_Minh'})
+ df2 = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Saigon'})
+ assertEquals(df1.resolvedOptions().timeZone, df2.resolvedOptions().timeZone);
+
+ // Throws for invalid timezone ids.
+ assertThrows(() => Intl.DateTimeFormat(undefined, {timeZone: 'Europe/_Paris'}));
+ assertThrows(() => Intl.DateTimeFormat(undefined, {timeZone: 'America/New__York'}));
+ assertThrows(() => Intl.DateTimeFormat(undefined, {timeZone: 'America//New_York'}));
+ assertThrows(() => Intl.DateTimeFormat(undefined, {timeZone: 'America/New_York_'}));
+ assertThrows(() => Intl.DateTimeFormat(undefined, {timeZone: 'America/New_Y0rk'}));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-380671.js b/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
new file mode 100644
index 0000000000..891215e301
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-380671.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --mock-arraybuffer-allocator
+
+var buffer = new ArrayBuffer(0xc0000000);
+assertEquals(0xc0000000, buffer.byteLength);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-390925.js b/deps/v8/test/mjsunit/regress/regress-crbug-390925.js
index 24873df17b..c4d98adb3e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-390925.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-390925.js
@@ -5,5 +5,6 @@
// Flags: --allow-natives-syntax
var a = new Array();
+var b = new Array();
Object.freeze(a);
-assertThrows(function() { %LiveEditCheckAndDropActivations(a, true); });
+assertThrows(function() { %LiveEditCheckAndDropActivations(a, b, true); });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js b/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
index 9786313227..67ea19158e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
@@ -10,7 +10,7 @@ Debug.setBreakOnException();
try {
try {
- %DebugPushPromise(new Promise(function() {}), function() {}, function() {});
+ %DebugPushPromise(new Promise(function() {}), function() {});
} catch (e) {
}
throw new Error();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405517.js b/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
index 36c3f4f7f7..578e76aded 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-405517.js
@@ -6,7 +6,7 @@
function f() {
var e = [0];
- %PreventExtensions(e);
+ Object.preventExtensions(e);
for (var i = 0; i < 4; i++) e.shift();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405922.js b/deps/v8/test/mjsunit/regress/regress-crbug-405922.js
index a38ac86912..31b432de19 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-405922.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-405922.js
@@ -5,13 +5,15 @@
// Flags: --allow-natives-syntax --expose-debug-as debug
Debug = debug.Debug
+var exception = null;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
}
} catch (e) {
+ exception = e;
}
}
@@ -25,3 +27,4 @@ debugger;
f(2);
Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-409614.js b/deps/v8/test/mjsunit/regress/regress-crbug-409614.js
index 7b27404819..1a9a77746a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-409614.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-409614.js
@@ -10,7 +10,7 @@ var error_count = 0;
function f() {
return 0; // Break
-}
+} // Break
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
@@ -18,7 +18,7 @@ function listener(event, exec_state, event_data, data) {
if (exec_state.frame(0).sourceLineText().indexOf("Break") <0) {
error_count++;
}
- exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ exec_state.prepareStep(Debug.StepAction.StepIn);
f(); // We should not break in this call of f().
} catch (e) {
print(e + e.stack);
@@ -29,7 +29,7 @@ function listener(event, exec_state, event_data, data) {
Debug.setListener(listener);
debugger; // Break
-f();
+f(); // Break
Debug.setListener(null); // Break
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-412319.js b/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
index c597b0dd87..158fc59ae6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-412319.js
@@ -6,7 +6,7 @@
function __f_6() {
var __v_7 = [0];
- %PreventExtensions(__v_7);
+ Object.preventExtensions(__v_7);
for (var __v_6 = -2; __v_6 < 19; __v_6++) __v_7.shift();
__f_7(__v_7);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-422858.js b/deps/v8/test/mjsunit/regress/regress-crbug-422858.js
new file mode 100644
index 0000000000..ba75fc01a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-422858.js
@@ -0,0 +1,23 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var date = new Date("2016/01/02 10:00 GMT-8")
+assertEquals(0, date.getMinutes());
+assertEquals(18, date.getUTCHours());
+
+date = new Date("2016/01/02 10:00 GMT-12")
+assertEquals(0, date.getMinutes());
+assertEquals(22, date.getUTCHours());
+
+date = new Date("2016/01/02 10:00 GMT-123")
+assertEquals(23, date.getMinutes());
+assertEquals(11, date.getUTCHours());
+
+date = new Date("2016/01/02 10:00 GMT-0856")
+assertEquals(56, date.getMinutes());
+assertEquals(18, date.getUTCHours());
+
+date = new Date("2016/01/02 10:00 GMT-08000")
+assertEquals(NaN, date.getMinutes());
+assertEquals(NaN, date.getUTCHours());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-435825.js b/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
index e10b812d4d..959535bcb5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
@@ -7,5 +7,5 @@ Error.prepareStackTrace = function (a,b) { return b; };
try {
/(invalid regexp/;
} catch (e) {
- e.stack[0].getThis().toString();
+ assertEquals("[object global]", e.stack[0].getThis().toString());
}
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js b/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
index 770c8073cf..770c8073cf 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-467180.js b/deps/v8/test/mjsunit/regress/regress-crbug-467180.js
index fcf5c30294..a07c6a6466 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-467180.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-467180.js
@@ -20,7 +20,7 @@ function listener(event, exec_state, event_data, data) {
var label = +exec_state.frame(0).sourceLineText().substr(-1);
log.push(label);
if (label == 2) log.push(exec_state.frame(0).evaluate("i").value());
- exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+ exec_state.prepareStep(Debug.StepAction.StepNext);
} catch (e) {
exception = e;
print("Caught something. " + e + " " + e.stack);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-481896.js b/deps/v8/test/mjsunit/regress/regress-crbug-481896.js
index 0d5c650f1e..1ef0e5010a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-481896.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-481896.js
@@ -29,7 +29,7 @@ function install() {
eval("this.dynamic = function dynamic() { \n" +
" print(\"> dynamic\"); // Break\n" +
"}\n" +
- "//@ sourceURL=dynamicScript");
+ "//# sourceURL=dynamicScript");
}
install();
@@ -53,4 +53,4 @@ Debug.setListener(null);
assertNull(exception);
assertEquals(2, break_count);
-//@ sourceURL=staticScript
+//# sourceURL=staticScript
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-487322.js b/deps/v8/test/mjsunit/regress/regress-crbug-487322.js
new file mode 100644
index 0000000000..6338cf4da8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-487322.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Intl) {
+ // Normalizes Kat{h,}mandu (chromium:487322)
+ // According to the IANA timezone db, Kathmandu is the current canonical
+ // name, but ICU got it backward. To make this test robust against a future
+ // ICU change ( http://bugs.icu-project.org/trac/ticket/12044 ),
+ // just check that Kat(h)mandu is resolved identically.
+ df1 = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Katmandu'})
+ df2 = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Kathmandu'})
+ assertEquals(df1.resolvedOptions().timeZone, df2.resolvedOptions().timeZone);
+
+ // Normalizes Ulan_Bator to Ulaanbaatar. Unlike Kat(h)mandu, ICU got this
+ // right so that we make sure that Ulan_Bator is resolved to Ulaanbaatar.
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Ulaanbaatar'})
+ assertEquals('Asia/Ulaanbaatar', df.resolvedOptions().timeZone);
+
+ df = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Ulan_Bator'})
+ assertEquals('Asia/Ulaanbaatar', df.resolvedOptions().timeZone);
+
+ // Throws for unsupported time zones.
+ assertThrows(() => Intl.DateTimeFormat(undefined, {timeZone: 'Aurope/Paris'}));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-505907.js b/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
index 761261eca0..c8d4bac9be 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-505907.js
@@ -5,7 +5,9 @@
// Flags: --harmony-proxies
try {
- var p = Proxy.create({ getPropertyDescriptor: function() { return [] } });
+ var p = new Proxy({}, {
+ getPropertyDescriptor: function() { return [] }
+ });
var o = Object.create(p);
with (o) { unresolved_name() }
} catch(e) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-506956.js b/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
index 5862ddb296..73eb2f2220 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-506956.js
@@ -5,7 +5,9 @@
// Flags: --harmony-proxies
try {
- var p = Proxy.create({ getPropertyDescriptor: function() { throw "boom"; } });
+ var p = new Proxy({}, {
+ getPropertyDescriptor: function() { throw "boom"; }
+ });
var o = Object.create(p);
with (o) { delete unresolved_name; }
} catch(e) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-517592.js b/deps/v8/test/mjsunit/regress/regress-crbug-517592.js
index 760d892439..5d61db95dc 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-517592.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-517592.js
@@ -8,7 +8,7 @@ var source =
"var foo = function foo() {\n" +
" return 1;\n" +
"}\n" +
- "//@ sourceURL=test";
+ "//# sourceURL=test";
Debug = debug.Debug;
Debug.setListener(listener);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523308.js b/deps/v8/test/mjsunit/regress/regress-crbug-523308.js
index 5715762ed6..36114791b9 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-523308.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523308.js
@@ -6,5 +6,4 @@ var error;
try { reference_error(); } catch (e) { error = e; }
toString = error.toString;
error.__proto__ = [];
-assertEquals("ReferenceError: reference_error is not defined",
- toString.call(error));
+assertEquals("Error: reference_error is not defined", toString.call(error));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-551287.js b/deps/v8/test/mjsunit/regress/regress-crbug-551287.js
new file mode 100644
index 0000000000..a85deef4bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-551287.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() { do { } while (true); }
+
+function boom(x) {
+ switch(x) {
+ case 1:
+ case f(): return;
+ }
+}
+
+%OptimizeFunctionOnNextCall(boom)
+boom(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-554831.js b/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
new file mode 100644
index 0000000000..f7343e08b9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ var key = "s";
+ function f(object) { return object[key]; };
+ f("");
+ f("");
+ %OptimizeFunctionOnNextCall(f);
+ f("");
+ assertOptimized(f);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-557807.js b/deps/v8/test/mjsunit/regress/regress-crbug-557807.js
new file mode 100644
index 0000000000..a96bc99003
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-557807.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar() { return { __proto__: this }; }
+function foo(a) { a[0] = 0.3; }
+foo(bar());
+%OptimizeFunctionOnNextCall(foo);
+foo(bar());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-561973.js b/deps/v8/test/mjsunit/regress/regress-crbug-561973.js
new file mode 100644
index 0000000000..51b6e611e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-561973.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Date.parse('Sat, 01 Jan 100 08:00:00 UT-59011430400000');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-563929.js b/deps/v8/test/mjsunit/regress/regress-crbug-563929.js
new file mode 100644
index 0000000000..a9a112dd8d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-563929.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x = 0;
+function a() {
+ eval("");
+ return (function() {
+ eval("");
+ return (function() {
+ eval("");
+ return (function() {
+ eval("");
+ return (function() {
+ eval("");
+ return (function() {
+ eval("");
+ return (function() {
+ eval("");
+ return (function() {
+ eval("");
+ return x;
+ })();
+ }) ();
+ })();
+ })();
+ })();
+ })();
+ })();
+}
+assertEquals(a(), 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-565917.js b/deps/v8/test/mjsunit/regress/regress-crbug-565917.js
new file mode 100644
index 0000000000..2cccedf9b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-565917.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+} catch(e) {; }
+new ArrayBuffer();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js
new file mode 100644
index 0000000000..ed269a9d7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var expected = ["debugger;", "var x = y;", "debugger;", "var x = y;"];
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ log.push(exec_state.frame(0).sourceLineText().trimLeft());
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+function f() {
+ var a = 1;
+ debugger;
+ var x = y;
+ print(x);
+}
+
+function call_f_with_deeper_stack() {
+ (() => () => () => f())()()();
+}
+
+Promise.resolve().then(f).catch(call_f_with_deeper_stack);
+
+// Schedule microtask to check against expectation at the end.
+function testDone(iteration) {
+ function checkResult() {
+ try {
+ assertTrue(iteration < 10);
+ if (expected.length == log.length) {
+ assertEquals(expected, log);
+ } else {
+ testDone(iteration + 1);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+ }
+
+ %EnqueueMicrotask(checkResult);
+}
+
+testDone(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js
new file mode 100644
index 0000000000..64dd6777c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-568477-2.js
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var expected = ["debugger;",
+ "var x = y;",
+ "new Promise(f).catch(call_f_with_deeper_stack);",
+ "var a = 1;", "", "var a = 1;",
+ "debugger;",
+ "var x = y;"];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(expected.shift(), exec_state.frame(0).sourceLineText().trimLeft());
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+function f() {
+ var a = 1;
+ debugger;
+ var x = y;
+ print(x);
+}
+
+function call_f_with_deeper_stack() {
+ (() => () => () => f())()()();
+}
+
+new Promise(f).catch(call_f_with_deeper_stack);
+var a = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js
new file mode 100644
index 0000000000..812db2b8f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var expected = ["debugger;", "var x = y;", "debugger;", "var x = y;"];
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ log.push(exec_state.frame(0).sourceLineText().trimLeft());
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+function f() {
+ var a = 1;
+ debugger;
+ var x = y;
+ print(x);
+}
+
+function call_f_with_deeper_stack() {
+ (() => () => () => f())()()();
+}
+
+var p = Promise.resolve();
+p.then(f);
+p.then(call_f_with_deeper_stack);
+
+// Schedule microtask to check against expectation at the end.
+function testDone(iteration) {
+ function checkResult() {
+ try {
+ assertTrue(iteration < 10);
+ if (expected.length == log.length) {
+ assertEquals(expected, log);
+ } else {
+ testDone(iteration + 1);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+ }
+
+ %EnqueueMicrotask(checkResult);
+}
+
+testDone(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js b/deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js
new file mode 100644
index 0000000000..f0e3e901db
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var expected =
+ ["debugger;", "var x = y;", "var b = 2;", "Debug.setListener(null);"];
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ log.push(exec_state.frame(0).sourceLineText().trimLeft());
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+function f() {
+ var a = 1;
+ debugger;
+ var x = y;
+ print(x);
+}
+
+try {
+ %Call(f, {});
+} catch (e) {
+ var b = 2;
+}
+
+Debug.setListener(null);
+
+assertEquals(expected, log);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568525.js b/deps/v8/test/mjsunit/regress/regress-crbug-568525.js
new file mode 100644
index 0000000000..c916bfe649
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-568525.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = /a/;
+a[4] = 1.5;
+for (var x in a) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-569534.js b/deps/v8/test/mjsunit/regress/regress-crbug-569534.js
new file mode 100644
index 0000000000..e1419ea8b0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-569534.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var array = [,0.5];
+array.length = 0;
+for (var i in array) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-570241.js b/deps/v8/test/mjsunit/regress/regress-crbug-570241.js
new file mode 100644
index 0000000000..4fecba57b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-570241.js
@@ -0,0 +1,7 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-lookbehind
+
+assertTrue(/(?<=12345123451234512345)/.test("12345123451234512345"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-570651.js b/deps/v8/test/mjsunit/regress/regress-crbug-570651.js
new file mode 100644
index 0000000000..9860b428b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-570651.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = (e,s) => s;
+var __v_3 = Error().stack[0].constructor;
+var __v_4 = {};
+function __f_3() {}
+var __v_5 = __v_3.call(null, __v_4, __f_3, {valueOf() { return 1611877293 }});
+ __v_5.getColumnNumber();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-571064.js b/deps/v8/test/mjsunit/regress/regress-crbug-571064.js
new file mode 100644
index 0000000000..a28a3833b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-571064.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --enable-slow-asserts
+
+Array.prototype.__proto__ = null;
+var func = Array.prototype.push;
+var prototype = Array.prototype;
+function CallFunc(a) {
+ func.call(a);
+}
+function CallFuncWithPrototype() {
+ CallFunc(prototype);
+}
+CallFunc([]);
+CallFunc([]);
+%OptimizeFunctionOnNextCall(CallFuncWithPrototype);
+CallFuncWithPrototype();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-571370.js b/deps/v8/test/mjsunit/regress/regress-crbug-571370.js
new file mode 100644
index 0000000000..5fd9a2484b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-571370.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var val = [0.5];
+var arr = [0.5];
+for (var i = -1; i < 1; i++) {
+ arr[i] = val;
+}
+assertEquals(val, arr[-1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-571517.js b/deps/v8/test/mjsunit/regress/regress-crbug-571517.js
new file mode 100644
index 0000000000..03bf76cb5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-571517.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Receiver() { this.receiver = "receiver"; }
+function Proto() { this.proto = "proto"; }
+
+function f(a) {
+ return a.foo;
+}
+
+var rec = new Receiver();
+
+var proto = rec.__proto__.__proto__;
+
+// Initialize prototype chain dependent IC (nonexistent load).
+assertEquals(undefined, f(rec));
+assertEquals(undefined, f(rec));
+
+// Add a new prototype to the end of the chain.
+var p2 = new Proto();
+p2.__proto__ = null;
+proto.__proto__ = p2;
+
+// Update the IC.
+assertEquals(undefined, f(rec));
+
+// Now modify the most recently added prototype by adding a property...
+p2.foo = "bar";
+assertEquals("bar", f(rec));
+
+// ...and removing it again. Due to missing prototype user registrations,
+// this fails to invalidate the IC.
+delete p2.foo;
+p2.secret = "GAME OVER";
+assertEquals(undefined, f(rec));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-572590.js b/deps/v8/test/mjsunit/regress/regress-crbug-572590.js
new file mode 100644
index 0000000000..5871005423
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-572590.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --verify-heap
+
+function g() { }
+var f = g.bind();
+f.__defineGetter__('length', g);
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-573857.js b/deps/v8/test/mjsunit/regress/regress-crbug-573857.js
new file mode 100644
index 0000000000..d2892c924f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-573857.js
@@ -0,0 +1,13 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --verify-heap
+
+function f() {}
+f = f.bind();
+f.x = f.name;
+f.__defineGetter__('name', function() { return f.x; });
+function g() {}
+g.prototype = f;
+gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-573858.js b/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
new file mode 100644
index 0000000000..37a9eb84e5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-573858.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var throw_type_error = Object.getOwnPropertyDescriptor(
+ (function() {"use strict"}).__proto__, "caller").get;
+
+function create_initial_map() { this instanceof throw_type_error }
+%OptimizeFunctionOnNextCall(create_initial_map);
+create_initial_map();
+
+function test() { new throw_type_error }
+%OptimizeFunctionOnNextCall(test);
+assertThrows(test);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-575080.js b/deps/v8/test/mjsunit/regress/regress-crbug-575080.js
new file mode 100644
index 0000000000..c549e9caff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-575080.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --es-staging
+
+class A extends Function {
+ constructor(...args) {
+ super(...args);
+ this.a = 42;
+ this.d = 4.2;
+ this.o = 0;
+ }
+}
+var obj = new A("'use strict';");
+obj.o = 0.1;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-575082.js b/deps/v8/test/mjsunit/regress/regress-crbug-575082.js
new file mode 100644
index 0000000000..d9cc0f9140
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-575082.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var y = new Date("-1073741824");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-575314.js b/deps/v8/test/mjsunit/regress/regress-crbug-575314.js
new file mode 100644
index 0000000000..7a5bd4eb70
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-575314.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// https://code.google.com/p/chromium/issues/detail?id=575314
+
+// Overwriting the constructor of a Promise with something that doesn't have
+// @@species shouldn't result in a rejection, even if that constructor
+// is somewhat bogus.
+
+var test = new Promise(function(){});
+test.constructor = function(){};
+Promise.resolve(test).catch(e => %AbortJS(e + " FAILED!"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-578039-Proxy_construct_prototype_change.js b/deps/v8/test/mjsunit/regress/regress-crbug-578039-Proxy_construct_prototype_change.js
new file mode 100644
index 0000000000..30b3f219e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-578039-Proxy_construct_prototype_change.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+function target() {};
+
+var proxy = new Proxy(target, {
+ get() {
+ // Reset the initial map of the target.
+ target.prototype = 123;
+ }});
+
+new proxy();
diff --git a/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js b/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
index 07c7fad7e6..daa6fa7670 100644
--- a/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
+++ b/deps/v8/test/mjsunit/regress/regress-debugger-redirect.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --legacy-const
function f(x) {
// This function compiles into code that only throws a redeclaration
diff --git a/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js b/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js
new file mode 100644
index 0000000000..8bebbe27f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-deopt-in-array-literal-spread.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a,b,c,d) { return [a, ...(%DeoptimizeNow(), [b,c]), d]; }
+
+assertEquals([1,2,3,4], f(1,2,3,4));
+assertEquals([1,2,3,4], f(1,2,3,4));
+%OptimizeFunctionOnNextCall(f);
+assertEquals([1,2,3,4], f(1,2,3,4));
diff --git a/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js b/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js
new file mode 100644
index 0000000000..dbd4762fcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-ensure-initial-map.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var x = Object.getOwnPropertyDescriptor({get x() {}}, "x").get;
+function f(o, b) {
+ if (b) {
+ return o instanceof x;
+ }
+}
+
+%OptimizeFunctionOnNextCall(f);
+f();
+
+function g() {
+ return new x();
+}
+
+%OptimizeFunctionOnNextCall(g);
+assertThrows(()=>g());
diff --git a/deps/v8/test/mjsunit/regress/regress-function-constructor-receiver.js b/deps/v8/test/mjsunit/regress/regress-function-constructor-receiver.js
deleted file mode 100644
index 5d713803be..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-function-constructor-receiver.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Return the raw CallSites array.
-Error.prepareStackTrace = function (a,b) { return b; };
-
-var threw = false;
-try {
- new Function({toString:0,valueOf:0});
-} catch (e) {
- threw = true;
- // Ensure that the receiver during "new Function" is the undefined value.
- assertEquals(undefined, e.stack[0].getThis());
-}
-
-assertTrue(threw);
diff --git a/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js b/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
index fe04ddb27c..fc4ba900db 100644
--- a/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
+++ b/deps/v8/test/mjsunit/regress/regress-handle-illegal-redeclaration.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --always-opt
+// Flags: --always-opt --legacy-const
var x = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js b/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js
new file mode 100644
index 0000000000..bd8fa31102
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-inline-arrow-as-construct.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// This tests that inlining a constructor call to a function which cannot be
+// used as a constructor (e.g. arrow function) still throws correctly.
+
+var g = () => {}
+
+function f() {
+ return new g();
+}
+
+assertThrows(f);
+assertThrows(f);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js b/deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js
new file mode 100644
index 0000000000..2fa5001f90
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-inline-strong-as-construct.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --strong-mode
+
+// This tests that inlining a constructor call to a function which cannot be
+// used as a constructor (e.g. strong mode function) still throws correctly.
+
+function g() {
+ "use strong";
+}
+
+function f() {
+ return new g();
+}
+
+assertThrows(f);
+assertThrows(f);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js b/deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js
new file mode 100644
index 0000000000..3ad9e33646
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-osr-in-case-label.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-do-expressions
+
+function f(x) {
+ switch (x) {
+ case 1: return "one";
+ case 2: return "two";
+ case do { for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); } }:
+ case 3: return "WAT";
+ }
+}
+
+assertEquals("one", f(1));
+assertEquals("two", f(2));
+assertEquals("WAT", f(3));
diff --git a/deps/v8/test/mjsunit/regress/regress-osr-in-literal.js b/deps/v8/test/mjsunit/regress/regress-osr-in-literal.js
new file mode 100644
index 0000000000..7553b9c725
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-osr-in-literal.js
@@ -0,0 +1,30 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-do-expressions
+
+"use strict";
+
+var p = {};
+var testCases = [
+ { s:"[1, do { _OSR_ 2 }, 3]", r:[1, 2, 3] },
+ { s:"[1, ...[2], do { _OSR_ 3 }, 4]", r:[1, 2, 3, 4] },
+ { s:"[1, ...do { _OSR_ [2,3] }, 4]", r:[1, 2, 3, 4] },
+ { s:"{ a:do { _OSR_ 1 } }", r:{ a:1 } },
+ { s:"{ a:do { _OSR_ 2 }, __proto__:p }", r:{ a:2, __proto__:p } },
+ { s:"{ a:do { _OSR_ 3 }, get b() { return 4; } }", r:{ a:3, b:4 } },
+ { s:"{ [do { _OSR_ 'b' }]: 3 }", r:{ b:3 } },
+ { s:"{ [do { _OSR_ 'b' }]: 3, c: 4 }", r:{ b:3, c:4 } },
+ { s:"{ [do { _OSR_ 'b' }]: 3, __proto__:p }", r:{ b:3, __proto__:p } },
+ { s:"{ get [do { _OSR_ 'c' }]() { return 4; } }", r:{ c:4 } },
+ { s:"class { [do { _OSR_ 'f' }]() {} }" },
+ { s:"class { [do { _OSR_ 'f' }]() {}; g() {} }" },
+];
+
+for (var i = 0; i < testCases.length; ++i) {
+ var source = "(function f" + i + "(x) { return " + testCases[i].s + "})";
+ var osr = "for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }";
+ var result = eval(source.replace("_OSR_", osr))();
+ if (testCases[i].r) assertEquals(testCases[i].r, result);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
index ee85364735..a0b99980c7 100644
--- a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
+++ b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
@@ -109,12 +109,12 @@ assertEquals(undefined, get(a));
})();
// Ensure we cannot delete length, byteOffset, byteLength.
-assertTrue(Int32Array.prototype.hasOwnProperty("length"));
-assertTrue(Int32Array.prototype.hasOwnProperty("byteOffset"));
-assertTrue(Int32Array.prototype.hasOwnProperty("byteLength"));
-assertFalse(delete Int32Array.prototype.length);
-assertFalse(delete Int32Array.prototype.byteOffset);
-assertFalse(delete Int32Array.prototype.byteLength);
+assertTrue(Int32Array.prototype.__proto__.hasOwnProperty("length"));
+assertTrue(Int32Array.prototype.__proto__.hasOwnProperty("byteOffset"));
+assertTrue(Int32Array.prototype.__proto__.hasOwnProperty("byteLength"));
+assertFalse(delete Int32Array.prototype.__proto__.length);
+assertFalse(delete Int32Array.prototype.__proto__.byteOffset);
+assertFalse(delete Int32Array.prototype.__proto__.byteLength);
a = new Int32Array(100);
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
index 93106c5eb1..0e9b3d3f4a 100644
--- a/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan.js
@@ -10,8 +10,8 @@ function loader(dst, src, i) {
var ab = new ArrayBuffer(8);
var i_view = new Int32Array(ab);
-i_view[0] = 0xFFF7FFFF;
-i_view[1] = 0xFFF7FFFF;
+i_view[0] = %GetHoleNaNUpper()
+i_view[1] = %GetHoleNaNLower();
var f_view = new Float64Array(ab);
var fixed_double_elements = new Float64Array(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js b/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
index 636b38a110..5a0bc38c07 100644
--- a/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
+++ b/deps/v8/test/mjsunit/regress/regress-undefined-nan3.js
@@ -6,8 +6,8 @@
var ab = new ArrayBuffer(8);
var i_view = new Int32Array(ab);
-i_view[0] = 0xFFF7FFFF;
-i_view[1] = 0xFFF7FFFF;
+i_view[0] = %GetHoleNaNUpper()
+i_view[1] = %GetHoleNaNLower();
var f_view = new Float64Array(ab);
var fixed_double_elements = new Float64Array(1);
diff --git a/deps/v8/test/mjsunit/stack-traces-2.js b/deps/v8/test/mjsunit/stack-traces-2.js
index a54bb45ff5..45dfe3a1ea 100644
--- a/deps/v8/test/mjsunit/stack-traces-2.js
+++ b/deps/v8/test/mjsunit/stack-traces-2.js
@@ -69,13 +69,10 @@ function testNotOmittedBuiltin(throwing, included) {
testTraceNativeConversion(String); // Does ToString on argument.
-testTraceNativeConversion(Number); // Does ToNumber on argument.
testTraceNativeConversion(RegExp); // Does ToString on argument.
testTraceNativeConstructor(String); // Does ToString on argument.
-testTraceNativeConstructor(Number); // Does ToNumber on argument.
testTraceNativeConstructor(RegExp); // Does ToString on argument.
-testTraceNativeConstructor(Date); // Does ToNumber on argument.
// QuickSort has builtins object as receiver, and is non-native
// builtin. Should not be omitted with the --builtins-in-stack-traces flag.
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index b256033b53..1123c0bc1d 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -69,7 +69,7 @@ function testEvalWithSourceURL() {
function testNestedEvalWithSourceURL() {
var x = "FAIL";
- var innerEval = 'function Inner() { eval(x); }\n//@ sourceURL=res://inner-eval';
+ var innerEval = 'function Inner() { eval(x); }\n//# sourceURL=res://inner-eval';
eval("function Outer() { eval(innerEval); Inner(); }; Outer();\n//# sourceURL=res://outer-eval");
}
@@ -213,27 +213,11 @@ function testErrorsDuringFormatting() {
Nasty.prototype.foo = function () { throw new RangeError(); };
var n = new Nasty();
n.__defineGetter__('constructor', function () { CONS_FAIL; });
- var threw = false;
- try {
- n.foo();
- } catch (e) {
- threw = true;
- assertTrue(e.stack.indexOf('<error: ReferenceError') != -1,
- "ErrorsDuringFormatting didn't contain error: ReferenceError");
- }
- assertTrue(threw, "ErrorsDuringFormatting didn't throw");
- threw = false;
+ assertThrows(()=>n.foo(), RangeError);
// Now we can't even format the message saying that we couldn't format
// the stack frame. Put that in your pipe and smoke it!
ReferenceError.prototype.toString = function () { NESTED_FAIL; };
- try {
- n.foo();
- } catch (e) {
- threw = true;
- assertTrue(e.stack.indexOf('<error>') != -1,
- "ErrorsDuringFormatting didn't contain <error>");
- }
- assertTrue(threw, "ErrorsDuringFormatting didnt' throw (2)");
+ assertThrows(()=>n.foo(), RangeError);
}
@@ -307,13 +291,10 @@ testUnintendedCallerCensorship();
testErrorsDuringFormatting();
testTraceNativeConversion(String); // Does ToString on argument.
-testTraceNativeConversion(Number); // Does ToNumber on argument.
testTraceNativeConversion(RegExp); // Does ToString on argument.
testTraceNativeConstructor(String); // Does ToString on argument.
-testTraceNativeConstructor(Number); // Does ToNumber on argument.
testTraceNativeConstructor(RegExp); // Does ToString on argument.
-testTraceNativeConstructor(Date); // Does ToNumber on argument.
// Omitted because QuickSort has builtins object as receiver, and is non-native
// builtin.
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index d9939765fc..6beb9c667a 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -1018,11 +1018,7 @@ function CheckFunctionPillDescriptor(func, name) {
assertThrows(function() { 'use strict'; pill.property = "value"; },
TypeError);
assertThrows(pill, TypeError);
- assertEquals(pill.prototype, (function(){}).prototype);
- var d = Object.getOwnPropertyDescriptor(pill, "prototype");
- assertFalse(d.writable);
- assertFalse(d.configurable);
- assertFalse(d.enumerable);
+ assertEquals(undefined, pill.prototype);
}
// Poisoned accessors are no longer own properties
@@ -1046,11 +1042,7 @@ function CheckArgumentsPillDescriptor(func, name) {
assertThrows(function() { 'use strict'; pill.property = "value"; },
TypeError);
assertThrows(pill, TypeError);
- assertEquals(pill.prototype, (function(){}).prototype);
- var d = Object.getOwnPropertyDescriptor(pill, "prototype");
- assertFalse(d.writable);
- assertFalse(d.configurable);
- assertFalse(d.enumerable);
+ assertEquals(undefined, pill.prototype);
}
var descriptor = Object.getOwnPropertyDescriptor(func, name);
diff --git a/deps/v8/test/mjsunit/strong/declaration-after-use.js b/deps/v8/test/mjsunit/strong/declaration-after-use.js
index ee44983329..3530105f2b 100644
--- a/deps/v8/test/mjsunit/strong/declaration-after-use.js
+++ b/deps/v8/test/mjsunit/strong/declaration-after-use.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --strong-mode --harmony-rest-parameters
+// Flags: --strong-mode
// Note that it's essential for these tests that the reference is inside dead
// code (because we already produce ReferenceErrors for run-time unresolved
diff --git a/deps/v8/test/mjsunit/strong/destructuring.js b/deps/v8/test/mjsunit/strong/destructuring.js
index b3971b3b09..30f6183f62 100644
--- a/deps/v8/test/mjsunit/strong/destructuring.js
+++ b/deps/v8/test/mjsunit/strong/destructuring.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
// Flags: --strong-mode --allow-natives-syntax
(function() {
diff --git a/deps/v8/test/mjsunit/strong/eval-direct.js b/deps/v8/test/mjsunit/strong/eval-direct.js
index 137ff8d37f..bb5387e5b3 100644
--- a/deps/v8/test/mjsunit/strong/eval-direct.js
+++ b/deps/v8/test/mjsunit/strong/eval-direct.js
@@ -14,6 +14,9 @@ assertThrows("'use strong'; eval('function f() {}');", SyntaxError);
assertThrows("'use strong'; function f() {eval()}", SyntaxError);
assertDoesNotThrow("'use strong'; eval;");
-assertDoesNotThrow("'use strong'; eval`foo`;");
assertDoesNotThrow("'use strong'; let foo = eval; foo();");
assertDoesNotThrow("'use strong'; (1, eval)();");
+
+// TODO(neis): The tagged template triggers %ObjectFreeze on an array, which
+// throws when trying to redefine 'length'.
+// assertDoesNotThrow("'use strong'; eval`foo`;");
diff --git a/deps/v8/test/mjsunit/strong/function-arity.js b/deps/v8/test/mjsunit/strong/function-arity.js
index 5ead236171..11ee212a64 100644
--- a/deps/v8/test/mjsunit/strong/function-arity.js
+++ b/deps/v8/test/mjsunit/strong/function-arity.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --strong-mode --harmony-reflect
-// Flags: --harmony-rest-parameters --allow-natives-syntax
+// Flags: --allow-natives-syntax
'use strict';
diff --git a/deps/v8/test/mjsunit/strong/literals.js b/deps/v8/test/mjsunit/strong/literals.js
index c9457ffbec..6bdf0f0057 100644
--- a/deps/v8/test/mjsunit/strong/literals.js
+++ b/deps/v8/test/mjsunit/strong/literals.js
@@ -3,8 +3,7 @@
// found in the LICENSE file.
// Flags: --strong-mode --allow-natives-syntax
-// Flags: --harmony-rest-parameters
-// Flags: --harmony-destructuring
+// Flags: --harmony-destructuring-bind
'use strict';
diff --git a/deps/v8/test/mjsunit/strong/load-proxy.js b/deps/v8/test/mjsunit/strong/load-proxy.js
index 98a3238c4e..417a41faf8 100644
--- a/deps/v8/test/mjsunit/strong/load-proxy.js
+++ b/deps/v8/test/mjsunit/strong/load-proxy.js
@@ -50,8 +50,8 @@ function handlerMaker2(obj) {
};
}
var baseObj = {};
-var proxy1 = Proxy.create(handlerMaker1(baseObj));
-var proxy2 = Proxy.create(handlerMaker2(baseObj));
+var proxy1 = new Proxy({}, handlerMaker1(baseObj));
+var proxy2 = new Proxy({}, handlerMaker2(baseObj));
var childObj1 = { __proto__: proxy1 };
var childObj2 = { __proto__: proxy2 };
var childObjAccessor1 = { set foo(_){}, set "1"(_){}, __proto__: proxy1 };
@@ -80,19 +80,21 @@ var childObjAccessor2 = { set foo(_){}, set "1"(_){}, __proto__: proxy2 };
// semantics.
Object.freeze(baseObj);
- Object.freeze(proxy1);
- assertThrows(function(){proxy1.foo}, TypeError);
- assertThrows(function(){proxy1[1]}, TypeError);
- assertThrows(function(){childObj1.foo}, TypeError);
- assertThrows(function(){childObj1[1]}, TypeError);
- assertThrows(function(){childObjAccessor1.foo}, TypeError);
- assertThrows(function(){childObjAccessor1[1]}, TypeError);
-
- Object.freeze(proxy2);
- assertThrows(function(){proxy2.foo}, TypeError);
- assertThrows(function(){proxy2[1]}, TypeError);
- assertThrows(function(){childObj2.foo}, TypeError);
- assertThrows(function(){childObj2[1]}, TypeError);
- assertThrows(function(){childObjAccessor2.foo}, TypeError);
- assertThrows(function(){childObjAccessor2[1]}, TypeError);
+ // TODO(neis): Reenable once proxies properly support freeze.
+ //
+ // Object.freeze(proxy1);
+ // assertThrows(function(){proxy1.foo}, TypeError);
+ // assertThrows(function(){proxy1[1]}, TypeError);
+ // assertThrows(function(){childObj1.foo}, TypeError);
+ // assertThrows(function(){childObj1[1]}, TypeError);
+ // assertThrows(function(){childObjAccessor1.foo}, TypeError);
+ // assertThrows(function(){childObjAccessor1[1]}, TypeError);
+ //
+ // Object.freeze(proxy2);
+ // assertThrows(function(){proxy2.foo}, TypeError);
+ // assertThrows(function(){proxy2[1]}, TypeError);
+ // assertThrows(function(){childObj2.foo}, TypeError);
+ // assertThrows(function(){childObj2[1]}, TypeError);
+ // assertThrows(function(){childObjAccessor2.foo}, TypeError);
+ // assertThrows(function(){childObjAccessor2[1]}, TypeError);
})();
diff --git a/deps/v8/test/mjsunit/call-runtime-tail.js b/deps/v8/test/mjsunit/tail-call-intrinsic.js
index 9f404a8089..a74f153732 100644
--- a/deps/v8/test/mjsunit/call-runtime-tail.js
+++ b/deps/v8/test/mjsunit/tail-call-intrinsic.js
@@ -16,7 +16,7 @@ tailee1 = function() {
if (count1-- == 0) {
return this;
}
- return %_Call(tailee1, this);
+ return %_TailCall(tailee1, this);
};
%OptimizeFunctionOnNextCall(tailee1);
@@ -33,25 +33,25 @@ tailee2 = function(px) {
if ((count2 | 0) === 0) {
return this;
}
- return %_Call(tailee2, this, px);
+ return %_TailCall(tailee2, this, px);
};
%OptimizeFunctionOnNextCall(tailee2);
assertEquals(p1, tailee2.call(p1, p2));
-// Ensure swapped 2 parameters don't trigger a tail call (parameter swizzling
-// for the tail call isn't supported yet).
-var count3 = 100000;
+// Ensure swapped 2 parameters trigger a tail call and do the appropriate
+// parameters swapping
+var count3 = 999999;
tailee3 = function(px) {
"use strict";
if (count3-- == 0) {
return this;
}
- return %_Call(tailee3, px, this);
+ return %_TailCall(tailee3, px, this);
};
%OptimizeFunctionOnNextCall(tailee3);
-assertThrows(function() { tailee3.call(p1, p2); });
+assertEquals(p2, tailee3.call(p1, p2));
// Ensure too many parameters defeats the tail call optimization (currently
// unsupported).
@@ -61,22 +61,48 @@ tailee4 = function(px) {
if (count4-- == 0) {
return this;
}
- return %_Call(tailee4, this, px, undefined);
+ return %_TailCall(tailee4, this, px, undefined);
};
%OptimizeFunctionOnNextCall(tailee4);
assertThrows(function() { tailee4.call(p1, p2); });
-// Ensure too few parameters defeats the tail call optimization (currently
-// unsupported).
+// Ensure that calling the arguments adapter defeats the tail call optimization.
var count5 = 1000000;
tailee5 = function(px) {
"use strict";
if (count5-- == 0) {
return this;
}
- return %_Call(tailee5, this);
+ return %_TailCall(tailee5, this);
};
%OptimizeFunctionOnNextCall(tailee5);
assertThrows(function() { tailee5.call(p1, p2); });
+
+// Ensure tail calls with fewer stack parameters properly re-arranges the stack.
+tailee6 = function(px) {
+ return px;
+}
+
+tailee7 = function(px, py, pz, pa, pb, pc) {
+ "use strict";
+ return %_TailCall(tailee6, this, pc);
+};
+
+%OptimizeFunctionOnNextCall(tailee6);
+%OptimizeFunctionOnNextCall(tailee7);
+assertEquals(110, tailee7.call(null, 15, 16, 17, 18, 0, 110));
+
+tailee8 = function(px, py, pz, pa, pb) {
+ return pb + pz + px;
+}
+
+tailee9 = function(px, py, pz, pa, pb, pc) {
+ "use strict";
+ return %_TailCall(tailee8, this, pb, py, px, pa, pz);
+};
+
+%OptimizeFunctionOnNextCall(tailee8);
+%OptimizeFunctionOnNextCall(tailee9);
+assertEquals(32, tailee9.call(null, 15, 16, 17, 18, 0, 110));
diff --git a/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js b/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js
index c049fb4d09..68b8f0525b 100644
--- a/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js
+++ b/deps/v8/test/mjsunit/third_party/regexp-pcre/regexp-pcre.js
@@ -1657,19 +1657,19 @@ assertNull(res[2].exec("Adefabc", 10));
assertToStringEquals("abc", res[7].exec("abc"), 11);
assertNull(res[7].exec("*** Failers", 12));
assertNull(res[7].exec("def\nabc", 13));
-assertThrows("var re = /x{5,4}/;", 14);
-assertThrows("var re = /[abcd/;", 15);
-assertThrows("var re = /[z-a]/;", 16);
-assertThrows("var re = /^*/;", 17);
-assertThrows("var re = /(abc/;", 18);
-assertThrows("var re = /(?# abc/;", 19);
+assertThrows("var re = /x{5,4}/;");
+assertThrows("var re = /[abcd/;");
+assertThrows("var re = /[z-a]/;");
+assertThrows("var re = /^*/;");
+assertThrows("var re = /(abc/;");
+assertThrows("var re = /(?# abc/;");
assertToStringEquals("cat", res[11].exec("this sentence eventually mentions a cat"), 20);
assertToStringEquals("elephant", res[11].exec("this sentences rambles on and on for a while and then reaches elephant"), 21);
assertToStringEquals("cat", res[12].exec("this sentence eventually mentions a cat"), 22);
assertToStringEquals("elephant", res[12].exec("this sentences rambles on and on for a while and then reaches elephant"), 23);
assertToStringEquals("CAT", res[13].exec("this sentence eventually mentions a CAT cat"), 24);
assertToStringEquals("elephant", res[13].exec("this sentences rambles on and on for a while to elephant ElePhant"), 25);
-assertThrows("var re = /{4,5}abc/;", 26);
+assertThrows("var re = /{4,5}abc/;");
assertToStringEquals("abcb,a,b,c", res[18].exec("abcb"), 27);
assertToStringEquals("abcb,a,b,c", res[18].exec("O0abcb"), 28);
assertToStringEquals("abcb,a,b,c", res[18].exec("O3abcb"), 29);
@@ -1712,7 +1712,7 @@ assertNull(res[30].exec("abc\ndef", 65));
assertToStringEquals("abc", res[31].exec("abc"), 66);
assertNull(res[31].exec("abc\n", 67));
assertToStringEquals("abc,abc", res[33].exec("abc"), 68);
-assertThrows("var re = /)/;", 69);
+assertThrows("var re = /)/;");
assertToStringEquals("-pr", res[35].exec("co-processors, and for"), 70);
assertToStringEquals("<def>ghi<klm>", res[36].exec("abc<def>ghi<klm>nop"), 71);
assertToStringEquals("<def>", res[37].exec("abc<def>ghi<klm>nop"), 72);
@@ -1772,28 +1772,28 @@ assertNull(res[45].exec("fooabar", 125));
assertNull(res[46].exec("*** Failers", 126));
assertNull(res[46].exec("a", 127));
assertNull(res[48].exec("aaaaaa", 128));
-assertThrows("var re = /a[b-a]/;", 129);
-assertThrows("var re = /a[/;", 130);
-assertThrows("var re = /*a/;", 131);
-assertThrows("var re = /abc)/;", 132);
-assertThrows("var re = /(abc/;", 133);
-assertThrows("var re = /a**/;", 134);
-assertThrows("var re = /)(/;", 135);
-assertThrows("var re = /a[b-a]/;", 136);
-assertThrows("var re = /a[/;", 137);
-assertThrows("var re = /*a/;", 138);
-assertThrows("var re = /abc)/;", 139);
-assertThrows("var re = /(abc/;", 140);
-assertThrows("var re = /a**/;", 141);
-assertThrows("var re = /)(/;", 142);
-assertThrows("var re = /:(?:/;", 143);
-assertThrows("var re = /a(?{)b/;", 144);
-assertThrows("var re = /a(?{{})b/;", 145);
-assertThrows("var re = /a(?{}})b/;", 146);
-assertThrows("var re = /a(?{\"{\"})b/;", 147);
-assertThrows("var re = /a(?{\"{\"}})b/;", 148);
-assertThrows("var re = /[a[:xyz:/;", 149);
-assertThrows("var re = /a{37,17}/;", 150);
+assertThrows("var re = /a[b-a]/;");
+assertThrows("var re = /a[/;");
+assertThrows("var re = /*a/;");
+assertThrows("var re = /abc)/;");
+assertThrows("var re = /(abc/;");
+assertThrows("var re = /a**/;");
+assertThrows("var re = /)(/;");
+assertThrows("var re = /a[b-a]/;");
+assertThrows("var re = /a[/;");
+assertThrows("var re = /*a/;");
+assertThrows("var re = /abc)/;");
+assertThrows("var re = /(abc/;");
+assertThrows("var re = /a**/;");
+assertThrows("var re = /)(/;");
+assertThrows("var re = /:(?:/;");
+assertThrows("var re = /a(?{)b/;");
+assertThrows("var re = /a(?{{})b/;");
+assertThrows("var re = /a(?{}})b/;");
+assertThrows("var re = /a(?{\"{\"})b/;");
+assertThrows("var re = /a(?{\"{\"}})b/;");
+assertThrows("var re = /[a[:xyz:/;");
+assertThrows("var re = /a{37,17}/;");
assertToStringEquals("abcd,a,d", res[58].exec("abcd"), 151);
assertToStringEquals("abcd,a,d", res[58].exec("abcdC2"), 152);
assertToStringEquals("abcd,a,d", res[58].exec("abcdC5"), 153);
@@ -1884,7 +1884,7 @@ assertNull(res[147].exec("aB", 237));
assertNull(res[147].exec("*** Failers", 238));
assertNull(res[147].exec("Ab", 239));
assertNull(res[147].exec("AB", 240));
-assertThrows("var re = /[\\200-\\110]/;", 241);
+assertThrows("var re = /[\\200-\\110]/;");
assertToStringEquals("1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 ABC ABC,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 ,19 ,20 ,21 ,22 ,23 ,24 ,25 ,26 ,27 ,28 ,29 ,30 ,31 ,32 ,33 ,34 ,35 ,36 ,37 ,38 ,39 ,40 ,41 ,42 ,43 ,44 ,45 ,46 ,47 ,48 ,49 ,50 ,51 ,52 ,53 ,54 ,55 ,56 ,57 ,58 ,59 ,60 ,61 ,62 ,63 ,64 ,65 ,66 ,67 ,68 ,69 ,70 ,71 ,72 ,73 ,74 ,75 ,76 ,77 ,78 ,79 ,80 ,81 ,82 ,83 ,84 ,85 ,86 ,87 ,88 ,89 ,90 ,91 ,92 ,93 ,94 ,95 ,96 ,97 ,98 ,99 ,100 ,101 ,102 ,103 ,104 ,105 ,106 ,107 ,108 ,109 ,110 ,111 ,112 ,113 ,114 ,115 ,116 ,117 ,118 ,119 ,120 ,121 ,122 ,123 ,124 ,125 ,126 ,127 ,128 ,129 ,130 ,131 ,132 ,133 ,134 ,135 ,136 ,137 ,138 ,139 ,140 ,141 ,142 ,143 ,144 ,145 ,146 ,147 ,148 ,149 ,150 ,151 ,152 ,153 ,154 ,155 ,156 ,157 ,158 ,159 ,160 ,161 ,162 ,163 ,164 ,165 ,166 ,167 ,168 ,169 ,170 ,171 ,172 ,173 ,174 ,175 ,176 ,177 ,178 ,179 ,180 ,181 ,182 ,183 ,184 ,185 ,186 ,187 ,188 ,189 ,190 ,191 ,192 ,193 ,194 ,195 ,196 ,197 ,198 ,199 ,200 ,201 ,202 ,203 ,204 ,205 ,206 ,207 ,208 ,209 ,210 ,211 ,212 ,213 ,214 ,215 ,216 ,217 ,218 ,219 ,220 ,221 ,222 
,223 ,224 ,225 ,226 ,227 ,228 ,229 ,230 ,231 ,232 ,233 ,234 ,235 ,236 ,237 ,238 ,239 ,240 ,241 ,242 ,243 ,244 ,245 ,246 ,247 ,248 ,249 ,250 ,251 ,252 ,253 ,254 ,255 ,256 ,257 ,258 ,259 ,260 ,261 ,262 ,263 ,264 ,265 ,266 ,267 ,268 ,269 ,ABC,ABC", res[149].exec("O900 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 ABC ABC"), 242);
assertToStringEquals("mainmain,main,", res[151].exec("mainmain"), 243);
assertToStringEquals("mainOmain,main,", res[151].exec("mainOmain"), 244);
@@ -1902,7 +1902,7 @@ assertToStringEquals("aabbaa,", res[163].exec("aabbaa"), 255);
assertToStringEquals("aabbbaa,", res[164].exec("aabbbaa"), 256);
assertToStringEquals("aabbbaa,aa,,", res[165].exec("aabbbaa"), 257);
assertToStringEquals("aabbbbaa,aa,,", res[166].exec("aabbbbaa"), 258);
-assertThrows("var re = //;", 259);
+assertThrows("var re = //;");
assertToStringEquals("a", res[169].exec("ab"), 260);
assertToStringEquals("a", res[169].exec("aB"), 261);
assertToStringEquals("*", res[169].exec("*** Failers"), 262);
@@ -1930,8 +1930,8 @@ assertNull(res[177].exec("*** Failers", 283));
assertNull(res[177].exec("((()aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 284));
assertNull(res[177].exec("xaaaab", 285));
assertNull(res[177].exec("xaaaab", 286));
-assertThrows("var re = /[/;", 287);
-assertThrows("var re = /[a-/;", 288);
+assertThrows("var re = /[/;");
+assertThrows("var re = /[a-/;");
assertNull(res[189].exec("<>", 289));
assertNull(res[189].exec("<abcd>", 290));
assertNull(res[189].exec("<abc <123> hij>", 291));
@@ -2111,7 +2111,7 @@ assertNull(res[255].exec("AbCd", 464));
assertNull(res[255].exec("** Failers", 465));
assertNull(res[255].exec("abcd", 466));
// We are compatible with JSC, and don't throw an exception in this case.
-// assertThrows("var re = /(){2,4294967295}/;", 467);
+// assertThrows("var re = /(){2,4294967295}/;");
assertNull(res[255].exec("abcdefghijklAkB", 468));
assertNull(res[255].exec("abcdefghijklAkB", 469));
assertNull(res[255].exec("abcdefghijklAkB", 470));
@@ -2230,7 +2230,7 @@ assertToStringEquals("x", res[350].exec("x"), 582);
assertToStringEquals("ab", res[350].exec("abcabc"), 583);
assertToStringEquals("Xaaa,a", res[351].exec("Xaaa"), 584);
assertToStringEquals("Xaba,a", res[351].exec("Xaba"), 585);
-assertThrows("var re = /^[a-\\Q\\E]/;", 586);
+assertThrows("var re = /^[a-\\Q\\E]/;");
assertNull(res[353].exec("(xy)x", 587));
assertNull(res[353].exec("1221", 588));
assertNull(res[353].exec("Satan, oscillate my metallic sonatas!", 589));
@@ -2353,13 +2353,13 @@ assertToStringEquals("abc1", res[378].exec("abc1\n abc2\x0b abc3\x0c abc4\x0d ab
assertToStringEquals("X", res[379].exec("XABC"), 706);
assertNull(res[379].exec("** Failers ", 707));
assertToStringEquals("X", res[379].exec("XABCB"), 708);
-assertThrows("var re = /(ab|c)(?-1)/;", 709);
+assertThrows("var re = /(ab|c)(?-1)/;");
assertNull(res[379].exec("abc", 710));
assertNull(res[379].exec("xyabcabc", 711));
assertNull(res[379].exec("** Failers", 712));
assertNull(res[379].exec("xyabc ", 713));
-assertThrows("var re = /x(?-0)y/;", 714);
-assertThrows("var re = /x(?-1)y/;", 715);
+assertThrows("var re = /x(?-0)y/;");
+assertThrows("var re = /x(?-1)y/;");
assertNull(res[379].exec("abcX", 716));
assertNull(res[379].exec("Y", 717));
assertNull(res[379].exec("** Failers", 718));
@@ -2379,14 +2379,14 @@ assertNull(res[382].exec("** Failers", 731));
assertNull(res[382].exec("tom-bon ", 732));
assertNull(res[382].exec("tom-tom", 733));
assertNull(res[382].exec("bon-bon ", 734));
-assertThrows("var re = /(?|(abc)|(xyz))/;", 735);
-assertThrows("var re = /(x)(?|(abc)|(xyz))(x)/;", 736);
+assertThrows("var re = /(?|(abc)|(xyz))/;");
+assertThrows("var re = /(x)(?|(abc)|(xyz))(x)/;");
assertNull(res[383].exec("xabcx", 737));
assertNull(res[383].exec("xxyzx ", 738));
-assertThrows("var re = /(x)(?|(abc)(pqr)|(xyz))(x)/;", 739);
+assertThrows("var re = /(x)(?|(abc)(pqr)|(xyz))(x)/;");
assertNull(res[383].exec("xabcpqrx", 740));
assertNull(res[383].exec("xxyzx ", 741));
-assertThrows("var re = /(?|(abc)|(xyz))\\1/;", 742);
+assertThrows("var re = /(?|(abc)|(xyz))\\1/;");
assertNull(res[383].exec("abcabc", 743));
assertNull(res[383].exec("xyzxyz ", 744));
assertNull(res[383].exec("** Failers", 745));
@@ -2528,7 +2528,7 @@ assertNull(res[431].exec("a\x85b", 880));
assertNull(res[431].exec("a\nb", 881));
assertNull(res[431].exec("a\x0db ", 882));
assertNull(res[431].exec("a\x85b", 883));
-assertThrows("var re = /(?-+a)/;", 884);
+assertThrows("var re = /(?-+a)/;");
assertNull(res[443].exec("aaaa", 885));
assertNull(res[443].exec("bacxxx", 886));
assertNull(res[443].exec("bbaccxxx ", 887));
@@ -4082,7 +4082,7 @@ assertToStringEquals("bababc,ba", res[808].exec("bababc"), 2434);
assertNull(res[808].exec("*** Failers", 2435));
assertNull(res[808].exec("bababbc", 2436));
assertNull(res[808].exec("babababc", 2437));
-assertThrows("var re = /^\\ca\\cA\\c[\\c{\\c:/;", 2438);
+assertThrows("var re = /^\\ca\\cA\\c[\\c{\\c:/;");
assertNull(res[808].exec("\x01\x01e;z", 2439));
assertToStringEquals("a", res[809].exec("athing"), 2440);
assertToStringEquals("b", res[809].exec("bthing"), 2441);
@@ -4478,7 +4478,7 @@ assertToStringEquals(".23,.23,", res[925].exec("1.230003938 "), 2830);
assertToStringEquals(".875,.875,5", res[925].exec("1.875000282"), 2831);
assertNull(res[925].exec("*** Failers ", 2832));
assertNull(res[925].exec("1.235 ", 2833));
-assertThrows("var re = /a(?)b/;", 2834);
+assertThrows("var re = /a(?)b/;");
assertNull(res[925].exec("ab ", 2835));
assertToStringEquals("foo table,foo,table", res[926].exec("Food is on the foo table"), 2836);
assertToStringEquals("food is under the bar in the bar,d is under the bar in the ", res[927].exec("The food is under the bar in the barn."), 2837);
@@ -4625,7 +4625,7 @@ assertToStringEquals("\"the \\\"quick\\\" brown fox\", brown fox", res[983].exec
assertToStringEquals("", res[984].exec("abc"), 2978);
assertToStringEquals("", res[985].exec("abc "), 2979);
assertToStringEquals("", res[986].exec("abc "), 2980);
-assertThrows("var re = //;", 2981);
+assertThrows("var re = //;");
assertToStringEquals("", res[986].exec("abc"), 2982);
assertToStringEquals("acb", res[988].exec("acb"), 2983);
assertToStringEquals("a\nb", res[988].exec("a\nb"), 2984);
@@ -5593,11 +5593,11 @@ assertToStringEquals("X", res[1330].exec("XABCB"), 3945);
assertNull(res[1330].exec("abc\x0d\n\x0d\n", 3946));
assertNull(res[1330].exec("abc\x0d\n\x0d\n", 3947));
assertNull(res[1330].exec("abc\x0d\n\x0d\n", 3948));
-assertThrows("var re = /(?|(abc)|(xyz))/;", 3949);
-assertThrows("var re = /(x)(?|(abc)|(xyz))(x)/;", 3950);
+assertThrows("var re = /(?|(abc)|(xyz))/;");
+assertThrows("var re = /(x)(?|(abc)|(xyz))(x)/;");
assertNull(res[1330].exec("xabcx", 3951));
assertNull(res[1330].exec("xxyzx ", 3952));
-assertThrows("var re = /(x)(?|(abc)(pqr)|(xyz))(x)/;", 3953);
+assertThrows("var re = /(x)(?|(abc)(pqr)|(xyz))(x)/;");
assertNull(res[1330].exec("xabcpqrx", 3954));
assertNull(res[1330].exec("xxyzx ", 3955));
assertNull(res[1330].exec("abcabc", 3956));
@@ -6600,4 +6600,4 @@ assertNull(res[1546].exec("x{1d79}x{a77d} ", 4952));
assertNull(res[1546].exec("x{a77d}x{1d79}", 4953));
assertNull(res[1546].exec("** Failers ", 4954));
assertNull(res[1546].exec("x{1d79}x{a77d} ", 4955));
-assertThrows("var re = //;", 4956);
+assertThrows("var re = //;");
diff --git a/deps/v8/test/mjsunit/wasm/OWNERS b/deps/v8/test/mjsunit/wasm/OWNERS
new file mode 100644
index 0000000000..c2abc8a6ad
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/OWNERS
@@ -0,0 +1,3 @@
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
new file mode 100644
index 0000000000..8dfe85aee1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -0,0 +1,785 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function EmptyTest() {
+ "use asm";
+ function caller() {
+ empty();
+ return 11;
+ }
+ function empty() {
+ }
+ return {caller: caller};
+}
+
+assertEquals(11, _WASMEXP_.asmCompileRun(EmptyTest.toString()));
+
+function IntTest() {
+ "use asm";
+ function sum(a, b) {
+ a = a|0;
+ b = b|0;
+ var c = (b + 1)|0
+ var d = 3.0;
+ var e = d | 0; // double conversion
+ return (a + c + 1)|0;
+ }
+
+ function caller() {
+ return sum(77,22) | 0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(101, _WASMEXP_.asmCompileRun(IntTest.toString()));
+
+function Float64Test() {
+ "use asm";
+ function sum(a, b) {
+ a = +a;
+ b = +b;
+ return +(a + b);
+ }
+
+ function caller() {
+ var a = +sum(70.1,10.2);
+ var ret = 0|0;
+ if (a == 80.3) {
+ ret = 1|0;
+ } else {
+ ret = 0|0;
+ }
+ return ret|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(1, _WASMEXP_.asmCompileRun(Float64Test.toString()));
+
+function BadModule() {
+ "use asm";
+ function caller(a, b) {
+ a = a|0;
+ b = b+0;
+ var c = (b + 1)|0
+ return (a + c + 1)|0;
+ }
+
+ function caller() {
+ return call(1, 2)|0;
+ }
+
+ return {caller: caller};
+}
+
+assertThrows(function() {
+ _WASMEXP_.asmCompileRun(BadModule.toString())
+});
+
+function TestReturnInBlock() {
+ "use asm";
+
+ function caller() {
+ if(1) {
+ {
+ {
+ return 1;
+ }
+ }
+ }
+ return 0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(1, _WASMEXP_.asmCompileRun(TestReturnInBlock.toString()));
+
+function TestWhileSimple() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ while(x < 5) {
+ x = (x + 1)|0;
+ }
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(5, _WASMEXP_.asmCompileRun(TestWhileSimple.toString()));
+
+function TestWhileWithoutBraces() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ while(x <= 3)
+ x = (x + 1)|0;
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(4, _WASMEXP_.asmCompileRun(TestWhileWithoutBraces.toString()));
+
+function TestReturnInWhile() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ while(x < 10) {
+ x = (x + 6)|0;
+ return x|0;
+ }
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(6, _WASMEXP_.asmCompileRun(TestReturnInWhile.toString()));
+
+function TestReturnInWhileWithoutBraces() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ while(x < 5)
+ return 7;
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(7, _WASMEXP_.asmCompileRun(TestReturnInWhileWithoutBraces.toString()));
+
+function TestBreakInWhile() {
+ "use asm";
+
+ function caller() {
+ while(1) {
+ break;
+ }
+ return 8;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(8, _WASMEXP_.asmCompileRun(TestBreakInWhile.toString()));
+
+function TestBreakInNestedWhile() {
+ "use asm";
+
+ function caller() {
+ var x = 1.0;
+ while(x < 1.5) {
+ while(1)
+ break;
+ x = +(x + 0.25);
+ }
+ var ret = 0;
+ if (x == 1.5) {
+ ret = 9;
+ }
+ return ret|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(9, _WASMEXP_.asmCompileRun(TestBreakInNestedWhile.toString()));
+
+function TestBreakInBlock() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ abc: {
+ x = 10;
+ if (x == 10) {
+ break abc;
+ }
+ x = 20;
+ }
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(10, _WASMEXP_.asmCompileRun(TestBreakInBlock.toString()));
+
+function TestBreakInNamedWhile() {
+ "use asm";
+
+ function caller() {
+ var x = 0;
+ outer: while (1) {
+ x = (x + 1)|0;
+ while (x == 11) {
+ break outer;
+ }
+ }
+ return x|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(11, _WASMEXP_.asmCompileRun(TestBreakInNamedWhile.toString()));
+
+function TestContinue() {
+ "use asm";
+
+ function caller() {
+ var x = 5;
+ var ret = 0;
+ while (x >= 0) {
+ x = (x - 1)|0;
+ if (x == 2) {
+ continue;
+ }
+ ret = (ret - 1)|0;
+ }
+ return ret|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(-5, _WASMEXP_.asmCompileRun(TestContinue.toString()));
+
+function TestContinueInNamedWhile() {
+ "use asm";
+
+ function caller() {
+ var x = 5;
+ var y = 0;
+ var ret = 0;
+ outer: while (x > 0) {
+ x = (x - 1)|0;
+ y = 0;
+ while (y < 5) {
+ if (x == 3) {
+ continue outer;
+ }
+ ret = (ret + 1)|0;
+ y = (y + 1)|0;
+ }
+ }
+ return ret|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(20, _WASMEXP_.asmCompileRun(TestContinueInNamedWhile.toString()));
+
+function TestNot() {
+ "use asm";
+
+ function caller() {
+ var a = !(2 > 3);
+ return a | 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(1, _WASMEXP_.asmCompileRun(TestNot.toString()));
+
+function TestNotEquals() {
+ "use asm";
+
+ function caller() {
+ var a = 3;
+ if (a != 2) {
+ return 21;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(21, _WASMEXP_.asmCompileRun(TestNotEquals.toString()));
+
+function TestUnsignedComparison() {
+ "use asm";
+
+ function caller() {
+ var a = 0xffffffff;
+ if ((a>>>0) > (0>>>0)) {
+ return 22;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(22, _WASMEXP_.asmCompileRun(TestUnsignedComparison.toString()));
+
+function TestMixedAdd() {
+ "use asm";
+
+ function caller() {
+ var a = 0x80000000;
+ var b = 0x7fffffff;
+ var c = 0;
+ c = ((a>>>0) + b)|0;
+ if ((c >>> 0) > (0>>>0)) {
+ if (c < 0) {
+ return 23;
+ }
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(23, _WASMEXP_.asmCompileRun(TestMixedAdd.toString()));
+
+function TestInt32HeapAccess(stdlib, foreign, buffer) {
+ "use asm";
+
+ var m = new stdlib.Int32Array(buffer);
+ function caller() {
+ var i = 4;
+
+ m[0] = (i + 1) | 0;
+ m[i >> 2] = ((m[0]|0) + 1) | 0;
+ m[2] = ((m[i >> 2]|0) + 1) | 0;
+ return m[2] | 0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(7, _WASMEXP_.asmCompileRun(TestInt32HeapAccess.toString()));
+
+function TestHeapAccessIntTypes() {
+ var types = [
+ ['Int8Array', '>> 0'],
+ ['Uint8Array', '>> 0'],
+ ['Int16Array', '>> 1'],
+ ['Uint16Array', '>> 1'],
+ ['Int32Array', '>> 2'],
+ ['Uint32Array', '>> 2'],
+ ];
+ for (var i = 0; i < types.length; i++) {
+ var code = TestInt32HeapAccess.toString();
+ code = code.replace('Int32Array', types[i][0]);
+ code = code.replace(/>> 2/g, types[i][1]);
+ assertEquals(7, _WASMEXP_.asmCompileRun(code));
+ }
+}
+
+TestHeapAccessIntTypes();
+
+function TestFloatHeapAccess(stdlib, foreign, buffer) {
+ "use asm";
+
+ var f32 = new stdlib.Float32Array(buffer);
+ var f64 = new stdlib.Float64Array(buffer);
+ var fround = stdlib.Math.fround;
+ function caller() {
+ var i = 8;
+ var j = 8;
+ var v = 6.0;
+
+ // TODO(bradnelson): Add float32 when asm-wasm supports it.
+ f64[2] = v + 1.0;
+ f64[i >> 3] = +f64[2] + 1.0;
+ f64[j >> 3] = +f64[j >> 3] + 1.0;
+ i = +f64[i >> 3] == 9.0;
+ return i|0;
+ }
+
+ return {caller: caller};
+}
+
+assertEquals(1, _WASMEXP_.asmCompileRun(TestFloatHeapAccess.toString()));
+
+function TestConvertI32() {
+ "use asm";
+
+ function caller() {
+ var a = 1.5;
+ if ((~~(a + a)) == 3) {
+ return 24;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(24, _WASMEXP_.asmCompileRun(TestConvertI32.toString()));
+
+function TestConvertF64FromInt() {
+ "use asm";
+
+ function caller() {
+ var a = 1;
+ if ((+(a + a)) > 1.5) {
+ return 25;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(25, _WASMEXP_.asmCompileRun(TestConvertF64FromInt.toString()));
+
+function TestConvertF64FromUnsigned() {
+ "use asm";
+
+ function caller() {
+ var a = 0xffffffff;
+ if ((+(a>>>0)) > 0.0) {
+ if((+a) < 0.0) {
+ return 26;
+ }
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(26, _WASMEXP_.asmCompileRun(TestConvertF64FromUnsigned.toString()));
+
+function TestModInt() {
+ "use asm";
+
+ function caller() {
+ var a = -83;
+ var b = 28;
+ return ((a|0)%(b|0))|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(-27, _WASMEXP_.asmCompileRun(TestModInt.toString()));
+
+function TestModUnsignedInt() {
+ "use asm";
+
+ function caller() {
+ var a = 0x80000000; //2147483648
+ var b = 10;
+ return ((a>>>0)%(b>>>0))|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(8, _WASMEXP_.asmCompileRun(TestModUnsignedInt.toString()));
+
+function TestModDouble() {
+ "use asm";
+
+ function caller() {
+ var a = 5.25;
+ var b = 2.5;
+ if (a%b == 0.25) {
+ return 28;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(28, _WASMEXP_.asmCompileRun(TestModDouble.toString()));
+
+/*
+TODO: Fix parsing of negative doubles
+ Fix code to use trunc instead of casts
+function TestModDoubleNegative() {
+ "use asm";
+
+ function caller() {
+ var a = -34359738368.25;
+ var b = 2.5;
+ if (a%b == -0.75) {
+ return 28;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(28, _WASMEXP_.asmCompileRun(TestModDoubleNegative.toString()));
+*/
+
+function TestNamedFunctions() {
+ "use asm";
+
+ var a = 0.0;
+ var b = 0.0;
+
+ function add() {
+ return +(a + b);
+ }
+
+ function init() {
+ a = 43.25;
+ b = 34.25;
+ }
+
+ return {init:init,
+ add:add};
+}
+
+var module = _WASMEXP_.instantiateModuleFromAsm(TestNamedFunctions.toString());
+module.init();
+assertEquals(77.5, module.add());
+
+function TestGlobalsWithInit() {
+ "use asm";
+
+ var a = 43.25;
+ var b = 34.25;
+
+ function add() {
+ return +(a + b);
+ }
+
+ return {add:add};
+}
+
+var module = _WASMEXP_.instantiateModuleFromAsm(TestGlobalsWithInit.toString());
+module.__init__();
+assertEquals(77.5, module.add());
+
+function TestForLoop() {
+ "use asm"
+
+ function caller() {
+ var ret = 0;
+ var i = 0;
+ for (i = 2; i <= 10; i = (i+1)|0) {
+ ret = (ret + i) | 0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(54, _WASMEXP_.asmCompileRun(TestForLoop.toString()));
+
+function TestForLoopWithoutInit() {
+ "use asm"
+
+ function caller() {
+ var ret = 0;
+ var i = 0;
+ for (; i < 10; i = (i+1)|0) {
+ ret = (ret + 10) | 0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(100, _WASMEXP_.asmCompileRun(TestForLoopWithoutInit.toString()));
+
+function TestForLoopWithoutCondition() {
+ "use asm"
+
+ function caller() {
+ var ret = 0;
+ var i = 0;
+ for (i=1;; i = (i+1)|0) {
+ ret = (ret + i) | 0;
+ if (i == 11) {
+ break;
+ }
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(66, _WASMEXP_.asmCompileRun(TestForLoopWithoutCondition.toString()));
+
+function TestForLoopWithoutNext() {
+ "use asm"
+
+ function caller() {
+ var i = 0;
+ for (i=1; i < 41;) {
+ i = (i + 1) | 0;
+ }
+ return i|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(41, _WASMEXP_.asmCompileRun(TestForLoopWithoutNext.toString()));
+
+function TestForLoopWithoutBody() {
+ "use asm"
+
+ function caller() {
+ var i = 0;
+ for (i=1; i < 45 ; i = (i+1)|0) {
+ }
+ return i|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(45, _WASMEXP_.asmCompileRun(TestForLoopWithoutBody.toString()));
+
+function TestDoWhile() {
+ "use asm"
+
+ function caller() {
+ var i = 0;
+ var ret = 21;
+ do {
+ ret = (ret + ret)|0;
+ i = (i + 1)|0;
+ } while (i < 2);
+ return ret|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(84, _WASMEXP_.asmCompileRun(TestDoWhile.toString()));
+
+function TestConditional() {
+ "use asm"
+
+ function caller() {
+ var x = 1;
+ return ((x > 0) ? 41 : 71)|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(41, _WASMEXP_.asmCompileRun(TestConditional.toString()));
+
+function TestSwitch() {
+ "use asm"
+
+ function caller() {
+ var ret = 0;
+ var x = 7;
+ switch (x) {
+ case 1: return 0;
+ case 7: {
+ ret = 12;
+ break;
+ }
+ default: return 0;
+ }
+ switch (x) {
+ case 1: return 0;
+ case 8: return 0;
+ default: ret = (ret + 11)|0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(23, _WASMEXP_.asmCompileRun(TestSwitch.toString()));
+
+function TestSwitchFallthrough() {
+ "use asm"
+
+ function caller() {
+ var x = 17;
+ var ret = 0;
+ switch (x) {
+ case 17:
+ case 14: ret = 39;
+ case 1: ret = (ret + 3)|0;
+ case 4: break;
+ default: ret = (ret + 1)|0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(42, _WASMEXP_.asmCompileRun(TestSwitchFallthrough.toString()));
+
+function TestNestedSwitch() {
+ "use asm"
+
+ function caller() {
+ var x = 3;
+ var y = -13;
+ switch (x) {
+ case 1: return 0;
+ case 3: {
+ switch (y) {
+ case 2: return 0;
+ case -13: return 43;
+ default: return 0;
+ }
+ }
+ default: return 0;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+}
+
+assertEquals(43, _WASMEXP_.asmCompileRun(TestNestedSwitch.toString()));
+
+function TestInitFunctionWithNoGlobals() {
+ "use asm";
+ function caller() {
+ return 51;
+ }
+ return {caller};
+}
+
+var module = _WASMEXP_.instantiateModuleFromAsm(
+ TestInitFunctionWithNoGlobals.toString());
+module.__init__();
+assertEquals(51, module.caller());
+
+function TestExportNameDifferentFromFunctionName() {
+ "use asm";
+ function caller() {
+ return 55;
+ }
+ return {alt_caller:caller};
+}
+
+var module = _WASMEXP_.instantiateModuleFromAsm(
+ TestExportNameDifferentFromFunctionName.toString());
+module.__init__();
+assertEquals(55, module.alt_caller());
diff --git a/deps/v8/test/mjsunit/wasm/calls.js b/deps/v8/test/mjsunit/wasm/calls.js
new file mode 100644
index 0000000000..98ad657f52
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/calls.js
@@ -0,0 +1,145 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+var module = (function () {
+ var kBodySize = 5;
+ var kNameOffset = 21 + kBodySize + 1;
+
+ return _WASMEXP_.instantiateModule(bytes(
+ // -- memory
+ kDeclMemory,
+ 12, 12, 1,
+ // -- signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstI32, kAstI32, // int, int -> int
+ // -- functions
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // -- body
+ kExprI32Sub, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kDeclEnd,
+ 's', 'u', 'b', 0 // name
+ ));
+})();
+
+// Check the module exists.
+assertFalse(module === undefined);
+assertFalse(module === null);
+assertFalse(module === 0);
+assertEquals("object", typeof module);
+
+// Check the memory is an ArrayBuffer.
+var mem = module.memory;
+assertFalse(mem === undefined);
+assertFalse(mem === null);
+assertFalse(mem === 0);
+assertEquals("object", typeof mem);
+assertTrue(mem instanceof ArrayBuffer);
+for (var i = 0; i < 4; i++) {
+ module.memory = 0; // should be ignored
+ assertEquals(mem, module.memory);
+}
+
+assertEquals(4096, module.memory.byteLength);
+
+// Check the properties of the sub function.
+assertEquals("function", typeof module.sub);
+
+assertEquals(-55, module.sub(33, 88));
+assertEquals(-55555, module.sub(33333, 88888));
+assertEquals(-5555555, module.sub(3333333, 8888888));
+
+
+var module = (function() {
+ var kBodySize = 1;
+ var kNameOffset2 = 19 + kBodySize + 1;
+
+ return _WASMEXP_.instantiateModule(bytes(
+ // -- memory
+ kDeclMemory,
+ 12, 12, 1,
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstStmt, // signature: void -> void
+ // -- functions
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0, // signature index
+ kNameOffset2, 0, 0, 0, // name offset
+ kBodySize, 0,
+ kExprNop, // body
+ kDeclEnd,
+ 'n', 'o', 'p', 0 // name
+ ));
+})();
+
+// Check the module exists.
+assertFalse(module === undefined);
+assertFalse(module === null);
+assertFalse(module === 0);
+assertEquals("object", typeof module);
+
+// Check the memory is an ArrayBuffer.
+var mem = module.memory;
+assertFalse(mem === undefined);
+assertFalse(mem === null);
+assertFalse(mem === 0);
+assertEquals("object", typeof mem);
+assertTrue(mem instanceof ArrayBuffer);
+for (var i = 0; i < 4; i++) {
+ module.memory = 0; // should be ignored
+ assertEquals(mem, module.memory);
+}
+
+assertEquals(4096, module.memory.byteLength);
+
+// Check the properties of the sub function.
+assertFalse(module.nop === undefined);
+assertFalse(module.nop === null);
+assertFalse(module.nop === 0);
+assertEquals("function", typeof module.nop);
+
+assertEquals(undefined, module.nop());
+
+(function testLt() {
+ var kBodySize = 5;
+ var kNameOffset = 21 + kBodySize + 1;
+
+ var data = bytes(
+ // -- memory
+ kDeclMemory,
+ 12, 12, 1,
+ // -- signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstF64, kAstF64, // (f64,f64)->int
+ // -- functions
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0, // signature index
+ kNameOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // -- body
+ kExprF64Lt, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kDeclEnd,
+ 'f', 'l', 't', 0 // name
+ );
+
+ var module = _WASMEXP_.instantiateModule(data);
+
+ assertEquals("function", typeof module.flt);
+ assertEquals(1, module.flt(-2, -1));
+ assertEquals(0, module.flt(7.3, 7.1));
+ assertEquals(1, module.flt(7.1, 7.3));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compile-run-basic.js b/deps/v8/test/mjsunit/wasm/compile-run-basic.js
new file mode 100644
index 0000000000..dbc624a2fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compile-run-basic.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+var kReturnValue = 97;
+
+var kBodySize = 2;
+var kNameOffset = 15 + kBodySize + 1;
+
+var data = bytes(
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstI32, // signature: void -> int
+ // -- main function
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0, // signature index
+ kNameOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ // -- body
+ kExprI8Const, // --
+ kReturnValue, // --
+ kDeclEnd,
+ 'm', 'a', 'i', 'n', 0 // name
+);
+
+assertEquals(kReturnValue, _WASMEXP_.compileRun(data));
diff --git a/deps/v8/test/mjsunit/wasm/divrem-trap.js b/deps/v8/test/mjsunit/wasm/divrem-trap.js
new file mode 100644
index 0000000000..9787ae34c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/divrem-trap.js
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function assertTraps(code, msg) {
+ var threwException = true;
+ try {
+ if (typeof code === 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ threwException = false;
+ } catch (e) {
+ if (typeof type_opt === 'function') {
+ assertInstanceof(e, type_opt);
+ }
+ if (arguments.length >= 3) {
+ assertEquals(e.type, cause_opt);
+ }
+ // Success.
+ return;
+ }
+ throw new MjsUnitAssertionError("Did not throw exception");
+}
+
+
+function makeDivRem(opcode) {
+ var kBodySize = 5;
+ var kNameMainOffset = 6 + 11 + kBodySize + 1;
+
+ var data = bytes(
+ // signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstI32, kAstI32, // (int,int) -> int
+ // -- main function
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
+ opcode, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ // names
+ kDeclEnd,
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data);
+
+ assertEquals("function", typeof module.main);
+
+ return module.main;
+}
+
+var divs = makeDivRem(kExprI32DivS);
+var divu = makeDivRem(kExprI32DivU);
+
+assertEquals( 33, divs( 333, 10));
+assertEquals(-33, divs(-336, 10));
+
+assertEquals( 44, divu( 445, 10));
+assertEquals(429496685, divu(-446, 10));
+
+assertTraps(kTrapDivByZero, "divs(100, 0);");
+assertTraps(kTrapDivByZero, "divs(-1009, 0);");
+
+assertTraps(kTrapDivByZero, "divu(200, 0);");
+assertTraps(kTrapDivByZero, "divu(-2009, 0);");
+
+assertTraps(kTrapDivUnrepresentable, "divs(0x80000000, -1)");
+assertEquals(0, divu(0x80000000, -1));
+
+
+var rems = makeDivRem(kExprI32RemS);
+var remu = makeDivRem(kExprI32RemU);
+
+assertEquals( 3, rems( 333, 10));
+assertEquals(-6, rems(-336, 10));
+
+assertEquals( 5, remu( 445, 10));
+assertEquals( 3, remu(-443, 10));
+
+assertTraps(kTrapRemByZero, "rems(100, 0);");
+assertTraps(kTrapRemByZero, "rems(-1009, 0);");
+
+assertTraps(kTrapRemByZero, "remu(200, 0);");
+assertTraps(kTrapRemByZero, "remu(-2009, 0);");
+
+assertEquals(-2147483648, remu(0x80000000, -1));
+assertEquals(0, rems(0x80000000, -1));
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
new file mode 100644
index 0000000000..3359429055
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -0,0 +1,79 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function testCallFFI(ffi) {
+ var kBodySize = 6;
+ var kNameAddOffset = 28 + kBodySize + 1;
+ var kNameMainOffset = kNameAddOffset + 4;
+
+ var data = bytes(
+ kDeclMemory,
+ 12, 12, 1, // memory
+ // -- signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstF64, kAstF64, // (f64,f64)->int
+ // -- foreign function
+ kDeclFunctions, 2,
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0, // signature index
+ kNameAddOffset, 0, 0, 0, // name offset
+ // -- main function
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0, // signature index
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
+ kExprCallFunction, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ // names
+ kDeclEnd,
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ print("instantiate FFI");
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+}
+
+// everything is good.
+(function() {
+ var ffi = new Object();
+ ffi.fun = function(a, b) { print(a, b); }
+ testCallFFI(ffi);
+})();
+
+
+// FFI object should be an object.
+assertThrows(function() {
+ var ffi = 0;
+ testCallFFI(ffi);
+});
+
+
+// FFI object should have a "fun" property.
+assertThrows(function() {
+ var ffi = new Object();
+ testCallFFI(ffi);
+});
+
+
+// "fun" should be a JS function.
+assertThrows(function() {
+ var ffi = new Object();
+ ffi.fun = new Object();
+ testCallFFI(ffi);
+});
+
+
+// "fun" should be a JS function.
+assertThrows(function() {
+ var ffi = new Object();
+ ffi.fun = 0;
+ testCallFFI(ffi);
+});
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
new file mode 100644
index 0000000000..95d655dc6d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -0,0 +1,333 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function testCallFFI(func, check) {
+ var kBodySize = 6;
+ var kNameFunOffset = 24 + kBodySize + 1;
+ var kNameMainOffset = kNameFunOffset + 4;
+
+ var ffi = new Object();
+ ffi.fun = func;
+
+ var data = bytes(
+ // signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstF64, kAstF64, // (f64,f64) -> int
+ // -- foreign function
+ kDeclFunctions, 2,
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0,
+ kNameFunOffset, 0, 0, 0, // name offset
+ // -- main function
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
+ kExprCallFunction, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ // names
+ kDeclEnd,
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ for (var i = 0; i < 100000; i += 10003) {
+ var a = 22.5 + i, b = 10.5 + i;
+ var r = module.main(a, b);
+ check(r, a, b);
+ }
+}
+
+var global = (function() { return this; })();
+var params = [-99, -99, -99, -99];
+var was_called = false;
+var length = -1;
+
+function FOREIGN_SUB(a, b) {
+ print("FOREIGN_SUB(" + a + ", " + b + ")");
+ was_called = true;
+ params[0] = this;
+ params[1] = a;
+ params[2] = b;
+ return (a - b) | 0;
+}
+
+function check_FOREIGN_SUB(r, a, b) {
+ assertEquals(a - b | 0, r);
+ assertTrue(was_called);
+// assertEquals(global, params[0]); // sloppy mode
+ assertEquals(a, params[1]);
+ assertEquals(b, params[2]);
+ was_called = false;
+}
+
+testCallFFI(FOREIGN_SUB, check_FOREIGN_SUB);
+
+
+function FOREIGN_ABCD(a, b, c, d) {
+ print("FOREIGN_ABCD(" + a + ", " + b + ", " + c + ", " + d + ")");
+ was_called = true;
+ params[0] = this;
+ params[1] = a;
+ params[2] = b;
+ params[3] = c;
+ params[4] = d;
+ return (a * b * 6) | 0;
+}
+
+function check_FOREIGN_ABCD(r, a, b) {
+ assertEquals((a * b * 6) | 0, r);
+ assertTrue(was_called);
+// assertEquals(global, params[0]); // sloppy mode.
+ assertEquals(a, params[1]);
+ assertEquals(b, params[2]);
+ assertEquals(undefined, params[3]);
+ assertEquals(undefined, params[4]);
+ was_called = false;
+}
+
+testCallFFI(FOREIGN_ABCD, check_FOREIGN_ABCD);
+
+function FOREIGN_ARGUMENTS0() {
+ print("FOREIGN_ARGUMENTS0");
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (arguments[0] * arguments[1] * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS1(a) {
+ print("FOREIGN_ARGUMENTS1", a);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (arguments[0] * arguments[1] * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS2(a, b) {
+ print("FOREIGN_ARGUMENTS2", a, b);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (a * b * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS3(a, b, c) {
+ print("FOREIGN_ARGUMENTS3", a, b, c);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (a * b * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS4(a, b, c, d) {
+ print("FOREIGN_ARGUMENTS4", a, b, c, d);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (a * b * 7) | 0;
+}
+
+function check_FOREIGN_ARGUMENTS(r, a, b) {
+ assertEquals((a * b * 7) | 0, r);
+ assertTrue(was_called);
+ assertEquals(2, length);
+ assertEquals(a, params[0]);
+ assertEquals(b, params[1]);
+ was_called = false;
+}
+
+// Check a bunch of uses of the arguments object.
+testCallFFI(FOREIGN_ARGUMENTS0, check_FOREIGN_ARGUMENTS);
+testCallFFI(FOREIGN_ARGUMENTS1, check_FOREIGN_ARGUMENTS);
+testCallFFI(FOREIGN_ARGUMENTS2, check_FOREIGN_ARGUMENTS);
+testCallFFI(FOREIGN_ARGUMENTS3, check_FOREIGN_ARGUMENTS);
+testCallFFI(FOREIGN_ARGUMENTS4, check_FOREIGN_ARGUMENTS);
+
+function returnValue(val) {
+ return function(a, b) {
+ print("RETURN_VALUE ", val);
+ return val;
+ }
+}
+
+
+function checkReturn(expected) {
+ return function(r, a, b) { assertEquals(expected, r); }
+}
+
+// Check that returning weird values doesn't crash
+testCallFFI(returnValue(undefined), checkReturn(0));
+testCallFFI(returnValue(null), checkReturn(0));
+testCallFFI(returnValue("0"), checkReturn(0));
+testCallFFI(returnValue("-77"), checkReturn(-77));
+
+var objWithValueOf = {valueOf: function() { return 198; }}
+
+testCallFFI(returnValue(objWithValueOf), checkReturn(198));
+
+
+function testCallBinopVoid(type, func, check) {
+ var kBodySize = 10;
+ var kNameFunOffset = 28 + kBodySize + 1;
+ var kNameMainOffset = kNameFunOffset + 4;
+
+ var ffi = new Object();
+
+ var passed_length = -1;
+ var passed_a = -1;
+ var passed_b = -1;
+ var args_a = -1;
+ var args_b = -1;
+
+ ffi.fun = function(a, b) {
+ passed_length = arguments.length;
+ passed_a = a;
+ passed_b = b;
+ args_a = arguments[0];
+ args_b = arguments[1];
+ }
+
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 2,
+ 2, kAstStmt, type, type, // (type,type)->void
+ 2, kAstI32, type, type, // (type,type)->int
+ // -- foreign function
+ kDeclFunctions, 2,
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0, // signature index
+ kNameFunOffset, 0, 0, 0, // name offset
+ // -- main function
+ kDeclFunctionName | kDeclFunctionExport,
+ 1, 0, // signature index
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ // main body
+ kExprBlock, 2, // --
+ kExprCallFunction, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI8Const, 99, // --
+ // names
+ kDeclEnd,
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ print("testCallBinopVoid", type);
+
+ for (var i = 0; i < 100000; i += 10003.1) {
+ var a = 22.5 + i, b = 10.5 + i;
+ var r = module.main(a, b);
+ assertEquals(99, r);
+ assertEquals(2, passed_length);
+ var expected_a, expected_b;
+ switch (type) {
+ case kAstI32: {
+ expected_a = a | 0;
+ expected_b = b | 0;
+ break;
+ }
+ case kAstF32: {
+ expected_a = Math.fround(a);
+ expected_b = Math.fround(b);
+ break;
+ }
+ case kAstF64: {
+ expected_a = a;
+ expected_b = b;
+ break;
+ }
+ }
+
+ assertEquals(expected_a, args_a);
+ assertEquals(expected_b, args_b);
+ assertEquals(expected_a, passed_a);
+ assertEquals(expected_b, passed_b);
+ }
+}
+
+
+testCallBinopVoid(kAstI32);
+// TODO testCallBinopVoid(kAstI64);
+testCallBinopVoid(kAstF32);
+testCallBinopVoid(kAstF64);
+
+
+
+function testCallPrint() {
+ var kBodySize = 10;
+ var kNamePrintOffset = 10 + 7 + 7 + 9 + kBodySize + 1;
+ var kNameMainOffset = kNamePrintOffset + 6;
+
+ var ffi = new Object();
+ ffi.print = print;
+
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 2,
+ 1, kAstStmt, kAstI32, // i32->void
+ 1, kAstStmt, kAstF64, // f64->int
+ kDeclFunctions, 3,
+ // -- import print i32
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0, // signature index
+ kNamePrintOffset, 0, 0, 0, // name offset
+ // -- import print f64
+ kDeclFunctionName | kDeclFunctionImport,
+ 1, 0, // signature index
+ kNamePrintOffset, 0, 0, 0, // name offset
+ // -- decl main
+ kDeclFunctionName | kDeclFunctionExport,
+ 1, 0, // signature index
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ // main body
+ kExprBlock, 2, // --
+ kExprCallFunction, 0, // --
+ kExprI8Const, 97, // --
+ kExprCallFunction, 1, // --
+ kExprGetLocal, 0, // --
+ // names
+ kDeclEnd,
+ 'p', 'r', 'i', 'n', 't', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ for (var i = -9; i < 900; i += 6.125) {
+ module.main(i);
+ }
+}
+
+testCallPrint();
+testCallPrint();
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
new file mode 100644
index 0000000000..560c8baa08
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -0,0 +1,73 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+var module = (function () {
+ var kFuncWithBody = 9;
+ var kFuncImported = 7;
+ var kBodySize1 = 5;
+ var kBodySize2 = 8;
+ var kFuncTableSize = 8;
+ var kSubOffset = 13 + kFuncWithBody + kBodySize1 + kFuncImported + kFuncWithBody + kBodySize2 + kFuncTableSize + 1;
+ var kAddOffset = kSubOffset + 4;
+ var kMainOffset = kAddOffset + 4;
+
+ var ffi = new Object();
+ ffi.add = (function(a, b) { return a + b | 0; });
+
+ return _WASMEXP_.instantiateModule(bytes(
+ // -- signatures
+ kDeclSignatures, 2,
+ 2, kAstI32, kAstI32, kAstI32, // int, int -> int
+ 3, kAstI32, kAstI32, kAstI32, kAstI32, // int, int, int -> int
+ // -- function #0 (sub)
+ kDeclFunctions, 3,
+ kDeclFunctionName,
+ 0, 0, // signature offset
+ kSubOffset, 0, 0, 0, // name offset
+ kBodySize1, 0, // body size
+ kExprI32Sub, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ // -- function #1 (add)
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0, // signature offset
+ kAddOffset, 0, 0, 0, // name offset
+ // -- function #2 (main)
+ kDeclFunctionName | kDeclFunctionExport,
+ 1, 0, // signature offset
+ kMainOffset, 0, 0, 0, // name offset
+ kBodySize2, 0, // body size
+ kExprCallIndirect, 0,
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ // -- function table
+ kDeclFunctionTable,
+ 3,
+ 0, 0,
+ 1, 0,
+ 2, 0,
+ kDeclEnd,
+ 's', 'u', 'b', 0, // name
+ 'a', 'd', 'd', 0, // name
+ 'm', 'a', 'i', 'n', 0 // name
+ ), ffi);
+})();
+
+// Check the module exists.
+assertFalse(module === undefined);
+assertFalse(module === null);
+assertFalse(module === 0);
+assertEquals("object", typeof module);
+assertEquals("function", typeof module.main);
+
+assertEquals(5, module.main(0, 12, 7));
+assertEquals(19, module.main(1, 12, 7));
+
+assertTraps(kTrapFuncSigMismatch, "module.main(2, 12, 33)");
+assertTraps(kTrapFuncInvalid, "module.main(3, 12, 33)");
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
new file mode 100644
index 0000000000..13a22615fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -0,0 +1,62 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+var kReturnValue = 117;
+
+var kBodySize = 2;
+var kNameOffset = 19 + kBodySize + 1;
+
+var data = bytes(
+ // -- memory
+ kDeclMemory,
+ 10, 10, 1,
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstI32, // signature: void -> int
+ // -- main function
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0, // signature index
+ kNameOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ // -- body
+ kExprI8Const, // --
+ kReturnValue, // --
+ kDeclEnd,
+ 'm', 'a', 'i', 'n', 0 // name
+);
+
+var module = _WASMEXP_.instantiateModule(data);
+
+// Check the module exists.
+assertFalse(module === undefined);
+assertFalse(module === null);
+assertFalse(module === 0);
+assertEquals("object", typeof module);
+
+// Check the memory is an ArrayBuffer.
+var mem = module.memory;
+assertFalse(mem === undefined);
+assertFalse(mem === null);
+assertFalse(mem === 0);
+assertEquals("object", typeof mem);
+assertTrue(mem instanceof ArrayBuffer);
+for (var i = 0; i < 4; i++) {
+ module.memory = 0; // should be ignored
+ assertEquals(mem, module.memory);
+}
+
+assertEquals(1024, module.memory.byteLength);
+
+// Check the properties of the main function.
+assertFalse(module.main === undefined);
+assertFalse(module.main === null);
+assertFalse(module.main === 0);
+assertEquals("function", typeof module.main);
+
+assertEquals(kReturnValue, module.main());
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
new file mode 100644
index 0000000000..e9c1404a4f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -0,0 +1,181 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --stress-compaction
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+var kMemSize = 4096;
+
+function genModule(memory) {
+ var kBodySize = 27;
+ var kNameMainOffset = 28 + kBodySize + 1;
+
+ var data = bytes(
+ kDeclMemory,
+ 12, 12, 1, // memory
+ // -- signatures
+ kDeclSignatures, 1,
+ 1, kAstI32, kAstI32, // int->int
+ // -- main function
+ kDeclFunctions, 1,
+ kDeclFunctionLocals | kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ 1, 0, // local int32 count
+ 0, 0, // local int64 count
+ 0, 0, // local float32 count
+ 0, 0, // local float64 count
+ kBodySize, 0, // code size
+ // main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
+ kExprBlock,2,
+ kExprLoop,1,
+ kExprIf,
+ kExprGetLocal,0,
+ kExprBr, 0,
+ kExprIfElse,
+ kExprI32LoadMem,0,kExprGetLocal,0,
+ kExprBr,2, kExprI8Const, 255,
+ kExprSetLocal,0,
+ kExprI32Sub,kExprGetLocal,0,kExprI8Const,4,
+ kExprI8Const,0,
+ // names
+ kDeclEnd,
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ return _WASMEXP_.instantiateModule(data, null, memory);
+}
+
+function testPokeMemory() {
+ var module = genModule(null);
+ var buffer = module.memory;
+ assertEquals(kMemSize, buffer.byteLength);
+
+ var array = new Int8Array(buffer);
+ assertEquals(kMemSize, array.length);
+
+ for (var i = 0; i < kMemSize; i++) {
+ assertEquals(0, array[i]);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals(0, module.main(kMemSize - 4));
+
+ array[kMemSize/2 + i] = 1;
+ assertEquals(0, module.main(kMemSize/2 - 4));
+ assertEquals(-1, module.main(kMemSize - 4));
+
+ array[kMemSize/2 + i] = 0;
+ assertEquals(0, module.main(kMemSize - 4));
+ }
+}
+
+testPokeMemory();
+
+function testSurvivalAcrossGc() {
+ var checker = genModule(null).main;
+ for (var i = 0; i < 5; i++) {
+ print("gc run ", i);
+ assertEquals(0, checker(kMemSize - 4));
+ gc();
+ }
+}
+
+testSurvivalAcrossGc();
+testSurvivalAcrossGc();
+testSurvivalAcrossGc();
+testSurvivalAcrossGc();
+
+
+function testPokeOuterMemory() {
+ var buffer = new ArrayBuffer(kMemSize);
+ var module = genModule(buffer);
+ assertEquals(kMemSize, buffer.byteLength);
+
+ var array = new Int8Array(buffer);
+ assertEquals(kMemSize, array.length);
+
+ for (var i = 0; i < kMemSize; i++) {
+ assertEquals(0, array[i]);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals(0, module.main(kMemSize - 4));
+
+ array[kMemSize/2 + i] = 1;
+ assertEquals(0, module.main(kMemSize/2 - 4));
+ assertEquals(-1, module.main(kMemSize - 4));
+
+ array[kMemSize/2 + i] = 0;
+ assertEquals(0, module.main(kMemSize - 4));
+ }
+}
+
+testPokeOuterMemory();
+
+function testOuterMemorySurvivalAcrossGc() {
+ var buffer = new ArrayBuffer(kMemSize);
+ var checker = genModule(buffer).main;
+ for (var i = 0; i < 5; i++) {
+ print("gc run ", i);
+ assertEquals(0, checker(kMemSize - 4));
+ gc();
+ }
+}
+
+testOuterMemorySurvivalAcrossGc();
+testOuterMemorySurvivalAcrossGc();
+testOuterMemorySurvivalAcrossGc();
+testOuterMemorySurvivalAcrossGc();
+
+
+function testOOBThrows() {
+ var kBodySize = 8;
+ var kNameMainOffset = 29 + kBodySize + 1;
+
+ var data = bytes(
+ kDeclMemory,
+ 12, 12, 1, // memory = 4KB
+ // -- signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstI32, kAstI32, // int->int
+ // -- main function
+ kDeclFunctions, 1,
+ kDeclFunctionLocals | kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ 1, 0, // local int32 count
+ 0, 0, // local int64 count
+ 0, 0, // local float32 count
+ 0, 0, // local float64 count
+ kBodySize, 0, // code size
+ // geti: return mem[a] = mem[b]
+ kExprI32StoreMem, 0, kExprGetLocal, 0, kExprI32LoadMem, 0, kExprGetLocal, 1,
+ // names
+ kDeclEnd,
+ 'g','e','t','i', 0 // --
+ );
+
+ var memory = null;
+ var module = _WASMEXP_.instantiateModule(data, null, memory);
+
+ var offset;
+
+ function read() { return module.geti(0, offset); }
+ function write() { return module.geti(offset, 0); }
+
+ for (offset = 0; offset < 4092; offset++) {
+ assertEquals(0, read());
+ assertEquals(0, write());
+ }
+
+
+ for (offset = 4093; offset < 4124; offset++) {
+ assertTraps(kTrapMemOutOfBounds, read);
+ assertTraps(kTrapMemOutOfBounds, write);
+ }
+}
+
+testOOBThrows();
diff --git a/deps/v8/test/mjsunit/wasm/params.js b/deps/v8/test/mjsunit/wasm/params.js
new file mode 100644
index 0000000000..52d6214751
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/params.js
@@ -0,0 +1,139 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function runSelect2(module, which, a, b) {
+ assertEquals(which == 0 ? a : b, module.select(a, b));
+}
+
+function testSelect2(type) {
+ var kBodySize = 2;
+ var kNameOffset = 21 + kBodySize + 1;
+
+ for (var which = 0; which < 2; which++) {
+ print("type = " + type + ", which = " + which);
+
+ var data = bytes(
+ // -- memory
+ kDeclMemory,
+ 12, 12, 1, // memory
+ // -- signatures
+ kDeclSignatures, 1,
+ 2, type, type, type, // signature: (t,t)->t
+ // -- select
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ kExprGetLocal, which, // --
+ kDeclEnd,
+ 's','e','l','e','c','t',0 // name
+ );
+
+ var module = _WASMEXP_.instantiateModule(data);
+
+ assertEquals("function", typeof module.select);
+ runSelect2(module, which, 99, 97);
+ runSelect2(module, which, -99, -97);
+
+ if (type != kAstF32) {
+ runSelect2(module, which, 0x80000000 | 0, 0x7fffffff | 0);
+ runSelect2(module, which, 0x80000001 | 0, 0x7ffffffe | 0);
+ runSelect2(module, which, 0xffffffff | 0, 0xfffffffe | 0);
+ runSelect2(module, which, -2147483647, 2147483646);
+ runSelect2(module, which, -2147483646, 2147483645);
+ runSelect2(module, which, -2147483648, 2147483647);
+ }
+
+ if (type != kAstI32 && type != kAstI64) {
+ runSelect2(module, which, -1.25, 5.25);
+ runSelect2(module, which, Infinity, -Infinity);
+ }
+ }
+}
+
+
+testSelect2(kAstI32);
+testSelect2(kAstF32);
+testSelect2(kAstF64);
+
+
+function runSelect10(module, which, a, b) {
+ var x = -1;
+
+ var result = [
+ module.select(a, b, x, x, x, x, x, x, x, x),
+ module.select(x, a, b, x, x, x, x, x, x, x),
+ module.select(x, x, a, b, x, x, x, x, x, x),
+ module.select(x, x, x, a, b, x, x, x, x, x),
+ module.select(x, x, x, x, a, b, x, x, x, x),
+ module.select(x, x, x, x, x, a, b, x, x, x),
+ module.select(x, x, x, x, x, x, a, b, x, x),
+ module.select(x, x, x, x, x, x, x, a, b, x),
+ module.select(x, x, x, x, x, x, x, x, a, b),
+ module.select(x, x, x, x, x, x, x, x, x, a)
+ ];
+
+ for (var i = 0; i < 10; i++) {
+ if (which == i) assertEquals(a, result[i]);
+ else if (which == i+1) assertEquals(b, result[i]);
+ else assertEquals(x, result[i]);
+ }
+}
+
+function testSelect10(type) {
+ var kBodySize = 2;
+ var kNameOffset = 29 + kBodySize + 1;
+
+ for (var which = 0; which < 10; which++) {
+ print("type = " + type + ", which = " + which);
+
+ var t = type;
+ var data = bytes(
+ kDeclMemory,
+ 12, 12, 1, // memory
+ // signatures
+ kDeclSignatures, 1,
+ 10, t,t,t,t,t,t,t,t,t,t,t, // (tx10)->t
+ // main function
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ kExprGetLocal, which, // --
+ kDeclEnd,
+ 's','e','l','e','c','t',0 // name
+ );
+
+ var module = _WASMEXP_.instantiateModule(data);
+
+ assertEquals("function", typeof module.select);
+ runSelect10(module, which, 99, 97);
+ runSelect10(module, which, -99, -97);
+
+ if (type != kAstF32) {
+ runSelect10(module, which, 0x80000000 | 0, 0x7fffffff | 0);
+ runSelect10(module, which, 0x80000001 | 0, 0x7ffffffe | 0);
+ runSelect10(module, which, 0xffffffff | 0, 0xfffffffe | 0);
+ runSelect10(module, which, -2147483647, 2147483646);
+ runSelect10(module, which, -2147483646, 2147483645);
+ runSelect10(module, which, -2147483648, 2147483647);
+ }
+
+ if (type != kAstI32 && type != kAstI64) {
+ runSelect10(module, which, -1.25, 5.25);
+ runSelect10(module, which, Infinity, -Infinity);
+ }
+ }
+}
+
+
+testSelect10(kAstI32);
+testSelect10(kAstF32);
+testSelect10(kAstF64);
diff --git a/deps/v8/test/mjsunit/wasm/stackwalk.js b/deps/v8/test/mjsunit/wasm/stackwalk.js
new file mode 100644
index 0000000000..e863b07b4f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/stackwalk.js
@@ -0,0 +1,135 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function makeFFI(func) {
+ var kBodySize = 6;
+ var kNameFunOffset = 24 + kBodySize + 1;
+ var kNameMainOffset = kNameFunOffset + 4;
+
+ var ffi = new Object();
+ ffi.fun = func;
+
+ var data = bytes(
+ // signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstF64, kAstF64, // (f64,f64) -> int
+ // -- foreign function
+ kDeclFunctions, 2,
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0,
+ kNameFunOffset, 0, 0, 0, // name offset
+ // -- main function
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
+ kExprCallFunction, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ // names
+ kDeclEnd,
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ return module.main;
+}
+
+
+function makeReentrantFFI(func) {
+ var main = makeFFI(reenter);
+
+ function reenter(a, b) {
+ print(" reenter " + a);
+ if (a > 0) main(a - 1, b);
+ else func();
+ }
+ return main;
+}
+
+
+function runTest(builder) {
+ // ---- THROWING TEST -----------------------------------------------
+
+ function throwadd(a, b) {
+ print("-- trying throw --");
+ throw a + b;
+ }
+
+ function throwa(a) {
+ print("-- trying throw --");
+ throw a;
+ }
+
+ function throwstr() {
+ print("-- trying throw --");
+ throw "string";
+ }
+
+ assertThrows(builder(throwadd));
+ assertThrows(builder(throwa));
+ assertThrows(builder(throwstr));
+
+ try {
+ builder(throwadd)(7.8, 9.9);
+ } catch(e) {
+ print(e);
+ }
+
+ try {
+ builder(throwa)(11.8, 9.3);
+ } catch(e) {
+ print(e);
+ }
+
+
+ try {
+ builder(throwstr)(3, 5);
+ } catch(e) {
+ print(e);
+ }
+
+
+ // ---- DEOPT TEST -----------------------------------------------
+
+ function deopt() {
+ print("-- trying deopt --");
+ %DeoptimizeFunction(deopter);
+ }
+
+ var deopter = builder(deopt);
+
+ deopter(5, 5);
+ for (var i = 0; i < 9; i++) {
+ deopter(6, 6);
+ }
+
+
+ // ---- GC TEST -----------------------------------------------
+ function dogc(a, b) {
+ print("-- trying gc --");
+ gc();
+ gc();
+ }
+
+
+ var gcer = builder(dogc);
+ gcer(7, 7);
+
+ for (var i = 0; i < 9; i++) {
+ gcer(8, 8);
+ }
+}
+
+runTest(makeReentrantFFI);
+runTest(makeFFI);
diff --git a/deps/v8/test/mjsunit/wasm/unreachable.js b/deps/v8/test/mjsunit/wasm/unreachable.js
new file mode 100644
index 0000000000..10eea23230
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/unreachable.js
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+var module = (function () {
+ var kFuncWithBody = 9;
+ var kFuncImported = 7;
+ var kBodySize1 = 1;
+ var kMainOffset = 6 + kFuncWithBody + kBodySize1 + 1;
+
+ var ffi = new Object();
+ ffi.add = (function(a, b) { return a + b | 0; });
+
+ return _WASMEXP_.instantiateModule(bytes(
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstStmt, // void -> void
+ // -- function #0 (unreachable)
+ kDeclFunctions, 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0, // signature offset
+ kMainOffset, 0, 0, 0, // name offset
+ kBodySize1, 0, // body size
+ kExprUnreachable,
+ kDeclEnd,
+ 'm', 'a', 'i', 'n', 0 // name
+ ), ffi);
+})();
+
+// Check the module exists.
+assertFalse(module === undefined);
+assertFalse(module === null);
+assertFalse(module === 0);
+assertEquals("object", typeof module);
+assertEquals("function", typeof module.main);
+
+var exception = "";
+try {
+ assertEquals(0, module.main());
+} catch(e) {
+ print("correctly caught: " + e);
+ exception = e;
+}
+assertEquals("unreachable", exception);
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js b/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
new file mode 100644
index 0000000000..c7383c8327
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function Foo() { }
+
+assertThrows(function() { _WASMEXP_.verifyFunction(); })
+assertThrows(function() { _WASMEXP_.verifyFunction(0); })
+assertThrows(function() { _WASMEXP_.verifyFunction("s"); })
+assertThrows(function() { _WASMEXP_.verifyFunction(undefined); })
+assertThrows(function() { _WASMEXP_.verifyFunction(1.1); })
+assertThrows(function() { _WASMEXP_.verifyFunction(1/0); })
+assertThrows(function() { _WASMEXP_.verifyFunction(null); })
+assertThrows(function() { _WASMEXP_.verifyFunction(new Foo()); })
+assertThrows(function() { _WASMEXP_.verifyFunction(new ArrayBuffer(0)); })
+assertThrows(function() { _WASMEXP_.verifyFunction(new ArrayBuffer(140000)); })
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-simple.js b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
new file mode 100644
index 0000000000..c4d51c7423
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
@@ -0,0 +1,44 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+try {
+ var data = bytes(
+ 0, kAstStmt, // signature
+ 3, 0, // local int32 count
+ 4, 0, // local int64 count
+ 5, 0, // local float32 count
+ 6, 0, // local float64 count
+ kExprNop // body
+ );
+
+ _WASMEXP_.verifyFunction(data);
+ print("ok");
+} catch (e) {
+ assertTrue(false);
+}
+
+
+var threw = false;
+try {
+ var data = bytes(
+ 0, kAstI32, // signature
+ 2, 0, // local int32 count
+ 3, 0, // local int64 count
+ 4, 0, // local float32 count
+ 5, 0, // local float64 count
+ kExprBlock, 2, kExprNop, kExprNop // body
+ );
+
+ _WASMEXP_.verifyFunction(data);
+ print("not ok");
+} catch (e) {
+ print("ok: " + e);
+ threw = true;
+}
+
+assertTrue(threw);
diff --git a/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js b/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
new file mode 100644
index 0000000000..37658d3786
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function Foo() { }
+
+assertThrows(function() { _WASMEXP_.verifyModule(); })
+assertThrows(function() { _WASMEXP_.verifyModule(0); })
+assertThrows(function() { _WASMEXP_.verifyModule("s"); })
+assertThrows(function() { _WASMEXP_.verifyModule(undefined); })
+assertThrows(function() { _WASMEXP_.verifyModule(1.1); })
+assertThrows(function() { _WASMEXP_.verifyModule(1/0); })
+assertThrows(function() { _WASMEXP_.verifyModule(null); })
+assertThrows(function() { _WASMEXP_.verifyModule(new Foo()); })
+assertThrows(function() { _WASMEXP_.verifyModule(new ArrayBuffer(0)); })
+assertThrows(function() { _WASMEXP_.verifyModule(new ArrayBuffer(7)); })
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
new file mode 100644
index 0000000000..4b710f1037
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -0,0 +1,248 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+function bytes() {
+ var buffer = new ArrayBuffer(arguments.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < arguments.length; i++) {
+ var val = arguments[i];
+ if ((typeof val) == "string") val = val.charCodeAt(0);
+ view[i] = val | 0;
+ }
+ return buffer;
+}
+
+// Section declaration constants
+var kDeclMemory = 0x00;
+var kDeclSignatures = 0x01;
+var kDeclFunctions = 0x02;
+var kDeclGlobals = 0x03;
+var kDeclDataSegments = 0x04;
+var kDeclFunctionTable = 0x05;
+var kDeclEnd = 0x06;
+
+// Function declaration flags
+var kDeclFunctionName = 0x01;
+var kDeclFunctionImport = 0x02;
+var kDeclFunctionLocals = 0x04;
+var kDeclFunctionExport = 0x08;
+
+// Local types
+var kAstStmt = 0;
+var kAstI32 = 1;
+var kAstI64 = 2;
+var kAstF32 = 3;
+var kAstF64 = 4;
+
+// Opcodes
+var kExprNop = 0x00;
+var kExprBlock = 0x01;
+var kExprLoop = 0x02;
+var kExprIf = 0x03;
+var kExprIfElse = 0x04;
+var kExprSelect = 0x05;
+var kExprBr = 0x06;
+var kExprBrIf = 0x07;
+var kExprTableSwitch = 0x08;
+var kExprReturn = 0x14;
+var kExprUnreachable = 0x15;
+
+var kExprI8Const = 0x09;
+var kExprI32Const = 0x0a;
+var kExprI64Const = 0x0b;
+var kExprF64Const = 0x0c;
+var kExprF32Const = 0x0d;
+var kExprGetLocal = 0x0e;
+var kExprSetLocal = 0x0f;
+var kExprLoadGlobal = 0x10;
+var kExprStoreGlobal = 0x11;
+var kExprCallFunction = 0x12;
+var kExprCallIndirect = 0x13;
+
+var kExprI32LoadMem8S = 0x20;
+var kExprI32LoadMem8U = 0x21;
+var kExprI32LoadMem16S = 0x22;
+var kExprI32LoadMem16U = 0x23;
+var kExprI64LoadMem8S = 0x24;
+var kExprI64LoadMem8U = 0x25;
+var kExprI64LoadMem16S = 0x26;
+var kExprI64LoadMem16U = 0x27;
+var kExprI64LoadMem32S = 0x28;
+var kExprI64LoadMem32U = 0x29;
+var kExprI32LoadMem = 0x2a;
+var kExprI64LoadMem = 0x2b;
+var kExprF32LoadMem = 0x2c;
+var kExprF64LoadMem = 0x2d;
+
+var kExprI32StoreMem8 = 0x2e;
+var kExprI32StoreMem16 = 0x2f;
+var kExprI64StoreMem8 = 0x30;
+var kExprI64StoreMem16 = 0x31;
+var kExprI64StoreMem32 = 0x32;
+var kExprI32StoreMem = 0x33;
+var kExprI64StoreMem = 0x34;
+var kExprF32StoreMem = 0x35;
+var kExprF64StoreMem = 0x36;
+
+var kExprMemorySize = 0x3b;
+var kExprGrowMemory = 0x39;
+
+var kExprI32Add = 0x40;
+var kExprI32Sub = 0x41;
+var kExprI32Mul = 0x42;
+var kExprI32DivS = 0x43;
+var kExprI32DivU = 0x44;
+var kExprI32RemS = 0x45;
+var kExprI32RemU = 0x46;
+var kExprI32And = 0x47;
+var kExprI32Ior = 0x48;
+var kExprI32Xor = 0x49;
+var kExprI32Shl = 0x4a;
+var kExprI32ShrU = 0x4b;
+var kExprI32ShrS = 0x4c;
+var kExprI32Eq = 0x4d;
+var kExprI32Ne = 0x4e;
+var kExprI32LtS = 0x4f;
+var kExprI32LeS = 0x50;
+var kExprI32LtU = 0x51;
+var kExprI32LeU = 0x52;
+var kExprI32GtS = 0x53;
+var kExprI32GeS = 0x54;
+var kExprI32GtU = 0x55;
+var kExprI32GeU = 0x56;
+var kExprI32Clz = 0x57;
+var kExprI32Ctz = 0x58;
+var kExprI32Popcnt = 0x59;
+var kExprBoolNot = 0x5a;
+var kExprI64Add = 0x5b;
+var kExprI64Sub = 0x5c;
+var kExprI64Mul = 0x5d;
+var kExprI64DivS = 0x5e;
+var kExprI64DivU = 0x5f;
+var kExprI64RemS = 0x60;
+var kExprI64RemU = 0x61;
+var kExprI64And = 0x62;
+var kExprI64Ior = 0x63;
+var kExprI64Xor = 0x64;
+var kExprI64Shl = 0x65;
+var kExprI64ShrU = 0x66;
+var kExprI64ShrS = 0x67;
+var kExprI64Eq = 0x68;
+var kExprI64Ne = 0x69;
+var kExprI64LtS = 0x6a;
+var kExprI64LeS = 0x6b;
+var kExprI64LtU = 0x6c;
+var kExprI64LeU = 0x6d;
+var kExprI64GtS = 0x6e;
+var kExprI64GeS = 0x6f;
+var kExprI64GtU = 0x70;
+var kExprI64GeU = 0x71;
+var kExprI64Clz = 0x72;
+var kExprI64Ctz = 0x73;
+var kExprI64Popcnt = 0x74;
+var kExprF32Add = 0x75;
+var kExprF32Sub = 0x76;
+var kExprF32Mul = 0x77;
+var kExprF32Div = 0x78;
+var kExprF32Min = 0x79;
+var kExprF32Max = 0x7a;
+var kExprF32Abs = 0x7b;
+var kExprF32Neg = 0x7c;
+var kExprF32CopySign = 0x7d;
+var kExprF32Ceil = 0x7e;
+var kExprF32Floor = 0x7f;
+var kExprF32Trunc = 0x80;
+var kExprF32NearestInt = 0x81;
+var kExprF32Sqrt = 0x82;
+var kExprF32Eq = 0x83;
+var kExprF32Ne = 0x84;
+var kExprF32Lt = 0x85;
+var kExprF32Le = 0x86;
+var kExprF32Gt = 0x87;
+var kExprF32Ge = 0x88;
+var kExprF64Add = 0x89;
+var kExprF64Sub = 0x8a;
+var kExprF64Mul = 0x8b;
+var kExprF64Div = 0x8c;
+var kExprF64Min = 0x8d;
+var kExprF64Max = 0x8e;
+var kExprF64Abs = 0x8f;
+var kExprF64Neg = 0x90;
+var kExprF64CopySign = 0x91;
+var kExprF64Ceil = 0x92;
+var kExprF64Floor = 0x93;
+var kExprF64Trunc = 0x94;
+var kExprF64NearestInt = 0x95;
+var kExprF64Sqrt = 0x96;
+var kExprF64Eq = 0x97;
+var kExprF64Ne = 0x98;
+var kExprF64Lt = 0x99;
+var kExprF64Le = 0x9a;
+var kExprF64Gt = 0x9b;
+var kExprF64Ge = 0x9c;
+var kExprI32SConvertF32 = 0x9d;
+var kExprI32SConvertF64 = 0x9e;
+var kExprI32UConvertF32 = 0x9f;
+var kExprI32UConvertF64 = 0xa0;
+var kExprI32ConvertI64 = 0xa1;
+var kExprI64SConvertF32 = 0xa2;
+var kExprI64SConvertF64 = 0xa3;
+var kExprI64UConvertF32 = 0xa4;
+var kExprI64UConvertF64 = 0xa5;
+var kExprI64SConvertI32 = 0xa6;
+var kExprI64UConvertI32 = 0xa7;
+var kExprF32SConvertI32 = 0xa8;
+var kExprF32UConvertI32 = 0xa9;
+var kExprF32SConvertI64 = 0xaa;
+var kExprF32UConvertI64 = 0xab;
+var kExprF32ConvertF64 = 0xac;
+var kExprF32ReinterpretI32 = 0xad;
+var kExprF64SConvertI32 = 0xae;
+var kExprF64UConvertI32 = 0xaf;
+var kExprF64SConvertI64 = 0xb0;
+var kExprF64UConvertI64 = 0xb1;
+var kExprF64ConvertF32 = 0xb2;
+var kExprF64ReinterpretI64 = 0xb3;
+var kExprI32ReinterpretF32 = 0xb4;
+var kExprI64ReinterpretF64 = 0xb5;
+
+var kTrapUnreachable = 0;
+var kTrapMemOutOfBounds = 1;
+var kTrapDivByZero = 2;
+var kTrapDivUnrepresentable = 3;
+var kTrapRemByZero = 4;
+var kTrapFloatUnrepresentable = 5;
+var kTrapFuncInvalid = 6;
+var kTrapFuncSigMismatch = 7;
+
+var kTrapMsgs = [
+ "unreachable",
+ "memory access out of bounds",
+ "divide by zero",
+ "divide result unrepresentable",
+ "remainder by zero",
+ "integer result unrepresentable",
+ "invalid function",
+ "function signature mismatch"
+];
+
+function assertTraps(trap, code) {
+ var threwException = true;
+ try {
+ if (typeof code === 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ threwException = false;
+ } catch (e) {
+ assertEquals("string", typeof e);
+ assertEquals(kTrapMsgs[trap], e);
+ // Success.
+ return;
+ }
+ throw new MjsUnitAssertionError("Did not trap, expected: " + kTrapMsgs[trap]);
+}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-object-api.js b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
new file mode 100644
index 0000000000..1dfbb6522e
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+assertFalse(undefined === _WASMEXP_);
+assertFalse(undefined == _WASMEXP_);
+assertEquals("function", typeof _WASMEXP_.verifyModule);
+assertEquals("function", typeof _WASMEXP_.verifyFunction);
+assertEquals("function", typeof _WASMEXP_.compileRun);
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 1bc6bf5576..c54d154ab3 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -90,11 +90,25 @@
# ES2015 ToLength semantics
'ecma_3/RegExp/15.10.6.2-2': [FAIL],
+ # Escaped keywords are early errors in ES6
+ 'ecma_3/Unicode/uc-003': [FAIL],
+
# RegExp.multiline is not part of any ECMAScript specification, and is
# slated for deprecation in Mozilla
# (https://bugzilla.mozilla.org/show_bug.cgi?id=1220457)
'js1_5/Regress/regress-418504': [FAIL],
+ # ES2015 const redefinition throws, initializers are required, and no
+ # global object properties are made, unlike Mozilla legacy const
+ 'js1_5/Regress/regress-103602': [FAIL],
+ 'js1_5/Regress/regress-321874': [FAIL],
+ 'js1_5/Regress/regress-383674': [FAIL],
+ 'js1_5/extensions/regress-452565': [FAIL],
+ 'js1_5/extensions/scope-001': [FAIL],
+ # To add insult to injury, these tests time out in debug mode
+ 'js1_5/Regress/regress-360969-03': [FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'js1_5/Regress/regress-360969-04': [FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+
##################### SKIPPED TESTS #####################
# This test checks that we behave properly in an out-of-memory
@@ -158,8 +172,6 @@
##################### FLAKY TESTS #####################
# These tests time out in debug mode but pass in product mode
- 'js1_5/Regress/regress-360969-03': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
- 'js1_5/Regress/regress-360969-04': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
'js1_5/Regress/regress-360969-05': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
'js1_5/Regress/regress-360969-06': [PASS, ['mode == debug', TIMEOUT, NO_VARIANTS]],
'js1_5/extensions/regress-365527': [PASS, SLOW, ['mode == debug', TIMEOUT, NO_VARIANTS]],
@@ -603,16 +615,6 @@
'js1_5/Regress/regress-290575': [PASS, FAIL_OK],
- # Fails because of the way function declarations are
- # handled in V8/JSC. V8 follows IE behavior and introduce
- # all nested function declarations when entering the
- # surrounding function, whereas Spidermonkey declares
- # them dynamically when the statement is executed.
- 'ecma_3/Function/scope-001': [FAIL_OK],
- 'ecma_3/FunExpr/fe-001': [FAIL_OK],
- 'js1_5/Scope/regress-184107': [FAIL_OK],
-
-
# Function is deletable in V8 and JSC.
'js1_5/Regress/regress-352604': [FAIL_OK],
@@ -621,11 +623,6 @@
'js1_5/Regress/regress-417893': [FAIL_OK],
- # Unsupported use of "[]" as function parameter. We match JSC.
- 'js1_5/Regress/regress-416737-01': [FAIL_OK],
- 'js1_5/Regress/regress-416737-02': [FAIL_OK],
-
-
# Illegal escape-sequences in string literals. Has already been fixed
# by most engines (i.e. V8, JSC, Opera and FF).
'ecma/Array/15.4.5.1-1': [FAIL_OK],
@@ -675,6 +672,9 @@
# We do not correctly handle assignments within "with"
'ecma_3/Statements/12.10-01': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4647
+ 'ecma_3/FunExpr/fe-001': [FAIL_OK],
+
##################### MOZILLA EXTENSION TESTS #####################
'ecma/extensions/15.1.2.1-1': [FAIL_OK],
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 74ec05c083..3444f37557 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -50,66 +50,34 @@
###################### MISSING ES6 FEATURES #######################
- # Functions in blocks are var-declared and hoisted in sloppy mode
- # https://code.google.com/p/v8/issues/detail?id=3305
- 'language/block-scope/shadowing/dynamic-lookup-from-closure': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/lookup-from-closure': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-var': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-var-with-function-declaration': [PASS, FAIL_SLOPPY],
- 'language/statements/let/block-local-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
-
- # https://code.google.com/p/v8/issues/detail?id=4405
- 'language/block-scope/leave/outermost-binding-updated-in-catch-block-nested-block-let-declaration-unseen-outside-of-block': [PASS, FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3305
- # This times out in sloppy mode because sloppy const assignment does not throw.
- 'language/statements/const/syntax/const-invalid-assignment-next-expression-for': [PASS, FAIL, TIMEOUT],
-
- # Number/Boolean.prototype is a plain object in ES6
- # https://code.google.com/p/v8/issues/detail?id=4001
- 'built-ins/Boolean/prototype/S15.6.3.1_A1': [FAIL],
- 'built-ins/Boolean/prototype/S15.6.4_A1': [FAIL],
- 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T1': [FAIL],
- 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T2': [FAIL],
- 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T1': [FAIL],
- 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T2': [FAIL],
- 'built-ins/Number/15.7.4-1': [FAIL],
- 'built-ins/Number/prototype/S15.7.3.1_A2_*': [FAIL],
- 'built-ins/Number/prototype/S15.7.3.1_A3': [FAIL],
- 'built-ins/Number/prototype/S15.7.4_A1': [FAIL],
- 'built-ins/Number/prototype/toFixed/S15.7.4.5_A1.1_T01': [FAIL],
- 'built-ins/Number/prototype/toString/S15.7.4.2_A1_*': [FAIL],
- 'built-ins/Number/prototype/toString/S15.7.4.2_A2_*': [FAIL],
- 'built-ins/Number/prototype/valueOf/S15.7.4.4_A1_*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=1543
- 'built-ins/Proxy/*': [FAIL],
- 'built-ins/Array/prototype/find/Array.prototype.find_callable-Proxy-1': [FAIL],
- 'built-ins/Array/prototype/find/Array.prototype.find_callable-Proxy-2': [FAIL],
- 'built-ins/Object/assign/source-own-prop-desc-missing': [FAIL],
- 'built-ins/Object/assign/source-own-prop-error': [FAIL],
- 'built-ins/Object/assign/source-own-prop-keys-error': [FAIL],
- 'built-ins/Object/setPrototypeOf/set-error': [FAIL],
- 'language/expressions/object/prop-def-id-eval-error-2': [FAIL],
- 'language/statements/for-of/iterator-as-proxy': [FAIL],
- 'language/statements/for-of/iterator-next-result-type': [FAIL],
- 'built-ins/Array/of/return-abrupt-from-data-property-using-proxy': [FAIL],
- 'built-ins/Array/prototype/copyWithin/return-abrupt-from-delete-proxy-target': [FAIL],
- 'built-ins/Array/prototype/copyWithin/return-abrupt-from-has-start': [FAIL],
+ # It's unclear what the right behavior for [[Enumerate]] is; we're awaiting
+ # clarification in the spec. Currently, our for-in implementation for
+ # Proxies checks all trap result values for being strings...
+ 'built-ins/Proxy/enumerate/return-trap-result': [FAIL],
+ # ...and our Reflect.enumerate implementation is built on for-in by wrapping
+ # the iteration's results in a new generator; this postpones exceptions.
+ 'built-ins/Reflect/enumerate/return-abrupt-from-result': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4093
'built-ins/Array/symbol-species': [FAIL],
'built-ins/Array/symbol-species-name': [FAIL],
'built-ins/ArrayBuffer/symbol-species': [FAIL],
'built-ins/ArrayBuffer/symbol-species-name': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-constructor-is-not-object': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-returns-smaller-arraybuffer': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-is-not-object': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-is-not-constructor': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-returns-larger-arraybuffer': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-returns-not-arraybuffer': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species-returns-same-arraybuffer': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/slice/species': [FAIL],
'built-ins/Map/symbol-species': [FAIL],
'built-ins/Map/symbol-species-name': [FAIL],
'built-ins/Promise/Symbol.species/prop-desc': [FAIL],
'built-ins/Promise/Symbol.species/return-value': [FAIL],
- 'built-ins/Promise/all/species-get-error': [FAIL],
+ 'built-ins/Promise/all/species-get-error': [PASS, FAIL],
'built-ins/Promise/prototype/then/ctor-custom': [FAIL],
- 'built-ins/Promise/race/species-get-error': [FAIL],
+ 'built-ins/Promise/race/species-get-error': [PASS, FAIL],
'built-ins/Promise/symbol-species': [FAIL],
'built-ins/Promise/symbol-species-name': [FAIL],
'built-ins/RegExp/symbol-species': [FAIL],
@@ -120,12 +88,6 @@
'built-ins/Symbol/species/builtin-getter-name': [FAIL],
'built-ins/Symbol/species/subclassing': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4242
- 'built-ins/Date/15.9.1.15-1': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4002
- 'built-ins/Error/prototype/S15.11.4_A2': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4163
'built-ins/GeneratorPrototype/next/context-constructor-invocation': [FAIL],
@@ -144,16 +106,7 @@
'built-ins/WeakMap/iterator-items-are-not-object-close-iterator': [FAIL],
'built-ins/WeakSet/iterator-close-after-add-failure': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4243
- 'built-ins/Promise/race/S25.4.4.3_A3.1_T2': [FAIL],
- 'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4119
- 'built-ins/RegExp/15.10.4.1-1': [FAIL],
- 'built-ins/RegExp/S15.10.3.1_A2_T1': [FAIL],
- 'built-ins/RegExp/S15.10.3.1_A2_T2': [FAIL],
- 'built-ins/RegExp/S15.10.4.1_A2_T1': [FAIL],
- 'built-ins/RegExp/S15.10.4.1_A2_T2': [FAIL],
'built-ins/RegExp/call_with_non_regexp_same_constructor': [FAIL],
'built-ins/RegExp/from-regexp-like-short-circuit': [FAIL],
'built-ins/RegExp/from-regexp-like': [FAIL],
@@ -161,16 +114,6 @@
'built-ins/RegExp/from-regexp-like-get-source-err': [FAIL],
'built-ins/RegExp/from-regexp-like-get-flags-err': [FAIL],
'built-ins/RegExp/from-regexp-like-get-ctor-err': [FAIL],
- 'built-ins/RegExp/call_with_regexp_not_same_constructor': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4006
- 'built-ins/String/prototype/S15.5.4_A1': [FAIL],
- 'built-ins/String/prototype/S15.5.4_A2': [FAIL],
- 'built-ins/String/prototype/S15.5.4_A3': [FAIL],
- 'language/expressions/property-accessors/S11.2.1_A4_T5': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4245
- 'built-ins/String/prototype/split/S15.5.4.14_A2_T37': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4348
'built-ins/String/prototype/Symbol.iterator/this-val-non-obj-coercible': [FAIL],
@@ -181,12 +124,6 @@
'language/computed-property-names/class/static/method-symbol': [FAIL, FAIL_SLOPPY],
'language/computed-property-names/class/static/method-string': [FAIL, FAIL_SLOPPY],
- # This should work as soon as rest parameters are re-implemented via desaguring.
- 'language/expressions/arrow-function/syntax/early-errors/arrowparameters-cover-no-duplicates-rest': [PASS, FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3673
- 'language/statements/class/definition/basics': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=3566
'language/statements/for-of/body-dstr-assign-error': [FAIL],
'language/statements/for-of/body-put-error': [FAIL],
@@ -211,13 +148,6 @@
'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
- #https://code.google.com/p/v8/issues/detail?id=3983
- 'language/expressions/generators/yield-as-function-expression-binding-identifier': [FAIL],
- 'language/expressions/generators/yield-as-generator-expression-binding-identifier': [FAIL],
- 'language/expressions/object/method-definition/generator-argSuperProperty': [FAIL],
- 'language/expressions/object/method-definition/yield-as-function-expression-binding-identifier': [FAIL],
- 'language/statements/generators/yield-as-function-expression-binding-identifier': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=3566
'built-ins/GeneratorPrototype/return/from-state-completed': [FAIL],
'built-ins/GeneratorPrototype/return/from-state-suspended-start': [FAIL],
@@ -236,12 +166,6 @@
'built-ins/GeneratorPrototype/return/try-finally-within-finally': [FAIL],
'built-ins/GeneratorPrototype/return/try-finally-within-try': [FAIL],
- # Destructuring assignment
- # https://code.google.com/p/v8/issues/detail?id=811
- 'language/expressions/assignment/destructuring/*': [SKIP],
- 'language/statements/for-of/body-dstr-assign': [FAIL],
-
-
# https://code.google.com/p/v8/issues/detail?id=4248
'language/expressions/compound-assignment/S11.13.2_A5.*': [FAIL],
'language/expressions/compound-assignment/S11.13.2_A6.*': [FAIL],
@@ -271,6 +195,22 @@
'language/expressions/assignment/S11.13.1_A6*': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=3699
+ 'built-ins/Proxy/revocable/revocation-function-name': [FAIL],
+ 'language/expressions/assignment/destructuring/array-elem-init-fn-name-arrow': [FAIL],
+ 'language/expressions/assignment/destructuring/array-elem-init-fn-name-class': [FAIL],
+ 'language/expressions/assignment/destructuring/array-elem-init-fn-name-cover': [FAIL],
+ 'language/expressions/assignment/destructuring/array-elem-init-fn-name-fn': [FAIL],
+ 'language/expressions/assignment/destructuring/array-elem-init-fn-name-gen': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-id-init-fn-name-arrow': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-id-init-fn-name-class': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-id-init-fn-name-cover': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-id-init-fn-name-fn': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-id-init-fn-name-gen': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-arrow': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-class': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-cover': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-fn': [FAIL],
+ 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-gen': [FAIL],
'language/expressions/assignment/fn-name-arrow': [FAIL],
'language/expressions/assignment/fn-name-class': [FAIL],
'language/expressions/assignment/fn-name-cover': [FAIL],
@@ -299,6 +239,7 @@
'language/expressions/object/method-definition/fn-name-cover': [FAIL],
'language/expressions/object/method-definition/fn-name-fn': [FAIL],
'language/expressions/object/method-definition/fn-name-gen': [FAIL],
+ 'language/statements/class/definition/basics': [FAIL],
'language/statements/class/definition/fn-name-accessor-get': [FAIL],
'language/statements/class/definition/fn-name-accessor-set': [FAIL],
'language/statements/class/definition/fn-name-gen-method': [FAIL],
@@ -349,13 +290,17 @@
'language/literals/regexp/u-surrogate-pairs': [FAIL],
'language/literals/regexp/u-case-mapping': [FAIL],
'language/literals/regexp/u-astral': [FAIL],
+ 'built-ins/RegExp/valid-flags-y': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/length': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/name': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/prop-desc': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-invald-obj': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-non-obj': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4342
+ # https://code.google.com/p/v8/issues/detail?id=4602
'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/test/get-sticky-coerce': [FAIL],
'built-ins/RegExp/prototype/test/get-sticky-err': [FAIL],
- 'built-ins/RegExp/valid-flags-y': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4504
'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [PASS, FAIL],
@@ -405,81 +350,6 @@
# https://code.google.com/p/v8/issues/detail?id=4361
'intl402/Collator/10.1.1_a': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=1972
- 'language/identifiers/val-break-via-escape-hex': [FAIL],
- 'language/identifiers/val-break-via-escape-hex4': [FAIL],
- 'language/identifiers/val-case-via-escape-hex': [FAIL],
- 'language/identifiers/val-case-via-escape-hex4': [FAIL],
- 'language/identifiers/val-catch-via-escape-hex': [FAIL],
- 'language/identifiers/val-catch-via-escape-hex4': [FAIL],
- 'language/identifiers/val-class-via-escape-hex': [FAIL],
- 'language/identifiers/val-class-via-escape-hex4': [FAIL],
- 'language/identifiers/val-const-via-escape-hex': [FAIL],
- 'language/identifiers/val-const-via-escape-hex4': [FAIL],
- 'language/identifiers/val-continue-via-escape-hex': [FAIL],
- 'language/identifiers/val-continue-via-escape-hex4': [FAIL],
- 'language/identifiers/val-debugger-via-escape-hex': [FAIL],
- 'language/identifiers/val-debugger-via-escape-hex4': [FAIL],
- 'language/identifiers/val-default-via-escape-hex': [FAIL],
- 'language/identifiers/val-default-via-escape-hex4': [FAIL],
- 'language/identifiers/val-delete-via-escape-hex': [FAIL],
- 'language/identifiers/val-delete-via-escape-hex4': [FAIL],
- 'language/identifiers/val-do-via-escape-hex': [FAIL],
- 'language/identifiers/val-do-via-escape-hex4': [FAIL],
- 'language/identifiers/val-else-via-escape-hex': [FAIL],
- 'language/identifiers/val-else-via-escape-hex4': [FAIL],
- 'language/identifiers/val-enum-via-escape-hex': [FAIL],
- 'language/identifiers/val-enum-via-escape-hex4': [FAIL],
- 'language/identifiers/val-export-via-escape-hex': [FAIL],
- 'language/identifiers/val-export-via-escape-hex4': [FAIL],
- 'language/identifiers/val-extends-via-escape-hex': [FAIL],
- 'language/identifiers/val-extends-via-escape-hex4': [FAIL],
- 'language/identifiers/val-false-via-escape-hex': [FAIL],
- 'language/identifiers/val-false-via-escape-hex4': [FAIL],
- 'language/identifiers/val-finally-via-escape-hex': [FAIL],
- 'language/identifiers/val-finally-via-escape-hex4': [FAIL],
- 'language/identifiers/val-for-via-escape-hex': [FAIL],
- 'language/identifiers/val-for-via-escape-hex4': [FAIL],
- 'language/identifiers/val-function-via-escape-hex': [FAIL],
- 'language/identifiers/val-function-via-escape-hex4': [FAIL],
- 'language/identifiers/val-if-via-escape-hex': [FAIL],
- 'language/identifiers/val-if-via-escape-hex4': [FAIL],
- 'language/identifiers/val-import-via-escape-hex': [FAIL],
- 'language/identifiers/val-import-via-escape-hex4': [FAIL],
- 'language/identifiers/val-in-via-escape-hex': [FAIL],
- 'language/identifiers/val-in-via-escape-hex4': [FAIL],
- 'language/identifiers/val-instanceof-via-escape-hex': [FAIL],
- 'language/identifiers/val-instanceof-via-escape-hex4': [FAIL],
- 'language/identifiers/val-new-via-escape-hex': [FAIL],
- 'language/identifiers/val-new-via-escape-hex4': [FAIL],
- 'language/identifiers/val-null-via-escape-hex': [FAIL],
- 'language/identifiers/val-null-via-escape-hex4': [FAIL],
- 'language/identifiers/val-return-via-escape-hex': [FAIL],
- 'language/identifiers/val-return-via-escape-hex4': [FAIL],
- 'language/identifiers/val-super-via-escape-hex': [FAIL],
- 'language/identifiers/val-super-via-escape-hex4': [FAIL],
- 'language/identifiers/val-switch-via-escape-hex': [FAIL],
- 'language/identifiers/val-switch-via-escape-hex4': [FAIL],
- 'language/identifiers/val-throw-via-escape-hex': [FAIL],
- 'language/identifiers/val-throw-via-escape-hex4': [FAIL],
- 'language/identifiers/val-true-via-escape-hex': [FAIL],
- 'language/identifiers/val-true-via-escape-hex4': [FAIL],
- 'language/identifiers/val-try-via-escape-hex': [FAIL],
- 'language/identifiers/val-try-via-escape-hex4': [FAIL],
- 'language/identifiers/val-typeof-via-escape-hex': [FAIL],
- 'language/identifiers/val-typeof-via-escape-hex4': [FAIL],
- 'language/identifiers/val-var-via-escape-hex': [FAIL],
- 'language/identifiers/val-var-via-escape-hex4': [FAIL],
- 'language/identifiers/val-void-via-escape-hex': [FAIL],
- 'language/identifiers/val-void-via-escape-hex4': [FAIL],
- 'language/identifiers/val-while-via-escape-hex': [FAIL],
- 'language/identifiers/val-while-via-escape-hex4': [FAIL],
- 'language/identifiers/val-with-via-escape-hex': [FAIL],
- 'language/identifiers/val-with-via-escape-hex4': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4362
- 'built-ins/String/prototype/repeat/empty-string-returns-empty': [PASS, FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4447
'built-ins/Function/prototype/Symbol.hasInstance/*': [SKIP],
'built-ins/Symbol/hasInstance/prop-desc': [FAIL],
@@ -487,9 +357,6 @@
'language/expressions/instanceof/symbol-hasinstance-invocation': [FAIL],
'language/expressions/instanceof/symbol-hasinstance-to-boolean': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=3931
- 'built-ins/Reflect/*': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4476
'built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional': [FAIL],
'built-ins/String/prototype/toLocaleLowerCase/supplementary_plane': [FAIL],
@@ -506,6 +373,45 @@
'intl402/String/prototype/toLocaleUpperCase/special_casing_Lithuanian': [FAIL],
'intl402/String/prototype/toLocaleUpperCase/special_casing_Turkish': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4628
+ 'language/eval-code/non-definable-function-with-variable': [FAIL],
+ 'language/eval-code/non-definable-function-with-function': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4592
+ 'built-ins/ArrayBuffer/length-is-absent': [FAIL],
+ 'built-ins/ArrayBuffer/length-is-not-number': [FAIL],
+ 'built-ins/ArrayBuffer/positive-integer-length': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4630
+ 'language/statements/generators/invoke-as-constructor': [FAIL],
+ 'language/expressions/generators/invoke-as-constructor': [FAIL],
+ 'language/expressions/object/method-definition/generator-invoke-ctor': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4633
+ 'built-ins/Promise/reject-function-name': [FAIL],
+ 'built-ins/Promise/resolve-function-name': [FAIL],
+ 'built-ins/Promise/all/resolve-element-function-name': [FAIL],
+ 'built-ins/Promise/executor-function-name': [FAIL],
+ 'built-ins/Promise/all/capability-executor-not-callable': [FAIL],
+ 'built-ins/Promise/reject/capability-executor-not-callable': [FAIL],
+ 'built-ins/Promise/race/capability-executor-not-callable': [FAIL],
+ 'built-ins/Promise/prototype/then/capability-executor-not-callable': [FAIL],
+ 'built-ins/Promise/resolve/capability-executor-not-callable': [FAIL],
+ 'built-ins/Promise/race/S25.4.4.3_A3.1_T2': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4634
+ 'built-ins/DataView/prototype/setFloat64/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setFloat32/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setInt16/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setInt32/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setUint16/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setUint32/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setUint8/index-check-before-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setInt8/index-check-before-value-conversion': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4639
+ 'built-ins/ArrayBuffer/allocation-limit': [SKIP],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
@@ -519,17 +425,13 @@
'intl402/Collator/10.1.2_a': [PASS, FAIL],
'intl402/Collator/10.2.3_b': [PASS, FAIL],
'intl402/Collator/prototype/10.3_a': [FAIL],
- 'intl402/Date/prototype/13.3.0_7': [FAIL],
'intl402/DateTimeFormat/12.1.1': [FAIL],
'intl402/DateTimeFormat/12.1.1_a': [FAIL],
- 'intl402/DateTimeFormat/12.1.1_1': [FAIL],
'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
'intl402/DateTimeFormat/12.1.2.1_4': [FAIL],
'intl402/DateTimeFormat/12.2.3_b': [FAIL],
- 'intl402/DateTimeFormat/prototype/12.3.2_FDT_7_a_iv': [FAIL],
'intl402/DateTimeFormat/prototype/12.3.3': [FAIL],
'intl402/DateTimeFormat/prototype/12.3_a': [FAIL],
- 'intl402/DateTimeFormat/prototype/format/12.3.2_FDT_7_a_iv': [FAIL],
'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
'intl402/NumberFormat/11.1.1_20_c': [FAIL],
'intl402/NumberFormat/11.1.1_a': [FAIL],
@@ -551,10 +453,8 @@
'language/types/number/S8.5_A2.1': [PASS, FAIL_OK],
'language/types/number/S8.5_A2.2': [PASS, FAIL_OK],
- # 'if' should never return a completion whose value component is empty.
- # https://bugs.ecmascript.org/show_bug.cgi?id=4540
- 'language/statements/for/S12.6.3_A9.1': [FAIL],
- 'language/statements/for/S12.6.3_A9': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=4693
+ 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
############################ INVALID TESTS #############################
@@ -611,7 +511,6 @@
'language/literals/regexp/S7.8.5_A1.4_T2': [SKIP],
'language/literals/regexp/S7.8.5_A2.1_T2': [SKIP],
'language/literals/regexp/S7.8.5_A2.4_T2': [SKIP],
- 'language/statements/const/syntax/const-invalid-assignment-next-expression-for': [SKIP],
'built-ins/Array/prototype/slice/S15.4.4.10_A3_T1': [SKIP],
'built-ins/Array/prototype/slice/S15.4.4.10_A3_T2': [SKIP],
}], # ALWAYS
@@ -662,14 +561,25 @@
'built-ins/encodeURIComponent/S15.1.3.4_A2.3_T1': [SKIP],
}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
+['asan == True', {
+ # BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
+ # asan's --omit-quit flag.
+ 'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
+}],
+
['ignition == True', {
'annexB/B.2.3.*': [SKIP],
'built-ins/Array/prototype/reduce/*': [SKIP],
'built-ins/Array/prototype/reduceRight/*': [SKIP],
- 'built-ins/decodeURI*': [SKIP],
+ 'built-ins/GeneratorFunction/*': [SKIP],
'built-ins/GeneratorPrototype/*': [SKIP],
'built-ins/Map/*': [SKIP],
'built-ins/MapIteratorPrototype/*': [SKIP],
+ 'built-ins/Promise/prototype/then/capability-executor-called-twice': [SKIP],
+ 'built-ins/Promise/prototype/then/capability-executor-not-callable': [SKIP],
+ 'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
+ 'built-ins/Proxy/has/*': [SKIP],
+ 'built-ins/Reflect/enumerate/*': [SKIP],
'built-ins/Set/*': [SKIP],
'built-ins/SetIteratorPrototype/*': [SKIP],
'built-ins/WeakMap/*': [SKIP],
@@ -678,10 +588,12 @@
'language/computed-property-names/to-name-side-effects/*': [SKIP],
'language/directive-prologue/*': [SKIP],
'language/expressions/arrow-function/*': [SKIP],
+ 'language/expressions/assignment/destructuring/*': [SKIP],
'language/expressions/class/*': [SKIP],
'language/expressions/generators/*': [SKIP],
'language/expressions/object/method-definition/yield*': [SKIP],
'language/expressions/object/method-definition/generator*': [SKIP],
+ 'language/expressions/object/prop-def-id-eval-error-2': [SKIP],
'language/expressions/yield/*': [SKIP],
'language/function-code/*': [SKIP],
'language/statements/class/*': [SKIP],
@@ -690,7 +602,6 @@
'language/statements/for-in/let*': [SKIP],
'language/statements/for-of/*': [SKIP],
'language/statements/generators/*': [SKIP],
- 'language/statements/let/*': [SKIP],
'language/statements/try/*': [SKIP],
'language/statements/with/*': [SKIP],
@@ -702,6 +613,7 @@
'built-ins/Array/prototype/toString/S15.4.4.2_A1_T4': [SKIP],
'built-ins/Date/15.9.1.15-1': [SKIP],
'built-ins/Date/prototype/toISOString/15.9.5.43-0-13': [SKIP],
+ 'built-ins/JSON/stringify/*': [SKIP],
'built-ins/Object/defineProperty/15.2.3.6-4-625gs': [SKIP],
'built-ins/Object/prototype/hasOwnProperty/S15.2.4.5_A12': [SKIP],
'built-ins/Object/prototype/isPrototypeOf/S15.2.4.6_A12': [SKIP],
@@ -717,6 +629,7 @@
'built-ins/Promise/race/ctx-ctor': [SKIP],
'built-ins/Promise/reject/ctx-ctor': [SKIP],
'built-ins/Promise/resolve/ctx-ctor': [SKIP],
+ 'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [SKIP],
'built-ins/String/prototype/codePointAt/this-is-undefined-throws': [SKIP],
'built-ins/String/prototype/concat/S15.5.4.6_A2': [SKIP],
'built-ins/String/prototype/endsWith/this-is-undefined-throws': [SKIP],
@@ -725,6 +638,9 @@
'built-ins/String/prototype/startsWith/this-is-undefined-throws': [SKIP],
'built-ins/String/prototype/trim/15.5.4.20-1-1': [SKIP],
'built-ins/String/S15.5.5.1_A4_T1': [SKIP],
+ 'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-1': [SKIP],
+ 'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-2': [SKIP],
+ 'language/block-scope/leave/verify-context-in-labelled-block': [SKIP],
'language/block-scope/leave/x-after-break-to-label': [SKIP],
'language/computed-property-names/object/accessor/getter-super': [SKIP],
'language/computed-property-names/object/accessor/setter-super': [SKIP],
@@ -734,6 +650,7 @@
'language/default-parameters/param-ref-uninitialized': [SKIP],
'language/expressions/delete/11.4.1-4.a-5': [SKIP],
'language/expressions/delete/11.4.1-4.a-6': [SKIP],
+ 'language/expressions/object/method-definition/name-prop-name-yield-expr': [SKIP],
'language/expressions/object/method-definition/name-super-prop-param': [SKIP],
'language/expressions/object/method-definition/name-super-prop-body': [SKIP],
'language/expressions/object/prop-def-id-eval-error': [SKIP],
@@ -750,6 +667,13 @@
'language/object-literal/getter': [SKIP],
'language/object-literal/method': [SKIP],
'language/object-literal/setter': [SKIP],
+ 'language/rest-parameters/arrow-function': [SKIP],
+ 'language/rest-parameters/expected-argument-count': [SKIP],
+ 'language/rest-parameters/no-alias-arguments': [SKIP],
+ 'language/rest-parameters/rest-index': [SKIP],
+ 'language/rest-parameters/rest-parameters-apply': [SKIP],
+ 'language/rest-parameters/rest-parameters-call': [SKIP],
+ 'language/rest-parameters/rest-parameters-produce-an-array': [SKIP],
'language/rest-parameters/with-new-target': [SKIP],
'language/statements/do-while/S12.6.1_A4_T5': [SKIP],
'language/statements/function/S13.2.2_A18_T2': [SKIP],
@@ -761,12 +685,36 @@
'language/statements/function/S13.2.2_A19_T6': [SKIP],
'language/statements/function/S13.2.2_A19_T7': [SKIP],
'language/statements/function/S13.2.2_A19_T8': [SKIP],
+ 'language/statements/function/S13.2.2_A18_T1': [SKIP],
+ 'language/statements/function/S13.2.2_A17_T2': [SKIP],
+ 'language/statements/function/S13.2.2_A17_T3': [SKIP],
+ 'language/statements/let/block-local-closure-get-before-initialization': [SKIP],
+ 'language/statements/let/block-local-closure-set-before-initialization': [SKIP],
+ 'language/statements/let/block-local-use-before-initialization-in-declaration-statement': [SKIP],
+ 'language/statements/let/block-local-use-before-initialization-in-prior-statement': [SKIP],
+ 'language/statements/let/function-local-closure-get-before-initialization': [SKIP],
+ 'language/statements/let/function-local-closure-set-before-initialization': [SKIP],
+ 'language/statements/let/function-local-use-before-initialization-in-declaration-statement': [SKIP],
+ 'language/statements/let/function-local-use-before-initialization-in-prior-statement': [SKIP],
+ 'language/statements/let/global-closure-get-before-initialization': [SKIP],
+ 'language/statements/let/global-closure-set-before-initialization': [SKIP],
+ 'language/statements/let/global-use-before-initialization-in-declaration-statement': [SKIP],
+ 'language/statements/let/global-use-before-initialization-in-prior-statement': [SKIP],
'language/statements/while/S12.6.2_A4_T5': [SKIP],
+
}], # ignition == True
['ignition == True and (arch == arm or arch == arm64)', {
- 'built-ins/encodeURI*': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A1.12_T3': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A1.10_T1': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A1.11_T2': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T2': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T3': [SKIP],
'intl402/9.2.2': [SKIP],
+ 'language/statements/let/fn-name-arrow': [SKIP],
+ 'language/statements/let/fn-name-cover': [SKIP],
+ 'language/statements/let/fn-name-fn': [SKIP],
+ 'language/statements/let/fn-name-gen': [SKIP],
}], # ignition == True and (arch == arm or arch == arm64)
]
diff --git a/deps/v8/test/unittests/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
new file mode 100644
index 0000000000..37690aaf80
--- /dev/null
+++ b/deps/v8/test/unittests/cancelable-tasks-unittest.cc
@@ -0,0 +1,218 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/atomicops.h"
+#include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class TestTask : public Task, public Cancelable {
+ public:
+ enum Mode { kDoNothing, kWaitTillCanceledAgain, kCheckNotRun };
+
+ TestTask(CancelableTaskManager* parent, base::AtomicWord* result,
+ Mode mode = kDoNothing)
+ : Cancelable(parent), result_(result), mode_(mode) {}
+
+ // Task overrides.
+ void Run() final {
+ if (TryRun()) {
+ RunInternal();
+ }
+ }
+
+ private:
+ void RunInternal() {
+ base::Release_Store(result_, id());
+
+ switch (mode_) {
+ case kWaitTillCanceledAgain:
+ // Simple busy wait until the main thread tried to cancel.
+ while (CancelAttempts() == 0) {
+ }
+ break;
+ case kCheckNotRun:
+ // Check that we never execute {RunInternal}.
+ EXPECT_TRUE(false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ base::AtomicWord* result_;
+ Mode mode_;
+};
+
+
+class SequentialRunner {
+ public:
+ explicit SequentialRunner(TestTask* task) : task_(task) {}
+
+ void Run() {
+ task_->Run();
+ delete task_;
+ }
+
+ private:
+ TestTask* task_;
+};
+
+
+class ThreadedRunner final : public base::Thread {
+ public:
+ explicit ThreadedRunner(TestTask* task)
+ : Thread(Options("runner thread")), task_(task) {}
+
+ virtual void Run() {
+ task_->Run();
+ delete task_;
+ }
+
+ private:
+ TestTask* task_;
+};
+
+
+typedef base::AtomicWord ResultType;
+
+
+intptr_t GetValue(ResultType* result) { return base::Acquire_Load(result); }
+
+} // namespace
+
+
+TEST(CancelableTask, EmptyCancelableTaskManager) {
+ CancelableTaskManager manager;
+ manager.CancelAndWait();
+}
+
+
+TEST(CancelableTask, SequentialCancelAndWait) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ SequentialRunner runner1(
+ new TestTask(&manager, &result1, TestTask::kCheckNotRun));
+ EXPECT_EQ(GetValue(&result1), 0);
+ manager.CancelAndWait();
+ EXPECT_EQ(GetValue(&result1), 0);
+ runner1.Run(); // Run to avoid leaking the Task.
+ EXPECT_EQ(GetValue(&result1), 0);
+}
+
+
+TEST(CancelableTask, SequentialMultipleTasks) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ ResultType result2 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1);
+ TestTask* task2 = new TestTask(&manager, &result2);
+ SequentialRunner runner1(task1);
+ SequentialRunner runner2(task2);
+ EXPECT_EQ(task1->id(), 1u);
+ EXPECT_EQ(task2->id(), 2u);
+
+ EXPECT_EQ(GetValue(&result1), 0);
+ runner1.Run(); // Don't touch task1 after running it.
+ EXPECT_EQ(GetValue(&result1), 1);
+
+ EXPECT_EQ(GetValue(&result2), 0);
+ runner2.Run(); // Don't touch task2 after running it.
+ EXPECT_EQ(GetValue(&result2), 2);
+
+ manager.CancelAndWait();
+ EXPECT_FALSE(manager.TryAbort(1));
+ EXPECT_FALSE(manager.TryAbort(2));
+}
+
+
+TEST(CancelableTask, ThreadedMultipleTasksStarted) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ ResultType result2 = 0;
+ TestTask* task1 =
+ new TestTask(&manager, &result1, TestTask::kWaitTillCanceledAgain);
+ TestTask* task2 =
+ new TestTask(&manager, &result2, TestTask::kWaitTillCanceledAgain);
+ ThreadedRunner runner1(task1);
+ ThreadedRunner runner2(task2);
+ runner1.Start();
+ runner2.Start();
+ // Busy wait on result to make sure both tasks are done.
+ while ((GetValue(&result1) == 0) || (GetValue(&result2) == 0)) {
+ }
+ manager.CancelAndWait();
+ runner1.Join();
+ runner2.Join();
+ EXPECT_EQ(GetValue(&result1), 1);
+ EXPECT_EQ(GetValue(&result2), 2);
+}
+
+
+TEST(CancelableTask, ThreadedMultipleTasksNotRun) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ ResultType result2 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
+ TestTask* task2 = new TestTask(&manager, &result2, TestTask::kCheckNotRun);
+ ThreadedRunner runner1(task1);
+ ThreadedRunner runner2(task2);
+ manager.CancelAndWait();
+ // Tasks are canceled, hence the runner will bail out and not update result.
+ runner1.Start();
+ runner2.Start();
+ runner1.Join();
+ runner2.Join();
+ EXPECT_EQ(GetValue(&result1), 0);
+ EXPECT_EQ(GetValue(&result2), 0);
+}
+
+
+TEST(CancelableTask, RemoveBeforeCancelAndWait) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
+ ThreadedRunner runner1(task1);
+ uint32_t id = task1->id();
+ EXPECT_EQ(id, 1u);
+ EXPECT_TRUE(manager.TryAbort(id));
+ runner1.Start();
+ runner1.Join();
+ manager.CancelAndWait();
+ EXPECT_EQ(GetValue(&result1), 0);
+}
+
+
+TEST(CancelableTask, RemoveAfterCancelAndWait) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1);
+ ThreadedRunner runner1(task1);
+ uint32_t id = task1->id();
+ EXPECT_EQ(id, 1u);
+ runner1.Start();
+ runner1.Join();
+ manager.CancelAndWait();
+ EXPECT_FALSE(manager.TryAbort(id));
+ EXPECT_EQ(GetValue(&result1), 1);
+}
+
+
+TEST(CancelableTask, RemoveUnmanagedId) {
+ CancelableTaskManager manager;
+ EXPECT_FALSE(manager.TryAbort(1));
+ EXPECT_FALSE(manager.TryAbort(2));
+ manager.CancelAndWait();
+ EXPECT_FALSE(manager.TryAbort(1));
+ EXPECT_FALSE(manager.TryAbort(3));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index bfae2ba4d0..62abeda1b5 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -12,7 +12,6 @@ namespace compiler {
namespace {
-typedef RawMachineAssembler::Label MLabel;
typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
@@ -53,16 +52,22 @@ std::ostream& operator<<(std::ostream& os, const FAI& fai) {
}
-const FAI kFAIs[] = {
- {&RawMachineAssembler::Float32Add, "Float32Add", kMachFloat32, kArmVaddF32},
- {&RawMachineAssembler::Float64Add, "Float64Add", kMachFloat64, kArmVaddF64},
- {&RawMachineAssembler::Float32Sub, "Float32Sub", kMachFloat32, kArmVsubF32},
- {&RawMachineAssembler::Float64Sub, "Float64Sub", kMachFloat64, kArmVsubF64},
- {&RawMachineAssembler::Float32Mul, "Float32Mul", kMachFloat32, kArmVmulF32},
- {&RawMachineAssembler::Float64Mul, "Float64Mul", kMachFloat64, kArmVmulF64},
- {&RawMachineAssembler::Float32Div, "Float32Div", kMachFloat32, kArmVdivF32},
- {&RawMachineAssembler::Float64Div, "Float64Div", kMachFloat64,
- kArmVdivF64}};
+const FAI kFAIs[] = {{&RawMachineAssembler::Float32Add, "Float32Add",
+ MachineType::Float32(), kArmVaddF32},
+ {&RawMachineAssembler::Float64Add, "Float64Add",
+ MachineType::Float64(), kArmVaddF64},
+ {&RawMachineAssembler::Float32Sub, "Float32Sub",
+ MachineType::Float32(), kArmVsubF32},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub",
+ MachineType::Float64(), kArmVsubF64},
+ {&RawMachineAssembler::Float32Mul, "Float32Mul",
+ MachineType::Float32(), kArmVmulF32},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul",
+ MachineType::Float64(), kArmVmulF64},
+ {&RawMachineAssembler::Float32Div, "Float32Div",
+ MachineType::Float32(), kArmVdivF32},
+ {&RawMachineAssembler::Float64Div, "Float64Div",
+ MachineType::Float64(), kArmVdivF64}};
// Data processing instructions with overflow.
@@ -142,7 +147,8 @@ typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
TEST_P(InstructionSelectorDPITest, Parameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -156,7 +162,7 @@ TEST_P(InstructionSelectorDPITest, Parameters) {
TEST_P(InstructionSelectorDPITest, Immediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -167,7 +173,7 @@ TEST_P(InstructionSelectorDPITest, Immediate) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -183,7 +189,8 @@ TEST_P(InstructionSelectorDPITest, Immediate) {
TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
@@ -195,7 +202,8 @@ TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
@@ -213,7 +221,8 @@ TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*dpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
@@ -228,7 +237,8 @@ TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1)));
@@ -246,8 +256,9 @@ TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -265,8 +276,8 @@ TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
&b);
m.Bind(&a);
@@ -281,8 +292,8 @@ TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
&b);
m.Bind(&a);
@@ -302,8 +313,9 @@ TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
@@ -320,8 +332,9 @@ TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)),
@@ -344,8 +357,9 @@ TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0),
(m.*shift.constructor)(
m.Parameter(1), m.Int32Constant(imm))),
@@ -366,8 +380,9 @@ TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1)),
@@ -391,8 +406,9 @@ TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
m.Int32Constant(0)),
&a, &b);
@@ -411,8 +427,9 @@ TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(
m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
m.Int32Constant(0)),
@@ -433,8 +450,8 @@ TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32Equal(
(m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Int32Constant(0)),
@@ -451,8 +468,8 @@ TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32Equal(
(m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
m.Int32Constant(0)),
@@ -474,8 +491,8 @@ TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32NotEqual(
(m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Int32Constant(0)),
@@ -492,8 +509,8 @@ TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32NotEqual(
(m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
m.Int32Constant(0)),
@@ -525,7 +542,8 @@ typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
@@ -542,7 +560,7 @@ TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
@@ -556,7 +574,7 @@ TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -575,7 +593,8 @@ TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(
m.Parameter(0),
@@ -590,7 +609,8 @@ TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
@@ -611,7 +631,8 @@ TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(m.Parameter(0),
(m.*shift.constructor)(
@@ -629,7 +650,8 @@ TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
@@ -650,7 +672,8 @@ TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
TEST_P(InstructionSelectorODPITest, ValWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
@@ -666,7 +689,7 @@ TEST_P(InstructionSelectorODPITest, ValWithParameters) {
TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
@@ -679,7 +702,7 @@ TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -697,7 +720,8 @@ TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(
m.Parameter(0),
@@ -711,7 +735,8 @@ TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
@@ -731,7 +756,8 @@ TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(m.Parameter(0),
(m.*shift.constructor)(
@@ -748,7 +774,8 @@ TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
@@ -768,7 +795,8 @@ TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
TEST_P(InstructionSelectorODPITest, BothWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -785,7 +813,7 @@ TEST_P(InstructionSelectorODPITest, BothWithParameters) {
TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -799,7 +827,7 @@ TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -818,7 +846,8 @@ TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(
m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
@@ -832,7 +861,8 @@ TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
@@ -852,7 +882,8 @@ TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* n = (m.*odpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
@@ -870,7 +901,8 @@ TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* n = (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1));
@@ -891,8 +923,9 @@ TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -913,8 +946,8 @@ TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -932,8 +965,8 @@ TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -955,8 +988,9 @@ TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
m.Bind(&a);
@@ -976,8 +1010,9 @@ TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
m.Bind(&a);
@@ -1008,7 +1043,8 @@ typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
TEST_P(InstructionSelectorShiftTest, Parameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1022,7 +1058,7 @@ TEST_P(InstructionSelectorShiftTest, Parameters) {
TEST_P(InstructionSelectorShiftTest, Immediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1038,7 +1074,8 @@ TEST_P(InstructionSelectorShiftTest, Immediate) {
TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
const Shift shift = GetParam();
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32Equal(m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
@@ -1052,7 +1089,8 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
m.Parameter(0)));
@@ -1071,7 +1109,8 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
m.Parameter(0)));
@@ -1086,7 +1125,8 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
@@ -1105,7 +1145,8 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Word32Equal(m.Int32Constant(0),
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
@@ -1123,7 +1164,8 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(
m.Int32Constant(0),
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
@@ -1142,7 +1184,8 @@ TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1156,7 +1199,7 @@ TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Not(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
@@ -1172,7 +1215,8 @@ TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
m.Parameter(1), m.Parameter(2)))));
Stream s = m.Build();
@@ -1187,7 +1231,8 @@ TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
m.Word32Not((m.*shift.constructor)(
m.Parameter(1), m.Int32Constant(imm)))));
@@ -1228,49 +1273,49 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kArmLdrsb,
kArmStrb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kArmLdrb,
kArmStrb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
-127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kArmLdrsh,
kArmStrh,
&InstructionSelectorTest::Stream::IsInteger,
{-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
-98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
- {kMachUint16,
+ {MachineType::Uint16(),
kArmLdrh,
kArmStrh,
&InstructionSelectorTest::Stream::IsInteger,
{-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
-32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
- {kMachInt32,
+ {MachineType::Int32(),
kArmLdr,
kArmStr,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
-80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
- {kMachFloat32,
+ {MachineType::Float32(),
kArmVldrF32,
kArmVstrF32,
&InstructionSelectorTest::Stream::IsDouble,
{-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
-84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
- {kMachFloat64,
+ {MachineType::Float64(),
kArmVldrF64,
kArmVstrF64,
&InstructionSelectorTest::Stream::IsDouble,
@@ -1287,7 +1332,8 @@ typedef InstructionSelectorTestWithParam<MemoryAccess>
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1302,7 +1348,7 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1319,9 +1365,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1335,9 +1382,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1361,7 +1409,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1372,7 +1420,7 @@ TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1424,7 +1472,8 @@ typedef InstructionSelectorTestWithParam<Comparison>
TEST_P(InstructionSelectorComparisonTest, Parameters) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = (m.*cmp.constructor)(p0, p1);
@@ -1446,7 +1495,8 @@ TEST_P(InstructionSelectorComparisonTest, Parameters) {
TEST_P(InstructionSelectorComparisonTest, Word32EqualWithZero) {
{
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r =
@@ -1466,7 +1516,8 @@ TEST_P(InstructionSelectorComparisonTest, Word32EqualWithZero) {
}
{
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r =
@@ -1515,7 +1566,8 @@ typedef InstructionSelectorTestWithParam<Comparison>
TEST_P(InstructionSelectorF32ComparisonTest, WithParameters) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
+ MachineType::Float32());
m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1529,7 +1581,8 @@ TEST_P(InstructionSelectorF32ComparisonTest, WithParameters) {
TEST_P(InstructionSelectorF32ComparisonTest, NegatedWithParameters) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
+ MachineType::Float32());
m.Return(
m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
Stream const s = m.Build();
@@ -1544,7 +1597,7 @@ TEST_P(InstructionSelectorF32ComparisonTest, NegatedWithParameters) {
TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnRight) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
m.Return((m.*cmp.constructor)(m.Parameter(0), m.Float32Constant(0.0)));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1559,7 +1612,7 @@ TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnRight) {
TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnLeft) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
m.Return((m.*cmp.constructor)(m.Float32Constant(0.0f), m.Parameter(0)));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1596,7 +1649,8 @@ typedef InstructionSelectorTestWithParam<Comparison>
TEST_P(InstructionSelectorF64ComparisonTest, WithParameters) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64());
m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1610,7 +1664,8 @@ TEST_P(InstructionSelectorF64ComparisonTest, WithParameters) {
TEST_P(InstructionSelectorF64ComparisonTest, NegatedWithParameters) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64());
m.Return(
m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
Stream const s = m.Build();
@@ -1625,7 +1680,7 @@ TEST_P(InstructionSelectorF64ComparisonTest, NegatedWithParameters) {
TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnRight) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
m.Return((m.*cmp.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1640,7 +1695,7 @@ TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnRight) {
TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnLeft) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
m.Return((m.*cmp.constructor)(m.Float64Constant(0.0), m.Parameter(0)));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1690,7 +1745,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFAITest,
TEST_F(InstructionSelectorTest, Float32Abs) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -1705,7 +1760,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
TEST_F(InstructionSelectorTest, Float64Abs) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -1721,8 +1776,8 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32,
- kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1742,8 +1797,8 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32,
- kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1767,8 +1822,8 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64,
- kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1788,8 +1843,8 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64,
- kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1812,7 +1867,7 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
@@ -1827,7 +1882,7 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) {
TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -1842,7 +1897,8 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1863,7 +1919,8 @@ TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1884,7 +1941,7 @@ TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
TEST_F(InstructionSelectorTest, Float32Sqrt) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sqrt(p0);
m.Return(n);
@@ -1900,7 +1957,7 @@ TEST_F(InstructionSelectorTest, Float32Sqrt) {
TEST_F(InstructionSelectorTest, Float64Sqrt) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sqrt(p0);
m.Return(n);
@@ -1921,7 +1978,8 @@ TEST_F(InstructionSelectorTest, Float64Sqrt) {
TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1938,7 +1996,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1959,7 +2018,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
TEST_F(InstructionSelectorTest, Int32AddWithInt32MulHigh) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1976,7 +2036,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithInt32MulHigh) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1997,7 +2058,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithInt32MulHigh) {
TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xff)), p1);
@@ -2013,7 +2075,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xff)));
@@ -2029,7 +2092,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xffff)), p1);
@@ -2045,7 +2109,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xffff)));
@@ -2065,7 +2130,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -2083,7 +2149,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -2101,7 +2168,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -2119,7 +2187,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -2140,7 +2209,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
Stream s = m.Build();
@@ -2154,7 +2224,8 @@ TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
Stream s = m.Build(MLS);
@@ -2166,7 +2237,8 @@ TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
@@ -2186,7 +2258,8 @@ TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(1U, s.size());
@@ -2195,7 +2268,8 @@ TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(6U, s.size());
@@ -2225,7 +2299,8 @@ TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(3U, s.size());
@@ -2246,7 +2321,8 @@ TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(MLS, SUDIV);
ASSERT_EQ(2U, s.size());
@@ -2263,7 +2339,8 @@ TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2276,7 +2353,7 @@ TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x >> k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2289,7 +2366,7 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// x * (2^k - 1) -> -x + (x >> k)
TRACED_FORRANGE(int32_t, k, 3, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2302,7 +2379,7 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// (2^k + 1) * x -> x + (x >> k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2315,7 +2392,7 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// x * (2^k - 1) -> -x + (x >> k)
TRACED_FORRANGE(int32_t, k, 3, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2330,7 +2407,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -2347,7 +2425,8 @@ TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
TEST_F(InstructionSelectorTest, Uint32MulHighWithParameters) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Uint32MulHigh(p0, p1);
@@ -2364,7 +2443,8 @@ TEST_F(InstructionSelectorTest, Uint32MulHighWithParameters) {
TEST_F(InstructionSelectorTest, Uint32DivWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
@@ -2384,7 +2464,8 @@ TEST_F(InstructionSelectorTest, Uint32DivWithParameters) {
TEST_F(InstructionSelectorTest, Uint32DivWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(1U, s.size());
@@ -2393,7 +2474,8 @@ TEST_F(InstructionSelectorTest, Uint32DivWithParametersForSUDIV) {
TEST_F(InstructionSelectorTest, Uint32ModWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(6U, s.size());
@@ -2423,7 +2505,8 @@ TEST_F(InstructionSelectorTest, Uint32ModWithParameters) {
TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(3U, s.size());
@@ -2444,7 +2527,8 @@ TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIV) {
TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(MLS, SUDIV);
ASSERT_EQ(2U, s.size());
@@ -2462,7 +2546,7 @@ TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
TRACED_FORRANGE(int32_t, width, 1, 32) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
m.Int32Constant(0xffffffffu >> (32 - width))));
Stream s = m.Build(ARMv7);
@@ -2473,7 +2557,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
}
TRACED_FORRANGE(int32_t, width, 1, 32) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
m.Parameter(0)));
Stream s = m.Build(ARMv7);
@@ -2489,7 +2573,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(
m.Parameter(0),
m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
@@ -2506,7 +2590,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
}
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
m.Parameter(0)));
@@ -2526,7 +2610,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32And(p0, m.Int32Constant(0xffff));
m.Return(r);
@@ -2540,7 +2624,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32And(m.Int32Constant(0xffff), p0);
m.Return(r);
@@ -2558,7 +2642,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
@@ -2573,7 +2657,7 @@ TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
@@ -2597,7 +2681,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
uint32_t jnk = rng()->NextInt(max);
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(lsb)));
Stream s = m.Build(ARMv7);
@@ -2614,7 +2698,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
uint32_t jnk = rng()->NextInt(max);
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(lsb)));
Stream s = m.Build(ARMv7);
@@ -2630,7 +2714,8 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2640,7 +2725,8 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2653,7 +2739,8 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2669,7 +2756,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2683,7 +2770,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2700,7 +2787,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2713,7 +2800,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2729,7 +2816,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Not(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2743,7 +2830,7 @@ TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
m.Int32Constant(0xffffffffu >> (32 - width))));
Stream s = m.Build(ARMv7);
@@ -2756,7 +2843,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
}
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
Stream s = m.Build(ARMv7);
@@ -2771,7 +2858,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
TEST_F(InstructionSelectorTest, Word32Clz) {
- StreamBuilder m(this, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32Clz(p0);
m.Return(n);
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 3af1232cff..73532aab2a 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -10,8 +10,6 @@ namespace compiler {
namespace {
-typedef RawMachineAssembler::Label MLabel;
-
template <typename T>
struct MachInst {
T constructor;
@@ -45,12 +43,12 @@ std::ostream& operator<<(std::ostream& os, const Shift& shift) {
// machine type.
Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
int64_t value) {
- switch (type) {
- case kMachInt32:
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
return m.Int32Constant(static_cast<int32_t>(value));
break;
- case kMachInt64:
+ case MachineRepresentation::kWord64:
return m.Int64Constant(value);
break;
@@ -63,12 +61,18 @@ Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
// ARM64 logical instructions.
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
- {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
- {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32, kMachInt32},
- {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eor, kMachInt64}};
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64And,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eor,
+ MachineType::Int64()}};
// ARM64 logical immediates: contiguous set bits, rotated about a power of two
@@ -132,13 +136,17 @@ std::ostream& operator<<(std::ostream& os, const AddSub& op) {
const AddSub kAddSubInstructions[] = {
- {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
+ {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32,
+ MachineType::Int32()},
kArm64Sub32},
- {{&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
+ {{&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add,
+ MachineType::Int64()},
kArm64Sub},
- {{&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
+ {{&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32,
+ MachineType::Int32()},
kArm64Add32},
- {{&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64},
+ {{&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub,
+ MachineType::Int64()},
kArm64Add}};
@@ -161,60 +169,78 @@ const int32_t kAddSubImmediates[] = {
// ARM64 flag setting data processing instructions.
const MachInst2 kDPFlagSetInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
- {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
- {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kArm64Tst, kMachInt64}};
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Tst,
+ MachineType::Int64()}};
// ARM64 arithmetic with overflow instructions.
const MachInst2 kOvfAddSubInstructions[] = {
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
- kArm64Add32, kMachInt32},
+ kArm64Add32, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kArm64Sub32, kMachInt32}};
+ kArm64Sub32, MachineType::Int32()}};
// ARM64 shift instructions.
const Shift kShiftInstructions[] = {
- {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+ {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32,
+ MachineType::Int32()},
kMode_Operand2_R_LSL_I},
- {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+ {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl,
+ MachineType::Int64()},
kMode_Operand2_R_LSL_I},
- {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+ {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32,
+ MachineType::Int32()},
kMode_Operand2_R_LSR_I},
- {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+ {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr,
+ MachineType::Int64()},
kMode_Operand2_R_LSR_I},
- {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+ {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32,
+ MachineType::Int32()},
kMode_Operand2_R_ASR_I},
- {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
+ {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr,
+ MachineType::Int64()},
kMode_Operand2_R_ASR_I},
- {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+ {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32,
+ MachineType::Int32()},
kMode_Operand2_R_ROR_I},
- {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64},
+ {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror,
+ MachineType::Int64()},
kMode_Operand2_R_ROR_I}};
// ARM64 Mul/Div instructions.
const MachInst2 kMulDivInstructions[] = {
- {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
- {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
- {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
- {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32, kMachInt32},
- {&RawMachineAssembler::Uint64Div, "Uint64Div", kArm64Udiv, kMachInt64}};
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kArm64Udiv,
+ MachineType::Int64()}};
// ARM64 FP arithmetic instructions.
const MachInst2 kFPArithInstructions[] = {
{&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
- kMachFloat64},
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
- kMachFloat64},
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
- kMachFloat64},
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
- kMachFloat64}};
+ MachineType::Float64()}};
struct FPCmp {
@@ -232,23 +258,29 @@ std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
// ARM64 FP comparison instructions.
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
- kMachFloat64},
- kEqual, kEqual},
+ MachineType::Float64()},
+ kEqual,
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
- kArm64Float64Cmp, kMachFloat64},
- kFloatLessThan, kFloatGreaterThan},
+ kArm64Float64Cmp, MachineType::Float64()},
+ kFloatLessThan,
+ kFloatGreaterThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kArm64Float64Cmp, kMachFloat64},
- kFloatLessThanOrEqual, kFloatGreaterThanOrEqual},
+ kArm64Float64Cmp, MachineType::Float64()},
+ kFloatLessThanOrEqual,
+ kFloatGreaterThanOrEqual},
{{&RawMachineAssembler::Float32Equal, "Float32Equal", kArm64Float32Cmp,
- kMachFloat32},
- kEqual, kEqual},
+ MachineType::Float32()},
+ kEqual,
+ kEqual},
{{&RawMachineAssembler::Float32LessThan, "Float32LessThan",
- kArm64Float32Cmp, kMachFloat32},
- kFloatLessThan, kFloatGreaterThan},
+ kArm64Float32Cmp, MachineType::Float32()},
+ kFloatLessThan,
+ kFloatGreaterThan},
{{&RawMachineAssembler::Float32LessThanOrEqual, "Float32LessThanOrEqual",
- kArm64Float32Cmp, kMachFloat32},
- kFloatLessThanOrEqual, kFloatGreaterThanOrEqual}};
+ kArm64Float32Cmp, MachineType::Float32()},
+ kFloatLessThanOrEqual,
+ kFloatGreaterThanOrEqual}};
struct Conversion {
@@ -266,32 +298,33 @@ std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
// ARM64 type conversion instructions.
const Conversion kConversionInstructions[] = {
{{&RawMachineAssembler::ChangeFloat32ToFloat64, "ChangeFloat32ToFloat64",
- kArm64Float32ToFloat64, kMachFloat64},
- kMachFloat32},
+ kArm64Float32ToFloat64, MachineType::Float64()},
+ MachineType::Float32()},
{{&RawMachineAssembler::TruncateFloat64ToFloat32,
- "TruncateFloat64ToFloat32", kArm64Float64ToFloat32, kMachFloat32},
- kMachFloat64},
+ "TruncateFloat64ToFloat32", kArm64Float64ToFloat32,
+ MachineType::Float32()},
+ MachineType::Float64()},
{{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
- kArm64Sxtw, kMachInt64},
- kMachInt32},
+ kArm64Sxtw, MachineType::Int64()},
+ MachineType::Int32()},
{{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
- kArm64Mov32, kMachUint64},
- kMachUint32},
+ kArm64Mov32, MachineType::Uint64()},
+ MachineType::Uint32()},
{{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
- kArm64Mov32, kMachInt32},
- kMachInt64},
+ kArm64Mov32, MachineType::Int32()},
+ MachineType::Int64()},
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
- kArm64Int32ToFloat64, kMachFloat64},
- kMachInt32},
+ kArm64Int32ToFloat64, MachineType::Float64()},
+ MachineType::Int32()},
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
- kArm64Uint32ToFloat64, kMachFloat64},
- kMachUint32},
+ kArm64Uint32ToFloat64, MachineType::Float64()},
+ MachineType::Uint32()},
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
- kArm64Float64ToInt32, kMachInt32},
- kMachFloat64},
+ kArm64Float64ToInt32, MachineType::Int32()},
+ MachineType::Float64()},
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
- kArm64Float64ToUint32, kMachUint32},
- kMachFloat64}};
+ kArm64Float64ToUint32, MachineType::Uint32()},
+ MachineType::Float64()}};
} // namespace
@@ -321,7 +354,7 @@ TEST_P(InstructionSelectorLogicalTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
// TODO(all): Add support for testing 64-bit immediates.
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
// Immediate on the right.
TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
StreamBuilder m(this, type, type);
@@ -358,7 +391,7 @@ TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
// Only test 64-bit shifted operands with 64-bit instructions.
if (shift.mi.machine_type != type) continue;
- TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(
m.Parameter(0),
@@ -373,7 +406,7 @@ TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
- TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(
(m.*shift.mi.constructor)(m.Parameter(1),
@@ -464,7 +497,7 @@ TEST_P(InstructionSelectorAddSubTest, ShiftByImmediateOnRight) {
continue;
}
- TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.mi.constructor)(
m.Parameter(0),
@@ -554,7 +587,7 @@ TEST_F(InstructionSelectorTest, AddImmediateOnLeft) {
{
// 32-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -568,7 +601,7 @@ TEST_F(InstructionSelectorTest, AddImmediateOnLeft) {
{
// 64-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Int64Add(m.Int64Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -585,7 +618,8 @@ TEST_F(InstructionSelectorTest, AddImmediateOnLeft) {
TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
{
// 32-bit subtract.
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
@@ -598,7 +632,8 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
}
{
// 64-bit subtract.
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
Stream s = m.Build();
@@ -617,13 +652,14 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeftWithShift) {
{
// Test 32-bit operations. Ignore ROR shifts, as subtract does not
// support them.
- if ((shift.mi.machine_type != kMachInt32) ||
+ if ((shift.mi.machine_type != MachineType::Int32()) ||
(shift.mi.arch_opcode == kArm64Ror32) ||
(shift.mi.arch_opcode == kArm64Ror))
continue;
TRACED_FORRANGE(int, imm, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Sub(
m.Int32Constant(0),
(m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm))));
@@ -642,13 +678,14 @@ TEST_F(InstructionSelectorTest, SubZeroOnLeftWithShift) {
{
// Test 64-bit operations. Ignore ROR shifts, as subtract does not
// support them.
- if ((shift.mi.machine_type != kMachInt64) ||
+ if ((shift.mi.machine_type != MachineType::Int64()) ||
(shift.mi.arch_opcode == kArm64Ror32) ||
(shift.mi.arch_opcode == kArm64Ror))
continue;
TRACED_FORRANGE(int, imm, -32, 127) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(m.Int64Sub(
m.Int64Constant(0),
(m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm))));
@@ -673,7 +710,7 @@ TEST_F(InstructionSelectorTest, AddNegImmediateOnLeft) {
// 32-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(-imm), m.Parameter(0)));
Stream s = m.Build();
@@ -689,7 +726,7 @@ TEST_F(InstructionSelectorTest, AddNegImmediateOnLeft) {
// 64-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Int64Add(m.Int64Constant(-imm), m.Parameter(0)));
Stream s = m.Build();
@@ -708,13 +745,14 @@ TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
// 32-bit add.
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Only test relevant shifted operands.
- if (shift.mi.machine_type != kMachInt32) continue;
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
if (shift.mi.arch_opcode == kArm64Ror32) continue;
// The available shift operand range is `0 <= imm < 32`, but we also test
// that immediates outside this range are handled properly (modulo-32).
TRACED_FORRANGE(int, imm, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.Int32Add)(
(m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
m.Parameter(0)));
@@ -731,13 +769,14 @@ TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
// 64-bit add.
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Only test relevant shifted operands.
- if (shift.mi.machine_type != kMachInt64) continue;
+ if (shift.mi.machine_type != MachineType::Int64()) continue;
if (shift.mi.arch_opcode == kArm64Ror) continue;
// The available shift operand range is `0 <= imm < 64`, but we also test
// that immediates outside this range are handled properly (modulo-64).
TRACED_FORRANGE(int, imm, -64, 127) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return((m.Int64Add)(
(m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm)),
m.Parameter(0)));
@@ -755,7 +794,8 @@ TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
TEST_F(InstructionSelectorTest, AddUnsignedExtendByteOnLeft) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
m.Parameter(1)));
Stream s = m.Build();
@@ -766,7 +806,8 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendByteOnLeft) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
m.Parameter(1)));
Stream s = m.Build();
@@ -781,7 +822,8 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendByteOnLeft) {
TEST_F(InstructionSelectorTest, AddUnsignedExtendHalfwordOnLeft) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
m.Parameter(1)));
Stream s = m.Build();
@@ -792,7 +834,8 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendHalfwordOnLeft) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
m.Parameter(1)));
Stream s = m.Build();
@@ -807,7 +850,8 @@ TEST_F(InstructionSelectorTest, AddUnsignedExtendHalfwordOnLeft) {
TEST_F(InstructionSelectorTest, AddSignedExtendByteOnLeft) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
m.Int32Constant(24)),
@@ -820,7 +864,8 @@ TEST_F(InstructionSelectorTest, AddSignedExtendByteOnLeft) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
m.Return(
m.Int64Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
m.Int32Constant(24)),
@@ -837,7 +882,8 @@ TEST_F(InstructionSelectorTest, AddSignedExtendByteOnLeft) {
TEST_F(InstructionSelectorTest, AddSignedExtendHalfwordOnLeft) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(16)),
m.Int32Constant(16)),
@@ -850,7 +896,8 @@ TEST_F(InstructionSelectorTest, AddSignedExtendHalfwordOnLeft) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
m.Return(
m.Int64Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(16)),
m.Int32Constant(16)),
@@ -877,7 +924,7 @@ TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
- MLabel a, b;
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -901,8 +948,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnRight) {
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation32(imm) == 1) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -924,8 +971,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnRight) {
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation64(imm) == 1) continue;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -944,8 +991,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnRight) {
TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -962,8 +1009,8 @@ TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -983,8 +1030,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithImmediateOnLeft) {
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation32(imm) == 1) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1007,8 +1054,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnLeft) {
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation64(imm) == 1) continue;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(imm), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1028,8 +1075,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithImmediateOnLeft) {
TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1048,8 +1095,8 @@ TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(mask)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1066,8 +1113,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(
m.Word32BinaryNot(m.Word32And(m.Parameter(0), m.Int32Constant(mask))),
&a, &b);
@@ -1089,8 +1136,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Int32Constant(mask), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1107,8 +1154,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(
m.Word32BinaryNot(m.Word32And(m.Int32Constant(mask), m.Parameter(0))),
&a, &b);
@@ -1130,8 +1177,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1151,8 +1198,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -1171,8 +1218,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* p0 = m.Parameter(0);
m.Branch(p0, &a, &b);
m.Bind(&a);
@@ -1188,8 +1235,8 @@ TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* p0 = m.Parameter(0);
m.Branch(m.Word32BinaryNot(p0), &a, &b);
m.Bind(&a);
@@ -1321,7 +1368,7 @@ TEST_P(InstructionSelectorOvfAddSubTest, BranchWithParameters) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
- MLabel a, b;
+ RawMachineLabel a, b;
Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -1343,7 +1390,7 @@ TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
const MachineType type = dpi.machine_type;
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
StreamBuilder m(this, type, type);
- MLabel a, b;
+ RawMachineLabel a, b;
Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -1368,7 +1415,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -1386,7 +1433,7 @@ TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -1403,7 +1450,7 @@ TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -1421,8 +1468,8 @@ TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -1465,7 +1512,8 @@ TEST_P(InstructionSelectorShiftTest, Parameter) {
TEST_P(InstructionSelectorShiftTest, Immediate) {
const Shift shift = GetParam();
const MachineType type = shift.mi.machine_type;
- TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
@@ -1485,7 +1533,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachInt32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1503,7 +1551,7 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachUint32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1520,7 +1568,7 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
m.Return(t);
@@ -1537,7 +1585,7 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(x)));
m.Return(t);
@@ -1602,10 +1650,10 @@ std::ostream& operator<<(std::ostream& os, const MulDPInst& inst) {
static const MulDPInst kMulDPInstructions[] = {
{"Int32Mul", &RawMachineAssembler::Int32Mul, &RawMachineAssembler::Int32Add,
&RawMachineAssembler::Int32Sub, kArm64Madd32, kArm64Msub32, kArm64Mneg32,
- kMachInt32},
+ MachineType::Int32()},
{"Int64Mul", &RawMachineAssembler::Int64Mul, &RawMachineAssembler::Int64Add,
&RawMachineAssembler::Int64Sub, kArm64Madd, kArm64Msub, kArm64Mneg,
- kMachInt64}};
+ MachineType::Int64()}};
typedef InstructionSelectorTestWithParam<MulDPInst>
@@ -1690,7 +1738,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1703,7 +1751,7 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// (2^k + 1) * x -> x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1716,7 +1764,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// x * (2^k + 1) + c -> x + (x << k) + c
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Add(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)),
m.Parameter(1)));
@@ -1732,7 +1781,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// (2^k + 1) * x + c -> x + (x << k) + c
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Add(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)),
m.Parameter(1)));
@@ -1748,7 +1798,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// c + x * (2^k + 1) -> c + x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Add(m.Parameter(0),
m.Int32Mul(m.Parameter(1), m.Int32Constant((1 << k) + 1))));
@@ -1764,7 +1815,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// c + (2^k + 1) * x -> c + x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Add(m.Parameter(0),
m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(1))));
@@ -1780,7 +1832,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// c - x * (2^k + 1) -> c - x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0),
m.Int32Mul(m.Parameter(1), m.Int32Constant((1 << k) + 1))));
@@ -1796,7 +1849,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
}
// c - (2^k + 1) * x -> c - x + (x << k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0),
m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(1))));
@@ -1816,7 +1870,7 @@ TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1829,7 +1883,7 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// (2^k + 1) * x -> x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1842,7 +1896,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// x * (2^k + 1) + c -> x + (x << k) + c
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(
m.Int64Add(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)),
m.Parameter(1)));
@@ -1858,7 +1913,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// (2^k + 1) * x + c -> x + (x << k) + c
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(
m.Int64Add(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)),
m.Parameter(1)));
@@ -1874,7 +1930,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// c + x * (2^k + 1) -> c + x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(
m.Int64Add(m.Parameter(0),
m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
@@ -1890,7 +1947,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// c + (2^k + 1) * x -> c + x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(
m.Int64Add(m.Parameter(0),
m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
@@ -1906,7 +1964,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// c - x * (2^k + 1) -> c - x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(
m.Int64Sub(m.Parameter(0),
m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
@@ -1922,7 +1981,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
}
// c - (2^k + 1) * x -> c - x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(
m.Int64Sub(m.Parameter(0),
m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
@@ -1967,7 +2027,8 @@ typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1981,8 +2042,8 @@ TEST_P(InstructionSelectorFPCmpTest, Parameter) {
TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnRight) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type);
- if (cmp.mi.machine_type == kMachFloat64) {
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type);
+ if (cmp.mi.machine_type == MachineType::Float64()) {
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
} else {
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float32Constant(0.0f)));
@@ -2000,8 +2061,8 @@ TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnRight) {
TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnLeft) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type);
- if (cmp.mi.machine_type == kMachFloat64) {
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type);
+ if (cmp.mi.machine_type == MachineType::Float64()) {
m.Return((m.*cmp.mi.constructor)(m.Float64Constant(0.0), m.Parameter(0)));
} else {
m.Return((m.*cmp.mi.constructor)(m.Float32Constant(0.0f), m.Parameter(0)));
@@ -2067,52 +2128,52 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kArm64Ldrsb,
kArm64Strb,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001, 2121,
2442, 4093, 4094, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kArm64Ldrb,
kArm64Strb,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001, 2121,
2442, 4093, 4094, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kArm64Ldrsh,
kArm64Strh,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098, 4100,
4242, 6786, 8188, 8190}},
- {kMachUint16,
+ {MachineType::Uint16(),
kArm64Ldrh,
kArm64Strh,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098, 4100,
4242, 6786, 8188, 8190}},
- {kMachInt32,
+ {MachineType::Int32(),
kArm64LdrW,
kArm64StrW,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
3276, 3280, 16376, 16380}},
- {kMachUint32,
+ {MachineType::Uint32(),
kArm64LdrW,
kArm64StrW,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
3276, 3280, 16376, 16380}},
- {kMachInt64,
+ {MachineType::Int64(),
kArm64Ldr,
kArm64Str,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
16384, 16392, 32752, 32760}},
- {kMachUint64,
+ {MachineType::Uint64(),
kArm64Ldr,
kArm64Str,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
16384, 16392, 32752, 32760}},
- {kMachFloat32,
+ {MachineType::Float32(),
kArm64LdrS,
kArm64StrS,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
3276, 3280, 16376, 16380}},
- {kMachFloat64,
+ {MachineType::Float64(),
kArm64LdrD,
kArm64StrD,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
@@ -2125,7 +2186,8 @@ typedef InstructionSelectorTestWithParam<MemoryAccess>
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2139,7 +2201,7 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2155,9 +2217,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2171,9 +2234,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2196,8 +2260,10 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
// Comparison instructions.
static const MachInst2 kComparisonInstructions[] = {
- {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
- {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp,
+ MachineType::Int64()},
};
@@ -2262,7 +2328,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2274,7 +2340,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2290,7 +2356,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2302,7 +2368,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2319,13 +2385,14 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
TEST_F(InstructionSelectorTest, Word32EqualWithWord32Shift) {
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Skip non 32-bit shifts or ror operations.
- if (shift.mi.machine_type != kMachInt32 ||
+ if (shift.mi.machine_type != MachineType::Int32() ||
shift.mi.arch_opcode == kArm64Ror32) {
continue;
}
TRACED_FORRANGE(int32_t, imm, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
@@ -2341,7 +2408,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithWord32Shift) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
TRACED_FORRANGE(int32_t, imm, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
@@ -2362,7 +2430,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithWord32Shift) {
TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendByte) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = m.Word32And(p1, m.Int32Constant(0xff));
@@ -2377,7 +2446,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendByte) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = m.Word32And(p1, m.Int32Constant(0xff));
@@ -2396,7 +2466,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendByte) {
TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendHalfword) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = m.Word32And(p1, m.Int32Constant(0xffff));
@@ -2411,7 +2482,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendHalfword) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = m.Word32And(p1, m.Int32Constant(0xffff));
@@ -2430,7 +2502,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendHalfword) {
TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendByte) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r =
@@ -2446,7 +2519,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendByte) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r =
@@ -2466,7 +2540,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendByte) {
TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendHalfword) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r =
@@ -2482,7 +2557,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendHalfword) {
ASSERT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r =
@@ -2502,7 +2578,8 @@ TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendHalfword) {
TEST_F(InstructionSelectorTest, Word32EqualZeroWithWord32Equal) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
m.Return(m.Word32Equal(m.Word32Equal(p0, p1), m.Int32Constant(0)));
@@ -2517,7 +2594,8 @@ TEST_F(InstructionSelectorTest, Word32EqualZeroWithWord32Equal) {
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
m.Return(m.Word32Equal(m.Int32Constant(0), m.Word32Equal(p0, p1)));
@@ -2549,19 +2627,19 @@ std::ostream& operator<<(std::ostream& os, const IntegerCmp& cmp) {
// ARM64 32-bit integer comparison instructions.
const IntegerCmp kIntegerCmpInstructions[] = {
{{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
- kMachInt32},
+ MachineType::Int32()},
kEqual},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
- kMachInt32},
+ MachineType::Int32()},
kSignedLessThan},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kArm64Cmp32, kMachInt32},
+ kArm64Cmp32, MachineType::Int32()},
kSignedLessThanOrEqual},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
- kMachUint32},
+ MachineType::Uint32()},
kUnsignedLessThan},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kArm64Cmp32, kMachUint32},
+ kArm64Cmp32, MachineType::Uint32()},
kUnsignedLessThanOrEqual}};
} // namespace
@@ -2572,13 +2650,14 @@ TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Test 32-bit operations. Ignore ROR shifts, as compare-negate does not
// support them.
- if (shift.mi.machine_type != kMachInt32 ||
+ if (shift.mi.machine_type != MachineType::Int32() ||
shift.mi.arch_opcode == kArm64Ror32) {
continue;
}
TRACED_FORRANGE(int32_t, imm, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
@@ -2604,12 +2683,18 @@ TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
static const MachInst2 kLogicalWithNotRHSs[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kArm64Bic32, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kArm64Bic, kMachInt64},
- {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Orn32, kMachInt32},
- {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Orn, kMachInt64},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eon32, kMachInt32},
- {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon, kMachInt64}};
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Bic32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Bic,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Orn32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Orn,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eon32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon,
+ MachineType::Int64()}};
typedef InstructionSelectorTestWithParam<MachInst2>
@@ -2622,11 +2707,11 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
// Test cases where RHS is Xor(x, -1).
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return((m.*inst.constructor)(
m.Parameter(0), m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return((m.*inst.constructor)(
m.Parameter(0), m.Word64Xor(m.Parameter(1), m.Int64Constant(-1))));
}
@@ -2638,11 +2723,11 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
}
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return((m.*inst.constructor)(
m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)), m.Parameter(1)));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return((m.*inst.constructor)(
m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)), m.Parameter(1)));
}
@@ -2655,11 +2740,11 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
// Test cases where RHS is Not(x).
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return(
(m.*inst.constructor)(m.Parameter(0), m.Word32Not(m.Parameter(1))));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return(
(m.*inst.constructor)(m.Parameter(0), m.Word64Not(m.Parameter(1))));
}
@@ -2671,11 +2756,11 @@ TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
}
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return(
(m.*inst.constructor)(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return(
(m.*inst.constructor)(m.Word64Not(m.Parameter(0)), m.Parameter(1)));
}
@@ -2694,7 +2779,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Not(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2705,7 +2790,7 @@ TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
TEST_F(InstructionSelectorTest, Word64NotWithParameter) {
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Not(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2717,7 +2802,7 @@ TEST_F(InstructionSelectorTest, Word64NotWithParameter) {
TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2726,7 +2811,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2739,7 +2824,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2748,7 +2833,7 @@ TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2768,7 +2853,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(shift)));
Stream s = m.Build();
@@ -2785,7 +2870,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
uint32_t jnk = rng()->NextInt();
jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(shift)));
Stream s = m.Build();
@@ -2809,7 +2894,7 @@ TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
m.Int64Constant(shift)));
Stream s = m.Build();
@@ -2827,7 +2912,7 @@ TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
m.Int64Constant(shift)));
Stream s = m.Build();
@@ -2848,7 +2933,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
int32_t lsb = shift & 0x1f;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(msk)));
Stream s = m.Build();
@@ -2864,7 +2949,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
int32_t lsb = shift & 0x1f;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32And(m.Int32Constant(msk),
m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
@@ -2887,7 +2972,7 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
int64_t lsb = shift & 0x3f;
TRACED_FORRANGE(int64_t, width, 1, 63) {
uint64_t msk = (V8_UINT64_C(1) << width) - 1;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(shift)),
m.Int64Constant(msk)));
Stream s = m.Build();
@@ -2903,7 +2988,7 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
int64_t lsb = shift & 0x3f;
TRACED_FORRANGE(int64_t, width, 1, 63) {
uint64_t msk = (V8_UINT64_C(1) << width) - 1;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(
m.Word64And(m.Int64Constant(msk),
m.Word64Shr(m.Parameter(0), m.Int64Constant(shift))));
@@ -2920,7 +3005,8 @@ TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -2943,7 +3029,8 @@ TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
TEST_F(InstructionSelectorTest, Int32MulHighWithSar) {
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Word32Sar(m.Int32MulHigh(p0, p1), m.Int32Constant(shift));
@@ -2966,7 +3053,8 @@ TEST_F(InstructionSelectorTest, Int32MulHighWithSar) {
TEST_F(InstructionSelectorTest, Int32MulHighWithAdd) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const a = m.Int32Add(m.Int32MulHigh(p0, p1), p0);
@@ -2999,7 +3087,8 @@ TEST_F(InstructionSelectorTest, Int32MulHighWithAdd) {
TEST_F(InstructionSelectorTest, Uint32MulHighWithShr) {
TRACED_FORRANGE(int32_t, shift, -32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n =
@@ -3024,7 +3113,7 @@ TEST_F(InstructionSelectorTest, Uint32MulHighWithShr) {
TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
TRACED_FORRANGE(int32_t, shift, 1, 31) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(shift)),
m.Int32Constant(shift));
@@ -3038,7 +3127,7 @@ TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
TRACED_FORRANGE(int32_t, shift, 1, 31) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(shift + 32)),
m.Int32Constant(shift + 64));
@@ -3056,7 +3145,7 @@ TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
TEST_F(InstructionSelectorTest, Word32ShrWithWord32Shl) {
TRACED_FORRANGE(int32_t, shift, 1, 31) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32Shr(m.Word32Shl(p0, m.Int32Constant(shift)),
m.Int32Constant(shift));
@@ -3070,7 +3159,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32Shl) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
TRACED_FORRANGE(int32_t, shift, 1, 31) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32Shr(m.Word32Shl(p0, m.Int32Constant(shift + 32)),
m.Int32Constant(shift + 64));
@@ -3088,7 +3177,7 @@ TEST_F(InstructionSelectorTest, Word32ShrWithWord32Shl) {
TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
TRACED_FORRANGE(int32_t, shift, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
@@ -3103,7 +3192,7 @@ TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
TRACED_FORRANGE(int32_t, shift, 0, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
@@ -3121,7 +3210,7 @@ TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
TEST_F(InstructionSelectorTest, Word32Clz) {
- StreamBuilder m(this, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32Clz(p0);
m.Return(n);
@@ -3136,7 +3225,7 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
TEST_F(InstructionSelectorTest, Float32Abs) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -3151,7 +3240,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
TEST_F(InstructionSelectorTest, Float64Abs) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -3166,7 +3255,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -3181,7 +3270,8 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Max(p0, p1);
@@ -3199,7 +3289,8 @@ TEST_F(InstructionSelectorTest, Float32Max) {
TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Min(p0, p1);
@@ -3217,7 +3308,8 @@ TEST_F(InstructionSelectorTest, Float32Min) {
TEST_F(InstructionSelectorTest, Float64Max) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Max(p0, p1);
@@ -3235,7 +3327,8 @@ TEST_F(InstructionSelectorTest, Float64Max) {
TEST_F(InstructionSelectorTest, Float64Min) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Min(p0, p1);
diff --git a/deps/v8/test/unittests/compiler/binary-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/binary-operator-reducer-unittest.cc
deleted file mode 100644
index 5d223446e2..0000000000
--- a/deps/v8/test/unittests/compiler/binary-operator-reducer-unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/binary-operator-reducer.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/machine-type.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/types-inl.h"
-#include "test/unittests/compiler/graph-reducer-unittest.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-
-using testing::StrictMock;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class BinaryOperatorReducerTest : public TypedGraphTest {
- public:
- explicit BinaryOperatorReducerTest(int num_parameters = 1)
- : TypedGraphTest(num_parameters), machine_(zone()), simplified_(zone()) {}
- ~BinaryOperatorReducerTest() override {}
-
- protected:
- Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
- BinaryOperatorReducer reducer(editor, graph(), common(), machine());
- return reducer.Reduce(node);
- }
-
- Reduction Reduce(Node* node) {
- StrictMock<MockAdvancedReducerEditor> editor;
- return Reduce(&editor, node);
- }
-
- MachineOperatorBuilder* machine() { return &machine_; }
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- private:
- MachineOperatorBuilder machine_;
- SimplifiedOperatorBuilder simplified_;
-};
-
-
-TEST_F(BinaryOperatorReducerTest, Div52OfMul52) {
- // This reduction applies only to 64bit arch
- if (!machine()->Is64()) return;
-
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* t0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), p0);
- Node* t1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), p1);
-
- Type* mul_range = Type::Range(0x0, 0xFFFFFFFFFFFFFULL, graph()->zone());
- Node* mul = graph()->NewNode(machine()->Float64Mul(), t0, t1);
- NodeProperties::SetType(
- mul, Type::Intersect(mul_range, Type::Number(), graph()->zone()));
-
- Node* mul_replacement;
- auto mul_matcher = IsInt64Mul(p0, p1);
- {
- StrictMock<MockAdvancedReducerEditor> editor;
-
- EXPECT_CALL(editor, Revisit(mul_matcher));
-
- Reduction r = Reduce(&editor, mul);
- ASSERT_TRUE(r.Changed());
- mul_replacement = r.replacement();
- EXPECT_THAT(mul_replacement, IsRoundInt64ToFloat64(mul_matcher));
- }
-
- {
- StrictMock<MockAdvancedReducerEditor> editor;
-
- Node* power = Float64Constant(0x4000000);
- Node* div =
- graph()->NewNode(machine()->Float64Div(), mul_replacement, power);
-
- auto shr_matcher = IsWord64Shr(mul_matcher, IsInt64Constant(26));
- EXPECT_CALL(editor, Revisit(shr_matcher));
-
- Reduction r = Reduce(&editor, div);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsRoundInt64ToFloat64(shr_matcher));
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index efa490d7ec..fcd702c428 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -18,7 +18,8 @@ namespace compiler {
class BranchEliminationTest : public TypedGraphTest {
public:
BranchEliminationTest()
- : machine_(zone(), kMachPtr, MachineOperatorBuilder::kNoFlags) {}
+ : machine_(zone(), MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kNoFlags) {}
MachineOperatorBuilder* machine() { return &machine_; }
@@ -54,14 +55,15 @@ TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
Node* inner_merge =
graph()->NewNode(common()->Merge(2), inner_if_true, inner_if_false);
Node* inner_phi =
- graph()->NewNode(common()->Phi(kMachInt32, 2), Int32Constant(1),
- Int32Constant(2), inner_merge);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(1), Int32Constant(2), inner_merge);
Node* outer_if_false = graph()->NewNode(common()->IfFalse(), outer_branch);
Node* outer_merge =
graph()->NewNode(common()->Merge(2), inner_merge, outer_if_false);
- Node* outer_phi = graph()->NewNode(common()->Phi(kMachInt32, 2), inner_phi,
- Int32Constant(3), outer_merge);
+ Node* outer_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ inner_phi, Int32Constant(3), outer_merge);
Node* ret = graph()->NewNode(common()->Return(), outer_phi, graph()->start(),
outer_merge);
@@ -72,8 +74,8 @@ TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
// Outer branch should not be rewritten, the inner branch should be discarded.
EXPECT_THAT(outer_branch, IsBranch(condition, graph()->start()));
EXPECT_THAT(inner_phi,
- IsPhi(kMachInt32, IsInt32Constant(1), IsInt32Constant(2),
- IsMerge(outer_if_true, IsDead())));
+ IsPhi(MachineRepresentation::kWord32, IsInt32Constant(1),
+ IsInt32Constant(2), IsMerge(outer_if_true, IsDead())));
}
@@ -95,13 +97,14 @@ TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
Node* inner_merge =
graph()->NewNode(common()->Merge(2), inner_if_true, inner_if_false);
Node* inner_phi =
- graph()->NewNode(common()->Phi(kMachInt32, 2), Int32Constant(2),
- Int32Constant(3), inner_merge);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(2), Int32Constant(3), inner_merge);
Node* outer_merge =
graph()->NewNode(common()->Merge(2), outer_if_true, inner_merge);
- Node* outer_phi = graph()->NewNode(common()->Phi(kMachInt32, 2),
- Int32Constant(1), inner_phi, outer_merge);
+ Node* outer_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(1), inner_phi, outer_merge);
Node* ret = graph()->NewNode(common()->Return(), outer_phi, graph()->start(),
outer_merge);
@@ -112,8 +115,8 @@ TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
// Outer branch should not be rewritten, the inner branch should be discarded.
EXPECT_THAT(outer_branch, IsBranch(condition, graph()->start()));
EXPECT_THAT(inner_phi,
- IsPhi(kMachInt32, IsInt32Constant(2), IsInt32Constant(3),
- IsMerge(IsDead(), outer_if_false)));
+ IsPhi(MachineRepresentation::kWord32, IsInt32Constant(2),
+ IsInt32Constant(3), IsMerge(IsDead(), outer_if_false)));
}
@@ -127,15 +130,17 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* merge1 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- Node* phi1 = graph()->NewNode(common()->Phi(kMachInt32, 2), Int32Constant(1),
- Int32Constant(2), merge1);
+ Node* phi1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(1), Int32Constant(2), merge1);
Node* branch2 = graph()->NewNode(common()->Branch(), condition, merge1);
Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
Node* merge2 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- Node* phi2 = graph()->NewNode(common()->Phi(kMachInt32, 2), Int32Constant(3),
- Int32Constant(4), merge1);
+ Node* phi2 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(3), Int32Constant(4), merge1);
Node* add = graph()->NewNode(machine()->Int32Add(), phi1, phi2);
diff --git a/deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc
deleted file mode 100644
index 27ff4ca359..0000000000
--- a/deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <iostream>
-
-#include "src/compiler/bytecode-graph-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/instruction.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/interpreter/bytecode-array-builder.h"
-#include "src/parser.h"
-#include "test/unittests/compiler/compiler-test-utils.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-#include "test/unittests/test-utils.h"
-
-using ::testing::_;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class BytecodeGraphBuilderTest : public TestWithIsolateAndZone {
- public:
- BytecodeGraphBuilderTest() : array_builder_(isolate(), zone()) {}
-
- Graph* GetCompletedGraph();
-
- Matcher<Node*> IsUndefinedConstant();
- Matcher<Node*> IsNullConstant();
- Matcher<Node*> IsTheHoleConstant();
- Matcher<Node*> IsFalseConstant();
- Matcher<Node*> IsTrueConstant();
-
- interpreter::BytecodeArrayBuilder* array_builder() { return &array_builder_; }
-
- private:
- interpreter::BytecodeArrayBuilder array_builder_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilderTest);
-};
-
-
-Graph* BytecodeGraphBuilderTest::GetCompletedGraph() {
- MachineOperatorBuilder* machine = new (zone()) MachineOperatorBuilder(
- zone(), kMachPtr, InstructionSelector::SupportedMachineOperatorFlags());
- CommonOperatorBuilder* common = new (zone()) CommonOperatorBuilder(zone());
- JSOperatorBuilder* javascript = new (zone()) JSOperatorBuilder(zone());
- Graph* graph = new (zone()) Graph(zone());
- JSGraph* jsgraph = new (zone())
- JSGraph(isolate(), graph, common, javascript, nullptr, machine);
-
- Handle<String> name = factory()->NewStringFromStaticChars("test");
- Handle<String> script = factory()->NewStringFromStaticChars("test() {}");
- Handle<SharedFunctionInfo> shared_info =
- factory()->NewSharedFunctionInfo(name, MaybeHandle<Code>());
- shared_info->set_script(*factory()->NewScript(script));
-
- ParseInfo parse_info(zone(), shared_info);
- CompilationInfo info(&parse_info);
- Handle<BytecodeArray> bytecode_array = array_builder()->ToBytecodeArray();
- info.shared_info()->set_function_data(*bytecode_array);
-
- BytecodeGraphBuilder graph_builder(zone(), &info, jsgraph);
- graph_builder.CreateGraph();
- return graph;
-}
-
-
-Matcher<Node*> BytecodeGraphBuilderTest::IsUndefinedConstant() {
- return IsHeapConstant(factory()->undefined_value());
-}
-
-
-Matcher<Node*> BytecodeGraphBuilderTest::IsNullConstant() {
- return IsHeapConstant(factory()->null_value());
-}
-
-
-Matcher<Node*> BytecodeGraphBuilderTest::IsTheHoleConstant() {
- return IsHeapConstant(factory()->the_hole_value());
-}
-
-
-Matcher<Node*> BytecodeGraphBuilderTest::IsFalseConstant() {
- return IsHeapConstant(factory()->false_value());
-}
-
-
-Matcher<Node*> BytecodeGraphBuilderTest::IsTrueConstant() {
- return IsHeapConstant(factory()->true_value());
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnUndefined) {
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadUndefined().Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- Node* effect = graph->start();
- Node* control = graph->start();
- EXPECT_THAT(ret, IsReturn(IsUndefinedConstant(), effect, control));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnNull) {
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadNull().Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- EXPECT_THAT(ret, IsReturn(IsNullConstant(), graph->start(), graph->start()));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnTheHole) {
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadTheHole().Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- Node* effect = graph->start();
- Node* control = graph->start();
- EXPECT_THAT(ret, IsReturn(IsTheHoleConstant(), effect, control));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnTrue) {
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadTrue().Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- Node* effect = graph->start();
- Node* control = graph->start();
- EXPECT_THAT(ret, IsReturn(IsTrueConstant(), effect, control));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnFalse) {
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadFalse().Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- Node* effect = graph->start();
- Node* control = graph->start();
- EXPECT_THAT(ret, IsReturn(IsFalseConstant(), effect, control));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnInt8) {
- static const int kValue = 3;
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadLiteral(Smi::FromInt(kValue)).Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- Node* effect = graph->start();
- Node* control = graph->start();
- EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), effect, control));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, ReturnDouble) {
- const double kValue = 0.123456789;
- array_builder()->set_locals_count(0);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()->LoadLiteral(factory()->NewHeapNumber(kValue));
- array_builder()->Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- Node* effect = graph->start();
- Node* control = graph->start();
- EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), effect, control));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, SimpleExpressionWithParameters) {
- array_builder()->set_locals_count(1);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(3);
- array_builder()
- ->LoadAccumulatorWithRegister(array_builder()->Parameter(1))
- .BinaryOperation(Token::Value::ADD, array_builder()->Parameter(2),
- Strength::WEAK)
- .StoreAccumulatorInRegister(interpreter::Register(0))
- .Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- // NB binary operation is <reg> <op> <acc>. The register represents
- // the left-hand side, which is why parameters appear in opposite
- // order to construction via the builder.
- EXPECT_THAT(ret, IsReturn(IsJSAdd(IsParameter(2), IsParameter(1)), _, _));
-}
-
-
-TEST_F(BytecodeGraphBuilderTest, SimpleExpressionWithRegister) {
- static const int kLeft = -655371;
- static const int kRight = +2000000;
- array_builder()->set_locals_count(1);
- array_builder()->set_context_count(0);
- array_builder()->set_parameter_count(1);
- array_builder()
- ->LoadLiteral(Smi::FromInt(kLeft))
- .StoreAccumulatorInRegister(interpreter::Register(0))
- .LoadLiteral(Smi::FromInt(kRight))
- .BinaryOperation(Token::Value::ADD, interpreter::Register(0),
- Strength::WEAK)
- .Return();
-
- Graph* graph = GetCompletedGraph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* ret = end->InputAt(0);
- EXPECT_THAT(
- ret, IsReturn(IsJSAdd(IsNumberConstant(kLeft), IsNumberConstant(kRight)),
- _, _));
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
index aec568c9db..fd0766caba 100644
--- a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
@@ -27,11 +27,15 @@ class ChangeLoweringTest : public TypedGraphTest {
public:
ChangeLoweringTest() : simplified_(zone()) {}
- virtual MachineType WordRepresentation() const = 0;
+ virtual MachineRepresentation WordRepresentation() const = 0;
protected:
- bool Is32() const { return WordRepresentation() == kRepWord32; }
- bool Is64() const { return WordRepresentation() == kRepWord64; }
+ bool Is32() const {
+ return WordRepresentation() == MachineRepresentation::kWord32;
+ }
+ bool Is64() const {
+ return WordRepresentation() == MachineRepresentation::kWord64;
+ }
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone(), WordRepresentation());
@@ -67,7 +71,7 @@ class ChangeLoweringTest : public TypedGraphTest {
}
Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) {
- return IsLoad(kMachFloat64, value_matcher,
+ return IsLoad(MachineType::Float64(), value_matcher,
IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
graph()->start(), control_matcher);
}
@@ -94,11 +98,11 @@ class ChangeLoweringTest : public TypedGraphTest {
class ChangeLoweringCommonTest
: public ChangeLoweringTest,
- public ::testing::WithParamInterface<MachineType> {
+ public ::testing::WithParamInterface<MachineRepresentation> {
public:
~ChangeLoweringCommonTest() override {}
- MachineType WordRepresentation() const final { return GetParam(); }
+ MachineRepresentation WordRepresentation() const final { return GetParam(); }
};
@@ -107,8 +111,8 @@ TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
Reduction r =
Reduce(graph()->NewNode(simplified()->ChangeBitToBool(), value));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsSelect(kMachAnyTagged, value, IsTrueConstant(),
- IsFalseConstant()));
+ EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kTagged, value,
+ IsTrueConstant(), IsFalseConstant()));
}
@@ -177,8 +181,173 @@ TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedPointer) {
}
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldSmi) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::TaggedSigned());
+ Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ p0, IsIntPtrConstant(access.offset - access.tag()), p1,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldTagged) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Tagged());
+ Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier),
+ p0, IsIntPtrConstant(access.offset - access.tag()), p1,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, LoadField) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* load = graph()->NewNode(simplified()->LoadField(access), p0,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(load);
+
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match = IsIntPtrConstant(access.offset - access.tag());
+ EXPECT_THAT(r.replacement(),
+ IsLoad(MachineType::AnyTagged(), p0,
+ IsIntPtrConstant(access.offset - access.tag()),
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementTagged) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* p2 = Parameter(Type::Tagged());
+ Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
+ IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier),
+ p0, index_match, p2, graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementUint8) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Signed32(), MachineType::Uint8()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* p2 = Parameter(Type::Signed32());
+ Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kWord8,
+ kNoWriteBarrier),
+ p0, index_match, p2, graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementTagged) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(load);
+
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
+ IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(), IsLoad(MachineType::AnyTagged(), p0, index_match,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementInt8) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Signed32(), MachineType::Int8()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(load);
+
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(), IsLoad(MachineType::Int8(), p0, index_match,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, Allocate) {
+ Node* p0 = Parameter(Type::Signed32());
+ Node* alloc = graph()->NewNode(simplified()->Allocate(TENURED), p0,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(alloc);
+
+ // Only check that we lowered, but do not specify the exact form since
+ // this is subject to change.
+ ASSERT_TRUE(r.Changed());
+}
+
+
INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
- ::testing::Values(kRepWord32, kRepWord64));
+ ::testing::Values(MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64));
// -----------------------------------------------------------------------------
@@ -188,7 +357,9 @@ INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
class ChangeLowering32Test : public ChangeLoweringTest {
public:
~ChangeLowering32Test() override {}
- MachineType WordRepresentation() const final { return kRepWord32; }
+ MachineRepresentation WordRepresentation() const final {
+ return MachineRepresentation::kWord32;
+ }
};
@@ -200,12 +371,13 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
Capture<Node*> add, branch, heap_number, if_true;
EXPECT_THAT(
r.replacement(),
- IsPhi(kMachAnyTagged,
+ IsPhi(MachineRepresentation::kTagged,
IsFinishRegion(
AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_true))),
IsStore(
- StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier),
CaptureEq(&heap_number),
IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
IsChangeInt32ToFloat64(value), CaptureEq(&heap_number),
@@ -230,7 +402,8 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
Capture<Node*> branch, if_true;
EXPECT_THAT(
r.replacement(),
- IsPhi(kMachFloat64, IsLoadHeapNumber(value, CaptureEq(&if_true)),
+ IsPhi(MachineRepresentation::kFloat64,
+ IsLoadHeapNumber(value, CaptureEq(&if_true)),
IsChangeInt32ToFloat64(IsWord32Sar(
value, IsInt32Constant(kSmiTagSize + kSmiShiftSize))),
IsMerge(AllOf(CaptureEq(&if_true),
@@ -255,7 +428,7 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachInt32,
+ MachineRepresentation::kWord32,
IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
@@ -278,7 +451,7 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachUint32,
+ MachineRepresentation::kWord32,
IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
@@ -301,13 +474,14 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachAnyTagged,
+ MachineRepresentation::kTagged,
IsWord32Shl(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
IsFinishRegion(
AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_false))),
IsStore(
- StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier),
CaptureEq(&heap_number),
IsInt32Constant(HeapNumber::kValueOffset - kHeapObjectTag),
IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
@@ -328,7 +502,9 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
class ChangeLowering64Test : public ChangeLoweringTest {
public:
~ChangeLowering64Test() override {}
- MachineType WordRepresentation() const final { return kRepWord64; }
+ MachineRepresentation WordRepresentation() const final {
+ return MachineRepresentation::kWord64;
+ }
};
@@ -352,7 +528,8 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
Capture<Node*> branch, if_true;
EXPECT_THAT(
r.replacement(),
- IsPhi(kMachFloat64, IsLoadHeapNumber(value, CaptureEq(&if_true)),
+ IsPhi(MachineRepresentation::kFloat64,
+ IsLoadHeapNumber(value, CaptureEq(&if_true)),
IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(IsWord64Sar(
value, IsInt64Constant(kSmiTagSize + kSmiShiftSize)))),
IsMerge(AllOf(CaptureEq(&if_true),
@@ -377,7 +554,7 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachInt32,
+ MachineRepresentation::kWord32,
IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
IsTruncateInt64ToInt32(
IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
@@ -401,7 +578,7 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachUint32,
+ MachineRepresentation::kWord32,
IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
IsTruncateInt64ToInt32(
IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
@@ -425,14 +602,15 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachAnyTagged,
+ MachineRepresentation::kTagged,
IsWord64Shl(IsChangeUint32ToUint64(value),
IsInt64Constant(kSmiTagSize + kSmiShiftSize)),
IsFinishRegion(
AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_false))),
IsStore(
- StoreRepresentation(kMachFloat64, kNoWriteBarrier),
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier),
CaptureEq(&heap_number),
IsInt64Constant(HeapNumber::kValueOffset - kHeapObjectTag),
IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index 13d2d6707a..1c163706f2 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -5,9 +5,9 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
#include "src/compiler/simplified-operator.h"
+#include "src/machine-type.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -28,7 +28,8 @@ class CommonOperatorReducerTest : public GraphTest {
Reduction Reduce(
AdvancedReducer::Editor* editor, Node* node,
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
- MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
CommonOperatorReducer reducer(editor, graph(), common(), &machine);
return reducer.Reduce(node);
}
@@ -54,11 +55,11 @@ const BranchHint kBranchHints[] = {BranchHint::kNone, BranchHint::kFalse,
BranchHint::kTrue};
-const MachineType kMachineTypes[] = {
- kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
- kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
- kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+const MachineRepresentation kMachineRepresentations[] = {
+ MachineRepresentation::kBit, MachineRepresentation::kWord8,
+ MachineRepresentation::kWord16, MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64, MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64, MachineRepresentation::kTagged};
const Operator kOp0(0, Operator::kNoProperties, "Op0", 0, 0, 0, 1, 1, 0);
@@ -279,7 +280,7 @@ TEST_F(CommonOperatorReducerTest, PhiWithMerge) {
Node* const input = graph()->NewNode(&kOp0);
TRACED_FORRANGE(int, input_count, 2, kMaxInputs - 1) {
int const value_input_count = input_count - 1;
- TRACED_FOREACH(MachineType, type, kMachineTypes) {
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
for (int i = 0; i < value_input_count; ++i) {
inputs[i] = graph()->start();
}
@@ -292,7 +293,7 @@ TEST_F(CommonOperatorReducerTest, PhiWithMerge) {
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(
- &editor, graph()->NewNode(common()->Phi(type, value_input_count),
+ &editor, graph()->NewNode(common()->Phi(rep, value_input_count),
input_count, inputs));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
@@ -306,8 +307,8 @@ TEST_F(CommonOperatorReducerTest, PhiWithLoop) {
Node* const loop =
graph()->NewNode(common()->Loop(2), graph()->start(), graph()->start());
loop->ReplaceInput(1, loop);
- Node* const phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p0, loop);
phi->ReplaceInput(1, phi);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(loop));
@@ -327,8 +328,8 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat32Abs) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse = graph()->NewNode(machine()->Float32Sub(), c0, p0);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachFloat32, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, 2), vtrue, vfalse, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(&editor, phi);
@@ -347,8 +348,8 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse = graph()->NewNode(machine()->Float64Sub(), c0, p0);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachFloat64, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(&editor, phi);
@@ -365,7 +366,8 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat32Max) {
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(kMachFloat32, 2), p1, p0, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, 2), p1, p0, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Max);
@@ -382,7 +384,8 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64Max) {
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), p1, p0, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Max);
@@ -399,7 +402,8 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat32Min) {
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(kMachFloat32, 2), p0, p1, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, 2), p0, p1, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Min);
@@ -416,7 +420,8 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64Min) {
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), p0, p1, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Revisit(merge));
Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Min);
@@ -440,8 +445,8 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) {
Node* vfalse = Parameter(1);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
Node* ret = graph()->NewNode(common()->Return(), phi, ephi, merge);
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
StrictMock<MockAdvancedReducerEditor> editor;
@@ -461,9 +466,9 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) {
TEST_F(CommonOperatorReducerTest, SelectWithSameThenAndElse) {
Node* const input = graph()->NewNode(&kOp0);
TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- TRACED_FOREACH(MachineType, type, kMachineTypes) {
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
Reduction r = Reduce(
- graph()->NewNode(common()->Select(type, hint), input, input, input));
+ graph()->NewNode(common()->Select(rep, hint), input, input, input));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -474,8 +479,9 @@ TEST_F(CommonOperatorReducerTest, SelectWithSameThenAndElse) {
TEST_F(CommonOperatorReducerTest, SelectWithInt32ZeroConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- Node* select = graph()->NewNode(common()->Select(kMachAnyTagged),
- Int32Constant(0), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int32Constant(0), p0, p1);
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p1, r.replacement());
@@ -485,8 +491,9 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt32ZeroConstant) {
TEST_F(CommonOperatorReducerTest, SelectWithInt32OneConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- Node* select = graph()->NewNode(common()->Select(kMachAnyTagged),
- Int32Constant(1), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int32Constant(1), p0, p1);
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
@@ -496,8 +503,9 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt32OneConstant) {
TEST_F(CommonOperatorReducerTest, SelectWithInt64ZeroConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- Node* select = graph()->NewNode(common()->Select(kMachAnyTagged),
- Int64Constant(0), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int64Constant(0), p0, p1);
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p1, r.replacement());
@@ -507,8 +515,9 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt64ZeroConstant) {
TEST_F(CommonOperatorReducerTest, SelectWithInt64OneConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- Node* select = graph()->NewNode(common()->Select(kMachAnyTagged),
- Int64Constant(1), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int64Constant(1), p0, p1);
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
@@ -518,8 +527,9 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt64OneConstant) {
TEST_F(CommonOperatorReducerTest, SelectWithFalseConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- Node* select = graph()->NewNode(common()->Select(kMachAnyTagged),
- FalseConstant(), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ FalseConstant(), p0, p1);
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p1, r.replacement());
@@ -529,8 +539,8 @@ TEST_F(CommonOperatorReducerTest, SelectWithFalseConstant) {
TEST_F(CommonOperatorReducerTest, SelectWithTrueConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- Node* select = graph()->NewNode(common()->Select(kMachAnyTagged),
- TrueConstant(), p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged), TrueConstant(), p0, p1);
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
@@ -542,8 +552,8 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat32Abs) {
Node* c0 = Float32Constant(0.0);
Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
Node* select =
- graph()->NewNode(common()->Select(kMachFloat32), check, p0,
- graph()->NewNode(machine()->Float32Sub(), c0, p0));
+ graph()->NewNode(common()->Select(MachineRepresentation::kFloat32), check,
+ p0, graph()->NewNode(machine()->Float32Sub(), c0, p0));
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
@@ -555,8 +565,8 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
Node* c0 = Float64Constant(0.0);
Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
Node* select =
- graph()->NewNode(common()->Select(kMachFloat64), check, p0,
- graph()->NewNode(machine()->Float64Sub(), c0, p0));
+ graph()->NewNode(common()->Select(MachineRepresentation::kFloat64), check,
+ p0, graph()->NewNode(machine()->Float64Sub(), c0, p0));
Reduction r = Reduce(select);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
@@ -567,8 +577,8 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat32Max) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* select =
- graph()->NewNode(common()->Select(kMachFloat32), check, p1, p0);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat32), check, p1, p0);
Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Max);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
@@ -579,8 +589,8 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Max) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* select =
- graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64), check, p1, p0);
Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Max);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
@@ -591,8 +601,8 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat32Min) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* select =
- graph()->NewNode(common()->Select(kMachFloat32), check, p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat32), check, p0, p1);
Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Min);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
@@ -603,8 +613,8 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Min) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* select =
- graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64), check, p0, p1);
Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Min);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 64c5f73d27..0a55a2e2a2 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -272,16 +272,19 @@ TEST_F(CommonOperatorTest, IfValue) {
TEST_F(CommonOperatorTest, Select) {
- static const MachineType kTypes[] = {
- kMachInt8, kMachUint8, kMachInt16, kMachUint16,
- kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachFloat32, kMachFloat64, kMachAnyTagged};
- TRACED_FOREACH(MachineType, type, kTypes) {
+ static const MachineRepresentation kMachineRepresentations[] = {
+ MachineRepresentation::kBit, MachineRepresentation::kWord8,
+ MachineRepresentation::kWord16, MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64, MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64, MachineRepresentation::kTagged};
+
+
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- const Operator* const op = common()->Select(type, hint);
+ const Operator* const op = common()->Select(rep, hint);
EXPECT_EQ(IrOpcode::kSelect, op->opcode());
EXPECT_EQ(Operator::kPure, op->properties());
- EXPECT_EQ(type, SelectParametersOf(op).type());
+ EXPECT_EQ(rep, SelectParametersOf(op).representation());
EXPECT_EQ(hint, SelectParametersOf(op).hint());
EXPECT_EQ(3, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index d383bf7c43..a87f760c82 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -6,6 +6,7 @@
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/source-position.h"
#include "src/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -30,7 +31,8 @@ class ControlEquivalenceTest : public GraphTest {
graph()->SetEnd(graph()->NewNode(common()->End(1), node));
if (FLAG_trace_turbo) {
OFStream os(stdout);
- os << AsDOT(*graph());
+ SourcePositionTable table(graph());
+ os << AsJSON(*graph(), &table);
}
ControlEquivalence equivalence(zone(), graph());
equivalence.Run(node);
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index 444f5f5fee..a5a3c74be2 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -105,8 +105,8 @@ TEST_F(ControlFlowOptimizerTest, CloneBranch) {
Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
- Node* phi0 =
- graph()->NewNode(common()->Phi(kRepBit, 2), cond1, cond2, merge0);
+ Node* phi0 = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2),
+ cond1, cond2, merge0);
Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index 8284fd8775..df93f25302 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -36,11 +36,11 @@ class DeadCodeEliminationTest : public GraphTest {
namespace {
-const MachineType kMachineTypes[] = {
- kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
- kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
- kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+const MachineRepresentation kMachineRepresentations[] = {
+ MachineRepresentation::kBit, MachineRepresentation::kWord8,
+ MachineRepresentation::kWord16, MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64, MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64, MachineRepresentation::kTagged};
const int kMaxInputs = 16;
@@ -190,8 +190,8 @@ TEST_F(DeadCodeEliminationTest, MergeWithOneLiveAndOneDeadInput) {
Node* const e0 = graph()->NewNode(&kOp0, v0, graph()->start(), c0);
Node* const e1 = graph()->NewNode(&kOp0, v1, graph()->start(), c1);
Node* const merge = graph()->NewNode(common()->Merge(2), c0, c1);
- Node* const phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), v0, v1, merge);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), v0, v1, merge);
Node* const ephi = graph()->NewNode(common()->EffectPhi(2), e0, e1, merge);
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(phi, v0));
@@ -217,8 +217,8 @@ TEST_F(DeadCodeEliminationTest, MergeWithTwoLiveAndTwoDeadInputs) {
Node* const e2 = graph()->NewNode(&kOp0, v2, e1, c0);
Node* const e3 = graph()->NewNode(&kOp0, v3, graph()->start(), c3);
Node* const merge = graph()->NewNode(common()->Merge(4), c0, c1, c2, c3);
- Node* const phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 4), v0, v1, v2, v3, merge);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 4), v0, v1, v2, v3, merge);
Node* const ephi =
graph()->NewNode(common()->EffectPhi(4), e0, e1, e2, e3, merge);
StrictMock<MockAdvancedReducerEditor> editor;
@@ -227,7 +227,8 @@ TEST_F(DeadCodeEliminationTest, MergeWithTwoLiveAndTwoDeadInputs) {
Reduction const r = Reduce(&editor, merge);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsMerge(c0, c3));
- EXPECT_THAT(phi, IsPhi(kMachAnyTagged, v0, v3, r.replacement()));
+ EXPECT_THAT(phi,
+ IsPhi(MachineRepresentation::kTagged, v0, v3, r.replacement()));
EXPECT_THAT(ephi, IsEffectPhi(e0, e3, r.replacement()));
}
@@ -274,8 +275,8 @@ TEST_F(DeadCodeEliminationTest, LoopWithOneLiveAndOneDeadInput) {
Node* const e0 = graph()->NewNode(&kOp0, v0, graph()->start(), c0);
Node* const e1 = graph()->NewNode(&kOp0, v1, graph()->start(), c1);
Node* const loop = graph()->NewNode(common()->Loop(2), c0, c1);
- Node* const phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), v0, v1, loop);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), v0, v1, loop);
Node* const ephi = graph()->NewNode(common()->EffectPhi(2), e0, e1, loop);
Node* const terminate = graph()->NewNode(common()->Terminate(), ephi, loop);
StrictMock<MockAdvancedReducerEditor> editor;
@@ -303,8 +304,8 @@ TEST_F(DeadCodeEliminationTest, LoopWithTwoLiveAndTwoDeadInputs) {
Node* const e2 = graph()->NewNode(&kOp0, v2, e1, c0);
Node* const e3 = graph()->NewNode(&kOp0, v3, graph()->start(), c3);
Node* const loop = graph()->NewNode(common()->Loop(4), c0, c1, c2, c3);
- Node* const phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 4), v0, v1, v2, v3, loop);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 4), v0, v1, v2, v3, loop);
Node* const ephi =
graph()->NewNode(common()->EffectPhi(4), e0, e1, e2, e3, loop);
StrictMock<MockAdvancedReducerEditor> editor;
@@ -313,7 +314,8 @@ TEST_F(DeadCodeEliminationTest, LoopWithTwoLiveAndTwoDeadInputs) {
Reduction const r = Reduce(&editor, loop);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsLoop(c0, c3));
- EXPECT_THAT(phi, IsPhi(kMachAnyTagged, v0, v3, r.replacement()));
+ EXPECT_THAT(phi,
+ IsPhi(MachineRepresentation::kTagged, v0, v3, r.replacement()));
EXPECT_THAT(ephi, IsEffectPhi(e0, e3, r.replacement()));
}
@@ -324,14 +326,14 @@ TEST_F(DeadCodeEliminationTest, LoopWithTwoLiveAndTwoDeadInputs) {
TEST_F(DeadCodeEliminationTest, PhiWithDeadControlInput) {
Node* inputs[kMaxInputs + 1];
- TRACED_FOREACH(MachineType, type, kMachineTypes) {
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
TRACED_FORRANGE(int, input_count, 1, kMaxInputs) {
for (int i = 0; i < input_count; ++i) {
inputs[i] = Parameter(i);
}
inputs[input_count] = graph()->NewNode(common()->Dead());
Reduction const r = Reduce(graph()->NewNode(
- common()->Phi(type, input_count), input_count + 1, inputs));
+ common()->Phi(rep, input_count), input_count + 1, inputs));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsDead());
}
diff --git a/deps/v8/test/unittests/compiler/diamond-unittest.cc b/deps/v8/test/unittests/compiler/diamond-unittest.cc
index 50c50d4e69..5b28a001ef 100644
--- a/deps/v8/test/unittests/compiler/diamond-unittest.cc
+++ b/deps/v8/test/unittests/compiler/diamond-unittest.cc
@@ -114,7 +114,8 @@ TEST_F(DiamondTest, DiamondPhis) {
Node* p2 = Parameter(2);
Diamond d(graph(), common(), p0);
- MachineType types[] = {kMachAnyTagged, kMachUint32, kMachInt32};
+ MachineRepresentation types[] = {MachineRepresentation::kTagged,
+ MachineRepresentation::kWord32};
for (size_t i = 0; i < arraysize(types); i++) {
Node* phi = d.Phi(types[i], p1, p2);
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
new file mode 100644
index 0000000000..b088367a58
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -0,0 +1,396 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bit-vector.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/types-inl.h"
+#include "src/zone-containers.h"
+#include "test/unittests/compiler/graph-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class EscapeAnalysisTest : public GraphTest {
+ public:
+ EscapeAnalysisTest()
+ : simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), nullptr, nullptr, nullptr),
+ escape_analysis_(graph(), common(), zone()),
+ effect_(graph()->start()),
+ control_(graph()->start()) {}
+
+ ~EscapeAnalysisTest() {}
+
+ EscapeAnalysis* escape_analysis() { return &escape_analysis_; }
+
+ protected:
+ void Analysis() { escape_analysis_.Run(); }
+
+ void Transformation() {
+ GraphReducer graph_reducer(zone(), graph());
+ EscapeAnalysisReducer escape_reducer(&graph_reducer, &jsgraph_,
+ &escape_analysis_, zone());
+ graph_reducer.AddReducer(&escape_reducer);
+ graph_reducer.ReduceGraph();
+ }
+
+ // ---------------------------------Node Creation Helper----------------------
+
+ Node* BeginRegion(Node* effect = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+
+ return effect_ = graph()->NewNode(common()->BeginRegion(), effect);
+ }
+
+ Node* FinishRegion(Node* value, Node* effect = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ return effect_ = graph()->NewNode(common()->FinishRegion(), value, effect);
+ }
+
+ Node* Allocate(Node* size, Node* effect = nullptr, Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return effect_ = graph()->NewNode(simplified()->Allocate(), size, effect,
+ control);
+ }
+
+ Node* Constant(int num) {
+ return graph()->NewNode(common()->NumberConstant(num));
+ }
+
+ Node* Store(const FieldAccess& access, Node* allocation, Node* value,
+ Node* effect = nullptr, Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return effect_ = graph()->NewNode(simplified()->StoreField(access),
+ allocation, value, effect, control);
+ }
+
+ Node* Load(const FieldAccess& access, Node* from, Node* effect = nullptr,
+ Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return graph()->NewNode(simplified()->LoadField(access), from, effect,
+ control);
+ }
+
+ Node* Return(Node* value, Node* effect = nullptr, Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return control_ =
+ graph()->NewNode(common()->Return(), value, effect, control);
+ }
+
+ void EndGraph() {
+ for (Edge edge : graph()->end()->input_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control_);
+ }
+ }
+ }
+
+ Node* Branch() {
+ return control_ =
+ graph()->NewNode(common()->Branch(), Constant(0), control_);
+ }
+
+ Node* IfTrue() {
+ return control_ = graph()->NewNode(common()->IfTrue(), control_);
+ }
+
+ Node* IfFalse() { return graph()->NewNode(common()->IfFalse(), control_); }
+
+ Node* Merge2(Node* control1, Node* control2) {
+ return control_ = graph()->NewNode(common()->Merge(2), control1, control2);
+ }
+
+ FieldAccess AccessAtIndex(int offset) {
+ FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+ }
+
+ // ---------------------------------Assertion Helper--------------------------
+
+ void ExpectReplacement(Node* node, Node* rep) {
+ EXPECT_EQ(rep, escape_analysis()->GetReplacement(node));
+ }
+
+ void ExpectReplacementPhi(Node* node, Node* left, Node* right) {
+ Node* rep = escape_analysis()->GetReplacement(node);
+ ASSERT_NE(nullptr, rep);
+ ASSERT_EQ(IrOpcode::kPhi, rep->opcode());
+ EXPECT_EQ(left, NodeProperties::GetValueInput(rep, 0));
+ EXPECT_EQ(right, NodeProperties::GetValueInput(rep, 1));
+ }
+
+ void ExpectVirtual(Node* node) {
+ EXPECT_TRUE(node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion);
+ EXPECT_TRUE(escape_analysis()->IsVirtual(node));
+ }
+
+ void ExpectEscaped(Node* node) {
+ EXPECT_TRUE(node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion);
+ EXPECT_TRUE(escape_analysis()->IsEscaped(node));
+ }
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ Node* effect() { return effect_; }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
+ EscapeAnalysis escape_analysis_;
+
+ Node* effect_;
+ Node* control_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Test cases.
+
+
+TEST_F(EscapeAnalysisTest, StraightNonEscape) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* load = Load(AccessAtIndex(0), finish);
+ Node* result = Return(load);
+ EndGraph();
+
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, StraightEscape) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* load = Load(AccessAtIndex(0), finish);
+ Node* result = Return(allocation);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), load);
+
+ Analysis();
+
+ ExpectEscaped(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, StoreLoadEscape) {
+ Node* object1 = Constant(1);
+
+ BeginRegion();
+ Node* allocation1 = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation1, object1);
+ Node* finish1 = FinishRegion(allocation1);
+
+ BeginRegion();
+ Node* allocation2 = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation2, finish1);
+ Node* finish2 = FinishRegion(allocation2);
+
+ Node* load = Load(AccessAtIndex(0), finish2);
+ Node* result = Return(load);
+ EndGraph();
+ Analysis();
+
+ ExpectEscaped(allocation1);
+ ExpectVirtual(allocation2);
+ ExpectReplacement(load, finish1);
+
+ Transformation();
+
+ ASSERT_EQ(finish1, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, BranchNonEscape) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* ifTrue = IfTrue();
+ Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish, ifFalse);
+ Node* effect2 = Store(AccessAtIndex(0), allocation, object2, finish, ifTrue);
+ Node* merge = Merge2(ifFalse, ifTrue);
+ Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, effect2, merge);
+ Node* load = Load(AccessAtIndex(0), finish, phi, merge);
+ Node* result = Return(load, phi);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), result);
+
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacementPhi(load, object1, object2);
+ Node* replacement_phi = escape_analysis()->GetReplacement(load);
+
+ Transformation();
+
+ ASSERT_EQ(replacement_phi, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, DanglingLoadOrder) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Node* store1 = Store(AccessAtIndex(0), allocation, object1);
+ Node* load1 = Load(AccessAtIndex(0), allocation);
+ Node* store2 = Store(AccessAtIndex(0), allocation, object2);
+ Node* load2 = Load(AccessAtIndex(0), allocation, store1);
+ Node* result = Return(load2);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), store2);
+ graph()->end()->AppendInput(zone(), load1);
+
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load1, object1);
+ ExpectReplacement(load2, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, DeoptReplacement) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
+ Node* state_values2 = graph()->NewNode(common()->StateValues(0));
+ Node* state_values3 = graph()->NewNode(common()->StateValues(0));
+ Node* frame_state = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ nullptr),
+ state_values1, state_values2, state_values3, UndefinedConstant(),
+ graph()->start(), graph()->start());
+ Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect1, ifFalse);
+ Node* ifTrue = IfTrue();
+ Node* load = Load(AccessAtIndex(0), finish, effect1, ifTrue);
+ Node* result = Return(load, effect1, ifTrue);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), deopt);
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+ Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
+ ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
+ ASSERT_EQ(1, object_state->op()->ValueInputCount());
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize * 2));
+ Store(AccessAtIndex(0), allocation, object1);
+ Store(AccessAtIndex(kPointerSize), allocation, allocation);
+ Node* finish = FinishRegion(allocation);
+ Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
+ Node* state_values2 = graph()->NewNode(common()->StateValues(1), finish);
+ Node* state_values3 = graph()->NewNode(common()->StateValues(0));
+ Node* frame_state = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ nullptr),
+ state_values1, state_values2, state_values3, UndefinedConstant(),
+ graph()->start(), graph()->start());
+ Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect1, ifFalse);
+ Node* ifTrue = IfTrue();
+ Node* load = Load(AccessAtIndex(0), finish, effect1, ifTrue);
+ Node* result = Return(load, effect1, ifTrue);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), deopt);
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+
+ Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
+ ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
+ ASSERT_EQ(2, object_state->op()->ValueInputCount());
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
+ ASSERT_EQ(object_state, NodeProperties::GetValueInput(object_state, 1));
+
+ Node* object_state2 = NodeProperties::GetValueInput(state_values1, 0);
+ ASSERT_EQ(object_state, object_state2);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 1e07d7a41b..5280f69aa0 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -19,7 +19,8 @@ const int32_t kImmediates[] = {kMinInt, -42, -1, 0, 1, 2,
TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -30,7 +31,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -43,7 +44,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
}
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -60,7 +61,8 @@ TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -71,7 +73,7 @@ TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -87,7 +89,7 @@ TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -98,7 +100,7 @@ TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -113,7 +115,8 @@ TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* add = m.Int32Add(param1, param2);
@@ -131,7 +134,8 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* mul = m.Int32Mul(param1, param2);
@@ -151,7 +155,7 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachUint32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -178,14 +182,14 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kIA32Movsxbl, kIA32Movb},
- {kMachUint8, kIA32Movzxbl, kIA32Movb},
- {kMachInt16, kIA32Movsxwl, kIA32Movw},
- {kMachUint16, kIA32Movzxwl, kIA32Movw},
- {kMachInt32, kIA32Movl, kIA32Movl},
- {kMachUint32, kIA32Movl, kIA32Movl},
- {kMachFloat32, kIA32Movss, kIA32Movss},
- {kMachFloat64, kIA32Movsd, kIA32Movsd}};
+ {MachineType::Int8(), kIA32Movsxbl, kIA32Movb},
+ {MachineType::Uint8(), kIA32Movzxbl, kIA32Movb},
+ {MachineType::Int16(), kIA32Movsxwl, kIA32Movw},
+ {MachineType::Uint16(), kIA32Movzxwl, kIA32Movw},
+ {MachineType::Int32(), kIA32Movl, kIA32Movl},
+ {MachineType::Uint32(), kIA32Movl, kIA32Movl},
+ {MachineType::Float32(), kIA32Movss, kIA32Movss},
+ {MachineType::Float64(), kIA32Movsd, kIA32Movsd}};
} // namespace
@@ -196,7 +200,8 @@ typedef InstructionSelectorTestWithParam<MemoryAccess>
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -209,7 +214,7 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -229,7 +234,7 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -248,9 +253,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -263,9 +269,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Int32Constant(base), m.Parameter(0),
+ m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -285,9 +292,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -320,8 +328,9 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
void Run(Node* base, Node* load_index, Node* store_index,
AddressingMode mode) {
- Node* load = m->Load(kMachInt32, base, load_index);
- m->Store(kMachInt32, base, store_index, load, kNoWriteBarrier);
+ Node* load = m->Load(MachineType::Int32(), base, load_index);
+ m->Store(MachineRepresentation::kWord32, base, store_index, load,
+ kNoWriteBarrier);
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
@@ -339,7 +348,8 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
void Reset() {
delete m;
- m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
+ m = new StreamBuilder(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
zero = m->Int32Constant(0);
null_ptr = m->Int32Constant(0);
non_zero = m->Int32Constant(127);
@@ -565,7 +575,7 @@ static AddressingMode AddressingModeForAddMult(int32_t imm,
TEST_P(InstructionSelectorMultTest, Mult32) {
const MultParam m_param = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* param = m.Parameter(0);
Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
m.Return(mult);
@@ -586,7 +596,7 @@ TEST_P(InstructionSelectorMultTest, Mult32) {
TEST_P(InstructionSelectorMultTest, MultAdd32) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
const MultParam m_param = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* param = m.Parameter(0);
Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
m.Int32Constant(imm));
@@ -618,7 +628,8 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
TEST_F(InstructionSelectorTest, Int32MulHigh) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -643,7 +654,7 @@ TEST_F(InstructionSelectorTest, Int32MulHigh) {
TEST_F(InstructionSelectorTest, Float32Abs) {
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -658,7 +669,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -676,7 +687,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
TEST_F(InstructionSelectorTest, Float64Abs) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -691,7 +702,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -709,7 +720,8 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
@@ -723,7 +735,8 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
@@ -741,7 +754,7 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
@@ -755,7 +768,7 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
@@ -773,7 +786,7 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -787,7 +800,7 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -808,9 +821,9 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
TEST_F(InstructionSelectorTest, Uint32LessThanWithLoadAndLoadStackPointer) {
- StreamBuilder m(this, kMachBool);
+ StreamBuilder m(this, MachineType::Bool());
Node* const sl = m.Load(
- kMachPtr,
+ MachineType::Pointer(),
m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
Node* const sp = m.LoadStackPointer();
Node* const n = m.Uint32LessThan(sl, sp);
@@ -827,7 +840,7 @@ TEST_F(InstructionSelectorTest, Uint32LessThanWithLoadAndLoadStackPointer) {
TEST_F(InstructionSelectorTest, Word32Clz) {
- StreamBuilder m(this, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32Clz(p0);
m.Return(n);
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 8b182a76ba..89c0a654e9 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -14,12 +14,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-typedef RawMachineAssembler::Label MLabel;
-
-} // namespace
-
InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
@@ -164,7 +158,7 @@ InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
const float kValue = 4.2f;
- StreamBuilder m(this, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32());
m.Return(m.Float32Constant(kValue));
Stream s = m.Build(kAllInstructions);
ASSERT_EQ(3U, s.size());
@@ -177,7 +171,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Parameter(0));
Stream s = m.Build(kAllInstructions);
ASSERT_EQ(3U, s.size());
@@ -189,7 +183,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
- StreamBuilder m(this, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32());
m.Return(m.Int32Constant(0));
Stream s = m.Build(kAllInstructions);
ASSERT_EQ(3U, s.size());
@@ -207,7 +201,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
- StreamBuilder m(this, kMachInt32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
m.Return(
m.TruncateFloat64ToInt32(TruncationMode::kJavaScript, m.Parameter(0)));
Stream s = m.Build(kAllInstructions);
@@ -225,7 +219,7 @@ TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* param = m.Parameter(0);
m.Return(param);
Stream s = m.Build(kAllInstructions);
@@ -234,7 +228,7 @@ TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
Node* param = m.Parameter(0);
m.Return(param);
Stream s = m.Build(kAllInstructions);
@@ -247,7 +241,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
Node* param = m.Parameter(0);
Node* finish =
m.AddNode(m.common()->FinishRegion(), param, m.graph()->start());
@@ -283,14 +277,14 @@ TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
StreamBuilder m(this, type, type, type);
Node* param0 = m.Parameter(0);
Node* param1 = m.Parameter(1);
- MLabel a, b, c;
+ RawMachineLabel a, b, c;
m.Branch(m.Int32Constant(0), &a, &b);
m.Bind(&a);
m.Goto(&c);
m.Bind(&b);
m.Goto(&c);
m.Bind(&c);
- Node* phi = m.Phi(type, param0, param1);
+ Node* phi = m.Phi(type.representation(), param0, param1);
m.Return(phi);
Stream s = m.Build(kAllInstructions);
EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param0));
@@ -303,14 +297,14 @@ TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
StreamBuilder m(this, type, type, type);
Node* param0 = m.Parameter(0);
Node* param1 = m.Parameter(1);
- MLabel a, b, c;
+ RawMachineLabel a, b, c;
m.Branch(m.Int32Constant(1), &a, &b);
m.Bind(&a);
m.Goto(&c);
m.Bind(&b);
m.Goto(&c);
m.Bind(&c);
- Node* phi = m.Phi(type, param0, param1);
+ Node* phi = m.Phi(type.representation(), param0, param1);
m.Return(phi);
Stream s = m.Build(kAllInstructions);
EXPECT_EQ(s.IsReference(phi), s.IsReference(param0));
@@ -318,11 +312,14 @@ TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
- ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
- kMachInt16, kMachUint16, kMachInt32,
- kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged));
+INSTANTIATE_TEST_CASE_P(
+ InstructionSelectorTest, InstructionSelectorPhiTest,
+ ::testing::Values(MachineType::Float64(), MachineType::Int8(),
+ MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(),
+ MachineType::Uint32(), MachineType::Int64(),
+ MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::AnyTagged()));
// -----------------------------------------------------------------------------
@@ -330,15 +327,15 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
- StreamBuilder m1(this, kMachInt32, kMachPtr);
+ StreamBuilder m1(this, MachineType::Int32(), MachineType::Pointer());
Node* p1 = m1.Parameter(0);
- m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
+ m1.Return(m1.Load(MachineType::Int32(), p1, m1.Int32Constant(0)));
Stream s1 = m1.Build(kAllInstructions);
- StreamBuilder m2(this, kMachInt32, kMachPtr);
+ StreamBuilder m2(this, MachineType::Int32(), MachineType::Pointer());
Node* p2 = m2.Parameter(0);
- m2.Return(
- m2.AddNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
- m2.AddNode(m2.common()->BeginRegion(), m2.graph()->start())));
+ m2.Return(m2.AddNode(
+ m2.machine()->Load(MachineType::Int32()), p2, m2.Int32Constant(0),
+ m2.AddNode(m2.common()->BeginRegion(), m2.graph()->start())));
Stream s2 = m2.Build(kAllInstructions);
EXPECT_LE(3U, s1.size());
ASSERT_EQ(s1.size(), s2.size());
@@ -357,8 +354,8 @@ TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged());
BailoutId bailout_id(42);
@@ -366,7 +363,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
Node* receiver = m.Parameter(1);
Node* context = m.Parameter(2);
- ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
+ ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
ZoneVector<MachineType> empty_types(zone());
CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
@@ -385,7 +382,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
m.UndefinedConstant());
// Build the call.
- Node* args[] = {receiver, m.Int32Constant(1), context};
+ Node* args[] = {receiver, m.UndefinedConstant(), m.Int32Constant(1), context};
Node* call =
m.CallNWithFrameState(descriptor, function_node, args, state_node);
m.Return(call);
@@ -408,8 +405,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged());
BailoutId bailout_id_before(42);
@@ -418,9 +415,9 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
Node* receiver = m.Parameter(1);
Node* context = m.Int32Constant(1); // Context is ignored.
- ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
- ZoneVector<MachineType> float64_type(1, kMachFloat64, zone());
- ZoneVector<MachineType> tagged_type(1, kMachAnyTagged, zone());
+ ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
+ ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
+ ZoneVector<MachineType> tagged_type(1, MachineType::AnyTagged(), zone());
Callable callable = CodeFactory::ToObject(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
@@ -486,13 +483,15 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
// We inserted 0 here.
EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined());
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(0)); // function is always
- // tagged/any.
- EXPECT_EQ(kMachInt32, desc_before->GetType(1));
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(2)); // context is always
- // tagged/any.
- EXPECT_EQ(kMachFloat64, desc_before->GetType(3));
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(4));
+ EXPECT_EQ(MachineType::AnyTagged(),
+ desc_before->GetType(0)); // function is always
+ // tagged/any.
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(1));
+ EXPECT_EQ(MachineType::AnyTagged(),
+ desc_before->GetType(2)); // context is always
+ // tagged/any.
+ EXPECT_EQ(MachineType::Float64(), desc_before->GetType(3));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(4));
// Function.
EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(7)));
@@ -506,8 +505,8 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged());
BailoutId bailout_id_before(42);
BailoutId bailout_id_parent(62);
@@ -518,9 +517,9 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
Node* context = m.Int32Constant(66);
Node* context2 = m.Int32Constant(46);
- ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
- ZoneVector<MachineType> int32x2_type(2, kMachInt32, zone());
- ZoneVector<MachineType> float64_type(1, kMachFloat64, zone());
+ ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
+ ZoneVector<MachineType> int32x2_type(2, MachineType::Int32(), zone());
+ ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
Callable callable = CodeFactory::ToObject(isolate());
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
@@ -592,31 +591,31 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
EXPECT_EQ(1u, desc_before_outer->locals_count());
EXPECT_EQ(1u, desc_before_outer->stack_count());
// Values from parent environment.
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(0));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(0));
EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(3)));
- EXPECT_EQ(kMachInt32, desc_before_outer->GetType(1));
+ EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(1));
// Context:
EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(4)));
- EXPECT_EQ(kMachAnyTagged, desc_before_outer->GetType(2));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before_outer->GetType(2));
EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(5)));
- EXPECT_EQ(kMachInt32, desc_before_outer->GetType(3));
+ EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(3));
EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(6)));
- EXPECT_EQ(kMachInt32, desc_before_outer->GetType(4));
+ EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(4));
// Values from the nested frame.
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
EXPECT_EQ(2u, desc_before->stack_count());
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(0));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(0));
EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(8)));
- EXPECT_EQ(kMachInt32, desc_before->GetType(1));
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(1));
EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(9)));
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(2));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(2));
EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(10)));
- EXPECT_EQ(kMachFloat64, desc_before->GetType(3));
+ EXPECT_EQ(MachineType::Float64(), desc_before->GetType(3));
EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(11)));
- EXPECT_EQ(kMachInt32, desc_before->GetType(4));
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(4));
EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(12)));
- EXPECT_EQ(kMachInt32, desc_before->GetType(5));
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(5));
// Function.
EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13)));
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index 574864edf5..fc7c144939 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -37,17 +37,19 @@ class InstructionSelectorTest : public TestWithContext,
class StreamBuilder final : public RawMachineAssembler {
public:
StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
- : RawMachineAssembler(
- test->isolate(), new (test->zone()) Graph(test->zone()),
- MakeCallDescriptor(test->zone(), return_type), kMachPtr,
- MachineOperatorBuilder::kAllOptionalOps),
+ : RawMachineAssembler(test->isolate(),
+ new (test->zone()) Graph(test->zone()),
+ MakeCallDescriptor(test->zone(), return_type),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type)
: RawMachineAssembler(
test->isolate(), new (test->zone()) Graph(test->zone()),
MakeCallDescriptor(test->zone(), return_type, parameter0_type),
- kMachPtr, MachineOperatorBuilder::kAllOptionalOps),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type)
@@ -55,7 +57,8 @@ class InstructionSelectorTest : public TestWithContext,
test->isolate(), new (test->zone()) Graph(test->zone()),
MakeCallDescriptor(test->zone(), return_type, parameter0_type,
parameter1_type),
- kMachPtr, MachineOperatorBuilder::kAllOptionalOps),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type,
@@ -64,7 +67,8 @@ class InstructionSelectorTest : public TestWithContext,
test->isolate(), new (test->zone()) Graph(test->zone()),
MakeCallDescriptor(test->zone(), return_type, parameter0_type,
parameter1_type, parameter2_type),
- kMachPtr, MachineOperatorBuilder::kAllOptionalOps),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
Stream Build(CpuFeature feature) {
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
index a7712880f7..f57ca05b3f 100644
--- a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
@@ -69,8 +69,7 @@ Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
- return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher,
- graph()->start(), graph()->start());
+ return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
@@ -79,16 +78,7 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher) {
return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
- value_matcher, graph()->start(),
- graph()->start());
-}
-
-
-template <class... A>
-Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsCall(
- const Matcher<const CallDescriptor*>& descriptor_matcher, A... args) {
- return ::i::compiler::IsCall(descriptor_matcher, args..., graph()->start(),
- graph()->start());
+ value_matcher, _, _);
}
@@ -96,7 +86,8 @@ Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
int offset) {
return IsLoad(
- kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
}
@@ -105,7 +96,8 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsBytecodeOperandSignExtended(int offset) {
Matcher<Node*> load_matcher = IsLoad(
- kMachInt8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ MachineType::Int8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
if (kPointerSize == 8) {
@@ -120,16 +112,19 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
- kMachUint16, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ MachineType::Uint16(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
} else {
Matcher<Node*> first_byte = IsLoad(
- kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset)));
Matcher<Node*> second_byte = IsLoad(
- kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
@@ -145,10 +140,44 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
}
-Graph*
-InterpreterAssemblerTest::InterpreterAssemblerForTest::GetCompletedGraph() {
- End();
- return graph();
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ IsBytecodeOperandShortSignExtended(int offset) {
+ Matcher<Node*> load_matcher;
+ if (TargetSupportsUnalignedAccess()) {
+ load_matcher = IsLoad(
+ MachineType::Int16(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ int hi_byte_offset = offset + 1;
+ int lo_byte_offset = offset;
+
+#elif V8_TARGET_BIG_ENDIAN
+ int hi_byte_offset = offset;
+ int lo_byte_offset = offset + 1;
+#else
+#error "Unknown Architecture"
+#endif
+ Matcher<Node*> hi_byte = IsLoad(
+ MachineType::Int8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(hi_byte_offset)));
+ hi_byte = IsWord32Shl(hi_byte, IsInt32Constant(kBitsPerByte));
+ Matcher<Node*> lo_byte = IsLoad(
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(lo_byte_offset)));
+ load_matcher = IsWord32Or(hi_byte, lo_byte);
+ }
+
+ if (kPointerSize == 8) {
+ load_matcher = IsChangeInt32ToInt64(load_matcher);
+ }
+ return load_matcher;
}
@@ -156,7 +185,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Dispatch();
- Graph* graph = m.GetCompletedGraph();
+ Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
@@ -165,13 +194,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
Matcher<Node*> next_bytecode_offset_matcher =
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
- Matcher<Node*> target_bytecode_matcher = m.IsLoad(
- kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- next_bytecode_offset_matcher);
- Matcher<Node*> code_target_matcher = m.IsLoad(
- kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
@@ -183,8 +214,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter),
- graph->start(), graph->start()));
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
}
@@ -195,7 +225,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Jump(m.Int32Constant(jump_offset));
- Graph* graph = m.GetCompletedGraph();
+ Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
@@ -203,13 +233,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
Matcher<Node*> next_bytecode_offset_matcher =
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(jump_offset));
- Matcher<Node*> target_bytecode_matcher = m.IsLoad(
- kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- next_bytecode_offset_matcher);
- Matcher<Node*> code_target_matcher = m.IsLoad(
- kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
@@ -221,8 +253,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter),
- graph->start(), graph->start()));
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
}
}
@@ -238,7 +269,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
Node* lhs = m.IntPtrConstant(0);
Node* rhs = m.IntPtrConstant(1);
m.JumpIfWordEqual(lhs, rhs, m.Int32Constant(kJumpIfTrueOffset));
- Graph* graph = m.GetCompletedGraph();
+ Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(2, end->InputCount());
@@ -248,13 +279,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
Matcher<Node*> next_bytecode_offset_matcher =
IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsInt32Constant(jump_offsets[i]));
- Matcher<Node*> target_bytecode_matcher = m.IsLoad(
- kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- next_bytecode_offset_matcher);
- Matcher<Node*> code_target_matcher = m.IsLoad(
- kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
EXPECT_THAT(
end->InputAt(i),
IsTailCall(m.call_descriptor(), code_target_matcher,
@@ -263,8 +296,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter),
- graph->start(), graph->start()));
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
// TODO(oth): test control flow paths.
@@ -276,7 +308,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Return) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
m.Return();
- Graph* graph = m.GetCompletedGraph();
+ Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
@@ -294,8 +326,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Return) {
IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter),
- graph->start(), graph->start()));
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
}
@@ -319,13 +350,22 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
break;
case interpreter::OperandType::kMaybeReg8:
case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kRegPair8:
EXPECT_THAT(m.BytecodeOperandReg(i),
m.IsBytecodeOperandSignExtended(offset));
break;
+ case interpreter::OperandType::kCount16:
+ EXPECT_THAT(m.BytecodeOperandCount(i),
+ m.IsBytecodeOperandShort(offset));
+ break;
case interpreter::OperandType::kIdx16:
EXPECT_THAT(m.BytecodeOperandIdx(i),
m.IsBytecodeOperandShort(offset));
break;
+ case interpreter::OperandType::kReg16:
+ EXPECT_THAT(m.BytecodeOperandReg(i),
+ m.IsBytecodeOperandShortSignExtended(offset));
+ break;
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -352,7 +392,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
// Should be passed to next bytecode handler on dispatch.
m.Dispatch();
- Graph* graph = m.GetCompletedGraph();
+ Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
@@ -360,7 +400,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
EXPECT_THAT(tail_call_node,
IsTailCall(m.call_descriptor(), _, accumulator_value_2, _, _, _,
- _, graph->start(), graph->start()));
+ _, _, _));
}
}
@@ -386,7 +426,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
Node* load_reg_node = m.LoadRegister(reg_index_node);
EXPECT_THAT(
load_reg_node,
- m.IsLoad(kMachAnyTagged,
+ m.IsLoad(MachineType::AnyTagged(),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
@@ -401,7 +441,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
- m.IsStore(StoreRepresentation(kMachAnyTagged, kNoWriteBarrier),
+ m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
store_value));
@@ -459,12 +500,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* index = m.Int32Constant(2);
Node* load_constant = m.LoadConstantPoolEntry(index);
Matcher<Node*> constant_pool_matcher = m.IsLoad(
- kMachAnyTagged,
+ MachineType::AnyTagged(),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
EXPECT_THAT(
load_constant,
- m.IsLoad(kMachAnyTagged, constant_pool_matcher,
+ m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
IsIntPtrAdd(
IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
IsWordShl(index, IsInt32Constant(kPointerSizeLog2)))));
@@ -480,7 +521,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
Node* load_element = m.LoadFixedArrayElement(fixed_array, index);
EXPECT_THAT(
load_element,
- m.IsLoad(kMachAnyTagged, fixed_array,
+ m.IsLoad(MachineType::AnyTagged(), fixed_array,
IsIntPtrAdd(
IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
IsWordShl(IsInt32Constant(index),
@@ -496,7 +537,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
int offset = 16;
Node* load_field = m.LoadObjectField(object, offset);
EXPECT_THAT(load_field,
- m.IsLoad(kMachAnyTagged, object,
+ m.IsLoad(MachineType::AnyTagged(), object,
IsIntPtrConstant(offset - kHeapObjectTag)));
}
}
@@ -512,7 +553,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
Matcher<Node*> offset =
IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
- EXPECT_THAT(load_context_slot, m.IsLoad(kMachAnyTagged, context, offset));
+ EXPECT_THAT(load_context_slot,
+ m.IsLoad(MachineType::AnyTagged(), context, offset));
}
}
@@ -528,10 +570,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
Matcher<Node*> offset =
IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
- EXPECT_THAT(
- store_context_slot,
- m.IsStore(StoreRepresentation(kMachAnyTagged, kFullWriteBarrier),
- context, offset, value));
+ EXPECT_THAT(store_context_slot,
+ m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier),
+ context, offset, value));
}
}
@@ -542,36 +584,42 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
Node* arg1 = m.Int32Constant(2);
Node* arg2 = m.Int32Constant(3);
Node* call_runtime = m.CallRuntime(Runtime::kAdd, arg1, arg2);
- EXPECT_THAT(call_runtime,
- m.IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
- IsParameter(Linkage::kInterpreterContextParameter)));
+ EXPECT_THAT(
+ call_runtime,
+ IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
}
TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
+ const int kResultSizes[] = {1, 2};
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- Callable builtin = CodeFactory::InterpreterCEntry(isolate());
-
- Node* function_id = m.Int32Constant(0);
- Node* first_arg = m.Int32Constant(1);
- Node* arg_count = m.Int32Constant(2);
-
- Matcher<Node*> function_table = IsExternalConstant(
- ExternalReference::runtime_function_table_address(isolate()));
- Matcher<Node*> function = IsIntPtrAdd(
- function_table,
- IsInt32Mul(function_id, IsInt32Constant(sizeof(Runtime::Function))));
- Matcher<Node*> function_entry =
- m.IsLoad(kMachPtr, function,
- IsInt32Constant(offsetof(Runtime::Function, entry)));
-
- Node* call_runtime = m.CallRuntime(function_id, first_arg, arg_count);
- EXPECT_THAT(call_runtime,
- m.IsCall(_, IsHeapConstant(builtin.code()), arg_count,
- first_arg, function_entry,
- IsParameter(Linkage::kInterpreterContextParameter)));
+ TRACED_FOREACH(int, result_size, kResultSizes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Callable builtin = CodeFactory::InterpreterCEntry(isolate(), result_size);
+
+ Node* function_id = m.Int32Constant(0);
+ Node* first_arg = m.Int32Constant(1);
+ Node* arg_count = m.Int32Constant(2);
+
+ Matcher<Node*> function_table = IsExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate()));
+ Matcher<Node*> function = IsIntPtrAdd(
+ function_table,
+ IsInt32Mul(function_id, IsInt32Constant(sizeof(Runtime::Function))));
+ Matcher<Node*> function_entry =
+ m.IsLoad(MachineType::Pointer(), function,
+ IsInt32Constant(offsetof(Runtime::Function, entry)));
+
+ Node* call_runtime =
+ m.CallRuntime(function_id, first_arg, arg_count, result_size);
+ EXPECT_THAT(
+ call_runtime,
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
+ function_entry,
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
}
}
@@ -586,9 +634,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallIC) {
Node* arg3 = m.Int32Constant(4);
Node* arg4 = m.Int32Constant(5);
Node* call_ic = m.CallIC(descriptor, target, arg1, arg2, arg3, arg4);
- EXPECT_THAT(call_ic,
- m.IsCall(_, target, arg1, arg2, arg3, arg4,
- IsParameter(Linkage::kInterpreterContextParameter)));
+ EXPECT_THAT(
+ call_ic,
+ IsCall(_, target, arg1, arg2, arg3, arg4,
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
}
}
@@ -603,8 +652,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
Node* call_js = m.CallJS(function, first_arg, arg_count);
EXPECT_THAT(
call_js,
- m.IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
- function, IsParameter(Linkage::kInterpreterContextParameter)));
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
+ function, IsParameter(Linkage::kInterpreterContextParameter), _,
+ _));
}
}
@@ -614,18 +664,19 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
InterpreterAssemblerForTest m(this, bytecode);
Node* feedback_vector = m.LoadTypeFeedbackVector();
- Matcher<Node*> load_function_matcher = m.IsLoad(
- kMachAnyTagged, IsParameter(Linkage::kInterpreterRegisterFileParameter),
- IsIntPtrConstant(
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Matcher<Node*> load_function_matcher =
+ m.IsLoad(MachineType::AnyTagged(),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsIntPtrConstant(
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
Matcher<Node*> load_shared_function_info_matcher =
- m.IsLoad(kMachAnyTagged, load_function_matcher,
+ m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
kHeapObjectTag));
EXPECT_THAT(
feedback_vector,
- m.IsLoad(kMachAnyTagged, load_shared_function_info_matcher,
+ m.IsLoad(MachineType::AnyTagged(), load_shared_function_info_matcher,
IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
kHeapObjectTag)));
}
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
index 49c1c2ad29..15fa38b1be 100644
--- a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
@@ -29,8 +29,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
: InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
~InterpreterAssemblerForTest() override {}
- Graph* GetCompletedGraph();
-
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher);
@@ -38,14 +36,11 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher);
- template <class... A>
- Matcher<Node*> IsCall(
- const Matcher<const CallDescriptor*>& descriptor_matcher,
- A... args);
Matcher<Node*> IsBytecodeOperand(int offset);
Matcher<Node*> IsBytecodeOperandSignExtended(int offset);
Matcher<Node*> IsBytecodeOperandShort(int offset);
+ Matcher<Node*> IsBytecodeOperandShortSignExtended(int offset);
using InterpreterAssembler::call_descriptor;
using InterpreterAssembler::graph;
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index ae367aa395..78e9253a17 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -26,7 +26,8 @@ class JSBuiltinReducerTest : public TypedGraphTest {
protected:
Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::Flag::kNoFlags) {
- MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
SimplifiedOperatorBuilder simplified(zone());
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
@@ -142,7 +143,8 @@ TEST_F(JSBuiltinReducerTest, MathMax2) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsSelect(kMachNone, IsNumberLessThan(p1, p0), p0, p1));
+ IsSelect(MachineRepresentation::kNone,
+ IsNumberLessThan(p1, p0), p0, p1));
}
}
}
diff --git a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
index 0fcf9f7087..a44bd0278d 100644
--- a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
@@ -19,7 +19,8 @@ class JSContextRelaxationTest : public GraphTest {
protected:
Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kNoFlags) {
- MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
@@ -173,13 +174,10 @@ TEST_F(JSContextRelaxationTest,
Node* const context = Parameter(2);
Node* const outer_context = Parameter(3);
const Operator* op = javascript()->CreateWithContext();
- Node* const frame_state_1 =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Node* nested_context =
- graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
- frame_state_1, effect, control);
+ Node* nested_context = graph()->NewNode(
+ op, graph()->start(), graph()->start(), outer_context, effect, control);
Node* const frame_state_2 =
ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* node = graph()->NewNode(
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index a22f660c21..f38f8eaac7 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-operator.h"
+#include "src/types-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -23,15 +24,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-class JSIntrinsicLoweringTest : public GraphTest {
+class JSIntrinsicLoweringTest : public TypedGraphTest {
public:
- JSIntrinsicLoweringTest() : GraphTest(3), javascript_(zone()) {}
+ JSIntrinsicLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
~JSIntrinsicLoweringTest() override {}
protected:
Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kNoFlags) {
- MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
SimplifiedOperatorBuilder simplified(zone());
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
@@ -147,7 +149,7 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
EXPECT_THAT(
phi,
IsPhi(
- static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ MachineRepresentation::kTagged, IsFalseConstant(),
IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
@@ -178,7 +180,7 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsDate) {
EXPECT_THAT(
phi,
IsPhi(
- static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ MachineRepresentation::kTagged, IsFalseConstant(),
IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
@@ -209,7 +211,7 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
EXPECT_THAT(
phi,
IsPhi(
- static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ MachineRepresentation::kTagged, IsFalseConstant(),
IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
@@ -226,8 +228,8 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
TEST_F(JSIntrinsicLoweringTest, InlineIsFunction) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
+ Node* const input = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
Reduction const r = Reduce(
@@ -240,12 +242,13 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsFunction) {
EXPECT_THAT(
phi,
IsPhi(
- static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input,
- effect, CaptureEq(&if_false)),
- effect, _),
- IsInt32Constant(JS_FUNCTION_TYPE)),
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsUint32LessThanOrEqual(
+ IsInt32Constant(FIRST_FUNCTION_TYPE),
+ IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input, effect,
+ CaptureEq(&if_false)),
+ effect, _)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -271,7 +274,7 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
EXPECT_THAT(
phi,
IsPhi(
- static_cast<MachineType>(kTypeBool | kRepTagged), IsFalseConstant(),
+ MachineRepresentation::kTagged, IsFalseConstant(),
IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
@@ -284,6 +287,64 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
// -----------------------------------------------------------------------------
+// %_IsJSReceiver
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithAny) {
+ Node* const input = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node *> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsUint32LessThanOrEqual(
+ IsInt32Constant(FIRST_JS_RECEIVER_TYPE),
+ IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input, effect,
+ CaptureEq(&if_false)),
+ effect, _)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithReceiver) {
+ Node* const input = Parameter(Type::Receiver());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithUndefined) {
+ Node* const input = Parameter(Type::Undefined());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFalseConstant());
+}
+
+
+// -----------------------------------------------------------------------------
// %_JSValueGetValue
@@ -337,24 +398,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineMathSqrt) {
// -----------------------------------------------------------------------------
-// %_StringGetLength
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineStringGetLength) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineStringGetLength, 1), input,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
- input, effect, control));
-}
-
-
-// -----------------------------------------------------------------------------
// %_MathClz32
@@ -390,9 +433,10 @@ TEST_F(JSIntrinsicLoweringTest, InlineValueOf) {
EXPECT_THAT(
phi,
IsPhi(
- kMachAnyTagged, input,
- IsPhi(kMachAnyTagged, IsLoadField(AccessBuilder::ForValue(), input,
- effect, CaptureEq(&if_true1)),
+ MachineRepresentation::kTagged, input,
+ IsPhi(MachineRepresentation::kTagged,
+ IsLoadField(AccessBuilder::ForValue(), input, effect,
+ CaptureEq(&if_true1)),
input,
IsMerge(
AllOf(CaptureEq(&if_true1), IsIfTrue(CaptureEq(&branch1))),
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 0461a0d625..e0db771458 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -71,18 +71,16 @@ const SharedOperator kSharedOperators[] = {
SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(StrictEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
SHARED(StrictNotEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
- SHARED(UnaryNot, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
- SHARED(ToBoolean, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToString, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
- SHARED(Create, Operator::kEliminatable, 0, 0, 1, 0, 1, 1, 0),
+ SHARED(Create, Operator::kEliminatable, 2, 1, 1, 0, 1, 1, 0),
SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(TypeOf, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(CreateWithContext, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
#undef SHARED
};
@@ -187,17 +185,6 @@ const SharedOperatorWithLanguageMode kSharedOperatorsWithLanguageMode[] = {
SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(BitwiseOr, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(BitwiseXor, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(BitwiseAnd, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(ShiftLeft, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(ShiftRight, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(ShiftRightLogical, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(Add, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(Subtract, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(Multiply, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(Divide, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(Modulus, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
#undef SHARED
};
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 37dc1f3eb5..6fc89bb0ea 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -124,106 +124,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
// -----------------------------------------------------------------------------
-// JSUnaryNot
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithBoolean) {
- Node* input = Parameter(Type::Boolean(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsBooleanNot(input));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithOrderedNumber) {
- Node* input = Parameter(Type::OrderedNumber(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberEqual(input, IsNumberConstant(0)));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithFalsish) {
- Node* input = Parameter(
- Type::Union(
- Type::MinusZero(),
- Type::Union(
- Type::NaN(),
- Type::Union(
- Type::Null(),
- Type::Union(
- Type::Undefined(),
- Type::Union(
- Type::Undetectable(),
- Type::Union(
- Type::Constant(factory()->false_value(), zone()),
- Type::Range(0.0, 0.0, zone()), zone()),
- zone()),
- zone()),
- zone()),
- zone()),
- zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithTruish) {
- Node* input = Parameter(
- Type::Union(
- Type::Constant(factory()->true_value(), zone()),
- Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
- zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithNonZeroPlainNumber) {
- Node* input = Parameter(Type::Range(1.0, 42.0, zone()), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithString) {
- Node* input = Parameter(Type::String(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberEqual(IsLoadField(AccessBuilder::ForStringLength(), input,
- graph()->start(), graph()->start()),
- IsNumberConstant(0.0)));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithAny) {
- Node* input = Parameter(Type::Any(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
- context, graph()->start()));
- ASSERT_FALSE(r.Changed());
-}
-
-
-// -----------------------------------------------------------------------------
// Constant propagation
@@ -324,8 +224,9 @@ TEST_F(JSTypedLoweringTest, ParameterWithUndefined) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
Node* input = Parameter(Type::Boolean(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -353,8 +254,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -368,8 +270,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -378,8 +281,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -388,8 +292,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
Node* input = Parameter(Type::OrderedNumber(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
@@ -399,8 +304,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
Node* input = Parameter(Type::String(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -413,8 +319,9 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
- context, graph()->start()));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_FALSE(r.Changed());
}
@@ -450,7 +357,7 @@ TEST_F(JSTypedLoweringTest, JSToObjectWithAny) {
Reduction r = Reduce(graph()->NewNode(javascript()->ToObject(), input,
context, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsPhi(kMachAnyTagged, _, _, _));
+ EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kTagged, _, _, _));
}
@@ -480,10 +387,10 @@ TEST_F(JSTypedLoweringTest, JSToStringWithBoolean) {
Reduction r = Reduce(graph()->NewNode(javascript()->ToString(), input,
context, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsSelect(kMachAnyTagged, input, IsHeapConstant(factory()->true_string()),
- IsHeapConstant(factory()->false_string())));
+ EXPECT_THAT(r.replacement(),
+ IsSelect(MachineRepresentation::kTagged, input,
+ IsHeapConstant(factory()->true_string()),
+ IsHeapConstant(factory()->false_string())));
}
@@ -522,15 +429,17 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftLeft(language_mode), lhs, NumberConstant(rhs),
- context, EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->ShiftLeft(language_mode, hints), lhs,
+ NumberConstant(rhs), context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftLeft(lhs, IsNumberConstant(BitEq(rhs))));
@@ -540,6 +449,7 @@ TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
@@ -547,7 +457,7 @@ TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
Node* const control = graph()->start();
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftLeft(language_mode), lhs, rhs, context,
+ javascript()->ShiftLeft(language_mode, hints), lhs, rhs, context,
EmptyFrameState(), EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftLeft(lhs, rhs));
@@ -560,15 +470,17 @@ TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRight(language_mode), lhs, NumberConstant(rhs),
- context, EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->ShiftRight(language_mode, hints), lhs,
+ NumberConstant(rhs), context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftRight(lhs, IsNumberConstant(BitEq(rhs))));
@@ -578,6 +490,7 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
@@ -585,7 +498,7 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
Node* const control = graph()->start();
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRight(language_mode), lhs, rhs, context,
+ javascript()->ShiftRight(language_mode, hints), lhs, rhs, context,
EmptyFrameState(), EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftRight(lhs, rhs));
@@ -599,16 +512,17 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
TEST_F(JSTypedLoweringTest,
JSShiftRightLogicalWithUnsigned32AndConstant) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(
- graph()->NewNode(javascript()->ShiftRightLogical(language_mode), lhs,
- NumberConstant(rhs), context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftRightLogical(language_mode, hints), lhs,
+ NumberConstant(rhs), context, EmptyFrameState(), EmptyFrameState(),
+ effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftRightLogical(lhs, IsNumberConstant(BitEq(rhs))));
@@ -617,8 +531,8 @@ TEST_F(JSTypedLoweringTest,
}
-TEST_F(JSTypedLoweringTest,
- JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Unsigned32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
@@ -626,8 +540,8 @@ TEST_F(JSTypedLoweringTest,
Node* const control = graph()->start();
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRightLogical(language_mode), lhs, rhs, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ javascript()->ShiftRightLogical(language_mode, hints), lhs, rhs,
+ context, EmptyFrameState(), EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftRightLogical(lhs, rhs));
}
@@ -661,7 +575,7 @@ TEST_F(JSTypedLoweringTest, JSLoadContext) {
IsLoadField(AccessBuilder::ForContextSlot(
Context::PREVIOUS_INDEX),
context, effect, graph()->start()),
- effect, graph()->start()));
+ _, graph()->start()));
}
}
}
@@ -696,7 +610,7 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
IsLoadField(AccessBuilder::ForContextSlot(
Context::PREVIOUS_INDEX),
context, effect, graph()->start()),
- value, effect, control));
+ value, _, control));
}
}
}
@@ -868,13 +782,6 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Matcher<Node*> value_matcher =
IsToNumber(value, context, effect, control);
Matcher<Node*> effect_matcher = value_matcher;
- if (AccessBuilder::ForTypedArrayElement(type, true)
- .type->Is(Type::Signed32())) {
- value_matcher = IsNumberToInt32(value_matcher);
- } else if (AccessBuilder::ForTypedArrayElement(type, true)
- .type->Is(Type::Unsigned32())) {
- value_matcher = IsNumberToUint32(value_matcher);
- }
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -952,11 +859,33 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
}
+TEST_F(JSTypedLoweringTest, JSLoadNamedFunctionPrototype) {
+ VectorSlotPair feedback;
+ Handle<Name> name = factory()->prototype_string();
+ Handle<JSFunction> function = isolate()->object_function();
+ Handle<JSObject> function_prototype(JSObject::cast(function->prototype()));
+ Node* const receiver = Parameter(Type::Constant(function, zone()), 0);
+ Node* const vector = Parameter(Type::Internal(), 1);
+ Node* const context = Parameter(Type::Internal(), 2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(language_mode, name, feedback),
+ receiver, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(function_prototype));
+ }
+}
+
+
// -----------------------------------------------------------------------------
// JSAdd
TEST_F(JSTypedLoweringTest, JSAddWithString) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Node* lhs = Parameter(Type::String(), 0);
Node* rhs = Parameter(Type::String(), 1);
@@ -965,9 +894,9 @@ TEST_F(JSTypedLoweringTest, JSAddWithString) {
Node* frame_state1 = EmptyFrameState();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->Add(language_mode), lhs,
- rhs, context, frame_state0,
- frame_state1, effect, control));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->Add(language_mode, hints), lhs, rhs,
+ context, frame_state0, frame_state1, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsCall(_, IsHeapConstant(CodeFactory::StringAdd(
@@ -979,6 +908,27 @@ TEST_F(JSTypedLoweringTest, JSAddWithString) {
// -----------------------------------------------------------------------------
+// JSCreate
+
+
+TEST_F(JSTypedLoweringTest, JSCreate) {
+ Handle<JSFunction> function = isolate()->object_function();
+ Node* const target = Parameter(Type::Constant(function, graph()->zone()));
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Create(), target, target,
+ context, EmptyFrameState(), effect));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(function->initial_map()->instance_size()),
+ IsBeginRegion(effect), _),
+ _));
+}
+
+
+// -----------------------------------------------------------------------------
// JSCreateArguments
@@ -1002,6 +952,25 @@ TEST_F(JSTypedLoweringTest, JSCreateArgumentsViaStub) {
}
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsRestArrayViaStub) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state = FrameState(shared, graph()->start());
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsParameters::kRestArray, 0),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsCall(_,
+ IsHeapConstant(CodeFactory::RestArgumentsAccess(isolate()).code()),
+ IsNumberConstant(0), _, IsNumberConstant(0), _, effect, control));
+}
+
+
TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedMapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
@@ -1018,7 +987,7 @@ TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedMapped) {
EXPECT_THAT(r.replacement(),
IsFinishRegion(
IsAllocate(IsNumberConstant(Heap::kSloppyArgumentsObjectSize),
- IsBeginRegion(effect), control),
+ _, control),
_));
}
@@ -1039,11 +1008,29 @@ TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedUnmapped) {
EXPECT_THAT(r.replacement(),
IsFinishRegion(
IsAllocate(IsNumberConstant(Heap::kStrictArgumentsObjectSize),
- IsBeginRegion(effect), control),
+ _, control),
_));
}
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedRestArray) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsParameters::kRestArray, 0),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSArray::kSize), _, control), _));
+}
+
+
// -----------------------------------------------------------------------------
// JSCreateClosure
@@ -1070,22 +1057,26 @@ TEST_F(JSTypedLoweringTest, JSCreateClosure) {
TEST_F(JSTypedLoweringTest, JSCreateLiteralArray) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const input2 = HeapConstant(factory()->NewFixedArray(12));
- Node* const context = UndefinedConstant();
+ Handle<FixedArray> const constant_elements = factory()->NewFixedArray(12);
+ int const literal_flags = ArrayLiteral::kShallowElements;
+ int const literal_index = 1;
+ Node* const closure = Parameter(0);
+ Node* const context = Parameter(1);
Node* const frame_state = EmptyFrameState();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CreateLiteralArray(ArrayLiteral::kShallowElements), input0,
- input1, input2, context, frame_state, effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateLiteralArray(
+ constant_elements, literal_flags, literal_index),
+ closure, context, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsCall(_, IsHeapConstant(
CodeFactory::FastCloneShallowArray(isolate()).code()),
- input0, input1, input2, context, frame_state, effect, control));
+ closure, IsNumberConstant(literal_index),
+ IsHeapConstant(constant_elements), context, frame_state, effect,
+ control));
}
@@ -1094,22 +1085,27 @@ TEST_F(JSTypedLoweringTest, JSCreateLiteralArray) {
TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const input2 = HeapConstant(factory()->NewFixedArray(2 * 6));
- Node* const context = UndefinedConstant();
+ Handle<FixedArray> const constant_properties =
+ factory()->NewFixedArray(6 * 2);
+ int const literal_flags = ObjectLiteral::kShallowProperties;
+ int const literal_index = 1;
+ Node* const closure = Parameter(0);
+ Node* const context = Parameter(1);
Node* const frame_state = EmptyFrameState();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CreateLiteralObject(ObjectLiteral::kShallowProperties),
- input0, input1, input2, context, frame_state, effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateLiteralObject(
+ constant_properties, literal_flags, literal_index),
+ closure, context, frame_state, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
IsCall(_, IsHeapConstant(
CodeFactory::FastCloneShallowObject(isolate(), 6).code()),
- input0, input1, input2, _, context, frame_state, effect, control));
+ closure, IsNumberConstant(literal_index),
+ IsHeapConstant(constant_properties), _, context, frame_state,
+ effect, control));
}
@@ -1129,7 +1125,7 @@ TEST_F(JSTypedLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
8 + Context::MIN_CONTEXT_SLOTS)),
- IsBeginRegion(effect), control),
+ IsBeginRegion(_), control),
_));
}
@@ -1156,19 +1152,41 @@ TEST_F(JSTypedLoweringTest, JSCreateFunctionContextViaStub) {
TEST_F(JSTypedLoweringTest, JSCreateWithContext) {
Node* const object = Parameter(Type::Receiver());
- Node* const closure = Parameter(Type::Any());
+ Node* const closure = Parameter(Type::Function());
Node* const context = Parameter(Type::Any());
- Node* const frame_state = EmptyFrameState();
Node* const effect = graph()->start();
Node* const control = graph()->start();
Reduction r =
Reduce(graph()->NewNode(javascript()->CreateWithContext(), object,
- closure, context, frame_state, effect, control));
+ closure, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
Context::MIN_CONTEXT_SLOTS)),
- IsBeginRegion(effect), control),
+ IsBeginRegion(_), control),
+ _));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateCatchContext
+
+
+TEST_F(JSTypedLoweringTest, JSCreateCatchContext) {
+ Handle<String> name = factory()->length_string();
+ Node* const exception = Parameter(Type::Receiver());
+ Node* const closure = Parameter(Type::Function());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateCatchContext(name), exception,
+ closure, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ Context::MIN_CONTEXT_SLOTS + 1)),
+ IsBeginRegion(_), control),
_));
}
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 5d24a3bd1d..597edde665 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -14,9 +14,11 @@ namespace compiler {
namespace {
-MachineType kMachineTypes[] = {kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged, kMachAnyTagged};
+MachineType kMachineTypes[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged()};
}
class LinkageTailCall : public TestWithZone {
@@ -26,17 +28,17 @@ class LinkageTailCall : public TestWithZone {
locations->return_count() + locations->parameter_count());
MachineSignature* types = new (zone()) MachineSignature(
locations->return_count(), locations->parameter_count(), kMachineTypes);
- return new (zone())
- CallDescriptor(CallDescriptor::kCallCodeObject, kMachAnyTagged,
- LinkageLocation::ForAnyRegister(),
- types, // machine_sig
- locations, // location_sig
- 0, // js_parameter_count
- Operator::kNoProperties, // properties
- 0, // callee-saved
- 0, // callee-saved fp
- CallDescriptor::kNoFlags, // flags,
- "");
+ return new (zone()) CallDescriptor(CallDescriptor::kCallCodeObject,
+ MachineType::AnyTagged(),
+ LinkageLocation::ForAnyRegister(),
+ types, // machine_sig
+ locations, // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ 0, // callee-saved
+ 0, // callee-saved fp
+ CallDescriptor::kNoFlags, // flags,
+ "");
}
LinkageLocation StackLocation(int loc) {
@@ -55,7 +57,9 @@ TEST_F(LinkageTailCall, EmptyToEmpty) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -71,7 +75,9 @@ TEST_F(LinkageTailCall, SameReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -89,7 +95,9 @@ TEST_F(LinkageTailCall, DifferingReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_FALSE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_FALSE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -108,7 +116,9 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -127,7 +137,9 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -146,7 +158,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_FALSE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(-1, stack_param_delta);
}
@@ -165,7 +179,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- EXPECT_FALSE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(1, stack_param_delta);
}
@@ -189,7 +205,9 @@ TEST_F(LinkageTailCall, MatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -213,7 +231,9 @@ TEST_F(LinkageTailCall, NonMatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_FALSE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -238,7 +258,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -264,7 +286,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
}
@@ -290,7 +314,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_FALSE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(1, stack_param_delta);
}
@@ -316,7 +342,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- EXPECT_FALSE(desc1->CanTailCall(node));
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(-1, stack_param_delta);
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/live-range-builder.h b/deps/v8/test/unittests/compiler/live-range-builder.h
index e5f05ebbcb..4a5621fab7 100644
--- a/deps/v8/test/unittests/compiler/live-range-builder.h
+++ b/deps/v8/test/unittests/compiler/live-range-builder.h
@@ -40,7 +40,7 @@ class TestRangeBuilder {
TopLevelLiveRange* Build() {
TopLevelLiveRange* range =
- new (zone_) TopLevelLiveRange(id_, MachineType::kRepTagged);
+ new (zone_) TopLevelLiveRange(id_, MachineRepresentation::kTagged);
// Traverse the provided interval specifications backwards, because that is
// what LiveRange expects.
for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
diff --git a/deps/v8/test/unittests/compiler/live-range-unittest.cc b/deps/v8/test/unittests/compiler/live-range-unittest.cc
index e802aedff1..e4fc2ca151 100644
--- a/deps/v8/test/unittests/compiler/live-range-unittest.cc
+++ b/deps/v8/test/unittests/compiler/live-range-unittest.cc
@@ -33,8 +33,8 @@ class LiveRangeUnitTest : public TestWithZone {
TopLevelLiveRange* Splinter(TopLevelLiveRange* top, int start, int end,
int new_id = 0) {
if (top->splinter() == nullptr) {
- TopLevelLiveRange* ret =
- new (zone()) TopLevelLiveRange(new_id, MachineType::kRepTagged);
+ TopLevelLiveRange* ret = new (zone())
+ TopLevelLiveRange(new_id, MachineRepresentation::kTagged);
top->SetSplinter(ret);
}
top->Splinter(LifetimePosition::FromInt(start),
@@ -74,7 +74,7 @@ class LiveRangeUnitTest : public TestWithZone {
TEST_F(LiveRangeUnitTest, InvalidConstruction) {
// Build a range manually, because the builder guards against empty cases.
TopLevelLiveRange* range =
- new (zone()) TopLevelLiveRange(1, MachineType::kRepTagged);
+ new (zone()) TopLevelLiveRange(1, MachineRepresentation::kTagged);
V8_ASSERT_DEBUG_DEATH(
range->AddUseInterval(LifetimePosition::FromInt(0),
LifetimePosition::FromInt(0), zone()),
@@ -437,7 +437,7 @@ TEST_F(LiveRangeUnitTest, IDGeneration) {
EXPECT_EQ(0, vreg->relative_id());
TopLevelLiveRange* splinter =
- new (zone()) TopLevelLiveRange(101, MachineType::kRepTagged);
+ new (zone()) TopLevelLiveRange(101, MachineRepresentation::kTagged);
vreg->SetSplinter(splinter);
vreg->Splinter(LifetimePosition::FromInt(4), LifetimePosition::FromInt(12),
zone());
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
index 3f6a658e82..b77830aa5e 100644
--- a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -23,7 +23,7 @@ class LivenessAnalysisTest : public GraphTest {
public:
explicit LivenessAnalysisTest(int locals_count = 4)
: locals_count_(locals_count),
- machine_(zone(), kRepWord32),
+ machine_(zone(), MachineRepresentation::kWord32),
javascript_(zone()),
jsgraph_(isolate(), graph(), common(), &javascript_, nullptr,
&machine_),
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index f1dac8bb64..9dcec85ebf 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -119,7 +119,8 @@ class LoopPeelingTest : public GraphTest {
a->loop->ReplaceInput(0, b->if_true);
}
Node* NewPhi(While* w, Node* a, Node* b) {
- return graph()->NewNode(common()->Phi(kMachAnyTagged, 2), a, b, w->loop);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), a,
+ b, w->loop);
}
Branch NewBranch(Node* cond, Node* control = nullptr) {
@@ -133,8 +134,8 @@ class LoopPeelingTest : public GraphTest {
Counter NewCounter(While* w, int32_t b, int32_t k) {
Node* base = Int32Constant(b);
Node* inc = Int32Constant(k);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), base, base, w->loop);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), base, base, w->loop);
Node* add = graph()->NewNode(machine()->Int32Add(), phi, inc);
phi->ReplaceInput(1, add);
return {base, inc, phi, add};
@@ -183,7 +184,7 @@ TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
Capture<Node*> merge;
EXPECT_THAT(
- r, IsReturn(IsPhi(kMachAnyTagged, c.phi, c.base,
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
AllOf(CaptureEq(&merge), IsMerge(w.exit, if_false1))),
start(), CaptureEq(&merge)));
}
@@ -222,7 +223,7 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
Capture<Node*> merge;
EXPECT_THAT(
r,
- IsReturn(IsPhi(kMachAnyTagged, c.phi, c.base,
+ IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
AllOf(CaptureEq(&merge), IsMerge(outer.exit, if_falseo))),
start(), CaptureEq(&merge)));
}
@@ -298,11 +299,11 @@ TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
Node* back = phi->InputAt(1);
- EXPECT_THAT(back, IsPhi(kMachAnyTagged, c.phi, c.base,
+ EXPECT_THAT(back, IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
IsMerge(inner.exit, if_falsei)));
- EXPECT_THAT(phi,
- IsPhi(kMachAnyTagged, IsInt32Constant(11), back, outer.loop));
+ EXPECT_THAT(phi, IsPhi(MachineRepresentation::kTagged, IsInt32Constant(11),
+ back, outer.loop));
EXPECT_THAT(r, IsReturn(phi, start(), outer.exit));
}
@@ -347,9 +348,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
Branch b1 = NewBranch(p0, loop);
Branch b2 = NewBranch(p0, b1.if_true);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 3), Int32Constant(0),
- Int32Constant(1), Int32Constant(2), loop);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 3),
+ Int32Constant(0), Int32Constant(1),
+ Int32Constant(2), loop);
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
@@ -376,14 +377,15 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
- EXPECT_THAT(
- phi, IsPhi(kMachAnyTagged, IsPhi(kMachAnyTagged, IsInt32Constant(1),
- IsInt32Constant(2), IsMerge(b2t, b2f)),
- IsInt32Constant(1), IsInt32Constant(2), loop));
+ EXPECT_THAT(phi,
+ IsPhi(MachineRepresentation::kTagged,
+ IsPhi(MachineRepresentation::kTagged, IsInt32Constant(1),
+ IsInt32Constant(2), IsMerge(b2t, b2f)),
+ IsInt32Constant(1), IsInt32Constant(2), loop));
Capture<Node*> merge;
EXPECT_THAT(
- r, IsReturn(IsPhi(kMachAnyTagged, phi, IsInt32Constant(0),
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
start(), CaptureEq(&merge)));
}
@@ -394,9 +396,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
Branch b1 = NewBranch(p0, loop);
Branch b2 = NewBranch(p0, b1.if_true);
- Node* phi =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 3), Int32Constant(0),
- Int32Constant(1), Int32Constant(2), loop);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 3),
+ Int32Constant(0), Int32Constant(1),
+ Int32Constant(2), loop);
phi->ReplaceInput(
1, graph()->NewNode(machine()->Int32Add(), phi, Int32Constant(1)));
@@ -432,18 +434,18 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
Node* eval = phi->InputAt(0);
- EXPECT_THAT(eval, IsPhi(kMachAnyTagged,
+ EXPECT_THAT(eval, IsPhi(MachineRepresentation::kTagged,
IsInt32Add(IsInt32Constant(0), IsInt32Constant(1)),
IsInt32Add(IsInt32Constant(0), IsInt32Constant(2)),
CaptureEq(&entry)));
- EXPECT_THAT(phi,
- IsPhi(kMachAnyTagged, eval, IsInt32Add(phi, IsInt32Constant(1)),
- IsInt32Add(phi, IsInt32Constant(2)), loop));
+ EXPECT_THAT(phi, IsPhi(MachineRepresentation::kTagged, eval,
+ IsInt32Add(phi, IsInt32Constant(1)),
+ IsInt32Add(phi, IsInt32Constant(2)), loop));
Capture<Node*> merge;
EXPECT_THAT(
- r, IsReturn(IsPhi(kMachAnyTagged, phi, IsInt32Constant(0),
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
start(), CaptureEq(&merge)));
}
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 6f7ed3aca4..2feba2ef7f 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -447,11 +447,13 @@ TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithPhi) {
TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
Reduction reduction = Reduce(graph()->NewNode(
machine()->TruncateFloat64ToInt32(mode),
- graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge)));
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), p0,
+ p1, merge)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsPhi(kMachInt32, IsTruncateFloat64ToInt32(p0),
- IsTruncateFloat64ToInt32(p1), merge));
+ EXPECT_THAT(
+ reduction.replacement(),
+ IsPhi(MachineRepresentation::kWord32, IsTruncateFloat64ToInt32(p0),
+ IsTruncateFloat64ToInt32(p1), merge));
}
}
@@ -828,8 +830,8 @@ TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
{
- Node* const l = graph()->NewNode(machine()->Load(kMachInt8), p0, p1,
- graph()->start(), graph()->start());
+ Node* const l = graph()->NewNode(machine()->Load(MachineType::Int8()), p0,
+ p1, graph()->start(), graph()->start());
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32Sar(),
graph()->NewNode(machine()->Word32Shl(), l, Int32Constant(24)),
@@ -838,8 +840,8 @@ TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
EXPECT_EQ(l, r.replacement());
}
{
- Node* const l = graph()->NewNode(machine()->Load(kMachInt16), p0, p1,
- graph()->start(), graph()->start());
+ Node* const l = graph()->NewNode(machine()->Load(MachineType::Int16()), p0,
+ p1, graph()->start(), graph()->start());
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32Sar(),
graph()->NewNode(machine()->Word32Shl(), l, Int32Constant(16)),
@@ -1142,7 +1144,8 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsSelect(kMachInt32, IsInt32LessThan(p0, IsInt32Constant(0)),
+ IsSelect(MachineRepresentation::kWord32,
+ IsInt32LessThan(p0, IsInt32Constant(0)),
IsInt32Sub(IsInt32Constant(0),
IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
IsInt32Constant(mask))),
@@ -1157,7 +1160,8 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsSelect(kMachInt32, IsInt32LessThan(p0, IsInt32Constant(0)),
+ IsSelect(MachineRepresentation::kWord32,
+ IsInt32LessThan(p0, IsInt32Constant(0)),
IsInt32Sub(IsInt32Constant(0),
IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
IsInt32Constant(mask))),
@@ -1556,7 +1560,7 @@ TEST_F(MachineOperatorReducerTest, Float64LessThanOrEqualWithFloat32Constant) {
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
- const StoreRepresentation rep(kMachUint8, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord8, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1578,7 +1582,7 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32SarAndWord32Shl) {
- const StoreRepresentation rep(kMachUint8, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord8, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1602,7 +1606,8 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32SarAndWord32Shl) {
TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
- const StoreRepresentation rep(kMachUint16, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord16,
+ kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1624,7 +1629,8 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32SarAndWord32Shl) {
- const StoreRepresentation rep(kMachUint16, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord16,
+ kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1646,18 +1652,6 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32SarAndWord32Shl) {
}
}
-
-TEST_F(MachineOperatorReducerTest, RoundPlusTruncate) {
- Node* p0 = Parameter(0);
- Node* t0 = graph()->NewNode(machine()->RoundInt64ToFloat64(), p0);
- Node* t1 = graph()->NewNode(
- machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript), t0);
-
- Reduction r = Reduce(t1);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index f49fbd7b03..59eb484dab 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -17,25 +17,38 @@ namespace compiler {
template <typename T>
class MachineOperatorTestWithParam
: public TestWithZone,
- public ::testing::WithParamInterface< ::testing::tuple<MachineType, T> > {
+ public ::testing::WithParamInterface<
+ ::testing::tuple<MachineRepresentation, T> > {
protected:
- MachineType type() const { return ::testing::get<0>(B::GetParam()); }
+ MachineRepresentation representation() const {
+ return ::testing::get<0>(B::GetParam());
+ }
const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
private:
- typedef ::testing::WithParamInterface< ::testing::tuple<MachineType, T> > B;
+ typedef ::testing::WithParamInterface<
+ ::testing::tuple<MachineRepresentation, T> > B;
};
namespace {
-const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
+const MachineRepresentation kMachineReps[] = {MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64};
const MachineType kMachineTypesForAccess[] = {
- kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
- kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged, kMachPtr};
+ MachineType::Float32(), MachineType::Float64(), MachineType::Int8(),
+ MachineType::Uint8(), MachineType::Int16(), MachineType::Uint16(),
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Int64(),
+ MachineType::Uint64(), MachineType::AnyTagged()};
+
+
+const MachineRepresentation kRepresentationsForStore[] = {
+ MachineRepresentation::kFloat32, MachineRepresentation::kFloat64,
+ MachineRepresentation::kWord8, MachineRepresentation::kWord16,
+ MachineRepresentation::kWord32, MachineRepresentation::kWord64,
+ MachineRepresentation::kTagged};
} // namespace
@@ -49,14 +62,14 @@ typedef MachineOperatorTestWithParam<LoadRepresentation>
TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
- MachineOperatorBuilder machine1(zone(), type());
- MachineOperatorBuilder machine2(zone(), type());
+ MachineOperatorBuilder machine1(zone(), representation());
+ MachineOperatorBuilder machine2(zone(), representation());
EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
}
TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
const Operator* op = machine.Load(GetParam());
EXPECT_EQ(2, op->ValueInputCount());
@@ -71,13 +84,13 @@ TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
}
TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(GetParam(),
OpParameter<LoadRepresentation>(machine.Load(GetParam())));
}
@@ -95,27 +108,29 @@ INSTANTIATE_TEST_CASE_P(
class MachineStoreOperatorTest
: public MachineOperatorTestWithParam<
- ::testing::tuple<MachineType, WriteBarrierKind> > {
+ ::testing::tuple<MachineRepresentation, WriteBarrierKind> > {
protected:
StoreRepresentation GetParam() const {
return StoreRepresentation(
- ::testing::get<0>(MachineOperatorTestWithParam<
- ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
- ::testing::get<1>(MachineOperatorTestWithParam<
- ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
+ ::testing::get<0>(
+ MachineOperatorTestWithParam< ::testing::tuple<
+ MachineRepresentation, WriteBarrierKind> >::GetParam()),
+ ::testing::get<1>(
+ MachineOperatorTestWithParam< ::testing::tuple<
+ MachineRepresentation, WriteBarrierKind> >::GetParam()));
}
};
TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
- MachineOperatorBuilder machine1(zone(), type());
- MachineOperatorBuilder machine2(zone(), type());
+ MachineOperatorBuilder machine1(zone(), representation());
+ MachineOperatorBuilder machine2(zone(), representation());
EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
}
TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
const Operator* op = machine.Store(GetParam());
EXPECT_EQ(3, op->ValueInputCount());
@@ -130,13 +145,13 @@ TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
}
TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(GetParam(),
OpParameter<StoreRepresentation>(machine.Store(GetParam())));
}
@@ -146,7 +161,7 @@ INSTANTIATE_TEST_CASE_P(
MachineOperatorTest, MachineStoreOperatorTest,
::testing::Combine(
::testing::ValuesIn(kMachineReps),
- ::testing::Combine(::testing::ValuesIn(kMachineTypesForAccess),
+ ::testing::Combine(::testing::ValuesIn(kRepresentationsForStore),
::testing::Values(kNoWriteBarrier,
kFullWriteBarrier))));
#endif
@@ -256,14 +271,16 @@ const PureOperator kPureOperators[] = {
class MachinePureOperatorTest : public TestWithZone {
protected:
- MachineType word_type() { return kMachPtr; }
+ MachineRepresentation word_type() {
+ return MachineType::PointerRepresentation();
+ }
};
TEST_F(MachinePureOperatorTest, PureOperators) {
- TRACED_FOREACH(MachineType, machine_rep1, kMachineReps) {
+ TRACED_FOREACH(MachineRepresentation, machine_rep1, kMachineReps) {
MachineOperatorBuilder machine1(zone(), machine_rep1);
- TRACED_FOREACH(MachineType, machine_rep2, kMachineReps) {
+ TRACED_FOREACH(MachineRepresentation, machine_rep2, kMachineReps) {
MachineOperatorBuilder machine2(zone(), machine_rep2);
TRACED_FOREACH(PureOperator, pop, kPureOperators) {
const Operator* op1 = (machine1.*pop.constructor)();
@@ -317,15 +334,17 @@ const OptionalOperatorEntry kOptionalOperators[] = {
class MachineOptionalOperatorTest : public TestWithZone {
protected:
- MachineType word_type() { return kMachPtr; }
+ MachineRepresentation word_rep() {
+ return MachineType::PointerRepresentation();
+ }
};
TEST_F(MachineOptionalOperatorTest, OptionalOperators) {
TRACED_FOREACH(OptionalOperatorEntry, pop, kOptionalOperators) {
- TRACED_FOREACH(MachineType, machine_rep1, kMachineReps) {
+ TRACED_FOREACH(MachineRepresentation, machine_rep1, kMachineReps) {
MachineOperatorBuilder machine1(zone(), machine_rep1, pop.enabling_flag);
- TRACED_FOREACH(MachineType, machine_rep2, kMachineReps) {
+ TRACED_FOREACH(MachineRepresentation, machine_rep2, kMachineReps) {
MachineOperatorBuilder machine2(zone(), machine_rep2,
pop.enabling_flag);
const Operator* op1 = (machine1.*pop.constructor)().op();
@@ -335,7 +354,7 @@ TEST_F(MachineOptionalOperatorTest, OptionalOperators) {
EXPECT_EQ(pop.control_input_count, op1->ControlInputCount());
EXPECT_EQ(pop.value_output_count, op1->ValueOutputCount());
- MachineOperatorBuilder machine3(zone(), word_type());
+ MachineOperatorBuilder machine3(zone(), word_rep());
EXPECT_TRUE((machine1.*pop.constructor)().IsSupported());
EXPECT_FALSE((machine3.*pop.constructor)().IsSupported());
}
@@ -356,7 +375,7 @@ typedef TestWithZone MachineOperatorTest;
TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
- MachineOperatorBuilder machine(zone(), kRepWord32);
+ MachineOperatorBuilder machine(zone(), MachineRepresentation::kWord32);
EXPECT_EQ(machine.Word32And(), machine.WordAnd());
EXPECT_EQ(machine.Word32Or(), machine.WordOr());
EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
@@ -378,7 +397,7 @@ TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
- MachineOperatorBuilder machine(zone(), kRepWord64);
+ MachineOperatorBuilder machine(zone(), MachineRepresentation::kWord64);
EXPECT_EQ(machine.Word64And(), machine.WordAnd());
EXPECT_EQ(machine.Word64Or(), machine.WordOr());
EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index a16ad7a31f..122c398e20 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -41,19 +41,19 @@ struct FPCmp {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
- kMachFloat64},
+ MachineType::Float64()},
kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
- kMachFloat64},
+ MachineType::Float64()},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kMipsCmpD, kMachFloat64},
+ kMipsCmpD, MachineType::Float64()},
kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
- kMachFloat64},
+ MachineType::Float64()},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
- "Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
+ "Float64GreaterThanOrEqual", kMipsCmpD, MachineType::Float64()},
kUnsignedLessThanOrEqual}};
struct Conversion {
@@ -69,12 +69,14 @@ struct Conversion {
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, kMachInt16},
- {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, kMachInt16},
- {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, kMachInt16},
- {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd, kMachInt32},
- {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, kMachInt32},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor, kMachInt32}};
+ {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, MachineType::Int16()},
+ {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, MachineType::Int16()},
+ {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, MachineType::Int16()},
+ {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, MachineType::Int32()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor,
+ MachineType::Int32()}};
// ----------------------------------------------------------------------------
@@ -83,14 +85,18 @@ const MachInst2 kLogicalInstructions[] = {
const MachInst2 kShiftInstructions[] = {
- {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, kMachInt16},
- {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, kMachInt16},
- {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, kMachInt16},
- {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, kMachInt16},
- {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl, kMachInt32},
- {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr, kMachInt32},
- {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar, kMachInt32},
- {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor, kMachInt32}};
+ {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, MachineType::Int16()},
+ {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, MachineType::Int16()},
+ {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, MachineType::Int16()},
+ {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, MachineType::Int16()},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor,
+ MachineType::Int32()}};
// ----------------------------------------------------------------------------
@@ -99,11 +105,16 @@ const MachInst2 kShiftInstructions[] = {
const MachInst2 kMulDivInstructions[] = {
- {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul, kMachInt32},
- {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv, kMachInt32},
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU, kMachUint32},
- {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD, kMachFloat64},
- {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD, kMachFloat64}};
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD,
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -112,9 +123,12 @@ const MachInst2 kMulDivInstructions[] = {
const MachInst2 kModInstructions[] = {
- {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod, kMachInt32},
- {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU, kMachInt32},
- {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD, kMachFloat64}};
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD,
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -123,8 +137,10 @@ const MachInst2 kModInstructions[] = {
const MachInst2 kFPArithInstructions[] = {
- {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD, kMachFloat64},
- {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD, kMachFloat64}};
+ {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD,
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -133,12 +149,14 @@ const MachInst2 kFPArithInstructions[] = {
const MachInst2 kAddSubInstructions[] = {
- {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd, kMachInt32},
- {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub, kMachInt32},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub,
+ MachineType::Int32()},
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
- kMipsAddOvf, kMachInt32},
+ kMipsAddOvf, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kMipsSubOvf, kMachInt32}};
+ kMipsSubOvf, MachineType::Int32()}};
// ----------------------------------------------------------------------------
@@ -147,9 +165,11 @@ const MachInst2 kAddSubInstructions[] = {
const MachInst1 kAddSubOneInstructions[] = {
- {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub, kMachInt32},
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub,
+ MachineType::Int32()},
// TODO(dusmil): check this ...
- // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst, kMachInt32}
+ // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst,
+ // MachineType::Int32()}
};
@@ -159,31 +179,35 @@ const MachInst1 kAddSubOneInstructions[] = {
const IntCmp kCmpInstructions[] = {
- {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp, kMachInt16}, 1U},
- {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp, kMachInt16},
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp,
+ MachineType::Int16()},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp, kMachInt32},
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp,
+ MachineType::Int16()},
+ 1U},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp,
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMipsCmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMipsCmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kMipsCmp, kMachInt32},
+ kMipsCmp, MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMipsCmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kMipsCmp, kMachInt32},
+ kMipsCmp, MachineType::Int32()},
1U},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMipsCmp,
- kMachUint32},
+ MachineType::Uint32()},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kMipsCmp, kMachUint32},
+ kMipsCmp, MachineType::Uint32()},
1U}};
@@ -200,23 +224,51 @@ const Conversion kConversionInstructions[] = {
// integers.
// mips instruction: cvt_d_w
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
- kMipsCvtDW, kMachFloat64},
- kMachInt32},
+ kMipsCvtDW, MachineType::Float64()},
+ MachineType::Int32()},
// mips instruction: cvt_d_uw
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
- kMipsCvtDUw, kMachFloat64},
- kMachInt32},
+ kMipsCvtDUw, MachineType::Float64()},
+ MachineType::Int32()},
// mips instruction: trunc_w_d
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
- kMipsTruncWD, kMachFloat64},
- kMachInt32},
+ kMipsTruncWD, MachineType::Float64()},
+ MachineType::Int32()},
// mips instruction: trunc_uw_d
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
- kMipsTruncUwD, kMachFloat64},
- kMachInt32}};
+ kMipsTruncUwD, MachineType::Float64()},
+ MachineType::Int32()}};
+
+const Conversion kFloat64RoundInstructions[] = {
+ {{&RawMachineAssembler::Float64RoundUp, "Float64RoundUp", kMipsCeilWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundDown, "Float64RoundDown", kMipsFloorWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTiesEven, "Float64RoundTiesEven",
+ kMipsRoundWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTruncate, "Float64RoundTruncate",
+ kMipsTruncWD, MachineType::Int32()},
+ MachineType::Float64()}};
+
+const Conversion kFloat32RoundInstructions[] = {
+ {{&RawMachineAssembler::Float32RoundUp, "Float32RoundUp", kMipsCeilWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundDown, "Float32RoundDown", kMipsFloorWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTiesEven, "Float32RoundTiesEven",
+ kMipsRoundWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTruncate, "Float32RoundTruncate",
+ kMipsTruncWS, MachineType::Int32()},
+ MachineType::Float32()}};
} // namespace
@@ -226,7 +278,8 @@ typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -278,7 +331,8 @@ typedef InstructionSelectorTestWithParam<MachInst2>
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
- TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
@@ -296,6 +350,65 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
::testing::ValuesIn(kShiftInstructions));
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsShl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
@@ -322,6 +435,117 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
::testing::ValuesIn(kLogicalInstructions));
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)),
+ m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1),
+ m.Word32Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsIns, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Int32Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsIns, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
@@ -478,6 +702,81 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
::testing::ValuesIn(kConversionInstructions));
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat64ToInt32WithRoundFloat64;
+
+TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32((m.*conv.mi.constructor)(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
+
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat32ToInt32WithRoundFloat32;
+
+TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32(
+ m.ChangeFloat32ToFloat64((m.*conv.mi.constructor)(m.Parameter(0)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsTruncWS, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest,
+ TruncateFloat64ToFloat32OfChangeInt32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Int32());
+ m.Return(
+ m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsCvtSW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Loads and stores.
// ----------------------------------------------------------------------------
@@ -492,13 +791,13 @@ struct MemoryAccess {
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kMipsLb, kMipsSb},
- {kMachUint8, kMipsLbu, kMipsSb},
- {kMachInt16, kMipsLh, kMipsSh},
- {kMachUint16, kMipsLhu, kMipsSh},
- {kMachInt32, kMipsLw, kMipsSw},
- {kMachFloat32, kMipsLwc1, kMipsSwc1},
- {kMachFloat64, kMipsLdc1, kMipsSdc1}};
+ {MachineType::Int8(), kMipsLb, kMipsSb},
+ {MachineType::Uint8(), kMipsLbu, kMipsSb},
+ {MachineType::Int16(), kMipsLh, kMipsSh},
+ {MachineType::Uint16(), kMipsLhu, kMipsSh},
+ {MachineType::Int32(), kMipsLw, kMipsSw},
+ {MachineType::Float32(), kMipsLwc1, kMipsSwc1},
+ {MachineType::Float64(), kMipsLdc1, kMipsSdc1}};
struct MemoryAccessImm {
@@ -537,49 +836,49 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
const MemoryAccessImm kMemoryAccessesImm[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMipsLb,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kMipsLbu,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kMipsLh,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint16,
+ {MachineType::Uint16(),
kMipsLhu,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt32,
+ {MachineType::Int32(),
kMipsLw,
kMipsSw,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMipsLwc1,
kMipsSwc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMipsLdc1,
kMipsSdc1,
&InstructionSelectorTest::Stream::IsDouble,
@@ -589,37 +888,37 @@ const MemoryAccessImm kMemoryAccessesImm[] = {
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMipsLb,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt8,
+ {MachineType::Int8(),
kMipsLbu,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMipsLh,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMipsLhu,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt32,
+ {MachineType::Int32(),
kMipsLw,
kMipsSw,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMipsLwc1,
kMipsSwc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMipsLdc1,
kMipsSdc1,
&InstructionSelectorTest::Stream::IsDouble,
@@ -634,7 +933,8 @@ typedef InstructionSelectorTestWithParam<MemoryAccess>
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -645,8 +945,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -672,7 +974,7 @@ typedef InstructionSelectorTestWithParam<MemoryAccessImm>
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -695,9 +997,10 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -729,7 +1032,7 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
LoadWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -747,9 +1050,10 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
StoreWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -775,7 +1079,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -787,7 +1091,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -802,7 +1106,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
TEST_F(InstructionSelectorTest, Word32Clz) {
- StreamBuilder m(this, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32Clz(p0);
m.Return(n);
@@ -817,7 +1121,7 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
TEST_F(InstructionSelectorTest, Float32Abs) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -832,7 +1136,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
TEST_F(InstructionSelectorTest, Float64Abs) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -847,7 +1151,8 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Max(p0, p1);
@@ -863,7 +1168,8 @@ TEST_F(InstructionSelectorTest, Float32Max) {
TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Min(p0, p1);
@@ -879,7 +1185,8 @@ TEST_F(InstructionSelectorTest, Float32Min) {
TEST_F(InstructionSelectorTest, Float64Max) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Max(p0, p1);
@@ -895,7 +1202,8 @@ TEST_F(InstructionSelectorTest, Float64Max) {
TEST_F(InstructionSelectorTest, Float64Min) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Min(p0, p1);
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
index 5508ba626f..89455a4fbd 100644
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips64/OWNERS
@@ -3,3 +3,4 @@ gergely.kis@imgtec.com
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index 148667333b..d9cd96f471 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -41,19 +41,19 @@ struct FPCmp {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
- kMachFloat64},
+ MachineType::Float64()},
kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
- kMachFloat64},
+ MachineType::Float64()},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kMips64CmpD, kMachFloat64},
+ kMips64CmpD, MachineType::Float64()},
kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
- kMips64CmpD, kMachFloat64},
+ kMips64CmpD, MachineType::Float64()},
kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
- "Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
+ "Float64GreaterThanOrEqual", kMips64CmpD, MachineType::Float64()},
kUnsignedLessThanOrEqual}};
struct Conversion {
@@ -69,12 +69,18 @@ struct Conversion {
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kMips64And, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kMips64And, kMachInt64},
- {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or, kMachInt32},
- {&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or, kMachInt64},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor, kMachInt32},
- {&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor, kMachInt64}};
+ {&RawMachineAssembler::Word32And, "Word32And", kMips64And,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kMips64And,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -83,14 +89,22 @@ const MachInst2 kLogicalInstructions[] = {
const MachInst2 kShiftInstructions[] = {
- {&RawMachineAssembler::Word32Shl, "Word32Shl", kMips64Shl, kMachInt32},
- {&RawMachineAssembler::Word64Shl, "Word64Shl", kMips64Dshl, kMachInt64},
- {&RawMachineAssembler::Word32Shr, "Word32Shr", kMips64Shr, kMachInt32},
- {&RawMachineAssembler::Word64Shr, "Word64Shr", kMips64Dshr, kMachInt64},
- {&RawMachineAssembler::Word32Sar, "Word32Sar", kMips64Sar, kMachInt32},
- {&RawMachineAssembler::Word64Sar, "Word64Sar", kMips64Dsar, kMachInt64},
- {&RawMachineAssembler::Word32Ror, "Word32Ror", kMips64Ror, kMachInt32},
- {&RawMachineAssembler::Word64Ror, "Word64Ror", kMips64Dror, kMachInt64}};
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kMips64Shl,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shl, "Word64Shl", kMips64Dshl,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kMips64Shr,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shr, "Word64Shr", kMips64Dshr,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kMips64Sar,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Sar, "Word64Sar", kMips64Dsar,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kMips64Ror,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Ror, "Word64Ror", kMips64Dror,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -99,15 +113,22 @@ const MachInst2 kShiftInstructions[] = {
const MachInst2 kMulDivInstructions[] = {
- {&RawMachineAssembler::Int32Mul, "Int32Mul", kMips64Mul, kMachInt32},
- {&RawMachineAssembler::Int32Div, "Int32Div", kMips64Div, kMachInt32},
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU, kMachUint32},
- {&RawMachineAssembler::Int64Mul, "Int64Mul", kMips64Dmul, kMachInt64},
- {&RawMachineAssembler::Int64Div, "Int64Div", kMips64Ddiv, kMachInt64},
- {&RawMachineAssembler::Uint64Div, "Uint64Div", kMips64DdivU, kMachUint64},
- {&RawMachineAssembler::Float64Mul, "Float64Mul", kMips64MulD, kMachFloat64},
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kMips64Mul,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kMips64Div,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kMips64Dmul,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kMips64Ddiv,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kMips64DdivU,
+ MachineType::Uint64()},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kMips64MulD,
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Div, "Float64Div", kMips64DivD,
- kMachFloat64}};
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -116,10 +137,12 @@ const MachInst2 kMulDivInstructions[] = {
const MachInst2 kModInstructions[] = {
- {&RawMachineAssembler::Int32Mod, "Int32Mod", kMips64Mod, kMachInt32},
- {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU, kMachInt32},
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kMips64Mod,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU,
+ MachineType::Int32()},
{&RawMachineAssembler::Float64Mod, "Float64Mod", kMips64ModD,
- kMachFloat64}};
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -128,9 +151,10 @@ const MachInst2 kModInstructions[] = {
const MachInst2 kFPArithInstructions[] = {
- {&RawMachineAssembler::Float64Add, "Float64Add", kMips64AddD, kMachFloat64},
+ {&RawMachineAssembler::Float64Add, "Float64Add", kMips64AddD,
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Sub, "Float64Sub", kMips64SubD,
- kMachFloat64}};
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -139,10 +163,14 @@ const MachInst2 kFPArithInstructions[] = {
const MachInst2 kAddSubInstructions[] = {
- {&RawMachineAssembler::Int32Add, "Int32Add", kMips64Add, kMachInt32},
- {&RawMachineAssembler::Int64Add, "Int64Add", kMips64Dadd, kMachInt64},
- {&RawMachineAssembler::Int32Sub, "Int32Sub", kMips64Sub, kMachInt32},
- {&RawMachineAssembler::Int64Sub, "Int64Sub", kMips64Dsub, kMachInt64}};
+ {&RawMachineAssembler::Int32Add, "Int32Add", kMips64Add,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Add, "Int64Add", kMips64Dadd,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kMips64Sub,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Sub, "Int64Sub", kMips64Dsub,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -151,8 +179,10 @@ const MachInst2 kAddSubInstructions[] = {
const MachInst1 kAddSubOneInstructions[] = {
- {&RawMachineAssembler::Int32Neg, "Int32Neg", kMips64Sub, kMachInt32},
- {&RawMachineAssembler::Int64Neg, "Int64Neg", kMips64Dsub, kMachInt64}};
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kMips64Sub,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Neg, "Int64Neg", kMips64Dsub,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -161,33 +191,35 @@ const MachInst1 kAddSubOneInstructions[] = {
const IntCmp kCmpInstructions[] = {
- {{&RawMachineAssembler::WordEqual, "WordEqual", kMips64Cmp, kMachInt64},
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kMips64Cmp,
+ MachineType::Int64()},
1U},
{{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMips64Cmp,
- kMachInt64},
+ MachineType::Int64()},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp, kMachInt32},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp,
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kMips64Cmp, kMachInt32},
+ kMips64Cmp, MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kMips64Cmp, kMachInt32},
+ kMips64Cmp, MachineType::Int32()},
1U},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp,
- kMachUint32},
+ MachineType::Uint32()},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kMips64Cmp, kMachUint32},
+ kMips64Cmp, MachineType::Uint32()},
1U}};
@@ -205,28 +237,56 @@ const Conversion kConversionInstructions[] = {
// mips instructions:
// mtc1, cvt.d.w
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
- kMips64CvtDW, kMachFloat64},
- kMachInt32},
+ kMips64CvtDW, MachineType::Float64()},
+ MachineType::Int32()},
// mips instructions:
// cvt.d.uw
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
- kMips64CvtDUw, kMachFloat64},
- kMachInt32},
+ kMips64CvtDUw, MachineType::Float64()},
+ MachineType::Int32()},
// mips instructions:
// mfc1, trunc double to word, for more details look at mips macro
// asm and mips asm file
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
- kMips64TruncWD, kMachFloat64},
- kMachInt32},
+ kMips64TruncWD, MachineType::Float64()},
+ MachineType::Int32()},
// mips instructions:
// trunc double to unsigned word, for more details look at mips macro
// asm and mips asm file
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
- kMips64TruncUwD, kMachFloat64},
- kMachInt32}};
+ kMips64TruncUwD, MachineType::Float64()},
+ MachineType::Int32()}};
+
+const Conversion kFloat64RoundInstructions[] = {
+ {{&RawMachineAssembler::Float64RoundUp, "Float64RoundUp", kMips64CeilWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundDown, "Float64RoundDown",
+ kMips64FloorWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTiesEven, "Float64RoundTiesEven",
+ kMips64RoundWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTruncate, "Float64RoundTruncate",
+ kMips64TruncWD, MachineType::Int32()},
+ MachineType::Float64()}};
+
+const Conversion kFloat32RoundInstructions[] = {
+ {{&RawMachineAssembler::Float32RoundUp, "Float32RoundUp", kMips64CeilWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundDown, "Float32RoundDown",
+ kMips64FloorWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTiesEven, "Float32RoundTiesEven",
+ kMips64RoundWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTruncate, "Float32RoundTruncate",
+ kMips64TruncWS, MachineType::Int32()},
+ MachineType::Float32()}};
} // namespace
@@ -235,7 +295,8 @@ typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -279,7 +340,8 @@ typedef InstructionSelectorTestWithParam<MachInst2>
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
- TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
@@ -295,6 +357,140 @@ TEST_P(InstructionSelectorShiftTest, Immediate) {
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
::testing::ValuesIn(kShiftInstructions));
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
+ m.Int64Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
+ m.Int64Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Int32Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int64_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Parameter(0), m.Int64Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int64_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Int64Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
@@ -317,6 +513,215 @@ TEST_P(InstructionSelectorLogicalTest, Parameter) {
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
::testing::ValuesIn(kLogicalInstructions));
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithWord64Or) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Word64Or(m.Parameter(0), m.Parameter(0)),
+ m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1),
+ m.Word64Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)),
+ m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1),
+ m.Word32Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int64_t, shift, -64, 127) {
+ int64_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int64_t, width, 1, 63) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(shift)),
+ m.Int64Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ int64_t actual_width = (lsb + width > 64) ? (64 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int64_t, shift, -64, 127) {
+ int64_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int64_t, width, 1, 63) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(
+ m.Word64And(m.Int64Constant(msk),
+ m.Word64Shr(m.Parameter(0), m.Int64Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ int64_t actual_width = (lsb + width > 64) ? (64 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Shl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithWord64And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word64Shl(m.Word64And(p0, m.Int64Constant((1L << (63 - shift)) - 1)),
+ m.Int64Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dshl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
@@ -446,7 +851,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.TruncateInt64ToInt32(
m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
Stream s = m.Build();
@@ -457,7 +862,7 @@ TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word64Shl(m.ChangeInt32ToInt64(m.Parameter(0)), m.Int32Constant(32)));
Stream s = m.Build();
@@ -469,6 +874,121 @@ TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
}
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat64ToInt32WithRoundFloat64;
+
+TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32((m.*conv.mi.constructor)(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat32ToInt32WithRoundFloat32;
+
+TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32(
+ m.ChangeFloat32ToFloat64((m.*conv.mi.constructor)(m.Parameter(0)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64TruncWS, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest,
+ TruncateFloat64ToFloat32OfChangeInt32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Int32());
+ m.Return(
+ m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64CvtSW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithMul) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mul(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64DMulHigh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithDivMod) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Div(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ddiv, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mod(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dmod, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Loads and stores.
// ----------------------------------------------------------------------------
@@ -483,14 +1003,14 @@ struct MemoryAccess {
};
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kMips64Lb, kMips64Sb},
- {kMachUint8, kMips64Lbu, kMips64Sb},
- {kMachInt16, kMips64Lh, kMips64Sh},
- {kMachUint16, kMips64Lhu, kMips64Sh},
- {kMachInt32, kMips64Lw, kMips64Sw},
- {kMachFloat32, kMips64Lwc1, kMips64Swc1},
- {kMachFloat64, kMips64Ldc1, kMips64Sdc1},
- {kMachInt64, kMips64Ld, kMips64Sd}};
+ {MachineType::Int8(), kMips64Lb, kMips64Sb},
+ {MachineType::Uint8(), kMips64Lbu, kMips64Sb},
+ {MachineType::Int16(), kMips64Lh, kMips64Sh},
+ {MachineType::Uint16(), kMips64Lhu, kMips64Sh},
+ {MachineType::Int32(), kMips64Lw, kMips64Sw},
+ {MachineType::Float32(), kMips64Lwc1, kMips64Swc1},
+ {MachineType::Float64(), kMips64Ldc1, kMips64Sdc1},
+ {MachineType::Int64(), kMips64Ld, kMips64Sd}};
struct MemoryAccessImm {
@@ -529,56 +1049,56 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
const MemoryAccessImm kMemoryAccessesImm[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kMips64Lh,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint16,
+ {MachineType::Uint16(),
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt32,
+ {MachineType::Int32(),
kMips64Lw,
kMips64Sw,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMips64Lwc1,
kMips64Swc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMips64Ldc1,
kMips64Sdc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt64,
+ {MachineType::Int64(),
kMips64Ld,
kMips64Sd,
&InstructionSelectorTest::Stream::IsInteger,
@@ -588,42 +1108,42 @@ const MemoryAccessImm kMemoryAccessesImm[] = {
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt8,
+ {MachineType::Int8(),
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMips64Lh,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt32,
+ {MachineType::Int32(),
kMips64Lw,
kMips64Sw,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMips64Lwc1,
kMips64Swc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMips64Ldc1,
kMips64Sdc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt64,
+ {MachineType::Int64(),
kMips64Ld,
kMips64Sd,
&InstructionSelectorTest::Stream::IsInteger,
@@ -637,7 +1157,8 @@ typedef InstructionSelectorTestWithParam<MemoryAccess>
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -648,8 +1169,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -673,7 +1196,7 @@ typedef InstructionSelectorTestWithParam<MemoryAccessImm>
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -696,9 +1219,10 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -728,7 +1252,7 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
LoadWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -745,9 +1269,10 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
StoreWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index), m.Parameter(1),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -772,7 +1297,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -784,7 +1309,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -800,7 +1325,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -812,7 +1337,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -827,7 +1352,7 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
TEST_F(InstructionSelectorTest, Word32Clz) {
- StreamBuilder m(this, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32Clz(p0);
m.Return(n);
@@ -842,7 +1367,7 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
TEST_F(InstructionSelectorTest, Word64Clz) {
- StreamBuilder m(this, kMachUint64, kMachUint64);
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Clz(p0);
m.Return(n);
@@ -857,7 +1382,7 @@ TEST_F(InstructionSelectorTest, Word64Clz) {
TEST_F(InstructionSelectorTest, Float32Abs) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -872,7 +1397,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
TEST_F(InstructionSelectorTest, Float64Abs) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -887,7 +1412,8 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Max(p0, p1);
@@ -903,7 +1429,8 @@ TEST_F(InstructionSelectorTest, Float32Max) {
TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float32Min(p0, p1);
@@ -919,7 +1446,8 @@ TEST_F(InstructionSelectorTest, Float32Min) {
TEST_F(InstructionSelectorTest, Float64Max) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Max(p0, p1);
@@ -935,7 +1463,8 @@ TEST_F(InstructionSelectorTest, Float64Max) {
TEST_F(InstructionSelectorTest, Float64Min) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Float64Min(p0, p1);
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index 66eb9abc4f..413c58b6fe 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -67,16 +67,16 @@ class MoveOptimizerTest : public InstructionSequenceTest {
case kConstant:
return ConstantOperand(op.value_);
case kFixedSlot:
- return AllocatedOperand(LocationOperand::STACK_SLOT, kRepWord32,
- op.value_);
+ return AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord32, op.value_);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
- return AllocatedOperand(LocationOperand::REGISTER, kRepWord32,
- op.value_);
+ return AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, op.value_);
case kExplicit:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
- return ExplicitOperand(LocationOperand::REGISTER, kRepWord32,
- op.value_);
+ return ExplicitOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, op.value_);
default:
break;
}
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 34afc8822b..54168ee70b 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/handles-inl.h"
+#include "src/objects.h"
using testing::_;
using testing::MakeMatcher;
@@ -410,7 +411,7 @@ class IsConstantMatcher final : public NodeMatcher {
class IsSelectMatcher final : public NodeMatcher {
public:
- IsSelectMatcher(const Matcher<MachineType>& type_matcher,
+ IsSelectMatcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher)
@@ -422,7 +423,7 @@ class IsSelectMatcher final : public NodeMatcher {
void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose type (";
+ *os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
value0_matcher_.DescribeTo(os);
@@ -434,19 +435,20 @@ class IsSelectMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<SelectParameters>(node).type(),
- "type", type_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
- "value0", value0_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "value1", value1_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "value2", value2_matcher_, listener));
+ return (
+ NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(SelectParametersOf(node->op()).representation(),
+ "representation", type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "value0",
+ value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "value1",
+ value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), "value2",
+ value2_matcher_, listener));
}
private:
- const Matcher<MachineType> type_matcher_;
+ const Matcher<MachineRepresentation> type_matcher_;
const Matcher<Node*> value0_matcher_;
const Matcher<Node*> value1_matcher_;
const Matcher<Node*> value2_matcher_;
@@ -455,7 +457,7 @@ class IsSelectMatcher final : public NodeMatcher {
class IsPhiMatcher final : public NodeMatcher {
public:
- IsPhiMatcher(const Matcher<MachineType>& type_matcher,
+ IsPhiMatcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& control_matcher)
@@ -467,7 +469,7 @@ class IsPhiMatcher final : public NodeMatcher {
void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose type (";
+ *os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
value0_matcher_.DescribeTo(os);
@@ -480,8 +482,8 @@ class IsPhiMatcher final : public NodeMatcher {
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
- type_matcher_, listener) &&
+ PrintMatchAndExplain(PhiRepresentationOf(node->op()),
+ "representation", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value0", value0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
@@ -491,7 +493,7 @@ class IsPhiMatcher final : public NodeMatcher {
}
private:
- const Matcher<MachineType> type_matcher_;
+ const Matcher<MachineRepresentation> type_matcher_;
const Matcher<Node*> value0_matcher_;
const Matcher<Node*> value1_matcher_;
const Matcher<Node*> control_matcher_;
@@ -500,7 +502,7 @@ class IsPhiMatcher final : public NodeMatcher {
class IsPhi2Matcher final : public NodeMatcher {
public:
- IsPhi2Matcher(const Matcher<MachineType>& type_matcher,
+ IsPhi2Matcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -514,7 +516,7 @@ class IsPhi2Matcher final : public NodeMatcher {
void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose type (";
+ *os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
value0_matcher_.DescribeTo(os);
@@ -529,8 +531,8 @@ class IsPhi2Matcher final : public NodeMatcher {
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
- type_matcher_, listener) &&
+ PrintMatchAndExplain(PhiRepresentationOf(node->op()),
+ "representation", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value0", value0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
@@ -542,7 +544,7 @@ class IsPhi2Matcher final : public NodeMatcher {
}
private:
- const Matcher<MachineType> type_matcher_;
+ const Matcher<MachineRepresentation> type_matcher_;
const Matcher<Node*> value0_matcher_;
const Matcher<Node*> value1_matcher_;
const Matcher<Node*> value2_matcher_;
@@ -706,10 +708,18 @@ class IsCallMatcher final : public NodeMatcher {
return false;
}
}
- return (PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
+ return (PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
}
private:
@@ -764,10 +774,18 @@ class IsTailCallMatcher final : public NodeMatcher {
return false;
}
}
- return (PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
+ return (PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
}
private:
@@ -1194,6 +1212,14 @@ class IsLoadMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
rep_matcher_, listener) &&
@@ -1201,10 +1227,10 @@ class IsLoadMatcher final : public NodeMatcher {
base_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
"index", index_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
}
private:
@@ -1250,6 +1276,14 @@ class IsStoreMatcher final : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
rep_matcher_, listener) &&
@@ -1259,10 +1293,10 @@ class IsStoreMatcher final : public NodeMatcher {
"index", index_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
"value", value_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
}
private:
@@ -1404,6 +1438,7 @@ class IsUnopMatcher final : public NodeMatcher {
const Matcher<Node*> input_matcher_;
};
+
class IsParameterMatcher final : public NodeMatcher {
public:
explicit IsParameterMatcher(const Matcher<int>& index_matcher)
@@ -1592,7 +1627,7 @@ Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
}
-Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsSelect(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher) {
@@ -1601,7 +1636,7 @@ Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
}
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& merge_matcher) {
@@ -1610,7 +1645,7 @@ Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
}
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -2022,7 +2057,6 @@ IS_BINOP_MATCHER(Word64And)
IS_BINOP_MATCHER(Word64Or)
IS_BINOP_MATCHER(Word64Sar)
IS_BINOP_MATCHER(Word64Shl)
-IS_BINOP_MATCHER(Word64Shr)
IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
IS_BINOP_MATCHER(Int32Add)
@@ -2034,7 +2068,6 @@ IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Sub)
-IS_BINOP_MATCHER(Int64Mul)
IS_BINOP_MATCHER(JSAdd)
IS_BINOP_MATCHER(Float32Max)
IS_BINOP_MATCHER(Float32Min)
@@ -2063,7 +2096,6 @@ IS_UNOP_MATCHER(ChangeUint32ToUint64)
IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
-IS_UNOP_MATCHER(RoundInt64ToFloat64)
IS_UNOP_MATCHER(Float32Abs)
IS_UNOP_MATCHER(Float64Abs)
IS_UNOP_MATCHER(Float64Sqrt)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index ffdad5812a..8592f30566 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -6,7 +6,7 @@
#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
#include "src/compiler/machine-operator.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace v8 {
@@ -19,6 +19,7 @@ class Handle;
class HeapObject;
template <class>
class TypeImpl;
+enum TypeofMode : int;
struct ZoneTypeConfig;
typedef TypeImpl<ZoneTypeConfig> Type;
@@ -79,15 +80,15 @@ Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
-Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsSelect(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher);
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& merge_matcher);
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -101,6 +102,10 @@ Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher);
Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
@@ -275,8 +280,6 @@ Matcher<Node*> IsWord64Or(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsWord64Shr(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
@@ -301,8 +304,6 @@ Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
@@ -314,7 +315,6 @@ Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsRoundInt64ToFloat64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index af43b2efff..523c8ce9d4 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -13,6 +13,7 @@
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
#include "src/compiler/verifier.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/test-utils.h"
@@ -32,7 +33,8 @@ class SchedulerTest : public TestWithIsolateAndZone {
Schedule* ComputeAndVerifySchedule(size_t expected) {
if (FLAG_trace_turbo) {
OFStream os(stdout);
- os << AsDOT(*graph());
+ SourcePositionTable table(graph());
+ os << AsJSON(*graph(), &table);
}
Schedule* schedule =
@@ -658,7 +660,8 @@ Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common, Node* cond) {
Node* t = graph->NewNode(common->IfTrue(), br);
Node* f = graph->NewNode(common->IfFalse(), br);
Node* m = graph->NewNode(common->Merge(2), t, f);
- Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), tv, fv, m);
+ Node* phi =
+ graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), tv, fv, m);
return phi;
}
@@ -737,12 +740,13 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamonds) {
Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
Node* ttrue = graph()->NewNode(common()->Int32Constant(1));
Node* ffalse = graph()->NewNode(common()->Int32Constant(0));
- Node* phi1 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), ttrue, ffalse, m1);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), ttrue, ffalse, m1);
Node* m = graph()->NewNode(common()->Merge(2), t, f);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fv, phi1, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ fv, phi1, m);
Node* ephi1 = graph()->NewNode(common()->EffectPhi(2), start, map, m);
Node* ret = graph()->NewNode(common()->Return(), phi, ephi1, start);
@@ -766,27 +770,29 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithChain) {
Node* tA1 = graph()->NewNode(common()->IfTrue(), brA1);
Node* fA1 = graph()->NewNode(common()->IfFalse(), brA1);
Node* mA1 = graph()->NewNode(common()->Merge(2), tA1, fA1);
- Node* phiA1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, mA1);
+ Node* phiA1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p1, mA1);
Node* brB1 = graph()->NewNode(common()->Branch(), p1, graph()->start());
Node* tB1 = graph()->NewNode(common()->IfTrue(), brB1);
Node* fB1 = graph()->NewNode(common()->IfFalse(), brB1);
Node* mB1 = graph()->NewNode(common()->Merge(2), tB1, fB1);
- Node* phiB1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, mB1);
+ Node* phiB1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p1, mB1);
Node* brA2 = graph()->NewNode(common()->Branch(), phiB1, mA1);
Node* tA2 = graph()->NewNode(common()->IfTrue(), brA2);
Node* fA2 = graph()->NewNode(common()->IfFalse(), brA2);
Node* mA2 = graph()->NewNode(common()->Merge(2), tA2, fA2);
- Node* phiA2 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), phiB1, c, mA2);
+ Node* phiA2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), phiB1, c, mA2);
Node* brB2 = graph()->NewNode(common()->Branch(), phiA1, mB1);
Node* tB2 = graph()->NewNode(common()->IfTrue(), brB2);
Node* fB2 = graph()->NewNode(common()->IfFalse(), brB2);
Node* mB2 = graph()->NewNode(common()->Merge(2), tB2, fB2);
- Node* phiB2 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), phiA1, c, mB2);
+ Node* phiB2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), phiA1, c, mB2);
Node* add = graph()->NewNode(&kIntAdd, phiA2, phiB2);
Node* ret = graph()->NewNode(common()->Return(), add, start, start);
@@ -810,7 +816,8 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithLoop) {
Node* f = graph()->NewNode(common()->IfFalse(), br);
Node* loop = graph()->NewNode(common()->Loop(2), f, start);
- Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
Node* add = graph()->NewNode(&kIntAdd, ind, fv);
Node* br1 = graph()->NewNode(common()->Branch(), add, loop);
@@ -821,7 +828,8 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithLoop) {
ind->ReplaceInput(1, ind); // close induction variable.
Node* m = graph()->NewNode(common()->Merge(2), t, f1);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), fv, ind, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ fv, ind, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
@@ -840,7 +848,8 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond1) {
Node* c = graph()->NewNode(common()->Int32Constant(7));
Node* loop = graph()->NewNode(common()->Loop(2), start, start);
- Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
Node* add = graph()->NewNode(&kIntAdd, ind, c);
Node* br = graph()->NewNode(common()->Branch(), add, loop);
@@ -851,7 +860,8 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond1) {
Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
- Node* phi1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), add, p0, m1);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), add, p0, m1);
loop->ReplaceInput(1, t); // close loop.
ind->ReplaceInput(1, phi1); // close induction variable.
@@ -873,13 +883,15 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond2) {
Node* c = graph()->NewNode(common()->Int32Constant(7));
Node* loop = graph()->NewNode(common()->Loop(2), start, start);
- Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
- Node* phi1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), c, ind, m1);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), c, ind, m1);
Node* add = graph()->NewNode(&kIntAdd, ind, phi1);
@@ -907,14 +919,16 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond3) {
Node* c = graph()->NewNode(common()->Int32Constant(7));
Node* loop = graph()->NewNode(common()->Loop(2), start, start);
- Node* ind = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
Node* loop1 = graph()->NewNode(common()->Loop(2), t1, start);
- Node* ind1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p0, loop);
+ Node* ind1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p0, loop);
Node* add1 = graph()->NewNode(&kIntAdd, ind1, c);
Node* br2 = graph()->NewNode(common()->Branch(), add1, loop1);
@@ -925,7 +939,8 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond3) {
ind1->ReplaceInput(1, ind1); // close inner induction variable.
Node* m1 = graph()->NewNode(common()->Merge(2), f1, f2);
- Node* phi1 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), c, ind1, m1);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), c, ind1, m1);
Node* add = graph()->NewNode(&kIntAdd, ind, phi1);
@@ -960,15 +975,17 @@ TARGET_TEST_F(SchedulerTest, PhisPushedDownToDifferentBranches) {
Node* t = graph()->NewNode(common()->IfTrue(), br);
Node* f = graph()->NewNode(common()->IfFalse(), br);
Node* m = graph()->NewNode(common()->Merge(2), t, f);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), v1, v2, m);
- Node* phi2 = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), v3, v4, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ v1, v2, m);
+ Node* phi2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), v3, v4, m);
Node* br2 = graph()->NewNode(common()->Branch(), p1, graph()->start());
Node* t2 = graph()->NewNode(common()->IfTrue(), br2);
Node* f2 = graph()->NewNode(common()->IfFalse(), br2);
Node* m2 = graph()->NewNode(common()->Merge(2), t2, f2);
- Node* phi3 =
- graph()->NewNode(common()->Phi(kMachAnyTagged, 2), phi, phi2, m2);
+ Node* phi3 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), phi, phi2, m2);
Node* ret = graph()->NewNode(common()->Return(), phi3, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
@@ -990,7 +1007,8 @@ TARGET_TEST_F(SchedulerTest, BranchHintTrue) {
Node* t = graph()->NewNode(common()->IfTrue(), br);
Node* f = graph()->NewNode(common()->IfFalse(), br);
Node* m = graph()->NewNode(common()->Merge(2), t, f);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), tv, fv, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ tv, fv, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
@@ -1014,7 +1032,8 @@ TARGET_TEST_F(SchedulerTest, BranchHintFalse) {
Node* t = graph()->NewNode(common()->IfTrue(), br);
Node* f = graph()->NewNode(common()->IfFalse(), br);
Node* m = graph()->NewNode(common()->Merge(2), t, f);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), tv, fv, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ tv, fv, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
@@ -1042,7 +1061,8 @@ TARGET_TEST_F(SchedulerTest, CallException) {
common()->IfException(IfExceptionHint::kLocallyUncaught), c2, c2);
Node* hdl = graph()->NewNode(common()->Merge(2), ex1, ex2);
Node* m = graph()->NewNode(common()->Merge(2), ok2, hdl);
- Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), c2, p0, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ c2, p0, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
Node* end = graph()->NewNode(common()->End(1), ret);
@@ -1084,7 +1104,8 @@ TARGET_TEST_F(SchedulerTest, Switch) {
Node* d = graph()->NewNode(common()->IfDefault(), sw);
Node* vd = graph()->NewNode(common()->Int32Constant(33));
Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
- Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 3), v0, v1, vd, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 3),
+ v0, v1, vd, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
Node* end = graph()->NewNode(common()->End(1), ret);
@@ -1107,7 +1128,8 @@ TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
Node* d = graph()->NewNode(common()->IfDefault(), sw);
Node* vd = graph()->NewNode(common()->Int32Constant(33));
Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
- Node* phi = graph()->NewNode(common()->Phi(kMachInt32, 3), v0, v1, vd, m);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 3),
+ v0, v1, vd, m);
Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
diff --git a/deps/v8/test/unittests/compiler/select-lowering-unittest.cc b/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
index 51efc83f87..43cfd8484a 100644
--- a/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/select-lowering-unittest.cc
@@ -34,7 +34,8 @@ TEST_F(SelectLoweringTest, SelectWithSameConditions) {
Node* const p2 = Parameter(2);
Node* const p3 = Parameter(3);
Node* const p4 = Parameter(4);
- Node* const s0 = graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2);
+ Node* const s0 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32), p0, p1, p2);
Capture<Node*> branch;
Capture<Node*> merge;
@@ -44,26 +45,27 @@ TEST_F(SelectLoweringTest, SelectWithSameConditions) {
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachInt32, p1, p2,
+ MachineRepresentation::kWord32, p1, p2,
AllOf(CaptureEq(&merge),
IsMerge(IsIfTrue(CaptureEq(&branch)),
IsIfFalse(AllOf(CaptureEq(&branch),
IsBranch(p0, graph()->start())))))));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p3, p4));
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32), p0, p3, p4));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsPhi(kMachInt32, p3, p4, CaptureEq(&merge)));
+ EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kWord32, p3, p4,
+ CaptureEq(&merge)));
}
{
// We must not reuse the diamond if it is reachable from either else/then
// values of the Select, because the resulting graph can not be scheduled.
- Reduction const r =
- Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, s0, p0));
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32), p0, s0, p0));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsPhi(kMachInt32, s0, p0, Not(CaptureEq(&merge))));
+ EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kWord32, s0, p0,
+ Not(CaptureEq(&merge))));
}
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index b09840710a..871189ad79 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -200,37 +200,40 @@ INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
namespace {
const ElementAccess kElementAccesses[] = {
- {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
- {kUntaggedBase, 0, Type::Any(), kMachInt8},
- {kUntaggedBase, 0, Type::Any(), kMachInt16},
- {kUntaggedBase, 0, Type::Any(), kMachInt32},
- {kUntaggedBase, 0, Type::Any(), kMachUint8},
- {kUntaggedBase, 0, Type::Any(), kMachUint16},
- {kUntaggedBase, 0, Type::Any(), kMachUint32},
- {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
- {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
- {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
- {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
- {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
- {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
- {kUntaggedBase, 0, Type::Number(), kRepFloat32},
- {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+ {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int8()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int16()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int32()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint8()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint16()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint32()},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int8()},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint8()},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int16()},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint16()},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int32()},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint32()},
+ {kUntaggedBase, 0, Type::Number(),
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
+ {kUntaggedBase, 0, Type::Number(),
+ MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone)},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- kMachInt8},
+ MachineType::Int8()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- kMachUint8},
+ MachineType::Uint8()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- kMachInt16},
+ MachineType::Int16()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- kMachUint16},
+ MachineType::Uint16()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- kMachInt32},
+ MachineType::Int32()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- kMachUint32},
+ MachineType::Uint32()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- kRepFloat32},
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- kRepFloat64}};
+ MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone)}};
} // namespace
diff --git a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
index 7257cc9802..3441c68b96 100644
--- a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
@@ -26,11 +26,12 @@ class TailCallOptimizationTest : public GraphTest {
TEST_F(TailCallOptimizationTest, CallCodeObject0) {
- MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
@@ -47,11 +48,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject0) {
TEST_F(TailCallOptimizationTest, CallCodeObject1) {
- MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
@@ -72,11 +74,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
TEST_F(TailCallOptimizationTest, CallCodeObject2) {
- MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallCodeObject, kMachAnyTagged,
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
@@ -95,11 +98,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject2) {
TEST_F(TailCallOptimizationTest, CallJSFunction0) {
- MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallJSFunction, kMachAnyTagged,
+ CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
@@ -116,11 +120,12 @@ TEST_F(TailCallOptimizationTest, CallJSFunction0) {
TEST_F(TailCallOptimizationTest, CallJSFunction1) {
- MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallJSFunction, kMachAnyTagged,
+ CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
@@ -141,11 +146,12 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
TEST_F(TailCallOptimizationTest, CallJSFunction2) {
- MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
- CallDescriptor::kCallJSFunction, kMachAnyTagged,
+ CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 4a462cef10..6e4d4d589f 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -51,6 +51,7 @@ class TyperTest : public TypedGraphTest {
Types<Type, Type*, Zone> types_;
JSOperatorBuilder javascript_;
+ BinaryOperationHints const hints_ = BinaryOperationHints::Any();
Node* context_node_;
v8::base::RandomNumberGenerator* rng_;
std::vector<double> integers;
@@ -239,68 +240,78 @@ int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
TEST_F(TyperTest, TypeJSAdd) {
- TestBinaryArithOp(javascript_.Add(LanguageMode::SLOPPY), std::plus<double>());
- TestBinaryArithOp(javascript_.Add(LanguageMode::STRONG), std::plus<double>());
+ TestBinaryArithOp(javascript_.Add(LanguageMode::SLOPPY, hints_),
+ std::plus<double>());
+ TestBinaryArithOp(javascript_.Add(LanguageMode::STRONG, hints_),
+ std::plus<double>());
}
TEST_F(TyperTest, TypeJSSubtract) {
- TestBinaryArithOp(javascript_.Subtract(LanguageMode::SLOPPY),
+ TestBinaryArithOp(javascript_.Subtract(LanguageMode::SLOPPY, hints_),
std::minus<double>());
- TestBinaryArithOp(javascript_.Subtract(LanguageMode::STRONG),
+ TestBinaryArithOp(javascript_.Subtract(LanguageMode::STRONG, hints_),
std::minus<double>());
}
TEST_F(TyperTest, TypeJSMultiply) {
- TestBinaryArithOp(javascript_.Multiply(LanguageMode::SLOPPY),
+ TestBinaryArithOp(javascript_.Multiply(LanguageMode::SLOPPY, hints_),
std::multiplies<double>());
- TestBinaryArithOp(javascript_.Multiply(LanguageMode::STRONG),
+ TestBinaryArithOp(javascript_.Multiply(LanguageMode::STRONG, hints_),
std::multiplies<double>());
}
TEST_F(TyperTest, TypeJSDivide) {
- TestBinaryArithOp(javascript_.Divide(LanguageMode::SLOPPY),
+ TestBinaryArithOp(javascript_.Divide(LanguageMode::SLOPPY, hints_),
std::divides<double>());
- TestBinaryArithOp(javascript_.Divide(LanguageMode::STRONG),
+ TestBinaryArithOp(javascript_.Divide(LanguageMode::STRONG, hints_),
std::divides<double>());
}
TEST_F(TyperTest, TypeJSModulus) {
- TestBinaryArithOp(javascript_.Modulus(LanguageMode::SLOPPY), modulo);
- TestBinaryArithOp(javascript_.Modulus(LanguageMode::STRONG), modulo);
+ TestBinaryArithOp(javascript_.Modulus(LanguageMode::SLOPPY, hints_), modulo);
+ TestBinaryArithOp(javascript_.Modulus(LanguageMode::STRONG, hints_), modulo);
}
TEST_F(TyperTest, TypeJSBitwiseOr) {
- TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::SLOPPY), bit_or);
- TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::STRONG), bit_or);
+ TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::SLOPPY, hints_), bit_or);
+ TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::STRONG, hints_), bit_or);
}
TEST_F(TyperTest, TypeJSBitwiseAnd) {
- TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::SLOPPY), bit_and);
- TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::STRONG), bit_and);
+ TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::SLOPPY, hints_),
+ bit_and);
+ TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::STRONG, hints_),
+ bit_and);
}
TEST_F(TyperTest, TypeJSBitwiseXor) {
- TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::SLOPPY), bit_xor);
- TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::STRONG), bit_xor);
+ TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::SLOPPY, hints_),
+ bit_xor);
+ TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::STRONG, hints_),
+ bit_xor);
}
TEST_F(TyperTest, TypeJSShiftLeft) {
- TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::SLOPPY), shift_left);
- TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::STRONG), shift_left);
+ TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::SLOPPY, hints_),
+ shift_left);
+ TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::STRONG, hints_),
+ shift_left);
}
TEST_F(TyperTest, TypeJSShiftRight) {
- TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::SLOPPY), shift_right);
- TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::STRONG), shift_right);
+ TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::SLOPPY, hints_),
+ shift_right);
+ TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::STRONG, hints_),
+ shift_right);
}
@@ -362,47 +373,48 @@ TEST_F(TyperTest, TypeJSStrictNotEqual) {
// Monotonicity
-// List should be in sync with JS_SIMPLE_BINOP_LIST.
-#define JSBINOP_LIST(V) \
- V(Equal) \
- V(NotEqual) \
- V(StrictEqual) \
- V(StrictNotEqual)
-
-
-#define JSBINOP_WITH_STRONG_LIST(V) \
- V(LessThan) \
- V(GreaterThan) \
- V(LessThanOrEqual) \
- V(GreaterThanOrEqual) \
- V(BitwiseOr) \
- V(BitwiseXor) \
- V(BitwiseAnd) \
- V(ShiftLeft) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(Add) \
- V(Subtract) \
- V(Multiply) \
- V(Divide) \
- V(Modulus)
-
-
-#define TEST_FUNC(name) \
+#define TEST_BINARY_MONOTONICITY(name) \
TEST_F(TyperTest, Monotonicity_##name) { \
TestBinaryMonotonicity(javascript_.name()); \
}
-JSBINOP_LIST(TEST_FUNC)
-#undef TEST_FUNC
+TEST_BINARY_MONOTONICITY(Equal)
+TEST_BINARY_MONOTONICITY(NotEqual)
+TEST_BINARY_MONOTONICITY(StrictEqual)
+TEST_BINARY_MONOTONICITY(StrictNotEqual)
+#undef TEST_BINARY_MONOTONICITY
-#define TEST_FUNC(name) \
+#define TEST_BINARY_MONOTONICITY(name) \
TEST_F(TyperTest, Monotonicity_##name) { \
TestBinaryMonotonicity(javascript_.name(LanguageMode::SLOPPY)); \
TestBinaryMonotonicity(javascript_.name(LanguageMode::STRONG)); \
}
-JSBINOP_WITH_STRONG_LIST(TEST_FUNC)
-#undef TEST_FUNC
+TEST_BINARY_MONOTONICITY(LessThan)
+TEST_BINARY_MONOTONICITY(GreaterThan)
+TEST_BINARY_MONOTONICITY(LessThanOrEqual)
+TEST_BINARY_MONOTONICITY(GreaterThanOrEqual)
+#undef TEST_BINARY_MONOTONICITY
+
+
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity( \
+ javascript_.name(LanguageMode::SLOPPY, BinaryOperationHints::Any())); \
+ TestBinaryMonotonicity( \
+ javascript_.name(LanguageMode::STRONG, BinaryOperationHints::Any())); \
+ }
+TEST_BINARY_MONOTONICITY(BitwiseOr)
+TEST_BINARY_MONOTONICITY(BitwiseXor)
+TEST_BINARY_MONOTONICITY(BitwiseAnd)
+TEST_BINARY_MONOTONICITY(ShiftLeft)
+TEST_BINARY_MONOTONICITY(ShiftRight)
+TEST_BINARY_MONOTONICITY(ShiftRightLogical)
+TEST_BINARY_MONOTONICITY(Add)
+TEST_BINARY_MONOTONICITY(Subtract)
+TEST_BINARY_MONOTONICITY(Multiply)
+TEST_BINARY_MONOTONICITY(Divide)
+TEST_BINARY_MONOTONICITY(Modulus)
+#undef TEST_BINARY_MONOTONICITY
//------------------------------------------------------------------------------
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index f28087c54c..d6ed73266c 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -15,7 +15,7 @@ namespace compiler {
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -26,7 +26,7 @@ TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
- StreamBuilder m(this, kMachInt64, kMachInt32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -35,7 +35,7 @@ TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachUint32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -44,7 +44,7 @@ TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
- StreamBuilder m(this, kMachUint64, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -53,7 +53,7 @@ TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -64,7 +64,7 @@ TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -91,16 +91,16 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kX64Movsxbl, kX64Movb},
- {kMachUint8, kX64Movzxbl, kX64Movb},
- {kMachInt16, kX64Movsxwl, kX64Movw},
- {kMachUint16, kX64Movzxwl, kX64Movw},
- {kMachInt32, kX64Movl, kX64Movl},
- {kMachUint32, kX64Movl, kX64Movl},
- {kMachInt64, kX64Movq, kX64Movq},
- {kMachUint64, kX64Movq, kX64Movq},
- {kMachFloat32, kX64Movss, kX64Movss},
- {kMachFloat64, kX64Movsd, kX64Movsd}};
+ {MachineType::Int8(), kX64Movsxbl, kX64Movb},
+ {MachineType::Uint8(), kX64Movzxbl, kX64Movb},
+ {MachineType::Int16(), kX64Movsxwl, kX64Movw},
+ {MachineType::Uint16(), kX64Movzxwl, kX64Movw},
+ {MachineType::Int32(), kX64Movl, kX64Movl},
+ {MachineType::Uint32(), kX64Movl, kX64Movl},
+ {MachineType::Int64(), kX64Movq, kX64Movq},
+ {MachineType::Uint64(), kX64Movq, kX64Movq},
+ {MachineType::Float32(), kX64Movss, kX64Movss},
+ {MachineType::Float64(), kX64Movsd, kX64Movsd}};
} // namespace
@@ -111,7 +111,8 @@ typedef InstructionSelectorTestWithParam<MemoryAccess>
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -123,9 +124,10 @@ TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2),
- kNoWriteBarrier);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -191,7 +193,8 @@ typedef InstructionSelectorTestWithParam<BinaryOperation>
TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
const BinaryOperation& bop = GetParam();
- StreamBuilder m(this, kMachUint64, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
@@ -210,7 +213,7 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
m.Return(t);
@@ -227,7 +230,7 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
m.Return(t);
@@ -248,7 +251,8 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const a0 = m.Int32Add(p0, p1);
@@ -265,7 +269,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// If one of the add's operands is only used once, use an "leal", even though
@@ -287,7 +291,7 @@ TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(1);
// If there is only a single use of an add's input and the immediate constant
@@ -305,7 +309,7 @@ TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) {
TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// A second use of an add's input uses lea
@@ -322,7 +326,7 @@ TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// If one of the add's operands is only used once, use an "leal", even though
@@ -343,7 +347,7 @@ TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// A second use of an add's input uses lea
@@ -361,7 +365,8 @@ TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
// If one of the add's operands is only used once, use an "leal", even though
@@ -382,7 +387,8 @@ TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) {
TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
// If all of of the add's operands are used multiple times, use an "leal".
@@ -399,7 +405,8 @@ TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) {
TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -415,7 +422,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -431,7 +439,8 @@ TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
@@ -447,7 +456,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
@@ -463,7 +473,8 @@ TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
@@ -479,7 +490,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
@@ -495,7 +507,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
@@ -511,7 +524,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
@@ -527,7 +541,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -545,7 +560,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -563,7 +579,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -581,7 +598,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -599,7 +617,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -617,7 +636,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -635,7 +655,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
@@ -653,7 +674,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
@@ -671,7 +693,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
@@ -689,7 +712,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
@@ -707,7 +731,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
@@ -725,7 +750,7 @@ TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(-1);
// If there is only a single use of on of the sub's non-constant input, use a
@@ -742,7 +767,7 @@ TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) {
TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(-1);
// If there are multiple uses of on of the sub's non-constant input, use a
@@ -760,7 +785,8 @@ TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) {
TEST_F(InstructionSelectorTest, Int32AddScaled2Other) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -789,7 +815,8 @@ TEST_F(InstructionSelectorTest, Int32AddScaled2Other) {
TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const m0 = m.Int32Mul(p0, p1);
@@ -810,7 +837,8 @@ TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
TEST_F(InstructionSelectorTest, Int32MulHigh) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -830,7 +858,8 @@ TEST_F(InstructionSelectorTest, Int32MulHigh) {
TEST_F(InstructionSelectorTest, Uint32MulHigh) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Uint32MulHigh(p0, p1);
@@ -850,7 +879,8 @@ TEST_F(InstructionSelectorTest, Uint32MulHigh) {
TEST_F(InstructionSelectorTest, Int32Mul2BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(2);
Node* const n = m.Int32Mul(p0, c1);
@@ -866,7 +896,8 @@ TEST_F(InstructionSelectorTest, Int32Mul2BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Mul3BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(3);
Node* const n = m.Int32Mul(p0, c1);
@@ -882,7 +913,8 @@ TEST_F(InstructionSelectorTest, Int32Mul3BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Mul4BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(4);
Node* const n = m.Int32Mul(p0, c1);
@@ -897,7 +929,8 @@ TEST_F(InstructionSelectorTest, Int32Mul4BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Mul5BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(5);
Node* const n = m.Int32Mul(p0, c1);
@@ -913,7 +946,8 @@ TEST_F(InstructionSelectorTest, Int32Mul5BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Mul8BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(8);
Node* const n = m.Int32Mul(p0, c1);
@@ -928,7 +962,8 @@ TEST_F(InstructionSelectorTest, Int32Mul8BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Mul9BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(9);
Node* const n = m.Int32Mul(p0, c1);
@@ -948,7 +983,8 @@ TEST_F(InstructionSelectorTest, Int32Mul9BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Shl1BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(1);
Node* const n = m.Word32Shl(p0, c1);
@@ -964,7 +1000,8 @@ TEST_F(InstructionSelectorTest, Int32Shl1BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Shl2BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(2);
Node* const n = m.Word32Shl(p0, c1);
@@ -979,7 +1016,8 @@ TEST_F(InstructionSelectorTest, Int32Shl2BecomesLea) {
TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(3);
Node* const n = m.Word32Shl(p0, c1);
@@ -999,7 +1037,7 @@ TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
TEST_F(InstructionSelectorTest, Float32Abs) {
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -1014,7 +1052,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Abs(p0);
m.Return(n);
@@ -1032,7 +1070,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
TEST_F(InstructionSelectorTest, Float64Abs) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -1047,7 +1085,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Abs(p0);
m.Return(n);
@@ -1065,7 +1103,8 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
@@ -1079,7 +1118,8 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
@@ -1097,7 +1137,7 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
@@ -1111,7 +1151,7 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
m.Return(n);
@@ -1129,7 +1169,7 @@ TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -1143,7 +1183,7 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -1164,9 +1204,9 @@ TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
TEST_F(InstructionSelectorTest, Uint64LessThanWithLoadAndLoadStackPointer) {
- StreamBuilder m(this, kMachBool);
+ StreamBuilder m(this, MachineType::Bool());
Node* const sl = m.Load(
- kMachPtr,
+ MachineType::Pointer(),
m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
Node* const sp = m.LoadStackPointer();
Node* const n = m.Uint64LessThan(sl, sp);
@@ -1184,7 +1224,7 @@ TEST_F(InstructionSelectorTest, Uint64LessThanWithLoadAndLoadStackPointer) {
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachInt32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1203,7 +1243,7 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachUint32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1222,7 +1262,7 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32And(p0, m.Int32Constant(0xff));
m.Return(n);
@@ -1235,7 +1275,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32And(m.Int32Constant(0xff), p0);
m.Return(n);
@@ -1252,7 +1292,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32And(p0, m.Int32Constant(0xffff));
m.Return(n);
@@ -1265,7 +1305,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32And(m.Int32Constant(0xffff), p0);
m.Return(n);
@@ -1281,7 +1321,7 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
TEST_F(InstructionSelectorTest, Word32Clz) {
- StreamBuilder m(this, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word32Clz(p0);
m.Return(n);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 30202c9d72..2140aa83c7 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-register-allocator.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -22,12 +23,12 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_locals_count(1);
+ builder.set_locals_count(200);
builder.set_context_count(1);
builder.set_parameter_count(0);
- CHECK_EQ(builder.locals_count(), 1);
+ CHECK_EQ(builder.locals_count(), 200);
CHECK_EQ(builder.context_count(), 1);
- CHECK_EQ(builder.fixed_register_count(), 2);
+ CHECK_EQ(builder.fixed_register_count(), 201);
// Emit constant loads.
builder.LoadLiteral(Smi::FromInt(0))
@@ -39,68 +40,77 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.LoadTrue()
.LoadFalse();
- // Emit accumulator transfers.
+ // Emit accumulator transfers. Stores followed by loads to the same register
+ // are not generated. Hence, a dummy instruction in between.
Register reg(0);
- builder.LoadAccumulatorWithRegister(reg).StoreAccumulatorInRegister(reg);
+ builder.LoadAccumulatorWithRegister(reg)
+ .LoadNull()
+ .StoreAccumulatorInRegister(reg);
- // Emit global load / store operations.
- builder.LoadGlobal(0, 1, LanguageMode::SLOPPY, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(0, 1, LanguageMode::STRICT, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(0, 1, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(0, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
- .StoreGlobal(0, 1, LanguageMode::SLOPPY)
- .StoreGlobal(0, 1, LanguageMode::STRICT);
+ // Emit register-register transfer.
+ Register other(1);
+ builder.MoveRegister(reg, other);
- // Emit wide global load / store operations.
- builder.LoadGlobal(0, 1024, LanguageMode::SLOPPY,
+ // Emit register-register exchanges.
+ Register wide(150);
+ builder.ExchangeRegisters(reg, wide);
+ builder.ExchangeRegisters(wide, reg);
+ Register wider(151);
+ builder.ExchangeRegisters(wide, wider);
+
+ // Emit global load / store operations.
+ Factory* factory = isolate()->factory();
+ Handle<String> name = factory->NewStringFromStaticChars("var_name");
+ builder.LoadGlobal(name, 1, LanguageMode::SLOPPY,
TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(1024, 1, LanguageMode::STRICT, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(0, 1024, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(1024, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
- .StoreGlobal(0, 1024, LanguageMode::SLOPPY)
- .StoreGlobal(1024, 1, LanguageMode::STRICT);
+ .LoadGlobal(name, 1, LanguageMode::STRICT, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(name, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
+ .StoreGlobal(name, 1, LanguageMode::SLOPPY)
+ .StoreGlobal(name, 1, LanguageMode::STRICT);
// Emit context operations.
- builder.PushContext(reg);
- builder.PopContext(reg);
- builder.LoadContextSlot(reg, 1);
- builder.StoreContextSlot(reg, 1);
+ builder.PushContext(reg)
+ .PopContext(reg)
+ .LoadContextSlot(reg, 1)
+ .StoreContextSlot(reg, 1);
// Emit load / store property operations.
- builder.LoadNamedProperty(reg, 0, 0, LanguageMode::SLOPPY)
+ builder.LoadNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
.LoadKeyedProperty(reg, 0, LanguageMode::SLOPPY)
- .StoreNamedProperty(reg, 0, 0, LanguageMode::SLOPPY)
+ .StoreNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
.StoreKeyedProperty(reg, reg, 0, LanguageMode::SLOPPY)
- .LoadNamedProperty(reg, 0, 0, LanguageMode::STRICT)
+ .LoadNamedProperty(reg, name, 0, LanguageMode::STRICT)
.LoadKeyedProperty(reg, 0, LanguageMode::STRICT)
- .StoreNamedProperty(reg, 0, 0, LanguageMode::STRICT)
+ .StoreNamedProperty(reg, name, 0, LanguageMode::STRICT)
.StoreKeyedProperty(reg, reg, 0, LanguageMode::STRICT);
- // Emit wide load / store property operations.
- builder.LoadNamedProperty(reg, 2056, 0, LanguageMode::SLOPPY)
- .LoadKeyedProperty(reg, 2056, LanguageMode::SLOPPY)
- .StoreNamedProperty(reg, 0, 2056, LanguageMode::SLOPPY)
- .StoreKeyedProperty(reg, reg, 2056, LanguageMode::SLOPPY)
- .LoadNamedProperty(reg, 2056, 0, LanguageMode::STRICT)
- .LoadKeyedProperty(reg, 2056, LanguageMode::STRICT)
- .StoreNamedProperty(reg, 0, 2056, LanguageMode::STRICT)
- .StoreKeyedProperty(reg, reg, 2056, LanguageMode::STRICT);
+ // Emit load / store lookup slots.
+ builder.LoadLookupSlot(name, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadLookupSlot(name, TypeofMode::INSIDE_TYPEOF)
+ .StoreLookupSlot(name, LanguageMode::SLOPPY)
+ .StoreLookupSlot(name, LanguageMode::STRICT);
// Emit closure operations.
- builder.CreateClosure(NOT_TENURED);
+ Handle<SharedFunctionInfo> shared_info = factory->NewSharedFunctionInfo(
+ factory->NewStringFromStaticChars("function_a"), MaybeHandle<Code>(),
+ false);
+ builder.CreateClosure(shared_info, NOT_TENURED);
// Emit argument creation operations.
builder.CreateArguments(CreateArgumentsType::kMappedArguments)
.CreateArguments(CreateArgumentsType::kUnmappedArguments);
- // Emit literal creation operations
- builder.CreateRegExpLiteral(0, reg)
- .CreateArrayLiteral(0, 0)
- .CreateObjectLiteral(0, 0);
+ // Emit literal creation operations.
+ builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("a"), 0, 0)
+ .CreateArrayLiteral(factory->NewFixedArray(1), 0, 0)
+ .CreateObjectLiteral(factory->NewFixedArray(1), 0, 0);
// Call operations.
- builder.Call(reg, reg, 0)
+ builder.Call(reg, reg, 0, 0)
+ .Call(reg, reg, 0, 1024)
.CallRuntime(Runtime::kIsArray, reg, 1)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlot, reg, 1, reg)
.CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg, 1);
// Emit binary operator invocations.
@@ -128,7 +138,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.LogicalNot().TypeOf();
// Emit delete
- builder.Delete(reg, LanguageMode::SLOPPY).Delete(reg, LanguageMode::STRICT);
+ builder.Delete(reg, LanguageMode::SLOPPY)
+ .Delete(reg, LanguageMode::STRICT)
+ .DeleteLookupSlot();
// Emit new.
builder.New(reg, reg, 0);
@@ -147,7 +159,6 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit cast operator invocations.
builder.CastAccumulatorToNumber()
- .CastAccumulatorToBoolean()
.CastAccumulatorToJSObject()
.CastAccumulatorToName();
@@ -198,14 +209,75 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.Throw()
.Bind(&after_throw);
- builder.ForInPrepare(reg).ForInDone(reg).ForInNext(reg, reg);
+ builder.ForInPrepare(reg, reg, reg)
+ .ForInDone(reg, reg)
+ .ForInNext(reg, reg, reg, reg)
+ .ForInStep(reg);
// Wide constant pool loads
for (int i = 0; i < 256; i++) {
// Emit junk in constant pool to force wide constant pool index.
- builder.GetConstantPoolEntry(handle(Smi::FromInt(i), isolate()));
+ builder.LoadLiteral(factory->NewNumber(2.5321 + i));
}
builder.LoadLiteral(Smi::FromInt(20000000));
+ Handle<String> wide_name = factory->NewStringFromStaticChars("var_wide_name");
+
+ // Emit wide global load / store operations.
+ builder.LoadGlobal(name, 1024, LanguageMode::SLOPPY,
+ TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(wide_name, 1, LanguageMode::STRICT,
+ TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1024, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(wide_name, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
+ .StoreGlobal(name, 1024, LanguageMode::SLOPPY)
+ .StoreGlobal(wide_name, 1, LanguageMode::STRICT);
+
+ // Emit wide load / store property operations.
+ builder.LoadNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
+ .LoadKeyedProperty(reg, 2056, LanguageMode::SLOPPY)
+ .StoreNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
+ .StoreKeyedProperty(reg, reg, 2056, LanguageMode::SLOPPY)
+ .LoadNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
+ .LoadKeyedProperty(reg, 2056, LanguageMode::STRICT)
+ .StoreNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
+ .StoreKeyedProperty(reg, reg, 2056, LanguageMode::STRICT);
+
+ // Emit wide context operations.
+ builder.LoadContextSlot(reg, 1024)
+ .StoreContextSlot(reg, 1024);
+
+ // Emit wide load / store lookup slots.
+ builder.LoadLookupSlot(wide_name, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadLookupSlot(wide_name, TypeofMode::INSIDE_TYPEOF)
+ .StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
+ .StoreLookupSlot(wide_name, LanguageMode::STRICT);
+
+ // CreateClosureWide
+ Handle<SharedFunctionInfo> shared_info2 = factory->NewSharedFunctionInfo(
+ factory->NewStringFromStaticChars("function_b"), MaybeHandle<Code>(),
+ false);
+ builder.CreateClosure(shared_info2, NOT_TENURED);
+
+ // Emit wide variant of literal creation operations.
+ builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("wide_literal"),
+ 0, 0)
+ .CreateArrayLiteral(factory->NewFixedArray(2), 0, 0)
+ .CreateObjectLiteral(factory->NewFixedArray(2), 0, 0);
+
+ // Longer jumps requiring ConstantWide operand
+ builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start);
+ // Perform an operation that returns boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+ // Perform an operation that returns a non-boolean operation to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
builder.Return();
@@ -246,7 +318,7 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
builder.set_locals_count(locals);
builder.set_context_count(contexts);
- TemporaryRegisterScope temporaries(&builder);
+ BytecodeRegisterAllocator temporaries(&builder);
for (int i = 0; i < temps; i++) {
builder.StoreAccumulatorInRegister(temporaries.NewRegister());
}
@@ -261,32 +333,6 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
}
-TEST_F(BytecodeArrayBuilderTest, TemporariesRecycled) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.Return();
-
- int first;
- {
- TemporaryRegisterScope temporaries(&builder);
- first = temporaries.NewRegister().index();
- temporaries.NewRegister();
- temporaries.NewRegister();
- temporaries.NewRegister();
- }
-
- int second;
- {
- TemporaryRegisterScope temporaries(&builder);
- second = temporaries.NewRegister().index();
- }
-
- CHECK_EQ(first, second);
-}
-
-
TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
int index = 1;
uint8_t operand = static_cast<uint8_t>(-index);
@@ -320,15 +366,15 @@ TEST_F(BytecodeArrayBuilderTest, RegisterType) {
builder.set_locals_count(3);
builder.set_context_count(0);
- TemporaryRegisterScope temporary_register_scope(&builder);
- Register temp0 = temporary_register_scope.NewRegister();
+ BytecodeRegisterAllocator register_allocator(&builder);
+ Register temp0 = register_allocator.NewRegister();
Register param0(builder.Parameter(0));
Register param9(builder.Parameter(9));
- Register temp1 = temporary_register_scope.NewRegister();
+ Register temp1 = register_allocator.NewRegister();
Register reg0(0);
Register reg1(1);
Register reg2(2);
- Register temp2 = temporary_register_scope.NewRegister();
+ Register temp2 = register_allocator.NewRegister();
CHECK_EQ(builder.RegisterIsParameterOrLocal(temp0), false);
CHECK_EQ(builder.RegisterIsParameterOrLocal(temp1), false);
CHECK_EQ(builder.RegisterIsParameterOrLocal(temp2), false);
@@ -641,67 +687,6 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
}
-TEST_F(BytecodeArrayBuilderTest, ToBoolean) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
-
- // Check ToBoolean emitted at start of a basic block.
- builder.CastAccumulatorToBoolean();
-
- // Check ToBoolean emitted preceding bytecode is non-boolean.
- builder.LoadNull().CastAccumulatorToBoolean();
-
- // Check ToBoolean omitted if preceding bytecode is boolean.
- builder.LoadFalse().CastAccumulatorToBoolean();
-
- // Check ToBoolean emitted if it is at the start of a basic block caused by a
- // bound label.
- BytecodeLabel label;
- builder.LoadFalse()
- .Bind(&label)
- .CastAccumulatorToBoolean();
-
- // Check ToBoolean emitted if it is at the start of a basic block caused by a
- // jump.
- builder.LoadFalse()
- .JumpIfTrue(&label)
- .CastAccumulatorToBoolean();
-
- builder.Return();
-
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
- BytecodeArrayIterator iterator(array);
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kToBoolean);
- iterator.Advance();
-
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaNull);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kToBoolean);
- iterator.Advance();
-
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaFalse);
- iterator.Advance();
-
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaFalse);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kToBoolean);
- iterator.Advance();
-
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaFalse);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
- iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kToBoolean);
- iterator.Advance();
-
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
- iterator.Advance();
- CHECK(iterator.done());
-}
-
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 1cf907cf9b..cd9f120cad 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -36,7 +36,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Register reg_0(0);
Register reg_1(1);
Register reg_2 = Register::FromParameterIndex(2, builder.parameter_count());
- int name_index = 21;
+ Handle<String> name = factory->NewStringFromStaticChars("abc");
+ int name_index = 3;
int feedback_slot = 97;
builder.LoadLiteral(heap_num_0)
@@ -45,7 +46,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(smi_0)
.LoadLiteral(smi_1)
.LoadAccumulatorWithRegister(reg_0)
- .LoadNamedProperty(reg_1, name_index, feedback_slot, LanguageMode::SLOPPY)
+ .LoadNamedProperty(reg_1, name, feedback_slot, LanguageMode::SLOPPY)
.StoreAccumulatorInRegister(reg_2)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
.Return();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
new file mode 100644
index 0000000000..0620322162
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeRegisterAllocatorTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeRegisterAllocatorTest() {}
+ ~BytecodeRegisterAllocatorTest() override {}
+};
+
+
+TEST_F(BytecodeRegisterAllocatorTest, TemporariesRecycled) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ int first;
+ {
+ BytecodeRegisterAllocator temporaries(&builder);
+ first = temporaries.NewRegister().index();
+ temporaries.NewRegister();
+ temporaries.NewRegister();
+ temporaries.NewRegister();
+ }
+
+ int second;
+ {
+ BytecodeRegisterAllocator temporaries(&builder);
+ second = temporaries.NewRegister().index();
+ }
+
+ CHECK_EQ(first, second);
+}
+
+
+TEST_F(BytecodeRegisterAllocatorTest, ConsecutiveRegisters) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ BytecodeRegisterAllocator temporaries(&builder);
+ temporaries.PrepareForConsecutiveAllocations(4);
+ Register reg0 = temporaries.NextConsecutiveRegister();
+ Register other = temporaries.NewRegister();
+ Register reg1 = temporaries.NextConsecutiveRegister();
+ Register reg2 = temporaries.NextConsecutiveRegister();
+ Register reg3 = temporaries.NextConsecutiveRegister();
+ USE(other);
+
+ CHECK(Register::AreContiguous(reg0, reg1, reg2, reg3));
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 729978643f..812ee46c9c 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -42,7 +42,7 @@ TEST(OperandConversion, Parameters) {
TEST(OperandConversion, RegistersParametersNoOverlap) {
std::vector<uint8_t> operand_count(256);
- for (int i = 0; i <= Register::kMaxRegisterIndex; i++) {
+ for (int i = 0; i <= kMaxInt8; i++) {
Register r = Register(i);
uint8_t operand = r.ToOperand();
operand_count[operand] += 1;
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
new file mode 100644
index 0000000000..ea5d1bb8c3
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -0,0 +1,225 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/handles-inl.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConstantArrayBuilderTest : public TestWithIsolateAndZone {
+ public:
+ ConstantArrayBuilderTest() {}
+ ~ConstantArrayBuilderTest() override {}
+
+ static const size_t kLowCapacity = ConstantArrayBuilder::kLowCapacity;
+ static const size_t kMaxCapacity = ConstantArrayBuilder::kMaxCapacity;
+};
+
+
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ ConstantArrayBuilderTest::kMaxCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ ConstantArrayBuilderTest::kLowCapacity;
+
+
+TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kMaxCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK_EQ(builder.size(), i + 1);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ for (size_t i = 0; i < kMaxCapacity; i++) {
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), static_cast<double>(i));
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
+ for (size_t reserved = 1; reserved < kLowCapacity; reserved *= 3) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kByte);
+ }
+ for (size_t i = 0; i < 2 * kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ if (i + reserved < kLowCapacity) {
+ CHECK_LE(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), i + 1);
+ CHECK(builder.At(i)->SameValue(*object));
+ } else {
+ CHECK_GE(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), i + reserved + 1);
+ CHECK(builder.At(i + reserved)->SameValue(*object));
+ }
+ }
+ CHECK_EQ(builder.size(), 2 * kLowCapacity + reserved);
+
+ // Check reserved values represented by the hole.
+ for (size_t i = 0; i < reserved; i++) {
+ Handle<Object> empty = builder.At(kLowCapacity - reserved + i);
+ CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
+ }
+
+    // Commit reserved entries with duplicates and check size does not change.
+ DCHECK_EQ(reserved + 2 * kLowCapacity, builder.size());
+ size_t duplicates_in_idx8_space =
+ std::min(reserved, kLowCapacity - reserved);
+ for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
+ builder.CommitReservedEntry(OperandSize::kByte,
+ isolate()->factory()->NewNumberFromSize(i));
+ DCHECK_EQ(reserved + 2 * kLowCapacity, builder.size());
+ }
+
+ // Check all committed values match expected (holes where
+ // duplicates_in_idx8_space allocated).
+ for (size_t i = 0; i < kLowCapacity - reserved; i++) {
+ Smi* smi = Smi::FromInt(static_cast<int>(i));
+ CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
+ }
+ for (size_t i = kLowCapacity; i < 2 * kLowCapacity + reserved; i++) {
+ Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
+ CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ size_t index = kLowCapacity - reserved + i;
+ CHECK(builder.At(index)->IsTheHole());
+ }
+
+ // Now make reservations, and commit them with unique entries.
+ for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kByte);
+ }
+ for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
+ Handle<Object> object =
+ isolate()->factory()->NewNumberFromSize(2 * kLowCapacity + i);
+ size_t index = builder.CommitReservedEntry(OperandSize::kByte, object);
+ CHECK_EQ(static_cast<int>(index), kLowCapacity - reserved + i);
+ CHECK(builder.At(static_cast<int>(index))->SameValue(*object));
+ }
+ CHECK_EQ(builder.size(), 2 * kLowCapacity + reserved);
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx16Reservations) {
+ for (size_t reserved = 1; reserved < kLowCapacity; reserved *= 3) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ CHECK_EQ(builder.size(), i + 1);
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kShort);
+ CHECK_EQ(builder.size(), kLowCapacity);
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ builder.DiscardReservedEntry(OperandSize::kShort);
+ CHECK_EQ(builder.size(), kLowCapacity);
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kShort);
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.CommitReservedEntry(operand_size, object);
+ CHECK_EQ(builder.size(), kLowCapacity);
+ }
+ for (size_t i = kLowCapacity; i < kLowCapacity + reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kShort);
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.CommitReservedEntry(operand_size, object);
+ CHECK_EQ(builder.size(), i + 1);
+ }
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ static const size_t kNumberOfElements = 37;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array =
+ builder.ToFixedArray(isolate()->factory());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(OperandSize::kByte == operand_size);
+ CHECK_EQ(builder.size(), 0);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK_EQ(builder.size(), i + kLowCapacity + 1);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ builder.CommitReservedEntry(OperandSize::kByte,
+ builder.At(i + kLowCapacity));
+ CHECK_EQ(builder.size(), 2 * kLowCapacity);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> original = builder.At(kLowCapacity + i);
+ Handle<Object> duplicate = builder.At(i);
+ CHECK(original->SameValue(*duplicate));
+ Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
+ CHECK(original->SameValue(*reference));
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(OperandSize::kByte == operand_size);
+ CHECK_EQ(builder.size(), 0);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK_EQ(builder.size(), i + kLowCapacity + 1);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ builder.DiscardReservedEntry(OperandSize::kByte);
+ builder.Insert(builder.At(i + kLowCapacity));
+ CHECK_EQ(builder.size(), 2 * kLowCapacity);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
+ Handle<Object> original = builder.At(kLowCapacity + i);
+ CHECK(original->SameValue(*reference));
+ Handle<Object> duplicate = builder.At(i);
+ CHECK(duplicate->SameValue(*isolate()->factory()->the_hole_value()));
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/locked-queue-unittest.cc b/deps/v8/test/unittests/locked-queue-unittest.cc
new file mode 100644
index 0000000000..cc176d937f
--- /dev/null
+++ b/deps/v8/test/unittests/locked-queue-unittest.cc
@@ -0,0 +1,90 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/locked-queue-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+typedef int Record;
+
+} // namespace
+
+namespace v8 {
+namespace internal {
+
+TEST(LockedQueue, ConstructorEmpty) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+
+TEST(LockedQueue, SingleRecordEnqueueDequeue) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ queue.Enqueue(1);
+ EXPECT_FALSE(queue.IsEmpty());
+ Record a = -1;
+ bool success = queue.Dequeue(&a);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(a, 1);
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+
+TEST(LockedQueue, Peek) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ queue.Enqueue(1);
+ EXPECT_FALSE(queue.IsEmpty());
+ Record a = -1;
+ bool success = queue.Peek(&a);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(a, 1);
+ EXPECT_FALSE(queue.IsEmpty());
+ success = queue.Dequeue(&a);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(a, 1);
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+
+TEST(LockedQueue, PeekOnEmpty) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ Record a = -1;
+ bool success = queue.Peek(&a);
+ EXPECT_FALSE(success);
+}
+
+
+TEST(LockedQueue, MultipleRecords) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ queue.Enqueue(1);
+ EXPECT_FALSE(queue.IsEmpty());
+ for (int i = 2; i <= 5; ++i) {
+ queue.Enqueue(i);
+ EXPECT_FALSE(queue.IsEmpty());
+ }
+ Record rec = 0;
+ for (int i = 1; i <= 4; ++i) {
+ EXPECT_FALSE(queue.IsEmpty());
+ queue.Dequeue(&rec);
+ EXPECT_EQ(i, rec);
+ }
+ for (int i = 6; i <= 12; ++i) {
+ queue.Enqueue(i);
+ EXPECT_FALSE(queue.IsEmpty());
+ }
+ for (int i = 5; i <= 12; ++i) {
+ EXPECT_FALSE(queue.IsEmpty());
+ queue.Dequeue(&rec);
+ EXPECT_EQ(i, rec);
+ }
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 40b4a3d984..5339da35fd 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -22,10 +22,6 @@
'include_dirs': [
'../..',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'sources': [ ### gcmole(all) ###
'atomic-utils-unittest.cc',
'base/bits-unittest.cc',
@@ -42,10 +38,9 @@
'base/platform/time-unittest.cc',
'base/sys-info-unittest.cc',
'base/utils/random-number-generator-unittest.cc',
+ 'cancelable-tasks-unittest.cc',
'char-predicates-unittest.cc',
- 'compiler/binary-operator-reducer-unittest.cc',
'compiler/branch-elimination-unittest.cc',
- 'compiler/bytecode-graph-builder-unittest.cc',
'compiler/change-lowering-unittest.cc',
'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
@@ -55,6 +50,7 @@
'compiler/control-flow-optimizer-unittest.cc',
'compiler/dead-code-elimination-unittest.cc',
'compiler/diamond-unittest.cc',
+ 'compiler/escape-analysis-unittest.cc',
'compiler/graph-reducer-unittest.cc',
'compiler/graph-reducer-unittest.h',
'compiler/graph-trimmer-unittest.cc',
@@ -101,6 +97,8 @@
'interpreter/bytecodes-unittest.cc',
'interpreter/bytecode-array-builder-unittest.cc',
'interpreter/bytecode-array-iterator-unittest.cc',
+ 'interpreter/bytecode-register-allocator-unittest.cc',
+ 'interpreter/constant-array-builder-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
@@ -109,10 +107,15 @@
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
+ 'locked-queue-unittest.cc',
'run-all-unittests.cc',
'runtime/runtime-interpreter-unittest.cc',
'test-utils.h',
'test-utils.cc',
+ 'wasm/ast-decoder-unittest.cc',
+ 'wasm/encoder-unittest.cc',
+ 'wasm/module-decoder-unittest.cc',
+ 'wasm/wasm-macro-gen-unittest.cc',
],
'conditions': [
['v8_target_arch=="arm"', {
@@ -172,11 +175,6 @@
],
},
}],
- ['v8_wasm!=0', {
- 'dependencies': [
- '../../third_party/wasm/test/unittests/wasm/wasm.gyp:wasm_unittests',
- ],
- }],
],
},
],
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index d439913ccf..18201cd81c 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -3,4 +3,12 @@
# found in the LICENSE file.
[
+['byteorder == big', {
+ # TODO(mips-team): Fix Wasm for big-endian.
+ 'WasmModuleVerifyTest*': [SKIP],
+ 'WasmFunctionVerifyTest*': [SKIP],
+ 'WasmDecoderTest.TableSwitch*': [SKIP],
+ 'WasmDecoderTest.AllLoadMemCombinations': [SKIP],
+}], # 'byteorder == big'
+
]
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
new file mode 100644
index 0000000000..c2abc8a6ad
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -0,0 +1,3 @@
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
new file mode 100644
index 0000000000..923c554604
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -0,0 +1,2439 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/v8.h"
+
+#include "test/cctest/wasm/test-signatures.h"
+
+#include "src/objects.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static const byte kCodeGetLocal0[] = {kExprGetLocal, 0};
+static const byte kCodeGetLocal1[] = {kExprGetLocal, 1};
+static const byte kCodeSetLocal0[] = {kExprSetLocal, 0, kExprI8Const, 0};
+
+static const LocalType kLocalTypes[] = {kAstI32, kAstI64, kAstF32, kAstF64};
+static const MachineType machineTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+ MachineType::Float64()};
+
+static const WasmOpcode kInt32BinopOpcodes[] = {
+ kExprI32Add, kExprI32Sub, kExprI32Mul, kExprI32DivS, kExprI32DivU,
+ kExprI32RemS, kExprI32RemU, kExprI32And, kExprI32Ior, kExprI32Xor,
+ kExprI32Shl, kExprI32ShrU, kExprI32ShrS, kExprI32Eq, kExprI32LtS,
+ kExprI32LeS, kExprI32LtU, kExprI32LeU};
+
+
+#define EXPECT_VERIFIES(env, x) Verify(kSuccess, env, x, x + arraysize(x))
+
+#define EXPECT_FAILURE(env, x) Verify(kError, env, x, x + arraysize(x))
+
+#define EXPECT_VERIFIES_INLINE(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(kSuccess, env, code, code + arraysize(code)); \
+ } while (false)
+
+
+#define EXPECT_FAILURE_INLINE(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(kError, env, code, code + arraysize(code)); \
+ } while (false)
+
+#define VERIFY(...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ Verify(kSuccess, &env_v_i, code, code + sizeof(code)); \
+ } while (false)
+
+
+class WasmDecoderTest : public TestWithZone {
+ public:
+ WasmDecoderTest() : TestWithZone(), sigs() {
+ init_env(&env_i_i, sigs.i_i());
+ init_env(&env_v_v, sigs.v_v());
+ init_env(&env_v_i, sigs.v_i());
+ init_env(&env_i_f, sigs.i_f());
+ init_env(&env_i_d, sigs.i_d());
+ init_env(&env_l_l, sigs.l_l());
+ init_env(&env_f_ff, sigs.f_ff());
+ init_env(&env_d_dd, sigs.d_dd());
+ }
+
+ TestSignatures sigs;
+
+ FunctionEnv env_i_i;
+ FunctionEnv env_v_v;
+ FunctionEnv env_v_i;
+ FunctionEnv env_i_f;
+ FunctionEnv env_i_d;
+ FunctionEnv env_l_l;
+ FunctionEnv env_f_ff;
+ FunctionEnv env_d_dd;
+
+ static void init_env(FunctionEnv* env, FunctionSig* sig) {
+ env->module = nullptr;
+ env->sig = sig;
+ env->local_int32_count = 0;
+ env->local_int64_count = 0;
+ env->local_float32_count = 0;
+ env->local_float64_count = 0;
+ env->SumLocals();
+ }
+
+ // A wrapper around VerifyWasmCode() that renders a nice failure message.
+ void Verify(ErrorCode expected, FunctionEnv* env, const byte* start,
+ const byte* end) {
+ TreeResult result = VerifyWasmCode(env, start, end);
+ if (result.error_code != expected) {
+ ptrdiff_t pc = result.error_pc - result.start;
+ ptrdiff_t pt = result.error_pt - result.start;
+ std::ostringstream str;
+ if (expected == kSuccess) {
+ str << "Verification failed: " << result.error_code << " pc = +" << pc;
+ if (result.error_pt) str << ", pt = +" << pt;
+ str << ", msg = " << result.error_msg.get();
+ } else {
+ str << "Verification expected: " << expected << ", but got "
+ << result.error_code;
+ if (result.error_code != kSuccess) {
+ str << " pc = +" << pc;
+ if (result.error_pt) str << ", pt = +" << pt;
+ }
+ }
+ FATAL(str.str().c_str());
+ }
+ }
+
+ void TestBinop(WasmOpcode opcode, FunctionSig* success) {
+ // op(local[0], local[1])
+ byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
+ 1};
+ FunctionEnv env;
+ init_env(&env, success);
+ EXPECT_VERIFIES(&env, code);
+
+ // Try all combinations of return and parameter types.
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ for (size_t k = 0; k < arraysize(kLocalTypes); k++) {
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[j], kLocalTypes[k]};
+ if (types[0] != success->GetReturn(0) ||
+ types[1] != success->GetParam(0) ||
+ types[2] != success->GetParam(1)) {
+ // Test signature mismatch.
+ FunctionSig sig(1, 2, types);
+ init_env(&env, &sig);
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+ }
+ }
+
+ void TestUnop(WasmOpcode opcode, FunctionSig* success) {
+ TestUnop(opcode, success->GetReturn(), success->GetParam(0));
+ }
+
+ void TestUnop(WasmOpcode opcode, LocalType ret_type, LocalType param_type) {
+ // Return(op(local[0]))
+ byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
+ FunctionEnv env;
+ {
+ LocalType types[] = {ret_type, param_type};
+ FunctionSig sig(1, 1, types);
+ init_env(&env, &sig);
+ EXPECT_VERIFIES(&env, code);
+ }
+
+ // Try all combinations of return and parameter types.
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
+ if (types[0] != ret_type || types[1] != param_type) {
+ // Test signature mismatch.
+ FunctionSig sig(1, 1, types);
+ init_env(&env, &sig);
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+ }
+};
+
+
+static FunctionEnv CreateInt32FunctionEnv(FunctionSig* sig, int count) {
+ FunctionEnv env;
+ env.module = nullptr;
+ env.sig = sig;
+ env.local_int32_count = count;
+ env.local_float64_count = 0;
+ env.local_float32_count = 0;
+ env.total_locals = static_cast<unsigned>(count + sig->parameter_count());
+ return env;
+}
+
+
+TEST_F(WasmDecoderTest, Int8Const) {
+ byte code[] = {kExprI8Const, 0};
+ for (int i = -128; i < 128; i++) {
+ code[1] = static_cast<byte>(i);
+ EXPECT_VERIFIES(&env_i_i, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, EmptyFunction) {
+ byte code[] = {0};
+ Verify(kSuccess, &env_v_v, code, code);
+ Verify(kError, &env_i_i, code, code);
+}
+
+
+TEST_F(WasmDecoderTest, IncompleteIf1) {
+ byte code[] = {kExprIf};
+ EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IncompleteIf2) {
+ byte code[] = {kExprIf, kExprI8Const, 0};
+ EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Int8Const_fallthru) {
+ byte code[] = {kExprI8Const, 0, kExprI8Const, 1};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Int32Const) {
+ byte code[] = {kExprI32Const, 0, 0, 0, 0};
+ int32_t* ptr = reinterpret_cast<int32_t*>(code + 1);
+ const int kInc = 4498211;
+ for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
+ *ptr = i;
+ EXPECT_VERIFIES(&env_i_i, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Int8Const_fallthru2) {
+ byte code[] = {kExprI8Const, 0, kExprI32Const, 1, 2, 3, 4};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Int64Const) {
+ byte code[] = {kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0};
+ int64_t* ptr = reinterpret_cast<int64_t*>(code + 1);
+ const int kInc = 4498211;
+ for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
+ *ptr = (static_cast<int64_t>(i) << 32) | i;
+ EXPECT_VERIFIES(&env_l_l, code);
+ }
+}
+
+
+// TODO(titzer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, Float32Const) {
+ byte code[] = {kExprF32Const, 0, 0, 0, 0};
+ float* ptr = reinterpret_cast<float*>(code + 1);
+ for (int i = 0; i < 30; i++) {
+ *ptr = i * -7.75f;
+ EXPECT_VERIFIES(&env_f_ff, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Float64Const) {
+ byte code[] = {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0};
+ double* ptr = reinterpret_cast<double*>(code + 1);
+ for (int i = 0; i < 30; i++) {
+ *ptr = i * 33.45;
+ EXPECT_VERIFIES(&env_d_dd, code);
+ }
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, Int32Const_off_end) {
+ byte code[] = {kExprI32Const, 0xaa, 0xbb, 0xcc, 0x44};
+
+ for (int size = 1; size <= 4; size++) {
+ Verify(kError, &env_i_i, code, code + size);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_param) {
+ EXPECT_VERIFIES(&env_i_i, kCodeGetLocal0);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_local) {
+ FunctionEnv env;
+ init_env(&env, sigs.i_v());
+ env.AddLocals(kAstI32, 1);
+ EXPECT_VERIFIES(&env, kCodeGetLocal0);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_param_n) {
+ FunctionSig* array[] = {sigs.i_i(), sigs.i_ii(), sigs.i_iii()};
+
+ for (size_t i = 0; i < arraysize(array); i++) {
+ FunctionEnv env = CreateInt32FunctionEnv(array[i], 0);
+ EXPECT_VERIFIES(&env, kCodeGetLocal0);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, GetLocalN_local) {
+ for (byte i = 1; i < 8; i++) {
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), i);
+ for (byte j = 0; j < i; j++) {
+ byte code[] = {kExprGetLocal, j};
+ EXPECT_VERIFIES(&env, code);
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_fail_no_params) {
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), 0);
+
+ EXPECT_FAILURE(&env, kCodeGetLocal0);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal1_fail_no_locals) {
+ EXPECT_FAILURE(&env_i_i, kCodeGetLocal1);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal_off_end) {
+ static const byte code[] = {kExprGetLocal};
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal_varint) {
+ env_i_i.local_int32_count = 1000000000;
+ env_i_i.total_locals += 1000000000;
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xFF, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xF0, 0x80, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xF2, 0x81, 0x82, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xF3, 0xA1, 0xB1, 0xC1, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Binops_off_end) {
+ byte code1[] = {0}; // [opcode]
+ for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
+ code1[0] = kInt32BinopOpcodes[i];
+ EXPECT_FAILURE(&env_i_i, code1);
+ }
+
+ byte code3[] = {0, kExprGetLocal, 0}; // [opcode] [expr]
+ for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
+ code3[0] = kInt32BinopOpcodes[i];
+ EXPECT_FAILURE(&env_i_i, code3);
+ }
+
+ byte code4[] = {0, kExprGetLocal, 0, 0}; // [opcode] [expr] [opcode]
+ for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
+ code4[0] = kInt32BinopOpcodes[i];
+ code4[3] = kInt32BinopOpcodes[i];
+ EXPECT_FAILURE(&env_i_i, code4);
+ }
+}
+
+
+//===================================================================
+//== Statements
+//===================================================================
+TEST_F(WasmDecoderTest, Nop) {
+ static const byte code[] = {kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, SetLocal0_param) {
+ static const byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, SetLocal0_local) {
+ byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), 1);
+
+ EXPECT_VERIFIES(&env, code);
+}
+
+
+TEST_F(WasmDecoderTest, SetLocalN_local) {
+ for (byte i = 1; i < 8; i++) {
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), i);
+ for (byte j = 0; j < i; j++) {
+ byte code[] = {kExprSetLocal, j, kExprI8Const, i};
+ EXPECT_VERIFIES(&env, code);
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Block0) {
+ static const byte code[] = {kExprBlock, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block0_fallthru1) {
+ static const byte code[] = {kExprBlock, 0, kExprBlock, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block1) {
+ static const byte code[] = {kExprBlock, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block0_fallthru2) {
+ static const byte code[] = {kExprBlock, 0, kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2) {
+ static const byte code[] = {kExprBlock, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprSetLocal, 0, kExprI8Const, 0}; // --
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2_fallthru) {
+ static const byte code[] = {kExprBlock, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprI8Const, 11}; // --
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, BlockN) {
+ byte block[] = {kExprBlock, 2};
+
+ for (size_t i = 0; i < 10; i++) {
+ size_t total = sizeof(block) + sizeof(kCodeSetLocal0) * i;
+ byte* code = reinterpret_cast<byte*>(malloc(total));
+ memcpy(code, block, sizeof(block));
+ code[1] = static_cast<byte>(i);
+ for (size_t j = 0; j < i; j++) {
+ memcpy(code + sizeof(block) + j * sizeof(kCodeSetLocal0), kCodeSetLocal0,
+ sizeof(kCodeSetLocal0));
+ }
+ Verify(kSuccess, &env_v_i, code, code + total);
+ free(code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, BlockN_off_end) {
+ for (byte i = 2; i < 10; i++) {
+ byte code[] = {kExprBlock, i, kExprNop};
+ EXPECT_FAILURE(&env_v_v, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Block1_break) {
+ static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2_break) {
+ static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block1_continue) {
+ static const byte code[] = {kExprBlock, 1, kExprBr, 1, kExprNop};
+ EXPECT_FAILURE(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2_continue) {
+ static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 1, kExprNop};
+ EXPECT_FAILURE(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprBlock0) {
+ static const byte code[] = {kExprBlock, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprBlock1a) {
+ static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprBlock1b) {
+ static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
+ EXPECT_FAILURE(&env_f_ff, code);
+}
+
+
+// TODO(titzer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, ExprBlock1c) {
+ static const byte code[] = {kExprBlock, 1, kExprF32Const, 0, 0, 0, 0};
+ EXPECT_VERIFIES(&env_f_ff, code);
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, IfEmpty) {
+ static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IfSet) {
+ static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprSetLocal,
+ 0, kExprI8Const, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IfBlock1) {
+ static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprBlock,
+ 1, kExprSetLocal, 0, kExprI8Const,
+ 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IfBlock2) {
+ static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprBlock,
+ 2, kExprSetLocal, 0, kExprI8Const,
+ 0, kExprSetLocal, 0, kExprI8Const,
+ 0};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IfElseEmpty) {
+ static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprNop,
+ kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IfElseSet) {
+ static const byte code[] = {kExprIfElse,
+ kExprGetLocal,
+ 0, // --
+ kExprSetLocal,
+ 0,
+ kExprI8Const,
+ 0, // --
+ kExprSetLocal,
+ 0,
+ kExprI8Const,
+ 1}; // --
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IfElseUnreachable) {
+ static const byte code[] = {kExprIfElse, kExprI8Const, 0,
+ kExprUnreachable, kExprGetLocal, 0};
+
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType types[] = {kAstI32, kLocalTypes[i]};
+ FunctionEnv env;
+ FunctionSig sig(1, 1, types);
+ init_env(&env, &sig);
+
+ if (kLocalTypes[i] == kAstI32) {
+ EXPECT_VERIFIES(&env, code);
+ } else {
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+}
+
+
+// --- Loop constructs, returns, unreachable code, and if-expressions.
+// Test names suggest the loop branch convention: br depth 0 targets the loop
+// header ("continue"), depth 1 exits the loop ("break") — confirm against
+// the decoder's target resolution. ---
+TEST_F(WasmDecoderTest, Loop0) {
+  static const byte code[] = {kExprLoop, 0};
+  EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Loop1) {
+  static const byte code[] = {kExprLoop, 1, kExprSetLocal, 0, kExprI8Const, 0};
+  EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Loop2) {
+  static const byte code[] = {kExprLoop, 2,                       // --
+                              kExprSetLocal, 0, kExprI8Const, 0,  // --
+                              kExprSetLocal, 0, kExprI8Const, 0};  // --
+  EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Loop1_continue) {
+  static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprNop};
+  EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Loop1_break) {
+  static const byte code[] = {kExprLoop, 1, kExprBr, 1, kExprNop};
+  EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Loop2_continue) {
+  static const byte code[] = {kExprLoop, 2,                       // --
+                              kExprSetLocal, 0, kExprI8Const, 0,  // --
+                              kExprBr, 0, kExprNop};  // --
+  EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Loop2_break) {
+  static const byte code[] = {kExprLoop, 2,                       // --
+                              kExprSetLocal, 0, kExprI8Const, 0,  // --
+                              kExprBr, 1, kExprNop};  // --
+  EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprLoop0) {
+  static const byte code[] = {kExprLoop, 0};
+  EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprLoop1a) {
+  static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
+  EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+// NOTE(review): byte-for-byte identical to ExprLoop1a, with the same env and
+// the same expectation — presumably one of the two was meant to differ
+// (e.g. a different env or a failure case); confirm upstream intent.
+TEST_F(WasmDecoderTest, ExprLoop1b) {
+  static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
+  EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprLoop2_unreachable) {
+  static const byte code[] = {kExprLoop, 2, kExprBr, 0,
+                              kExprI8Const, 0, kExprNop};
+  EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+// An empty (nop) body satisfies a void result but not i32-returning sigs.
+TEST_F(WasmDecoderTest, ReturnVoid1) {
+  static const byte code[] = {kExprNop};
+  EXPECT_VERIFIES(&env_v_v, code);
+  EXPECT_FAILURE(&env_i_i, code);
+  EXPECT_FAILURE(&env_i_f, code);
+}
+
+
+TEST_F(WasmDecoderTest, ReturnVoid2) {
+  static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
+  EXPECT_VERIFIES(&env_v_v, code);
+  EXPECT_FAILURE(&env_i_i, code);
+  EXPECT_FAILURE(&env_i_f, code);
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// A value-producing expression is still acceptable in a void function.
+TEST_F(WasmDecoderTest, ReturnVoid3) {
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprI8Const, 0);
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprI32Const, 0, 0, 0, 0);
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0);
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprF32Const, 0, 0, 0, 0);
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
+
+  EXPECT_VERIFIES_INLINE(&env_v_i, kExprGetLocal, 0);
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, Unreachable1) {
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable);
+  EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable, kExprUnreachable);
+  EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(2, WASM_UNREACHABLE, WASM_ZERO));
+  EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(2, WASM_BR(0), WASM_ZERO));
+  EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(2, WASM_UNREACHABLE, WASM_ZERO));
+  EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(2, WASM_BR(0), WASM_ZERO));
+}
+
+
+TEST_F(WasmDecoderTest, Codeiness) {
+  VERIFY(kExprLoop, 2,                       // --
+         kExprSetLocal, 0, kExprI8Const, 0,  // --
+         kExprBr, 0, kExprNop);  // --
+}
+
+
+TEST_F(WasmDecoderTest, ExprIf1) {
+  VERIFY(kExprIf, kExprGetLocal, 0, kExprI8Const, 0, kExprI8Const, 1);
+  VERIFY(kExprIf, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0);
+  VERIFY(kExprIf, kExprGetLocal, 0, kExprI32Add, kExprGetLocal, 0,
+         kExprGetLocal, 0, kExprI8Const, 1);
+}
+
+
+// Every proper prefix of a valid if-expression must fail to decode.
+TEST_F(WasmDecoderTest, ExprIf_off_end) {
+  static const byte kCode[] = {kExprIf, kExprGetLocal, 0, kExprGetLocal,
+                               0, kExprGetLocal, 0};
+  for (size_t len = 1; len < arraysize(kCode); len++) {
+    Verify(kError, &env_i_i, kCode, kCode + len);
+  }
+}
+
+
+// --- Typing of conditional expressions, i64 locals, and the simple
+// binary/unary operators (each TestBinop/TestUnop presumably checks that the
+// opcode verifies against its signature and fails against others). ---
+TEST_F(WasmDecoderTest, ExprIf_type) {
+  {
+    // float|double ? 1 : 2
+    static const byte kCode[] = {kExprIfElse, kExprGetLocal, 0, kExprI8Const,
+                                 1, kExprI8Const, 2};
+    EXPECT_FAILURE(&env_i_f, kCode);
+    EXPECT_FAILURE(&env_i_d, kCode);
+  }
+  {
+    // 1 ? float|double : 2
+    static const byte kCode[] = {kExprIfElse, kExprI8Const, 1, kExprGetLocal,
+                                 0, kExprI8Const, 2};
+    EXPECT_FAILURE(&env_i_f, kCode);
+    EXPECT_FAILURE(&env_i_d, kCode);
+  }
+  {
+    // stmt ? 0 : 1
+    static const byte kCode[] = {kExprIfElse, kExprNop, kExprI8Const,
+                                 0, kExprI8Const, 1};
+    EXPECT_FAILURE(&env_i_i, kCode);
+  }
+  {
+    // 0 ? stmt : 1
+    static const byte kCode[] = {kExprIfElse, kExprI8Const, 0,
+                                 kExprNop, kExprI8Const, 1};
+    EXPECT_FAILURE(&env_i_i, kCode);
+  }
+  {
+    // 0 ? 1 : stmt
+    static const byte kCode[] = {kExprIfElse, kExprI8Const, 0, kExprI8Const, 1,
+                                 0, kExprBlock};
+    EXPECT_FAILURE(&env_i_i, kCode);
+  }
+}
+
+
+TEST_F(WasmDecoderTest, Int64Local_param) {
+  EXPECT_VERIFIES(&env_l_l, kCodeGetLocal0);
+}
+
+
+// Adds 1..7 i64 locals and checks that each index decodes via kExprGetLocal.
+TEST_F(WasmDecoderTest, Int64Locals) {
+  for (byte i = 1; i < 8; i++) {
+    FunctionEnv env;
+    init_env(&env, sigs.l_v());
+    env.AddLocals(kAstI64, i);
+    for (byte j = 0; j < i; j++) {
+      byte code[] = {kExprGetLocal, j};
+      EXPECT_VERIFIES(&env, code);
+    }
+  }
+}
+
+
+TEST_F(WasmDecoderTest, Int32Binops) {
+  TestBinop(kExprI32Add, sigs.i_ii());
+  TestBinop(kExprI32Sub, sigs.i_ii());
+  TestBinop(kExprI32Mul, sigs.i_ii());
+  TestBinop(kExprI32DivS, sigs.i_ii());
+  TestBinop(kExprI32DivU, sigs.i_ii());
+  TestBinop(kExprI32RemS, sigs.i_ii());
+  TestBinop(kExprI32RemU, sigs.i_ii());
+  TestBinop(kExprI32And, sigs.i_ii());
+  TestBinop(kExprI32Ior, sigs.i_ii());
+  TestBinop(kExprI32Xor, sigs.i_ii());
+  TestBinop(kExprI32Shl, sigs.i_ii());
+  TestBinop(kExprI32ShrU, sigs.i_ii());
+  TestBinop(kExprI32ShrS, sigs.i_ii());
+  TestBinop(kExprI32Eq, sigs.i_ii());
+  TestBinop(kExprI32LtS, sigs.i_ii());
+  TestBinop(kExprI32LeS, sigs.i_ii());
+  TestBinop(kExprI32LtU, sigs.i_ii());
+  TestBinop(kExprI32LeU, sigs.i_ii());
+}
+
+
+// Arithmetic ops keep the f64 signature; comparisons produce i32.
+TEST_F(WasmDecoderTest, DoubleBinops) {
+  TestBinop(kExprF64Add, sigs.d_dd());
+  TestBinop(kExprF64Sub, sigs.d_dd());
+  TestBinop(kExprF64Mul, sigs.d_dd());
+  TestBinop(kExprF64Div, sigs.d_dd());
+
+  TestBinop(kExprF64Eq, sigs.i_dd());
+  TestBinop(kExprF64Lt, sigs.i_dd());
+  TestBinop(kExprF64Le, sigs.i_dd());
+}
+
+
+TEST_F(WasmDecoderTest, FloatBinops) {
+  TestBinop(kExprF32Add, sigs.f_ff());
+  TestBinop(kExprF32Sub, sigs.f_ff());
+  TestBinop(kExprF32Mul, sigs.f_ff());
+  TestBinop(kExprF32Div, sigs.f_ff());
+
+  TestBinop(kExprF32Eq, sigs.i_ff());
+  TestBinop(kExprF32Lt, sigs.i_ff());
+  TestBinop(kExprF32Le, sigs.i_ff());
+}
+
+
+TEST_F(WasmDecoderTest, TypeConversions) {
+  TestUnop(kExprI32SConvertF32, kAstI32, kAstF32);
+  TestUnop(kExprI32SConvertF64, kAstI32, kAstF64);
+  TestUnop(kExprI32UConvertF32, kAstI32, kAstF32);
+  TestUnop(kExprI32UConvertF64, kAstI32, kAstF64);
+  TestUnop(kExprF64SConvertI32, kAstF64, kAstI32);
+  TestUnop(kExprF64UConvertI32, kAstF64, kAstI32);
+  TestUnop(kExprF64ConvertF32, kAstF64, kAstF32);
+  TestUnop(kExprF32SConvertI32, kAstF32, kAstI32);
+  TestUnop(kExprF32UConvertI32, kAstF32, kAstI32);
+  TestUnop(kExprF32ConvertF64, kAstF32, kAstF64);
+}
+
+
+// --- Tests written with the WASM_* encoding macros (statement forms,
+// break/continue, variadic blocks, multiple return values, and the full
+// i32/i64 operator sets). ---
+TEST_F(WasmDecoderTest, MacrosStmt) {
+  VERIFY(WASM_SET_LOCAL(0, WASM_I32(87348)));
+  VERIFY(WASM_STORE_MEM(MachineType::Int32(), WASM_I8(24), WASM_I8(40)));
+  VERIFY(WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
+  VERIFY(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+  VERIFY(WASM_NOP);
+  VERIFY(WASM_BLOCK(1, WASM_NOP));
+  VERIFY(WASM_LOOP(1, WASM_NOP));
+  VERIFY(WASM_LOOP(1, WASM_BREAK(0)));
+  VERIFY(WASM_LOOP(1, WASM_CONTINUE(0)));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// WASM_BREAKV carries a value out of the loop; its type must match the
+// function result type of each environment used below.
+TEST_F(WasmDecoderTest, MacrosBreak) {
+  EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BREAK(0)));
+
+  EXPECT_VERIFIES_INLINE(&env_i_i, WASM_LOOP(1, WASM_BREAKV(0, WASM_ZERO)));
+  EXPECT_VERIFIES_INLINE(&env_l_l, WASM_LOOP(1, WASM_BREAKV(0, WASM_I64(0))));
+  EXPECT_VERIFIES_INLINE(&env_f_ff,
+                         WASM_LOOP(1, WASM_BREAKV(0, WASM_F32(0.0))));
+  EXPECT_VERIFIES_INLINE(&env_d_dd,
+                         WASM_LOOP(1, WASM_BREAKV(0, WASM_F64(0.0))));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, MacrosContinue) {
+  EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_CONTINUE(0)));
+}
+
+
+TEST_F(WasmDecoderTest, MacrosVariadic) {
+  VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_NOP));
+  VERIFY(WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
+  VERIFY(WASM_LOOP(2, WASM_NOP, WASM_NOP));
+  VERIFY(WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
+}
+
+
+TEST_F(WasmDecoderTest, MacrosNestedBlocks) {
+  VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_BLOCK(2, WASM_NOP, WASM_NOP)));
+  VERIFY(WASM_BLOCK(3, WASM_NOP,                          // --
+                    WASM_BLOCK(2, WASM_NOP, WASM_NOP),    // --
+                    WASM_BLOCK(2, WASM_NOP, WASM_NOP)));  // --
+  VERIFY(WASM_BLOCK(1, WASM_BLOCK(1, WASM_BLOCK(2, WASM_NOP, WASM_NOP))));
+}
+
+
+// A return must supply exactly as many values as the signature declares;
+// too few arguments fail.
+TEST_F(WasmDecoderTest, MultipleReturn) {
+  static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
+  FunctionSig sig_ii_v(2, 0, kIntTypes5);
+  FunctionEnv env_ii_v;
+  init_env(&env_ii_v, &sig_ii_v);
+  EXPECT_VERIFIES_INLINE(&env_ii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+  EXPECT_FAILURE_INLINE(&env_ii_v, WASM_RETURN(WASM_ZERO));
+
+  FunctionSig sig_iii_v(3, 0, kIntTypes5);
+  FunctionEnv env_iii_v;
+  init_env(&env_iii_v, &sig_iii_v);
+  EXPECT_VERIFIES_INLINE(&env_iii_v,
+                         WASM_RETURN(WASM_ZERO, WASM_ONE, WASM_I8(44)));
+  EXPECT_FAILURE_INLINE(&env_iii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+}
+
+
+// Same arity checks as above, but via fall-through instead of WASM_RETURN.
+TEST_F(WasmDecoderTest, MultipleReturn_fallthru) {
+  static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
+  FunctionSig sig_ii_v(2, 0, kIntTypes5);
+  FunctionEnv env_ii_v;
+  init_env(&env_ii_v, &sig_ii_v);
+
+  EXPECT_VERIFIES_INLINE(&env_ii_v, WASM_ZERO, WASM_ONE);
+  EXPECT_FAILURE_INLINE(&env_ii_v, WASM_ZERO);
+
+  FunctionSig sig_iii_v(3, 0, kIntTypes5);
+  FunctionEnv env_iii_v;
+  init_env(&env_iii_v, &sig_iii_v);
+  EXPECT_VERIFIES_INLINE(&env_iii_v, WASM_ZERO, WASM_ONE, WASM_I8(44));
+  EXPECT_FAILURE_INLINE(&env_iii_v, WASM_ZERO, WASM_ONE);
+}
+
+
+TEST_F(WasmDecoderTest, MacrosInt32) {
+  VERIFY(WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I8(12)));
+  VERIFY(WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(13)));
+  VERIFY(WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I8(14)));
+  VERIFY(WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I8(15)));
+  VERIFY(WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I8(16)));
+  VERIFY(WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I8(17)));
+  VERIFY(WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_I8(18)));
+  VERIFY(WASM_I32_AND(WASM_GET_LOCAL(0), WASM_I8(19)));
+  VERIFY(WASM_I32_IOR(WASM_GET_LOCAL(0), WASM_I8(20)));
+  VERIFY(WASM_I32_XOR(WASM_GET_LOCAL(0), WASM_I8(21)));
+  VERIFY(WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I8(22)));
+  VERIFY(WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I8(23)));
+  VERIFY(WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I8(24)));
+  VERIFY(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(25)));
+  VERIFY(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I8(25)));
+
+  VERIFY(WASM_I32_LTS(WASM_GET_LOCAL(0), WASM_I8(26)));
+  VERIFY(WASM_I32_LES(WASM_GET_LOCAL(0), WASM_I8(27)));
+  VERIFY(WASM_I32_LTU(WASM_GET_LOCAL(0), WASM_I8(28)));
+  VERIFY(WASM_I32_LEU(WASM_GET_LOCAL(0), WASM_I8(29)));
+
+  VERIFY(WASM_I32_GTS(WASM_GET_LOCAL(0), WASM_I8(26)));
+  VERIFY(WASM_I32_GES(WASM_GET_LOCAL(0), WASM_I8(27)));
+  VERIFY(WASM_I32_GTU(WASM_GET_LOCAL(0), WASM_I8(28)));
+  VERIFY(WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I8(29)));
+}
+
+
+TEST_F(WasmDecoderTest, MacrosInt64) {
+  FunctionEnv env_i_ll;
+  FunctionEnv env_l_ll;
+  init_env(&env_i_ll, sigs.i_ll());
+  init_env(&env_l_ll, sigs.l_ll());
+
+// NOTE(review): these helper macros are never #undef'd at the end of the
+// test (compare DECODE_TEST in AllSimpleExpressions below); consider adding
+// matching #undefs so they do not leak into the rest of the file.
+#define VERIFY_L_LL(...) EXPECT_VERIFIES_INLINE(&env_l_ll, __VA_ARGS__)
+#define VERIFY_I_LL(...) EXPECT_VERIFIES_INLINE(&env_i_ll, __VA_ARGS__)
+
+  VERIFY_L_LL(WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64(12)));
+  VERIFY_L_LL(WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64(13)));
+  VERIFY_L_LL(WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64(14)));
+  VERIFY_L_LL(WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64(15)));
+  VERIFY_L_LL(WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64(16)));
+  VERIFY_L_LL(WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64(17)));
+  VERIFY_L_LL(WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64(18)));
+  VERIFY_L_LL(WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64(19)));
+  VERIFY_L_LL(WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64(20)));
+  VERIFY_L_LL(WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64(21)));
+
+  VERIFY_L_LL(WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64(22)));
+  VERIFY_L_LL(WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64(23)));
+  VERIFY_L_LL(WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64(24)));
+
+  VERIFY_I_LL(WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64(26)));
+  VERIFY_I_LL(WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64(27)));
+  VERIFY_I_LL(WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64(28)));
+  VERIFY_I_LL(WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64(29)));
+
+  VERIFY_I_LL(WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64(26)));
+  VERIFY_I_LL(WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64(27)));
+  VERIFY_I_LL(WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64(28)));
+  VERIFY_I_LL(WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64(29)));
+
+  VERIFY_I_LL(WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64(25)));
+  VERIFY_I_LL(WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64(25)));
+}
+
+
+TEST_F(WasmDecoderTest, AllSimpleExpressions) {
+// Test all simple expressions which are described by a signature.
+#define DECODE_TEST(name, opcode, sig)                      \
+  {                                                         \
+    FunctionSig* sig = WasmOpcodes::Signature(kExpr##name); \
+    if (sig->parameter_count() == 1) {                      \
+      TestUnop(kExpr##name, sig);                           \
+    } else {                                                \
+      TestBinop(kExpr##name, sig);                          \
+    }                                                       \
+  }
+
+  FOREACH_SIMPLE_OPCODE(DECODE_TEST);
+
+#undef DECODE_TEST
+}
+
+
+// --- Linear-memory operations: size/grow queries, load/store with offsets
+// (including multi-byte offset encodings — presumably LEB128 varints with a
+// 0x80 continuation bit; confirm against the decoder), and the full
+// local-type x machine-type access matrix. ---
+TEST_F(WasmDecoderTest, MemorySize) {
+  byte code[] = {kExprMemorySize};
+  EXPECT_VERIFIES(&env_i_i, code);
+  EXPECT_FAILURE(&env_f_ff, code);
+}
+
+
+TEST_F(WasmDecoderTest, GrowMemory) {
+  byte code[] = {kExprGrowMemory, kExprGetLocal, 0};
+  EXPECT_VERIFIES(&env_i_i, code);
+  EXPECT_FAILURE(&env_i_d, code);
+}
+
+
+TEST_F(WasmDecoderTest, LoadMemOffset) {
+  for (int offset = 0; offset < 128; offset += 7) {
+    byte code[] = {kExprI32LoadMem, WasmOpcodes::LoadStoreAccessOf(true),
+                   static_cast<byte>(offset), kExprI8Const, 0};
+    EXPECT_VERIFIES(&env_i_i, code);
+  }
+}
+
+
+TEST_F(WasmDecoderTest, StoreMemOffset) {
+  for (int offset = 0; offset < 128; offset += 7) {
+    byte code[] = {kExprI32StoreMem,
+                   WasmOpcodes::LoadStoreAccessOf(true),
+                   static_cast<byte>(offset),
+                   kExprI8Const,
+                   0,
+                   kExprI8Const,
+                   0};
+    EXPECT_VERIFIES(&env_i_i, code);
+  }
+}
+
+
+// Offsets of 1, 2, 3 and 4 encoded bytes must all decode.
+TEST_F(WasmDecoderTest, LoadMemOffset_varint) {
+  byte code1[] = {kExprI32LoadMem, WasmOpcodes::LoadStoreAccessOf(true), 0,
+                  kExprI8Const, 0};
+  byte code2[] = {kExprI32LoadMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0x80,
+                  1,
+                  kExprI8Const,
+                  0};
+  byte code3[] = {kExprI32LoadMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0x81,
+                  0x82,
+                  5,
+                  kExprI8Const,
+                  0};
+  byte code4[] = {kExprI32LoadMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0x83,
+                  0x84,
+                  0x85,
+                  7,
+                  kExprI8Const,
+                  0};
+
+  EXPECT_VERIFIES(&env_i_i, code1);
+  EXPECT_VERIFIES(&env_i_i, code2);
+  EXPECT_VERIFIES(&env_i_i, code3);
+  EXPECT_VERIFIES(&env_i_i, code4);
+}
+
+
+TEST_F(WasmDecoderTest, StoreMemOffset_varint) {
+  byte code1[] = {kExprI32StoreMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0,
+                  kExprI8Const,
+                  0,
+                  kExprI8Const,
+                  0};
+  byte code2[] = {kExprI32StoreMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0x80,
+                  1,
+                  kExprI8Const,
+                  0,
+                  kExprI8Const,
+                  0};
+  byte code3[] = {kExprI32StoreMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0x81,
+                  0x82,
+                  5,
+                  kExprI8Const,
+                  0,
+                  kExprI8Const,
+                  0};
+  byte code4[] = {kExprI32StoreMem,
+                  WasmOpcodes::LoadStoreAccessOf(true),
+                  0x83,
+                  0x84,
+                  0x85,
+                  7,
+                  kExprI8Const,
+                  0,
+                  kExprI8Const,
+                  0};
+
+  EXPECT_VERIFIES(&env_i_i, code1);
+  EXPECT_VERIFIES(&env_i_i, code2);
+  EXPECT_VERIFIES(&env_i_i, code3);
+  EXPECT_VERIFIES(&env_i_i, code4);
+}
+
+
+// Every (local type, memory type) pair: a load only verifies when the local
+// type matches the machine type's corresponding local type.
+TEST_F(WasmDecoderTest, AllLoadMemCombinations) {
+  for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+    LocalType local_type = kLocalTypes[i];
+    for (size_t j = 0; j < arraysize(machineTypes); j++) {
+      MachineType mem_type = machineTypes[j];
+      byte code[] = {
+          static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, false)),
+          WasmOpcodes::LoadStoreAccessOf(false), kExprI8Const, 0};
+      FunctionEnv env;
+      FunctionSig sig(1, 0, &local_type);
+      init_env(&env, &sig);
+      if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+        EXPECT_VERIFIES(&env, code);
+      } else {
+        EXPECT_FAILURE(&env, code);
+      }
+    }
+  }
+}
+
+
+// Mirror of AllLoadMemCombinations for stores (value comes from local 0).
+TEST_F(WasmDecoderTest, AllStoreMemCombinations) {
+  for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+    LocalType local_type = kLocalTypes[i];
+    for (size_t j = 0; j < arraysize(machineTypes); j++) {
+      MachineType mem_type = machineTypes[j];
+      byte code[] = {
+          static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, true)),
+          WasmOpcodes::LoadStoreAccessOf(false),
+          kExprI8Const,
+          0,
+          kExprGetLocal,
+          0};
+      FunctionEnv env;
+      FunctionSig sig(0, 1, &local_type);
+      init_env(&env, &sig);
+      if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+        EXPECT_VERIFIES(&env, code);
+      } else {
+        EXPECT_FAILURE(&env, code);
+      }
+    }
+  }
+}
+
+
+namespace {
+// A helper for tests that require a module environment for functions and
+// globals.  Registration methods return the new entry's index as a single
+// byte; each table is CHECKed to stay <= 127 so the index fits in one byte.
+class TestModuleEnv : public ModuleEnv {
+ public:
+  TestModuleEnv() {
+    mem_start = 0;
+    mem_end = 0;
+    module = &mod;
+    linker = nullptr;
+    function_code = nullptr;
+    // NOTE(review): raw `new` with no matching delete anywhere in this
+    // class; these vectors are freed only if WasmModule's destructor owns
+    // the pointers — confirm, or tolerate as a deliberate test-only leak.
+    mod.globals = new std::vector<WasmGlobal>;
+    mod.signatures = new std::vector<FunctionSig*>;
+    mod.functions = new std::vector<WasmFunction>;
+  }
+  // Registers a global of the given machine type; returns its index.
+  byte AddGlobal(MachineType mem_type) {
+    mod.globals->push_back({0, mem_type, 0, false});
+    CHECK(mod.globals->size() <= 127);
+    return static_cast<byte>(mod.globals->size() - 1);
+  }
+  // Registers a signature (used by indirect-call tests); returns its index.
+  byte AddSignature(FunctionSig* sig) {
+    mod.signatures->push_back(sig);
+    CHECK(mod.signatures->size() <= 127);
+    return static_cast<byte>(mod.signatures->size() - 1);
+  }
+  // Registers a function with the given signature; returns its index.
+  byte AddFunction(FunctionSig* sig) {
+    mod.functions->push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
+    CHECK(mod.functions->size() <= 127);
+    return static_cast<byte>(mod.functions->size() - 1);
+  }
+
+ private:
+  WasmModule mod;  // backing storage; `module` above points at this.
+};
+}  // namespace
+
+
+// --- Direct function calls: arity and argument-type checking.
+// NOTE(review): these tests set the shared fixture env's `module` to a
+// stack-local TestModuleEnv; the pointer dangles once each test returns, so
+// later tests must not rely on it — confirm the fixture resets it. ---
+TEST_F(WasmDecoderTest, SimpleCalls) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddFunction(sigs.i_v());
+  module_env.AddFunction(sigs.i_i());
+  module_env.AddFunction(sigs.i_ii());
+
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(0));
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I8(27)));
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
+}
+
+
+TEST_F(WasmDecoderTest, CallsWithTooFewArguments) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddFunction(sigs.i_i());
+  module_env.AddFunction(sigs.i_ii());
+  module_env.AddFunction(sigs.f_ff());
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION0(0));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_ZERO));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// Extra ("spillover") arguments beyond the callee's arity: acceptance
+// depends on the caller's own result type (per the expectations below).
+TEST_F(WasmDecoderTest, CallsWithSpilloverArgs) {
+  static LocalType a_i_ff[] = {kAstI32, kAstF32, kAstF32};
+  FunctionSig sig_i_ff(1, 2, a_i_ff);
+  FunctionEnv env_i_ff;
+  init_env(&env_i_ff, &sig_i_ff);
+
+  TestModuleEnv module_env;
+  env_i_ff.module = &module_env;
+  env_i_i.module = &module_env;
+  env_f_ff.module = &module_env;
+
+  module_env.AddFunction(&sig_i_ff);
+
+  EXPECT_VERIFIES_INLINE(&env_i_i,
+                         WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
+
+  EXPECT_VERIFIES_INLINE(&env_i_ff,
+                         WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
+
+  EXPECT_FAILURE_INLINE(&env_f_ff,
+                        WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
+
+  EXPECT_FAILURE_INLINE(
+      &env_i_i,
+      WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(0.2)));
+
+  EXPECT_VERIFIES_INLINE(
+      &env_f_ff,
+      WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(11)));
+}
+
+
+TEST_F(WasmDecoderTest, CallsWithMismatchedSigs2) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddFunction(sigs.i_i());
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I64(17)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
+}
+
+
+TEST_F(WasmDecoderTest, CallsWithMismatchedSigs3) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddFunction(sigs.i_f());
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I8(17)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I64(27)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
+
+  module_env.AddFunction(sigs.i_d());
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I8(16)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I64(16)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+}
+
+#endif
+
+
+// --- Indirect calls: the first WASM_CALL_INDIRECT operand after the
+// signature index is the table index expression (WASM_ZERO below). ---
+TEST_F(WasmDecoderTest, SimpleIndirectCalls) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  byte f0 = module_env.AddSignature(sigs.i_v());
+  byte f1 = module_env.AddSignature(sigs.i_i());
+  byte f2 = module_env.AddSignature(sigs.i_ii());
+
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(22)));
+  EXPECT_VERIFIES_INLINE(
+      env, WASM_CALL_INDIRECT(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
+}
+
+
+// Signature indices beyond the registered count must be rejected, and must
+// start verifying once the corresponding signature is added.
+TEST_F(WasmDecoderTest, IndirectCallsOutOfBounds) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+  module_env.AddSignature(sigs.i_v());
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(22)));
+  module_env.AddSignature(sigs.i_i());
+  EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(27)));
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(2, WASM_ZERO, WASM_I8(27)));
+}
+
+
+// NOTE(review): this test registers entries via AddFunction, while
+// SimpleIndirectCalls uses AddSignature for its indirect-call indices —
+// every expectation here is a failure, so confirm the indices are meant to
+// resolve in the signature table at all.
+TEST_F(WasmDecoderTest, IndirectCallsWithMismatchedSigs3) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  byte f0 = module_env.AddFunction(sigs.i_f());
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I8(17)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64(27)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2)));
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_I64(27)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
+
+  byte f1 = module_env.AddFunction(sigs.i_d());
+
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(16)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64(16)));
+  EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
+}
+
+
+// --- Global variables: load/store typing.  Sub-i32 machine types
+// (Int8..Uint32) are usable from an i32 context; 64-bit and float globals
+// are rejected there (per Int32Globals_fail below). ---
+TEST_F(WasmDecoderTest, Int32Globals) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddGlobal(MachineType::Int8());
+  module_env.AddGlobal(MachineType::Uint8());
+  module_env.AddGlobal(MachineType::Int16());
+  module_env.AddGlobal(MachineType::Uint16());
+  module_env.AddGlobal(MachineType::Int32());
+  module_env.AddGlobal(MachineType::Uint32());
+
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(1));
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(2));
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(3));
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(4));
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(5));
+
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(4, WASM_GET_LOCAL(0)));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(5, WASM_GET_LOCAL(0)));
+}
+
+
+TEST_F(WasmDecoderTest, Int32Globals_fail) {
+  FunctionEnv* env = &env_i_i;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddGlobal(MachineType::Int64());
+  module_env.AddGlobal(MachineType::Uint64());
+  module_env.AddGlobal(MachineType::Float32());
+  module_env.AddGlobal(MachineType::Float64());
+
+  EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(0));
+  EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(1));
+  EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(2));
+  EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(3));
+
+  EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+  EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+  EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
+  EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+}
+
+
+TEST_F(WasmDecoderTest, Int64Globals) {
+  FunctionEnv* env = &env_l_l;
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddGlobal(MachineType::Int64());
+  module_env.AddGlobal(MachineType::Uint64());
+
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(1));
+
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+}
+
+
+// NOTE(review): the local env_f_ff here shadows the fixture member of the
+// same name — presumably deliberate, to avoid mutating shared state;
+// confirm.
+TEST_F(WasmDecoderTest, Float32Globals) {
+  FunctionEnv env_f_ff;
+  FunctionEnv* env = &env_f_ff;
+  init_env(env, sigs.f_ff());
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddGlobal(MachineType::Float32());
+
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+}
+
+
+TEST_F(WasmDecoderTest, Float64Globals) {
+  FunctionEnv env_d_dd;
+  FunctionEnv* env = &env_d_dd;
+  init_env(env, sigs.d_dd());
+  TestModuleEnv module_env;
+  env->module = &module_env;
+
+  module_env.AddGlobal(MachineType::Float64());
+
+  EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+  EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+}
+
+
+// Every (local type, global machine type) pair: load verifies only on match.
+TEST_F(WasmDecoderTest, AllLoadGlobalCombinations) {
+  for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+    LocalType local_type = kLocalTypes[i];
+    for (size_t j = 0; j < arraysize(machineTypes); j++) {
+      MachineType mem_type = machineTypes[j];
+      FunctionEnv env;
+      FunctionSig sig(1, 0, &local_type);
+      TestModuleEnv module_env;
+      init_env(&env, &sig);
+      env.module = &module_env;
+      module_env.AddGlobal(mem_type);
+      if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+        EXPECT_VERIFIES_INLINE(&env, WASM_LOAD_GLOBAL(0));
+      } else {
+        EXPECT_FAILURE_INLINE(&env, WASM_LOAD_GLOBAL(0));
+      }
+    }
+  }
+}
+
+
+// Mirror of AllLoadGlobalCombinations for stores.
+TEST_F(WasmDecoderTest, AllStoreGlobalCombinations) {
+  for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+    LocalType local_type = kLocalTypes[i];
+    for (size_t j = 0; j < arraysize(machineTypes); j++) {
+      MachineType mem_type = machineTypes[j];
+      FunctionEnv env;
+      FunctionSig sig(0, 1, &local_type);
+      TestModuleEnv module_env;
+      init_env(&env, &sig);
+      env.module = &module_env;
+      module_env.AddGlobal(mem_type);
+      if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+        EXPECT_VERIFIES_INLINE(&env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+      } else {
+        EXPECT_FAILURE_INLINE(&env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+      }
+    }
+  }
+}
+
+
+// --- Break-depth validation across nested blocks/loops, and type
+// unification of values carried by breaks (WASM_BRV / WASM_BRV_IF). ---
+TEST_F(WasmDecoderTest, BreakNesting1) {
+  for (int i = 0; i < 5; i++) {
+    // (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
+    // Depths 0..2 verify, 3+ fail (per the i < 3 check below).
+    byte code[] = {WASM_BLOCK(
+        2, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i, WASM_ZERO)),
+                     WASM_SET_LOCAL(0, WASM_I8(1))),
+        WASM_GET_LOCAL(0))};
+    if (i < 3) {
+      EXPECT_VERIFIES(&env_i_i, code);
+    } else {
+      EXPECT_FAILURE(&env_i_i, code);
+    }
+  }
+}
+
+
+TEST_F(WasmDecoderTest, BreakNesting2) {
+  env_v_v.AddLocals(kAstI32, 1);
+  for (int i = 0; i < 5; i++) {
+    // (block[2] (loop[2] (if (get p) break[N]) (set p 1)) (return p)) (11)
+    byte code[] = {
+        WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(i)),
+                                WASM_SET_LOCAL(0, WASM_I8(1)))),
+        WASM_I8(11)};
+    if (i < 2) {
+      EXPECT_VERIFIES(&env_v_v, code);
+    } else {
+      EXPECT_FAILURE(&env_v_v, code);
+    }
+  }
+}
+
+
+TEST_F(WasmDecoderTest, BreakNesting3) {
+  env_v_v.AddLocals(kAstI32, 1);
+  for (int i = 0; i < 5; i++) {
+    // (block[1] (loop[1] (block[1] (if (get p) break[N])
+    byte code[] = {WASM_BLOCK(
+        1, WASM_LOOP(
+               1, WASM_BLOCK(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(i)))))};
+    if (i < 3) {
+      EXPECT_VERIFIES(&env_v_v, code);
+    } else {
+      EXPECT_FAILURE(&env_v_v, code);
+    }
+  }
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// Breaks carrying i32 and f32 values to the same target must not unify.
+TEST_F(WasmDecoderTest, BreaksWithMultipleTypes) {
+  EXPECT_FAILURE_INLINE(
+      &env_i_i,
+      WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(7)), WASM_F32(7.7)));
+  EXPECT_FAILURE_INLINE(&env_i_i,
+                        WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(7)),
+                                   WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7))));
+  EXPECT_FAILURE_INLINE(&env_i_i,
+                        WASM_BLOCK(3, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(8)),
+                                   WASM_BRV_IF(0, WASM_ZERO, WASM_I8(0)),
+                                   WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7))));
+  EXPECT_FAILURE_INLINE(&env_i_i,
+                        WASM_BLOCK(3, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(9)),
+                                   WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7)),
+                                   WASM_BRV_IF(0, WASM_ZERO, WASM_I8(11))));
+}
+
+#endif
+
+
+// Six nested constructs; each bit of `mask` turns one block into a loop,
+// which adds an extra branch target (depth++) per the loop below.
+TEST_F(WasmDecoderTest, BreakNesting_6_levels) {
+  for (int mask = 0; mask < 64; mask++) {
+    for (int i = 0; i < 14; i++) {
+      byte code[] = {
+          kExprBlock, 1,  // --
+          kExprBlock, 1,  // --
+          kExprBlock, 1,  // --
+          kExprBlock, 1,  // --
+          kExprBlock, 1,  // --
+          kExprBlock, 1,  // --
+          kExprBr,    static_cast<byte>(i),
+          kExprNop  // --
+      };
+
+      int depth = 6;
+      for (int l = 0; l < 6; l++) {
+        if (mask & (1 << l)) {
+          code[l * 2] = kExprLoop;
+          depth++;
+        }
+      }
+
+      if (i < depth) {
+        EXPECT_VERIFIES(&env_v_v, code);
+      } else {
+        EXPECT_FAILURE(&env_v_v, code);
+      }
+    }
+  }
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, ExprBreak_TypeCheck) {
+  FunctionEnv* envs[] = {&env_i_i, &env_l_l, &env_f_ff, &env_d_dd};
+  for (size_t i = 0; i < arraysize(envs); i++) {
+    FunctionEnv* env = envs[i];
+    // unify X and X => OK
+    EXPECT_VERIFIES_INLINE(
+        env, WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+                        WASM_GET_LOCAL(0)));
+  }
+
+  // unify i32 and f32 => fail
+  EXPECT_FAILURE_INLINE(
+      &env_i_i,
+      WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
+
+  // unify f64 and f64 => OK
+  EXPECT_VERIFIES_INLINE(
+      &env_d_dd,
+      WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+                 WASM_F64(1.2)));
+}
+
+#endif
+
+
+// For every pair of local types: the break value (local 0/1, type i) must
+// unify with the block's fall-through value (local 2, type j).
+TEST_F(WasmDecoderTest, ExprBreak_TypeCheckAll) {
+  byte code1[] = {WASM_BLOCK(2,
+                             WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+                             WASM_GET_LOCAL(1))};
+  byte code2[] = {WASM_BLOCK(
+      2, WASM_IF(WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_GET_LOCAL(0))),
+      WASM_GET_LOCAL(1))};
+
+
+  for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+    for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+      FunctionEnv env;
+      LocalType storage[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
+      FunctionSig sig(1, 2, storage);
+      init_env(&env, &sig);
+
+      if (i == j) {
+        EXPECT_VERIFIES(&env, code1);
+        EXPECT_VERIFIES(&env, code2);
+      } else {
+        EXPECT_FAILURE(&env, code1);
+        EXPECT_FAILURE(&env, code2);
+      }
+    }
+  }
+}
+
+
+// Breaks out of a block (code1) or loop (code2) must carry the function's
+// i32 result type; any other local type for the branch value fails.
+TEST_F(WasmDecoderTest, ExprBr_Unify) {
+  FunctionEnv env;
+
+  for (int which = 0; which < 2; which++) {
+    for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+      LocalType type = kLocalTypes[i];
+      LocalType storage[] = {kAstI32, kAstI32, type};
+      FunctionSig sig(1, 2, storage);
+      init_env(&env, &sig);  // (i32, X) -> i32
+
+      byte code1[] = {
+          WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(which))),
+                     WASM_GET_LOCAL(which ^ 1))};
+      byte code2[] = {
+          WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+                    WASM_GET_LOCAL(which ^ 1))};
+
+
+      if (type == kAstI32) {
+        EXPECT_VERIFIES(&env, code1);
+        EXPECT_VERIFIES(&env, code2);
+      } else {
+        EXPECT_FAILURE(&env, code1);
+        EXPECT_FAILURE(&env, code2);
+      }
+    }
+  }
+}
+
+
+TEST_F(WasmDecoderTest, ExprBrIf_type) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i,
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(
+ &env_d_dd,
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0)));
+
+ FunctionEnv env;
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ LocalType storage[] = {kAstI32, kAstI32, type};
+ FunctionSig sig(1, 2, storage);
+ init_env(&env, &sig); // (i32, X) -> i32
+
+ byte code1[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_GET_LOCAL(0))};
+
+ byte code2[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0))};
+ if (type == kAstI32) {
+ EXPECT_VERIFIES(&env, code1);
+ EXPECT_VERIFIES(&env, code2);
+ } else {
+ EXPECT_FAILURE(&env, code1);
+ EXPECT_FAILURE(&env, code2);
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, ExprBrIf_Unify) {
+ FunctionEnv env;
+
+ for (int which = 0; which < 2; which++) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ LocalType storage[] = {kAstI32, kAstI32, type};
+ FunctionSig sig(1, 2, storage);
+ init_env(&env, &sig); // (i32, X) -> i32
+
+ byte code1[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+ byte code2[] = {
+ WASM_LOOP(2, WASM_BRV_IF(1, WASM_ZERO, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+
+
+ if (type == kAstI32) {
+ EXPECT_VERIFIES(&env, code1);
+ EXPECT_VERIFIES(&env, code2);
+ } else {
+ EXPECT_FAILURE(&env, code1);
+ EXPECT_FAILURE(&env, code2);
+ }
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch0) {
+ static byte code[] = {kExprTableSwitch, 0, 0, 0, 0};
+ EXPECT_FAILURE(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch0b) {
+ static byte code[] = {kExprTableSwitch, 0, 0, 0, 0, kExprI8Const, 11};
+ EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch0c) {
+ static byte code[] = {
+ WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)), WASM_I8(67))};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch1) {
+ static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_I8(0), WASM_I8(9))};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_FAILURE(&env_f_ff, code);
+ EXPECT_FAILURE(&env_d_dd, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch_off_end) {
+ static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_I8(0), WASM_I8(9))};
+ for (size_t len = arraysize(code) - 1; len > 0; len--) {
+ Verify(kError, &env_v_v, code, code + len);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch2) {
+ static byte code[] = {
+ WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_I8(3), WASM_I8(10), WASM_I8(11))};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_FAILURE(&env_f_ff, code);
+ EXPECT_FAILURE(&env_d_dd, code);
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, TableSwitch1b) {
+ EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
+
+ EXPECT_VERIFIES_INLINE(&env_f_ff, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F32(0.0)));
+
+ EXPECT_VERIFIES_INLINE(&env_d_dd, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F64(0.0)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, TableSwitch_br) {
+ EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)),
+ WASM_GET_LOCAL(0));
+ for (int depth = 0; depth < 2; depth++) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
+ WASM_GET_LOCAL(0)));
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch_invalid_br) {
+ for (int depth = 1; depth < 4; depth++) {
+ EXPECT_FAILURE_INLINE(&env_i_i,
+ WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
+ WASM_GET_LOCAL(0));
+ EXPECT_FAILURE_INLINE(
+ &env_i_i,
+ WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth + 1)),
+ WASM_GET_LOCAL(0)));
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch_invalid_case_ref) {
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_TABLESWITCH_OP(0, 1, WASM_CASE(0)),
+ WASM_GET_LOCAL(0));
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, TableSwitch1_br) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_ZERO)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, TableSwitch2_br) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(0)),
+ WASM_BRV(0, WASM_I8(1))));
+
+ EXPECT_FAILURE_INLINE(
+ &env_f_ff, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_BRV(0, WASM_I8(3)),
+ WASM_BRV(0, WASM_I8(4))));
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch2x2) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_TABLESWITCH_OP(2, 4, WASM_CASE(0), WASM_CASE(1),
+ WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(3)),
+ WASM_BRV(0, WASM_I8(4))));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, ExprBreakNesting1) {
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(&env_v_v,
+ WASM_BLOCK(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR_IF(0, WASM_ZERO)));
+
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(&env_v_v,
+ WASM_LOOP(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BRV(1, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(1)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, Select) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i,
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, Select_TypeCheck) {
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25),
+ WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(
+ &env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64(0)));
+}
+
+#endif
+
+
+class WasmOpcodeLengthTest : public TestWithZone {
+ public:
+ WasmOpcodeLengthTest() : TestWithZone() {}
+};
+
+
+#define EXPECT_LENGTH(expected, opcode) \
+ { \
+ static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
+ EXPECT_EQ(expected, OpcodeLength(code)); \
+ }
+
+
+TEST_F(WasmOpcodeLengthTest, Statements) {
+ EXPECT_LENGTH(1, kExprNop);
+ EXPECT_LENGTH(2, kExprBlock);
+ EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(1, kExprIf);
+ EXPECT_LENGTH(1, kExprIfElse);
+ EXPECT_LENGTH(1, kExprSelect);
+ EXPECT_LENGTH(2, kExprBr);
+ EXPECT_LENGTH(2, kExprBrIf);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
+ EXPECT_LENGTH(2, kExprI8Const);
+ EXPECT_LENGTH(5, kExprI32Const);
+ EXPECT_LENGTH(5, kExprF32Const);
+ EXPECT_LENGTH(9, kExprI64Const);
+ EXPECT_LENGTH(9, kExprF64Const);
+ EXPECT_LENGTH(2, kExprGetLocal);
+ EXPECT_LENGTH(2, kExprSetLocal);
+ EXPECT_LENGTH(2, kExprLoadGlobal);
+ EXPECT_LENGTH(2, kExprStoreGlobal);
+ EXPECT_LENGTH(2, kExprCallFunction);
+ EXPECT_LENGTH(2, kExprCallIndirect);
+ EXPECT_LENGTH(1, kExprIf);
+ EXPECT_LENGTH(1, kExprIfElse);
+ EXPECT_LENGTH(2, kExprBlock);
+ EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(2, kExprBr);
+ EXPECT_LENGTH(2, kExprBrIf);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, VariableLength) {
+ byte size2[] = {kExprLoadGlobal, 1};
+ byte size3[] = {kExprLoadGlobal, 1 | 0x80, 2};
+ byte size4[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3};
+ byte size5[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4};
+ byte size6[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4 | 0x80, 5};
+
+ EXPECT_EQ(2, OpcodeLength(size2));
+ EXPECT_EQ(3, OpcodeLength(size3));
+ EXPECT_EQ(4, OpcodeLength(size4));
+ EXPECT_EQ(5, OpcodeLength(size5));
+ EXPECT_EQ(6, OpcodeLength(size6));
+}
+
+
+TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
+ EXPECT_LENGTH(2, kExprI32LoadMem8S);
+ EXPECT_LENGTH(2, kExprI32LoadMem8U);
+ EXPECT_LENGTH(2, kExprI32LoadMem16S);
+ EXPECT_LENGTH(2, kExprI32LoadMem16U);
+ EXPECT_LENGTH(2, kExprI32LoadMem);
+ EXPECT_LENGTH(2, kExprI64LoadMem8S);
+ EXPECT_LENGTH(2, kExprI64LoadMem8U);
+ EXPECT_LENGTH(2, kExprI64LoadMem16S);
+ EXPECT_LENGTH(2, kExprI64LoadMem16U);
+ EXPECT_LENGTH(2, kExprI64LoadMem32S);
+ EXPECT_LENGTH(2, kExprI64LoadMem32U);
+ EXPECT_LENGTH(2, kExprI64LoadMem);
+ EXPECT_LENGTH(2, kExprF32LoadMem);
+ EXPECT_LENGTH(2, kExprF64LoadMem);
+
+ EXPECT_LENGTH(2, kExprI32StoreMem8);
+ EXPECT_LENGTH(2, kExprI32StoreMem16);
+ EXPECT_LENGTH(2, kExprI32StoreMem);
+ EXPECT_LENGTH(2, kExprI64StoreMem8);
+ EXPECT_LENGTH(2, kExprI64StoreMem16);
+ EXPECT_LENGTH(2, kExprI64StoreMem32);
+ EXPECT_LENGTH(2, kExprI64StoreMem);
+ EXPECT_LENGTH(2, kExprF32StoreMem);
+ EXPECT_LENGTH(2, kExprF64StoreMem);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
+ EXPECT_LENGTH(1, kExprMemorySize);
+ EXPECT_LENGTH(1, kExprGrowMemory);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
+ EXPECT_LENGTH(1, kExprI32Add);
+ EXPECT_LENGTH(1, kExprI32Sub);
+ EXPECT_LENGTH(1, kExprI32Mul);
+ EXPECT_LENGTH(1, kExprI32DivS);
+ EXPECT_LENGTH(1, kExprI32DivU);
+ EXPECT_LENGTH(1, kExprI32RemS);
+ EXPECT_LENGTH(1, kExprI32RemU);
+ EXPECT_LENGTH(1, kExprI32And);
+ EXPECT_LENGTH(1, kExprI32Ior);
+ EXPECT_LENGTH(1, kExprI32Xor);
+ EXPECT_LENGTH(1, kExprI32Shl);
+ EXPECT_LENGTH(1, kExprI32ShrU);
+ EXPECT_LENGTH(1, kExprI32ShrS);
+ EXPECT_LENGTH(1, kExprI32Eq);
+ EXPECT_LENGTH(1, kExprI32Ne);
+ EXPECT_LENGTH(1, kExprI32LtS);
+ EXPECT_LENGTH(1, kExprI32LeS);
+ EXPECT_LENGTH(1, kExprI32LtU);
+ EXPECT_LENGTH(1, kExprI32LeU);
+ EXPECT_LENGTH(1, kExprI32GtS);
+ EXPECT_LENGTH(1, kExprI32GeS);
+ EXPECT_LENGTH(1, kExprI32GtU);
+ EXPECT_LENGTH(1, kExprI32GeU);
+ EXPECT_LENGTH(1, kExprI32Clz);
+ EXPECT_LENGTH(1, kExprI32Ctz);
+ EXPECT_LENGTH(1, kExprI32Popcnt);
+ EXPECT_LENGTH(1, kExprBoolNot);
+ EXPECT_LENGTH(1, kExprI64Add);
+ EXPECT_LENGTH(1, kExprI64Sub);
+ EXPECT_LENGTH(1, kExprI64Mul);
+ EXPECT_LENGTH(1, kExprI64DivS);
+ EXPECT_LENGTH(1, kExprI64DivU);
+ EXPECT_LENGTH(1, kExprI64RemS);
+ EXPECT_LENGTH(1, kExprI64RemU);
+ EXPECT_LENGTH(1, kExprI64And);
+ EXPECT_LENGTH(1, kExprI64Ior);
+ EXPECT_LENGTH(1, kExprI64Xor);
+ EXPECT_LENGTH(1, kExprI64Shl);
+ EXPECT_LENGTH(1, kExprI64ShrU);
+ EXPECT_LENGTH(1, kExprI64ShrS);
+ EXPECT_LENGTH(1, kExprI64Eq);
+ EXPECT_LENGTH(1, kExprI64Ne);
+ EXPECT_LENGTH(1, kExprI64LtS);
+ EXPECT_LENGTH(1, kExprI64LeS);
+ EXPECT_LENGTH(1, kExprI64LtU);
+ EXPECT_LENGTH(1, kExprI64LeU);
+ EXPECT_LENGTH(1, kExprI64GtS);
+ EXPECT_LENGTH(1, kExprI64GeS);
+ EXPECT_LENGTH(1, kExprI64GtU);
+ EXPECT_LENGTH(1, kExprI64GeU);
+ EXPECT_LENGTH(1, kExprI64Clz);
+ EXPECT_LENGTH(1, kExprI64Ctz);
+ EXPECT_LENGTH(1, kExprI64Popcnt);
+ EXPECT_LENGTH(1, kExprF32Add);
+ EXPECT_LENGTH(1, kExprF32Sub);
+ EXPECT_LENGTH(1, kExprF32Mul);
+ EXPECT_LENGTH(1, kExprF32Div);
+ EXPECT_LENGTH(1, kExprF32Min);
+ EXPECT_LENGTH(1, kExprF32Max);
+ EXPECT_LENGTH(1, kExprF32Abs);
+ EXPECT_LENGTH(1, kExprF32Neg);
+ EXPECT_LENGTH(1, kExprF32CopySign);
+ EXPECT_LENGTH(1, kExprF32Ceil);
+ EXPECT_LENGTH(1, kExprF32Floor);
+ EXPECT_LENGTH(1, kExprF32Trunc);
+ EXPECT_LENGTH(1, kExprF32NearestInt);
+ EXPECT_LENGTH(1, kExprF32Sqrt);
+ EXPECT_LENGTH(1, kExprF32Eq);
+ EXPECT_LENGTH(1, kExprF32Ne);
+ EXPECT_LENGTH(1, kExprF32Lt);
+ EXPECT_LENGTH(1, kExprF32Le);
+ EXPECT_LENGTH(1, kExprF32Gt);
+ EXPECT_LENGTH(1, kExprF32Ge);
+ EXPECT_LENGTH(1, kExprF64Add);
+ EXPECT_LENGTH(1, kExprF64Sub);
+ EXPECT_LENGTH(1, kExprF64Mul);
+ EXPECT_LENGTH(1, kExprF64Div);
+ EXPECT_LENGTH(1, kExprF64Min);
+ EXPECT_LENGTH(1, kExprF64Max);
+ EXPECT_LENGTH(1, kExprF64Abs);
+ EXPECT_LENGTH(1, kExprF64Neg);
+ EXPECT_LENGTH(1, kExprF64CopySign);
+ EXPECT_LENGTH(1, kExprF64Ceil);
+ EXPECT_LENGTH(1, kExprF64Floor);
+ EXPECT_LENGTH(1, kExprF64Trunc);
+ EXPECT_LENGTH(1, kExprF64NearestInt);
+ EXPECT_LENGTH(1, kExprF64Sqrt);
+ EXPECT_LENGTH(1, kExprF64Eq);
+ EXPECT_LENGTH(1, kExprF64Ne);
+ EXPECT_LENGTH(1, kExprF64Lt);
+ EXPECT_LENGTH(1, kExprF64Le);
+ EXPECT_LENGTH(1, kExprF64Gt);
+ EXPECT_LENGTH(1, kExprF64Ge);
+ EXPECT_LENGTH(1, kExprI32SConvertF32);
+ EXPECT_LENGTH(1, kExprI32SConvertF64);
+ EXPECT_LENGTH(1, kExprI32UConvertF32);
+ EXPECT_LENGTH(1, kExprI32UConvertF64);
+ EXPECT_LENGTH(1, kExprI32ConvertI64);
+ EXPECT_LENGTH(1, kExprI64SConvertF32);
+ EXPECT_LENGTH(1, kExprI64SConvertF64);
+ EXPECT_LENGTH(1, kExprI64UConvertF32);
+ EXPECT_LENGTH(1, kExprI64UConvertF64);
+ EXPECT_LENGTH(1, kExprI64SConvertI32);
+ EXPECT_LENGTH(1, kExprI64UConvertI32);
+ EXPECT_LENGTH(1, kExprF32SConvertI32);
+ EXPECT_LENGTH(1, kExprF32UConvertI32);
+ EXPECT_LENGTH(1, kExprF32SConvertI64);
+ EXPECT_LENGTH(1, kExprF32UConvertI64);
+ EXPECT_LENGTH(1, kExprF32ConvertF64);
+ EXPECT_LENGTH(1, kExprF32ReinterpretI32);
+ EXPECT_LENGTH(1, kExprF64SConvertI32);
+ EXPECT_LENGTH(1, kExprF64UConvertI32);
+ EXPECT_LENGTH(1, kExprF64SConvertI64);
+ EXPECT_LENGTH(1, kExprF64UConvertI64);
+ EXPECT_LENGTH(1, kExprF64ConvertF32);
+ EXPECT_LENGTH(1, kExprF64ReinterpretI64);
+ EXPECT_LENGTH(1, kExprI32ReinterpretF32);
+ EXPECT_LENGTH(1, kExprI64ReinterpretF64);
+}
+
+
+class WasmOpcodeArityTest : public TestWithZone {
+ public:
+ WasmOpcodeArityTest() : TestWithZone() {}
+};
+
+
+#define EXPECT_ARITY(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeArity(&env, code)); \
+ }
+
+
+TEST_F(WasmOpcodeArityTest, Control) {
+ FunctionEnv env;
+ EXPECT_ARITY(0, kExprNop);
+
+ EXPECT_ARITY(0, kExprBlock, 0);
+ EXPECT_ARITY(1, kExprBlock, 1);
+ EXPECT_ARITY(2, kExprBlock, 2);
+ EXPECT_ARITY(5, kExprBlock, 5);
+ EXPECT_ARITY(10, kExprBlock, 10);
+
+ EXPECT_ARITY(0, kExprLoop, 0);
+ EXPECT_ARITY(1, kExprLoop, 1);
+ EXPECT_ARITY(2, kExprLoop, 2);
+ EXPECT_ARITY(7, kExprLoop, 7);
+ EXPECT_ARITY(11, kExprLoop, 11);
+
+ EXPECT_ARITY(2, kExprIf);
+ EXPECT_ARITY(3, kExprIfElse);
+ EXPECT_ARITY(3, kExprSelect);
+
+ EXPECT_ARITY(1, kExprBr);
+ EXPECT_ARITY(2, kExprBrIf);
+
+ {
+ TestSignatures sigs;
+ FunctionEnv env;
+ WasmDecoderTest::init_env(&env, sigs.v_v());
+ EXPECT_ARITY(0, kExprReturn);
+ WasmDecoderTest::init_env(&env, sigs.i_i());
+ EXPECT_ARITY(1, kExprReturn);
+ }
+}
+
+
+TEST_F(WasmOpcodeArityTest, Misc) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(0, kExprI8Const);
+ EXPECT_ARITY(0, kExprI32Const);
+ EXPECT_ARITY(0, kExprF32Const);
+ EXPECT_ARITY(0, kExprI64Const);
+ EXPECT_ARITY(0, kExprF64Const);
+ EXPECT_ARITY(0, kExprGetLocal);
+ EXPECT_ARITY(1, kExprSetLocal);
+ EXPECT_ARITY(0, kExprLoadGlobal);
+ EXPECT_ARITY(1, kExprStoreGlobal);
+}
+
+
+TEST_F(WasmOpcodeArityTest, Calls) {
+ TestSignatures sigs;
+ TestModuleEnv module;
+ module.AddFunction(sigs.i_ii());
+ module.AddFunction(sigs.i_i());
+
+ module.AddSignature(sigs.f_ff());
+ module.AddSignature(sigs.i_d());
+
+ {
+ FunctionEnv env;
+ WasmDecoderTest::init_env(&env, sigs.i_ii());
+ env.module = &module;
+
+ EXPECT_ARITY(2, kExprCallFunction, 0);
+ EXPECT_ARITY(3, kExprCallIndirect, 0);
+ EXPECT_ARITY(1, kExprBr);
+ EXPECT_ARITY(2, kExprBrIf);
+ }
+
+ {
+ FunctionEnv env;
+ WasmDecoderTest::init_env(&env, sigs.v_v());
+ env.module = &module;
+
+ EXPECT_ARITY(1, kExprCallFunction, 1);
+ EXPECT_ARITY(2, kExprCallIndirect, 1);
+ EXPECT_ARITY(1, kExprBr);
+ EXPECT_ARITY(2, kExprBrIf);
+ }
+}
+
+
+TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(1, kExprI32LoadMem8S);
+ EXPECT_ARITY(1, kExprI32LoadMem8U);
+ EXPECT_ARITY(1, kExprI32LoadMem16S);
+ EXPECT_ARITY(1, kExprI32LoadMem16U);
+ EXPECT_ARITY(1, kExprI32LoadMem);
+
+ EXPECT_ARITY(1, kExprI64LoadMem8S);
+ EXPECT_ARITY(1, kExprI64LoadMem8U);
+ EXPECT_ARITY(1, kExprI64LoadMem16S);
+ EXPECT_ARITY(1, kExprI64LoadMem16U);
+ EXPECT_ARITY(1, kExprI64LoadMem32S);
+ EXPECT_ARITY(1, kExprI64LoadMem32U);
+ EXPECT_ARITY(1, kExprI64LoadMem);
+ EXPECT_ARITY(1, kExprF32LoadMem);
+ EXPECT_ARITY(1, kExprF64LoadMem);
+
+ EXPECT_ARITY(2, kExprI32StoreMem8);
+ EXPECT_ARITY(2, kExprI32StoreMem16);
+ EXPECT_ARITY(2, kExprI32StoreMem);
+ EXPECT_ARITY(2, kExprI64StoreMem8);
+ EXPECT_ARITY(2, kExprI64StoreMem16);
+ EXPECT_ARITY(2, kExprI64StoreMem32);
+ EXPECT_ARITY(2, kExprI64StoreMem);
+ EXPECT_ARITY(2, kExprF32StoreMem);
+ EXPECT_ARITY(2, kExprF64StoreMem);
+}
+
+
+TEST_F(WasmOpcodeArityTest, MiscMemExpressions) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(0, kExprMemorySize);
+ EXPECT_ARITY(1, kExprGrowMemory);
+}
+
+
+TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(2, kExprI32Add);
+ EXPECT_ARITY(2, kExprI32Sub);
+ EXPECT_ARITY(2, kExprI32Mul);
+ EXPECT_ARITY(2, kExprI32DivS);
+ EXPECT_ARITY(2, kExprI32DivU);
+ EXPECT_ARITY(2, kExprI32RemS);
+ EXPECT_ARITY(2, kExprI32RemU);
+ EXPECT_ARITY(2, kExprI32And);
+ EXPECT_ARITY(2, kExprI32Ior);
+ EXPECT_ARITY(2, kExprI32Xor);
+ EXPECT_ARITY(2, kExprI32Shl);
+ EXPECT_ARITY(2, kExprI32ShrU);
+ EXPECT_ARITY(2, kExprI32ShrS);
+ EXPECT_ARITY(2, kExprI32Eq);
+ EXPECT_ARITY(2, kExprI32Ne);
+ EXPECT_ARITY(2, kExprI32LtS);
+ EXPECT_ARITY(2, kExprI32LeS);
+ EXPECT_ARITY(2, kExprI32LtU);
+ EXPECT_ARITY(2, kExprI32LeU);
+ EXPECT_ARITY(2, kExprI32GtS);
+ EXPECT_ARITY(2, kExprI32GeS);
+ EXPECT_ARITY(2, kExprI32GtU);
+ EXPECT_ARITY(2, kExprI32GeU);
+ EXPECT_ARITY(1, kExprI32Clz);
+ EXPECT_ARITY(1, kExprI32Ctz);
+ EXPECT_ARITY(1, kExprI32Popcnt);
+ EXPECT_ARITY(1, kExprBoolNot);
+ EXPECT_ARITY(2, kExprI64Add);
+ EXPECT_ARITY(2, kExprI64Sub);
+ EXPECT_ARITY(2, kExprI64Mul);
+ EXPECT_ARITY(2, kExprI64DivS);
+ EXPECT_ARITY(2, kExprI64DivU);
+ EXPECT_ARITY(2, kExprI64RemS);
+ EXPECT_ARITY(2, kExprI64RemU);
+ EXPECT_ARITY(2, kExprI64And);
+ EXPECT_ARITY(2, kExprI64Ior);
+ EXPECT_ARITY(2, kExprI64Xor);
+ EXPECT_ARITY(2, kExprI64Shl);
+ EXPECT_ARITY(2, kExprI64ShrU);
+ EXPECT_ARITY(2, kExprI64ShrS);
+ EXPECT_ARITY(2, kExprI64Eq);
+ EXPECT_ARITY(2, kExprI64Ne);
+ EXPECT_ARITY(2, kExprI64LtS);
+ EXPECT_ARITY(2, kExprI64LeS);
+ EXPECT_ARITY(2, kExprI64LtU);
+ EXPECT_ARITY(2, kExprI64LeU);
+ EXPECT_ARITY(2, kExprI64GtS);
+ EXPECT_ARITY(2, kExprI64GeS);
+ EXPECT_ARITY(2, kExprI64GtU);
+ EXPECT_ARITY(2, kExprI64GeU);
+ EXPECT_ARITY(1, kExprI64Clz);
+ EXPECT_ARITY(1, kExprI64Ctz);
+ EXPECT_ARITY(1, kExprI64Popcnt);
+ EXPECT_ARITY(2, kExprF32Add);
+ EXPECT_ARITY(2, kExprF32Sub);
+ EXPECT_ARITY(2, kExprF32Mul);
+ EXPECT_ARITY(2, kExprF32Div);
+ EXPECT_ARITY(2, kExprF32Min);
+ EXPECT_ARITY(2, kExprF32Max);
+ EXPECT_ARITY(1, kExprF32Abs);
+ EXPECT_ARITY(1, kExprF32Neg);
+ EXPECT_ARITY(2, kExprF32CopySign);
+ EXPECT_ARITY(1, kExprF32Ceil);
+ EXPECT_ARITY(1, kExprF32Floor);
+ EXPECT_ARITY(1, kExprF32Trunc);
+ EXPECT_ARITY(1, kExprF32NearestInt);
+ EXPECT_ARITY(1, kExprF32Sqrt);
+ EXPECT_ARITY(2, kExprF32Eq);
+ EXPECT_ARITY(2, kExprF32Ne);
+ EXPECT_ARITY(2, kExprF32Lt);
+ EXPECT_ARITY(2, kExprF32Le);
+ EXPECT_ARITY(2, kExprF32Gt);
+ EXPECT_ARITY(2, kExprF32Ge);
+ EXPECT_ARITY(2, kExprF64Add);
+ EXPECT_ARITY(2, kExprF64Sub);
+ EXPECT_ARITY(2, kExprF64Mul);
+ EXPECT_ARITY(2, kExprF64Div);
+ EXPECT_ARITY(2, kExprF64Min);
+ EXPECT_ARITY(2, kExprF64Max);
+ EXPECT_ARITY(1, kExprF64Abs);
+ EXPECT_ARITY(1, kExprF64Neg);
+ EXPECT_ARITY(2, kExprF64CopySign);
+ EXPECT_ARITY(1, kExprF64Ceil);
+ EXPECT_ARITY(1, kExprF64Floor);
+ EXPECT_ARITY(1, kExprF64Trunc);
+ EXPECT_ARITY(1, kExprF64NearestInt);
+ EXPECT_ARITY(1, kExprF64Sqrt);
+ EXPECT_ARITY(2, kExprF64Eq);
+ EXPECT_ARITY(2, kExprF64Ne);
+ EXPECT_ARITY(2, kExprF64Lt);
+ EXPECT_ARITY(2, kExprF64Le);
+ EXPECT_ARITY(2, kExprF64Gt);
+ EXPECT_ARITY(2, kExprF64Ge);
+ EXPECT_ARITY(1, kExprI32SConvertF32);
+ EXPECT_ARITY(1, kExprI32SConvertF64);
+ EXPECT_ARITY(1, kExprI32UConvertF32);
+ EXPECT_ARITY(1, kExprI32UConvertF64);
+ EXPECT_ARITY(1, kExprI32ConvertI64);
+ EXPECT_ARITY(1, kExprI64SConvertF32);
+ EXPECT_ARITY(1, kExprI64SConvertF64);
+ EXPECT_ARITY(1, kExprI64UConvertF32);
+ EXPECT_ARITY(1, kExprI64UConvertF64);
+ EXPECT_ARITY(1, kExprI64SConvertI32);
+ EXPECT_ARITY(1, kExprI64UConvertI32);
+ EXPECT_ARITY(1, kExprF32SConvertI32);
+ EXPECT_ARITY(1, kExprF32UConvertI32);
+ EXPECT_ARITY(1, kExprF32SConvertI64);
+ EXPECT_ARITY(1, kExprF32UConvertI64);
+ EXPECT_ARITY(1, kExprF32ConvertF64);
+ EXPECT_ARITY(1, kExprF32ReinterpretI32);
+ EXPECT_ARITY(1, kExprF64SConvertI32);
+ EXPECT_ARITY(1, kExprF64UConvertI32);
+ EXPECT_ARITY(1, kExprF64SConvertI64);
+ EXPECT_ARITY(1, kExprF64UConvertI64);
+ EXPECT_ARITY(1, kExprF64ConvertF32);
+ EXPECT_ARITY(1, kExprF64ReinterpretI64);
+ EXPECT_ARITY(1, kExprI32ReinterpretF32);
+ EXPECT_ARITY(1, kExprI64ReinterpretF64);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/encoder-unittest.cc b/deps/v8/test/unittests/wasm/encoder-unittest.cc
new file mode 100644
index 0000000000..156cf6b1e5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/encoder-unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/v8.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/encoder.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class EncoderTest : public TestWithZone {
+ protected:
+ void AddLocal(WasmFunctionBuilder* f, LocalType type) {
+ uint16_t index = f->AddLocal(type);
+ const std::vector<uint8_t>& out_index = UnsignedLEB128From(index);
+ std::vector<uint8_t> code;
+ code.push_back(kExprGetLocal);
+ for (size_t i = 0; i < out_index.size(); i++) {
+ code.push_back(out_index.at(i));
+ }
+ uint32_t local_indices[] = {1};
+ f->EmitCode(&code[0], static_cast<uint32_t>(code.size()), local_indices, 1);
+ }
+
+ void CheckReadValue(uint8_t* leb_value, uint32_t expected_result,
+ int expected_length,
+ ReadUnsignedLEB128ErrorCode expected_error_code) {
+ int length;
+ uint32_t result;
+ ReadUnsignedLEB128ErrorCode error_code =
+ ReadUnsignedLEB128Operand(leb_value, leb_value + 5, &length, &result);
+ CHECK_EQ(error_code, expected_error_code);
+ if (error_code == 0) {
+ CHECK_EQ(result, expected_result);
+ CHECK_EQ(length, expected_length);
+ }
+ }
+
+ void CheckWriteValue(uint32_t input, int length, uint8_t* vals) {
+ const std::vector<uint8_t> result = UnsignedLEB128From(input);
+ CHECK_EQ(result.size(), length);
+ for (int i = 0; i < length; i++) {
+ CHECK_EQ(result.at(i), vals[i]);
+ }
+ }
+};
+
+
+TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ uint16_t local_float32 = function->AddLocal(kAstF32);
+ uint16_t param_float32 = function->AddParam(kAstF32);
+ uint16_t local_int32 = function->AddLocal(kAstI32);
+ uint16_t local_float64 = function->AddLocal(kAstF64);
+ uint16_t local_int64 = function->AddLocal(kAstI64);
+ uint16_t param_int32 = function->AddParam(kAstI32);
+ uint16_t local_int32_2 = function->AddLocal(kAstI32);
+
+ byte code[] = {kExprGetLocal, static_cast<uint8_t>(param_float32)};
+ uint32_t local_indices[] = {1};
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(param_int32);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_int32);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_int32_2);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_int64);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_float32);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_float64);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + f->HeaderSize();
+ f->Serialize(buffer, &header, &body);
+ for (size_t i = 0; i < 7; i++) {
+ CHECK_EQ(i, static_cast<size_t>(*(buffer + 2 * i + f->HeaderSize() + 1)));
+ }
+}
+
+
+TEST_F(EncoderTest, Function_Builder_Indexing_Variable_Width) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ for (size_t i = 0; i < 128; i++) {
+ AddLocal(function, kAstF32);
+ }
+ AddLocal(function, kAstI32);
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + f->HeaderSize();
+ f->Serialize(buffer, &header, &body);
+ body = buffer + f->HeaderSize();
+ for (size_t i = 0; i < 127; i++) {
+ CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * i)));
+ CHECK_EQ(i + 1, static_cast<size_t>(*(body + 2 * i + 1)));
+ }
+ CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * 127)));
+ CHECK_EQ(0x80, static_cast<size_t>(*(body + 2 * 127 + 1)));
+ CHECK_EQ(0x01, static_cast<size_t>(*(body + 2 * 127 + 2)));
+ CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * 127 + 3)));
+ CHECK_EQ(0x00, static_cast<size_t>(*(body + 2 * 127 + 4)));
+}
+
+
+TEST_F(EncoderTest, LEB_Functions) {
+ byte leb_value[5] = {0, 0, 0, 0, 0};
+ CheckReadValue(leb_value, 0, 1, kNoError);
+ CheckWriteValue(0, 1, leb_value);
+ leb_value[0] = 23;
+ CheckReadValue(leb_value, 23, 1, kNoError);
+ CheckWriteValue(23, 1, leb_value);
+ leb_value[0] = 0x80;
+ leb_value[1] = 0x01;
+ CheckReadValue(leb_value, 128, 2, kNoError);
+ CheckWriteValue(128, 2, leb_value);
+ leb_value[0] = 0x80;
+ leb_value[1] = 0x80;
+ leb_value[2] = 0x80;
+ leb_value[3] = 0x80;
+ leb_value[4] = 0x01;
+ CheckReadValue(leb_value, 0x10000000, 5, kNoError);
+ CheckWriteValue(0x10000000, 5, leb_value);
+ leb_value[0] = 0x80;
+ leb_value[1] = 0x80;
+ leb_value[2] = 0x80;
+ leb_value[3] = 0x80;
+ leb_value[4] = 0x80;
+ CheckReadValue(leb_value, -1, -1, kInvalidLEB128);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
new file mode 100644
index 0000000000..0738b5909b
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -0,0 +1,957 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleVerifyTest : public TestWithZone {
+ public:
+ ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
+ return DecodeWasmModule(nullptr, zone(), module_start, module_end, false,
+ false);
+ }
+};
+
+
+#define EXPECT_VERIFIES(data) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + arraysize(data)); \
+ EXPECT_TRUE(result.ok()); \
+ if (result.val) delete result.val; \
+ } while (false)
+
+
+#define EXPECT_FAILURE(data) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + arraysize(data)); \
+ EXPECT_FALSE(result.ok()); \
+ if (result.val) delete result.val; \
+ } while (false)
+
+
+struct LocalTypePair {
+ uint8_t code;
+ LocalType type;
+} kLocalTypes[] = {{kLocalI32, kAstI32},
+ {kLocalI64, kAstI64},
+ {kLocalF32, kAstF32},
+ {kLocalF64, kAstF64}};
+
+
+TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
+ static const byte data[1]{kDeclEnd};
+ {
+ ModuleResult result = DecodeModule(data, data);
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+ }
+ {
+ ModuleResult result = DecodeModule(data, data + 1);
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneGlobal) {
+ const byte data[] = {
+ kDeclGlobals,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0, // name offset
+ kMemI32, // memory type
+ 0, // exported
+ };
+
+ {
+ // Should decode to exactly one global.
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(0, result.val->data_segments->size());
+
+ WasmGlobal* global = &result.val->globals->back();
+
+ EXPECT_EQ(0, global->name_offset);
+ EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(0, global->offset);
+ EXPECT_FALSE(global->exported);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
+ const byte data[] = {
+ kDeclGlobals, 0, // declare 0 globals
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
+ while (true) {
+ uint32_t next = val >> 7;
+ uint32_t out = val & 0x7f;
+ if (next) {
+ buffer.push_back(static_cast<byte>(0x80 | out));
+ val = next;
+ } else {
+ buffer.push_back(static_cast<byte>(out));
+ break;
+ }
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, NGlobals) {
+ const byte data[] = {
+ 0, 0, 0, 0, // name offset
+ kMemI32, // memory type
+ 0, // exported
+ };
+ for (uint32_t i = 0; i < 1000000; i = i * 7 + 1) {
+ std::vector<byte> buffer;
+ buffer.push_back(kDeclGlobals);
+ AppendUint32v(buffer, i);
+ for (uint32_t j = 0; j < i; j++) {
+ buffer.insert(buffer.end(), data, data + arraysize(data));
+ }
+
+ ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, GlobalWithInvalidNameOffset) {
+ const byte data[] = {
+ kDeclGlobals,
+ 1, // declare one global
+ 0,
+ 3,
+ 0,
+ 0, // name offset
+ kMemI32, // memory type
+ 0, // exported
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
+ const byte data[] = {
+ kDeclGlobals,
+ 1, // declare one global
+ 0,
+ 0,
+ 0,
+ 0, // name offset
+ 33, // memory type
+ 0, // exported
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, TwoGlobals) {
+ const byte data[] = {
+ kDeclGlobals,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0, // #0: name offset
+ kMemF32, // memory type
+ 0, // exported
+ 0,
+ 0,
+ 0,
+ 0, // #1: name offset
+ kMemF64, // memory type
+ 1, // exported
+ };
+
+ {
+ // Should decode to exactly two globals.
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(2, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(0, result.val->data_segments->size());
+
+ WasmGlobal* g0 = &result.val->globals->at(0);
+ WasmGlobal* g1 = &result.val->globals->at(1);
+
+ EXPECT_EQ(0, g0->name_offset);
+ EXPECT_EQ(MachineType::Float32(), g0->type);
+ EXPECT_EQ(0, g0->offset);
+ EXPECT_FALSE(g0->exported);
+
+ EXPECT_EQ(0, g1->name_offset);
+ EXPECT_EQ(MachineType::Float64(), g1->type);
+ EXPECT_EQ(0, g1->offset);
+ EXPECT_TRUE(g1->exported);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneSignature) {
+ static const byte data[] = {
+ kDeclSignatures, 1, 0, kLocalVoid // void -> void
+ };
+ EXPECT_VERIFIES(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
+ static const byte data[] = {
+ kDeclSignatures,
+ 3,
+ 0,
+ kLocalVoid, // void -> void
+ 1,
+ kLocalI32,
+ kLocalF32, // f32 -> i32
+ 2,
+ kLocalI32,
+ kLocalF64,
+ kLocalF64, // (f64,f64) -> i32
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(3, result.val->signatures->size());
+ if (result.val->signatures->size() == 3) {
+ EXPECT_EQ(0, result.val->signatures->at(0)->return_count());
+ EXPECT_EQ(1, result.val->signatures->at(1)->return_count());
+ EXPECT_EQ(1, result.val->signatures->at(2)->return_count());
+
+ EXPECT_EQ(0, result.val->signatures->at(0)->parameter_count());
+ EXPECT_EQ(1, result.val->signatures->at(1)->parameter_count());
+ EXPECT_EQ(2, result.val->signatures->at(2)->parameter_count());
+ }
+ if (result.val) delete result.val;
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ ModuleResult result = DecodeModule(data, data + size);
+ // Should fall off the end of module bytes.
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
+ static const byte data[] = {
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ 0, 0, // signature index
+ 0, 0, 0, 0, // name offset
+ 0, 0, 0, 0, // code start offset
+ 0, 0, 0, 0, // code end offset
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 0, // exported
+ 1 // external
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
+ const int kCodeStartOffset = 23;
+ const int kCodeEndOffset = kCodeStartOffset + 1;
+
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ // func#0 ------------------------------------------------------
+ kDeclFunctions, 1,
+ kDeclFunctionLocals | kDeclFunctionExport | kDeclFunctionName, 0,
+ 0, // signature index
+ 9, 0, 0, 0, // name offset
+ 11, 2, // local int32 count
+ 13, 4, // local int64 count
+ 15, 6, // local float32 count
+ 17, 8, // local float64 count
+ 1, 0, // size
+ kExprNop,
+ };
+
+ {
+ // Should decode to exactly one function.
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0, result.val->globals->size());
+ EXPECT_EQ(1, result.val->signatures->size());
+ EXPECT_EQ(1, result.val->functions->size());
+ EXPECT_EQ(0, result.val->data_segments->size());
+ EXPECT_EQ(0, result.val->function_table->size());
+
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(9, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_EQ(523, function->local_int32_count);
+ EXPECT_EQ(1037, function->local_int64_count);
+ EXPECT_EQ(1551, function->local_float32_count);
+ EXPECT_EQ(2065, function->local_float64_count);
+
+ EXPECT_TRUE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 5; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ kDeclFunctionImport, // no name, no locals, imported
+ 0, 0, // signature index
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->functions->size());
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(0, function->code_start_offset);
+ EXPECT_EQ(0, function->code_end_offset);
+
+ EXPECT_EQ(0, function->local_int32_count);
+ EXPECT_EQ(0, function->local_int64_count);
+ EXPECT_EQ(0, function->local_float32_count);
+ EXPECT_EQ(0, function->local_float64_count);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_TRUE(function->external);
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
+ static const byte kCodeStartOffset = 11;
+ static const byte kCodeEndOffset = kCodeStartOffset + 1;
+
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ 0, // no name, no locals
+ 0, 0, // signature index
+ 1, 0, // body size
+ kExprNop // body
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->functions->size());
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_EQ(0, function->local_int32_count);
+ EXPECT_EQ(0, function->local_int64_count);
+ EXPECT_EQ(0, function->local_float32_count);
+ EXPECT_EQ(0, function->local_float64_count);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
+ static const byte kCodeStartOffset = 19;
+ static const byte kCodeEndOffset = kCodeStartOffset + 1;
+
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ kDeclFunctionLocals, 0, 0, // signature index
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 1, 0, // body size
+ kExprNop // body
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->functions->size());
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_EQ(513, function->local_int32_count);
+ EXPECT_EQ(1027, function->local_int64_count);
+ EXPECT_EQ(1541, function->local_float32_count);
+ EXPECT_EQ(2055, function->local_float64_count);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
+ static const byte kCodeStartOffset = 2 + kDeclGlobalSize + 4 + 2 + 17;
+ static const byte kCodeEndOffset = kCodeStartOffset + 3;
+
+ static const byte data[] = {
+ // global#0 --------------------------------------------------
+ kDeclGlobals, 1, 0, 0, 0, 0, // name offset
+ kMemU8, // memory type
+ 0, // exported
+ // sig#0 -----------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // func#0 ----------------------------------------------------
+ kDeclFunctions, 1, kDeclFunctionLocals | kDeclFunctionName, 0,
+ 0, // signature index
+ 9, 0, 0, 0, // name offset
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 3, 0, // body size
+ kExprNop, // func#0 body
+ kExprNop, // func#0 body
+ kExprNop, // func#0 body
+ // segment#0 -------------------------------------------------
+ kDeclDataSegments, 1, 0xae, 0xb3, 0x08, 0, // dest addr
+ 15, 0, 0, 0, // source offset
+ 5, 0, 0, 0, // source size
+ 1, // init
+ // rest ------------------------------------------------------
+ kDeclEnd,
+ };
+
+ {
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->globals->size());
+ EXPECT_EQ(1, result.val->functions->size());
+ EXPECT_EQ(1, result.val->data_segments->size());
+
+ WasmGlobal* global = &result.val->globals->back();
+
+ EXPECT_EQ(0, global->name_offset);
+ EXPECT_EQ(MachineType::Uint8(), global->type);
+ EXPECT_EQ(0, global->offset);
+ EXPECT_FALSE(global->exported);
+
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(9, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ WasmDataSegment* segment = &result.val->data_segments->back();
+
+ EXPECT_EQ(0x8b3ae, segment->dest_addr);
+ EXPECT_EQ(15, segment->source_offset);
+ EXPECT_EQ(5, segment->source_size);
+ EXPECT_TRUE(segment->init);
+
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneDataSegment) {
+ const byte data[] = {
+ kDeclDataSegments,
+ 1,
+ 0xaa,
+ 0xbb,
+ 0x09,
+ 0, // dest addr
+ 11,
+ 0,
+ 0,
+ 0, // source offset
+ 3,
+ 0,
+ 0,
+ 0, // source size
+ 1, // init
+ };
+
+ {
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(1, result.val->data_segments->size());
+
+ WasmDataSegment* segment = &result.val->data_segments->back();
+
+ EXPECT_EQ(0x9bbaa, segment->dest_addr);
+ EXPECT_EQ(11, segment->source_offset);
+ EXPECT_EQ(3, segment->source_size);
+ EXPECT_TRUE(segment->init);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
+ const byte data[] = {
+ kDeclDataSegments,
+ 2,
+ 0xee,
+ 0xff,
+ 0x07,
+ 0, // dest addr
+ 9,
+ 0,
+ 0,
+ 0, // #0: source offset
+ 4,
+ 0,
+ 0,
+ 0, // source size
+ 0, // init
+ 0xcc,
+ 0xdd,
+ 0x06,
+ 0, // #1: dest addr
+ 6,
+ 0,
+ 0,
+ 0, // source offset
+ 10,
+ 0,
+ 0,
+ 0, // source size
+ 1, // init
+ };
+
+ {
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(2, result.val->data_segments->size());
+
+ WasmDataSegment* s0 = &result.val->data_segments->at(0);
+ WasmDataSegment* s1 = &result.val->data_segments->at(1);
+
+ EXPECT_EQ(0x7ffee, s0->dest_addr);
+ EXPECT_EQ(9, s0->source_offset);
+ EXPECT_EQ(4, s0->source_size);
+ EXPECT_FALSE(s0->init);
+
+ EXPECT_EQ(0x6ddcc, s1->dest_addr);
+ EXPECT_EQ(6, s1->source_offset);
+ EXPECT_EQ(10, s1->source_size);
+ EXPECT_TRUE(s1->init);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+// To make below tests for indirect calls much shorter.
+#define FUNCTION(sig_index, external) \
+ kDeclFunctionImport, static_cast<byte>(sig_index), \
+ static_cast<byte>(sig_index >> 8)
+
+
+TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // func#0 ------------------------------------------------------
+ kDeclFunctions, 1, FUNCTION(0, 0),
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 1, 0, 0};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.ok()) {
+ EXPECT_EQ(1, result.val->signatures->size());
+ EXPECT_EQ(1, result.val->functions->size());
+ EXPECT_EQ(1, result.val->function_table->size());
+ EXPECT_EQ(0, result.val->function_table->at(0));
+ }
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 2, 0, 0, // void -> void
+ 0, kLocalI32, // void -> i32
+ // func#0 ------------------------------------------------------
+ kDeclFunctions, 4, FUNCTION(0, 1), FUNCTION(1, 1), FUNCTION(0, 1),
+ FUNCTION(1, 1),
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 8, 0, 0, 1, 0, 2, 0, 3, 0, 0, 0, 1, 0, 2, 0, 3, 0,
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.ok()) {
+ EXPECT_EQ(2, result.val->signatures->size());
+ EXPECT_EQ(4, result.val->functions->size());
+ EXPECT_EQ(8, result.val->function_table->size());
+ for (int i = 0; i < 8; i++) {
+ EXPECT_EQ(i & 3, result.val->function_table->at(i));
+ }
+ }
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 1, 0, 0,
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // functions ---------------------------------------------------
+ kDeclFunctions, 1, FUNCTION(0, 1),
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 1, 1, 0,
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+class WasmSignatureDecodeTest : public TestWithZone {};
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
+ static const byte data[] = {0, 0};
+ Zone zone;
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(&zone, data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(0, sig->parameter_count());
+ EXPECT_EQ(0, sig->return_count());
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair ret_type = kLocalTypes[i];
+ const byte data[] = {0, ret_type.code};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(0, sig->parameter_count());
+ EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(ret_type.type, sig->GetReturn());
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair param_type = kLocalTypes[i];
+ const byte data[] = {1, 0, param_type.code};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(1, sig->parameter_count());
+ EXPECT_EQ(0, sig->return_count());
+ EXPECT_EQ(param_type.type, sig->GetParam(0));
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair ret_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalTypePair param_type = kLocalTypes[j];
+ const byte data[] = {1, // param count
+ ret_type.code, // ret
+ param_type.code}; // param
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(1, sig->parameter_count());
+ EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(param_type.type, sig->GetParam(0));
+ EXPECT_EQ(ret_type.type, sig->GetReturn());
+ }
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair p0_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalTypePair p1_type = kLocalTypes[j];
+ const byte data[] = {2, // param count
+ kLocalI32, // ret
+ p0_type.code, // p0
+ p1_type.code}; // p1
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(2, sig->parameter_count());
+ EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(p0_type.type, sig->GetParam(0));
+ EXPECT_EQ(p1_type.type, sig->GetParam(1));
+ }
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
+ byte data[256];
+ for (int p = 0; p <= 255; p = p + 1 + p * 3) {
+ for (int i = 0; i <= p; i++) data[i] = kLocalI32;
+ data[0] = static_cast<byte>(p);
+
+ for (int i = 0; i < p + 1; i++) {
+ // Should fall off the end for all signatures.
+ FunctionSig* sig = DecodeWasmSignatureForTesting(zone(), data, data + i);
+ EXPECT_EQ(nullptr, sig);
+ }
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
+ byte kInvalidType = 76;
+ for (int i = 1; i < 3; i++) {
+ byte data[] = {2, kLocalI32, kLocalI32, kLocalI32};
+ data[i] = kInvalidType;
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type) {
+ static const int kParamCount = 3;
+ for (int i = 0; i < kParamCount; i++) {
+ byte data[] = {kParamCount, kLocalI32, kLocalI32, kLocalI32, kLocalI32};
+ data[i + 2] = kLocalVoid;
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+ }
+}
+
+
+class WasmFunctionVerifyTest : public TestWithZone {};
+
+
+TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
+ byte data[] = {
+ 0, kLocalVoid, // signature
+ 3, 0, // local int32 count
+ 4, 0, // local int64 count
+ 5, 0, // local float32 count
+ 6, 0, // local float64 count
+ kExprNop // body
+ };
+
+ FunctionResult result = DecodeWasmFunction(nullptr, zone(), nullptr, data,
+ data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+
+ if (result.val && result.ok()) {
+ WasmFunction* function = result.val;
+ EXPECT_EQ(0, function->sig->parameter_count());
+ EXPECT_EQ(0, function->sig->return_count());
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(arraysize(data) - 1, function->code_start_offset);
+ EXPECT_EQ(arraysize(data), function->code_end_offset);
+ EXPECT_EQ(3, function->local_int32_count);
+ EXPECT_EQ(4, function->local_int64_count);
+ EXPECT_EQ(5, function->local_float32_count);
+ EXPECT_EQ(6, function->local_float64_count);
+ EXPECT_FALSE(function->external);
+ EXPECT_FALSE(function->exported);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionNoLen) {
+ const byte data[] = {
+ kDeclWLL, // section without length.
+ };
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionEmpty) {
+ const byte data[] = {
+ kDeclWLL, 0, // empty section
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionOne) {
+ const byte data[] = {
+ kDeclWLL,
+ 1, // LEB128 1
+ 0, // one byte section
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionTen) {
+ const byte data[] = {
+ kDeclWLL,
+ 10, // LEB128 10
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionOverflow) {
+ const byte data[] = {
+ kDeclWLL,
+ 11, // LEB128 11
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ };
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionUnderflow) {
+ const byte data[] = {
+ kDeclWLL,
+ 0xff, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xffffffff
+ 1, 2, 3, 4, // 4 byte section
+ };
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionLoop) {
+ // Would infinite loop decoding if wrapping and allowed.
+ const byte data[] = {
+ kDeclWLL,
+ 0xfa, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xfffffffa
+ 1, 2, 3, 4, // 4 byte section
+ };
+ EXPECT_FAILURE(data);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
new file mode 100644
index 0000000000..c5bb5eca00
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -0,0 +1,319 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmMacroGenTest : public TestWithZone {};
+
+#define EXPECT_SIZE(size, ...) \
+ do { \
+ byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(size, sizeof(code)); \
+ } while (false)
+
+
+TEST_F(WasmMacroGenTest, Constants) {
+ EXPECT_SIZE(2, WASM_ONE);
+ EXPECT_SIZE(2, WASM_ZERO);
+
+ EXPECT_SIZE(2, WASM_I8(122));
+ EXPECT_SIZE(2, WASM_I8(254));
+
+ EXPECT_SIZE(5, WASM_I32(1));
+ EXPECT_SIZE(5, WASM_I32(10000));
+ EXPECT_SIZE(5, WASM_I32(-9828934));
+
+ EXPECT_SIZE(9, WASM_I64(1));
+ EXPECT_SIZE(9, WASM_I64(10000));
+ EXPECT_SIZE(9, WASM_I64(-9828934));
+ EXPECT_SIZE(9, WASM_I64(0x123456789abcdef0ULL));
+
+ EXPECT_SIZE(5, WASM_F32(1.0f));
+ EXPECT_SIZE(5, WASM_F32(10000.0f));
+ EXPECT_SIZE(5, WASM_F32(-9828934.0f));
+
+ EXPECT_SIZE(9, WASM_F64(1.5));
+ EXPECT_SIZE(9, WASM_F64(10200.0));
+ EXPECT_SIZE(9, WASM_F64(-9818934.0));
+}
+
+
+TEST_F(WasmMacroGenTest, Statements) {
+ EXPECT_SIZE(1, WASM_NOP);
+
+ EXPECT_SIZE(4, WASM_SET_LOCAL(0, WASM_ZERO));
+
+ EXPECT_SIZE(4, WASM_STORE_GLOBAL(0, WASM_ZERO));
+
+ EXPECT_SIZE(6, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(4, WASM_IF(WASM_ZERO, WASM_NOP));
+
+ EXPECT_SIZE(5, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
+
+ EXPECT_SIZE(5, WASM_SELECT(WASM_ZERO, WASM_NOP, WASM_NOP));
+
+ EXPECT_SIZE(3, WASM_BR(0));
+ EXPECT_SIZE(5, WASM_BR_IF(0, WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_BLOCK(1, WASM_NOP));
+ EXPECT_SIZE(4, WASM_BLOCK(2, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
+
+ EXPECT_SIZE(5, WASM_INFINITE_LOOP);
+
+ EXPECT_SIZE(3, WASM_LOOP(1, WASM_NOP));
+ EXPECT_SIZE(4, WASM_LOOP(2, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(1, WASM_BR(0)));
+ EXPECT_SIZE(7, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+
+ EXPECT_SIZE(1, WASM_RETURN0);
+ EXPECT_SIZE(3, WASM_RETURN(WASM_ZERO));
+ EXPECT_SIZE(5, WASM_RETURN(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(1, WASM_UNREACHABLE);
+}
+
+
+TEST_F(WasmMacroGenTest, MacroStatements) {
+ EXPECT_SIZE(8, WASM_WHILE(WASM_I8(0), WASM_NOP));
+ EXPECT_SIZE(7, WASM_INC_LOCAL(0));
+ EXPECT_SIZE(7, WASM_INC_LOCAL_BY(0, 3));
+
+ EXPECT_SIZE(3, WASM_BREAK(0));
+ EXPECT_SIZE(3, WASM_CONTINUE(0));
+}
+
+
+TEST_F(WasmMacroGenTest, TableSwitch) {
+ EXPECT_SIZE(2, WASM_CASE(9));
+ EXPECT_SIZE(2, WASM_CASE_BR(11));
+
+ EXPECT_SIZE(7, WASM_TABLESWITCH_OP(0, 1, WASM_CASE(7)));
+ EXPECT_SIZE(9, WASM_TABLESWITCH_OP(0, 2, WASM_CASE(7), WASM_CASE(8)));
+
+ EXPECT_SIZE(4, WASM_TABLESWITCH_BODY(WASM_I8(88), WASM_I8(77)));
+ EXPECT_SIZE(
+ 6, WASM_TABLESWITCH_BODY(WASM_I8(33), WASM_I8(44), WASM_GET_LOCAL(0)));
+}
+
+
+TEST_F(WasmMacroGenTest, Expressions) {
+ EXPECT_SIZE(2, WASM_GET_LOCAL(0));
+ EXPECT_SIZE(2, WASM_GET_LOCAL(1));
+ EXPECT_SIZE(2, WASM_GET_LOCAL(12));
+ EXPECT_SIZE(2, WASM_LOAD_GLOBAL(0));
+ EXPECT_SIZE(2, WASM_LOAD_GLOBAL(1));
+ EXPECT_SIZE(2, WASM_LOAD_GLOBAL(12));
+ EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
+ EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO));
+ EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_NOT(WASM_ZERO));
+
+ EXPECT_SIZE(4, WASM_BRV(1, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(4, WASM_BLOCK(1, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_BLOCK(2, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+
+ EXPECT_SIZE(4, WASM_LOOP(1, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOOP(2, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, FunctionCalls) {
+ EXPECT_SIZE(2, WASM_CALL_FUNCTION0(0));
+ EXPECT_SIZE(2, WASM_CALL_FUNCTION0(1));
+ EXPECT_SIZE(2, WASM_CALL_FUNCTION0(11));
+
+ EXPECT_SIZE(4, WASM_CALL_FUNCTION(0, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_CALL_FUNCTION(1, WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
+
+ EXPECT_SIZE(6, WASM_CALL_INDIRECT(0, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(8, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Int32Ops) {
+ EXPECT_SIZE(5, WASM_I32_ADD(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_SUB(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_MUL(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_DIVS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_DIVU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_REMS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_REMU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_AND(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_IOR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_XOR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_SHL(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_SHR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_SAR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_EQ(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(5, WASM_I32_LTS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_LES(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_LTU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_LEU(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(5, WASM_I32_GTS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_GES(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_GTU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I32_GEU(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_I32_CLZ(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I32_CTZ(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I32_POPCNT(WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Int64Ops) {
+ EXPECT_SIZE(5, WASM_I64_ADD(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_SUB(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_MUL(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_DIVS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_DIVU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_REMS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_REMU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_AND(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_IOR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_XOR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_SHL(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_SHR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_SAR(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_EQ(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(5, WASM_I64_LTS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_LES(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_LTU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_LEU(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(5, WASM_I64_GTS(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_GES(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_GTU(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_I64_GEU(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_I64_CLZ(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_CTZ(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_POPCNT(WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Float32Ops) {
+ EXPECT_SIZE(5, WASM_F32_ADD(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_SUB(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_MUL(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_DIV(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_MIN(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_MAX(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_COPYSIGN(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_F32_ABS(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_NEG(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_CEIL(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_FLOOR(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_TRUNC(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_NEARESTINT(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_SQRT(WASM_ZERO));
+
+ EXPECT_SIZE(5, WASM_F32_EQ(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_LT(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_LE(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_GT(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F32_GE(WASM_ZERO, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Float64Ops) {
+ EXPECT_SIZE(5, WASM_F64_ADD(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_SUB(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_MUL(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_DIV(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_MIN(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_MAX(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_COPYSIGN(WASM_ZERO, WASM_ZERO));
+
+ EXPECT_SIZE(3, WASM_F64_ABS(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_NEG(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_CEIL(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_FLOOR(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_TRUNC(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_NEARESTINT(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_SQRT(WASM_ZERO));
+
+ EXPECT_SIZE(5, WASM_F64_EQ(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_LT(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_LE(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_GT(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_F64_GE(WASM_ZERO, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Conversions) {
+ EXPECT_SIZE(3, WASM_I32_SCONVERT_F32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I32_SCONVERT_F64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I32_UCONVERT_F32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I32_UCONVERT_F64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I32_CONVERT_I64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_SCONVERT_F32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_SCONVERT_F64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_UCONVERT_F32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_UCONVERT_F64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_SCONVERT_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_I64_UCONVERT_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_SCONVERT_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_UCONVERT_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_SCONVERT_I64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_UCONVERT_I64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_CONVERT_F64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F32_REINTERPRET_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_SCONVERT_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_UCONVERT_I32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_SCONVERT_I64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_UCONVERT_I64(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_CONVERT_F32(WASM_ZERO));
+ EXPECT_SIZE(3, WASM_F64_REINTERPRET_I64(WASM_ZERO));
+}
+
+static const MachineType kMemTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+ MachineType::Float64()};
+
+TEST_F(WasmMacroGenTest, LoadsAndStores) {
+ for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+ EXPECT_SIZE(4, WASM_LOAD_MEM(kMemTypes[i], WASM_ZERO));
+ }
+ for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+ EXPECT_SIZE(6, WASM_STORE_MEM(kMemTypes[i], WASM_ZERO, WASM_GET_LOCAL(0)));
+ }
+}
+
+
+TEST_F(WasmMacroGenTest, LoadsAndStoresWithOffset) {
+ for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+ EXPECT_SIZE(5, WASM_LOAD_MEM_OFFSET(kMemTypes[i], 11, WASM_ZERO));
+ }
+ for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+ EXPECT_SIZE(7, WASM_STORE_MEM_OFFSET(kMemTypes[i], 13, WASM_ZERO,
+ WASM_GET_LOCAL(0)));
+ }
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/webkit/class-syntax-call-expected.txt b/deps/v8/test/webkit/class-syntax-call-expected.txt
index 79045ace95..00fbe1e922 100644
--- a/deps/v8/test/webkit/class-syntax-call-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-call-expected.txt
@@ -4,13 +4,13 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS new A did not throw exception.
-PASS A() threw exception TypeError: Class constructors cannot be invoked without 'new'.
+PASS A() threw exception TypeError: Class constructor A cannot be invoked without 'new'.
PASS new B did not throw exception.
-PASS B() threw exception TypeError: Class constructors cannot be invoked without 'new'.
+PASS B() threw exception TypeError: Class constructor B cannot be invoked without 'new'.
PASS new (class { constructor() {} })() did not throw exception.
-PASS (class { constructor() {} })() threw exception TypeError: Class constructors cannot be invoked without 'new'.
-PASS new (class extends null { constructor() { super() } })() threw exception TypeError: function () {} is not a constructor.
-PASS (class extends null { constructor() { super() } })() threw exception TypeError: Class constructors cannot be invoked without 'new'.
+PASS (class { constructor() {} })() threw exception TypeError: Class constructor cannot be invoked without 'new'.
+PASS new (class extends null { constructor() { super() } })() threw exception TypeError: super is not a constructor.
+PASS (class extends null { constructor() { super() } })() threw exception TypeError: Class constructor cannot be invoked without 'new'.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/class-syntax-call.js b/deps/v8/test/webkit/class-syntax-call.js
index 3cf82a8899..fa32a8cc9b 100644
--- a/deps/v8/test/webkit/class-syntax-call.js
+++ b/deps/v8/test/webkit/class-syntax-call.js
@@ -29,12 +29,12 @@ class A { constructor() {} };
class B extends A { constructor() { super() } };
shouldNotThrow('new A');
-shouldThrow('A()', '"TypeError: Class constructors cannot be invoked without \'new\'"');
+shouldThrow('A()', '"TypeError: Class constructor A cannot be invoked without \'new\'"');
shouldNotThrow('new B');
-shouldThrow('B()', '"TypeError: Class constructors cannot be invoked without \'new\'"');
+shouldThrow('B()', '"TypeError: Class constructor B cannot be invoked without \'new\'"');
shouldNotThrow('new (class { constructor() {} })()');
-shouldThrow('(class { constructor() {} })()', '"TypeError: Class constructors cannot be invoked without \'new\'"');
-shouldThrow('new (class extends null { constructor() { super() } })()', '"TypeError: function () {} is not a constructor"');
-shouldThrow('(class extends null { constructor() { super() } })()', '"TypeError: Class constructors cannot be invoked without \'new\'"');
+shouldThrow('(class { constructor() {} })()', '"TypeError: Class constructor cannot be invoked without \'new\'"');
+shouldThrow('new (class extends null { constructor() { super() } })()', '"TypeError: super is not a constructor"');
+shouldThrow('(class extends null { constructor() { super() } })()', '"TypeError: Class constructor cannot be invoked without \'new\'"');
var successfullyParsed = true;
diff --git a/deps/v8/test/webkit/class-syntax-declaration-expected.txt b/deps/v8/test/webkit/class-syntax-declaration-expected.txt
index f1466c5114..a424edfe90 100644
--- a/deps/v8/test/webkit/class-syntax-declaration-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-declaration-expected.txt
@@ -32,12 +32,12 @@ PASS class X { constructor() {} static set prototype() {} } threw exception Synt
PASS class X { constructor() {} prototype() { return instanceMethodValue; } } did not throw exception.
PASS class X { constructor() {} prototype() { return instanceMethodValue; } }; (new X).prototype() is instanceMethodValue
PASS class X { constructor() {} set foo(a) {} } did not throw exception.
-FAIL class X { constructor() {} set foo({x, y}) {} } should not throw exception. Threw exception SyntaxError: Unexpected token {.
+PASS class X { constructor() {} set foo({x, y}) {} } did not throw exception.
PASS class X { constructor() {} set foo() {} } threw exception SyntaxError: Setter must have exactly one formal parameter..
PASS class X { constructor() {} set foo(a, b) {} } threw exception SyntaxError: Setter must have exactly one formal parameter..
PASS class X { constructor() {} get foo() {} } did not throw exception.
PASS class X { constructor() {} get foo(x) {} } threw exception SyntaxError: Getter must not have any formal parameters..
-PASS class X { constructor() {} get foo({x, y}) {} } threw exception SyntaxError: Unexpected token {.
+PASS class X { constructor() {} get foo({x, y}) {} } threw exception SyntaxError: Getter must not have any formal parameters..
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/class-syntax-declaration.js b/deps/v8/test/webkit/class-syntax-declaration.js
index 3c9aed7441..f8ecdbb151 100644
--- a/deps/v8/test/webkit/class-syntax-declaration.js
+++ b/deps/v8/test/webkit/class-syntax-declaration.js
@@ -21,7 +21,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
+// Flags: --harmony-sloppy --harmony-destructuring-bind
description('Tests for ES6 class syntax declarations');
diff --git a/deps/v8/test/webkit/class-syntax-default-constructor-expected.txt b/deps/v8/test/webkit/class-syntax-default-constructor-expected.txt
index b08e7f0a1e..2a6e1c4a52 100644
--- a/deps/v8/test/webkit/class-syntax-default-constructor-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-default-constructor-expected.txt
@@ -4,11 +4,11 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS new A instanceof A is true
-PASS A() threw exception TypeError: Class constructors cannot be invoked without 'new'.
+PASS A() threw exception TypeError: Class constructor A cannot be invoked without 'new'.
PASS A.prototype.constructor instanceof Function is true
PASS A.prototype.constructor.name is "A"
PASS new B instanceof A; new B instanceof A is true
-PASS B() threw exception TypeError: Class constructors cannot be invoked without 'new'.
+PASS B() threw exception TypeError: Class constructor B cannot be invoked without 'new'.
PASS B.prototype.constructor.name is "B"
PASS A !== B is true
PASS A.prototype.constructor !== B.prototype.constructor is true
diff --git a/deps/v8/test/webkit/class-syntax-default-constructor.js b/deps/v8/test/webkit/class-syntax-default-constructor.js
index 841afddadc..1695f6a8da 100644
--- a/deps/v8/test/webkit/class-syntax-default-constructor.js
+++ b/deps/v8/test/webkit/class-syntax-default-constructor.js
@@ -29,11 +29,11 @@ class A { };
class B extends A { };
shouldBeTrue('new A instanceof A');
-shouldThrow('A()', '"TypeError: Class constructors cannot be invoked without \'new\'"');
+shouldThrow('A()', '"TypeError: Class constructor A cannot be invoked without \'new\'"');
shouldBeTrue('A.prototype.constructor instanceof Function');
shouldBe('A.prototype.constructor.name', '"A"');
shouldBeTrue('new B instanceof A; new B instanceof A');
-shouldThrow('B()', '"TypeError: Class constructors cannot be invoked without \'new\'"');
+shouldThrow('B()', '"TypeError: Class constructor B cannot be invoked without \'new\'"');
shouldBe('B.prototype.constructor.name', '"B"');
shouldBeTrue('A !== B');
shouldBeTrue('A.prototype.constructor !== B.prototype.constructor');
diff --git a/deps/v8/test/webkit/class-syntax-expression-expected.txt b/deps/v8/test/webkit/class-syntax-expression-expected.txt
index aa1cfb76b0..5bcaf002f8 100644
--- a/deps/v8/test/webkit/class-syntax-expression-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-expression-expected.txt
@@ -30,12 +30,12 @@ PASS x = class { constructor() {} static set prototype() {} } threw exception Sy
PASS x = class { constructor() {} prototype() { return instanceMethodValue; } } did not throw exception.
PASS x = class { constructor() {} prototype() { return instanceMethodValue; } }; (new x).prototype() is instanceMethodValue
PASS x = class { constructor() {} set foo(a) {} } did not throw exception.
-FAIL x = class { constructor() {} set foo({x, y}) {} } should not throw exception. Threw exception SyntaxError: Unexpected token {.
+PASS x = class { constructor() {} set foo({x, y}) {} } did not throw exception.
PASS x = class { constructor() {} set foo() {} } threw exception SyntaxError: Setter must have exactly one formal parameter..
PASS x = class { constructor() {} set foo(a, b) {} } threw exception SyntaxError: Setter must have exactly one formal parameter..
PASS x = class { constructor() {} get foo() {} } did not throw exception.
PASS x = class { constructor() {} get foo(x) {} } threw exception SyntaxError: Getter must not have any formal parameters..
-PASS x = class { constructor() {} get foo({x, y}) {} } threw exception SyntaxError: Unexpected token {.
+PASS x = class { constructor() {} get foo({x, y}) {} } threw exception SyntaxError: Getter must not have any formal parameters..
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/class-syntax-expression.js b/deps/v8/test/webkit/class-syntax-expression.js
index 3272b81f7e..182afb1ff0 100644
--- a/deps/v8/test/webkit/class-syntax-expression.js
+++ b/deps/v8/test/webkit/class-syntax-expression.js
@@ -21,7 +21,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
+// Flags: --harmony-sloppy --harmony-destructuring-bind
description('Tests for ES6 class syntax expressions');
diff --git a/deps/v8/test/webkit/class-syntax-extends-expected.txt b/deps/v8/test/webkit/class-syntax-extends-expected.txt
index 1eede9c60b..45e999dd93 100644
--- a/deps/v8/test/webkit/class-syntax-extends-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-extends-expected.txt
@@ -60,7 +60,7 @@ PASS x = {}; new (class extends undefined { constructor () { return x; } }) thre
PASS y = 12; new (class extends undefined { constructor () { return y; } }) threw exception TypeError: Class extends value undefined is not a function or null.
PASS class x {}; new (class extends null { constructor () { return new x; } }) instanceof x is true
PASS new (class extends null { constructor () { this; } }) threw exception ReferenceError: this is not defined.
-PASS new (class extends null { constructor () { super(); } }) threw exception TypeError: function () {} is not a constructor.
+PASS new (class extends null { constructor () { super(); } }) threw exception TypeError: super is not a constructor.
PASS x = {}; new (class extends null { constructor () { return x } }) is x
PASS y = 12; new (class extends null { constructor () { return y; } }) threw exception TypeError: Derived constructors may only return object or undefined.
PASS class x {}; new (class extends null { constructor () { return new x; } }) instanceof x is true
diff --git a/deps/v8/test/webkit/class-syntax-extends.js b/deps/v8/test/webkit/class-syntax-extends.js
index 3c7ee19cef..a1b8f1292d 100644
--- a/deps/v8/test/webkit/class-syntax-extends.js
+++ b/deps/v8/test/webkit/class-syntax-extends.js
@@ -100,7 +100,7 @@ shouldThrow('x = {}; new (class extends undefined { constructor () { return x; }
shouldThrow('y = 12; new (class extends undefined { constructor () { return y; } })', '"TypeError: Class extends value undefined is not a function or null"');
shouldBeTrue ('class x {}; new (class extends null { constructor () { return new x; } }) instanceof x');
shouldThrow('new (class extends null { constructor () { this; } })', '"ReferenceError: this is not defined"');
-shouldThrow('new (class extends null { constructor () { super(); } })', '"TypeError: function () {} is not a constructor"');
+shouldThrow('new (class extends null { constructor () { super(); } })', '"TypeError: super is not a constructor"');
shouldBe('x = {}; new (class extends null { constructor () { return x } })', 'x');
shouldThrow('y = 12; new (class extends null { constructor () { return y; } })', '"TypeError: Derived constructors may only return object or undefined"');
shouldBeTrue ('class x {}; new (class extends null { constructor () { return new x; } }) instanceof x');
diff --git a/deps/v8/test/webkit/class-syntax-super-expected.txt b/deps/v8/test/webkit/class-syntax-super-expected.txt
index 2f38f5ae38..b45b6b7ad2 100644
--- a/deps/v8/test/webkit/class-syntax-super-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-super-expected.txt
@@ -29,12 +29,12 @@ PASS x instanceof Base is false
PASS new (class extends Base { constructor() { } }) threw exception ReferenceError: this is not defined.
PASS new (class extends Base { constructor() { return 1; } }) threw exception TypeError: Derived constructors may only return object or undefined.
PASS new (class extends null { constructor() { return undefined } }) threw exception ReferenceError: this is not defined.
-PASS new (class extends null { constructor() { super(); return undefined } }) threw exception TypeError: function () {} is not a constructor.
+PASS new (class extends null { constructor() { super(); return undefined } }) threw exception TypeError: super is not a constructor.
PASS x = { }; new (class extends null { constructor() { return x } }); is x
PASS x instanceof Object is true
PASS new (class extends null { constructor() { } }) threw exception ReferenceError: this is not defined.
PASS new (class extends null { constructor() { return 1; } }) threw exception TypeError: Derived constructors may only return object or undefined.
-PASS new (class extends null { constructor() { super() } }) threw exception TypeError: function () {} is not a constructor.
+PASS new (class extends null { constructor() { super() } }) threw exception TypeError: super is not a constructor.
PASS new (class { constructor() { super() } }) threw exception SyntaxError: 'super' keyword unexpected here.
PASS function x() { super(); } threw exception SyntaxError: 'super' keyword unexpected here.
PASS new (class extends Object { constructor() { function x() { super() } } }) threw exception SyntaxError: 'super' keyword unexpected here.
diff --git a/deps/v8/test/webkit/class-syntax-super.js b/deps/v8/test/webkit/class-syntax-super.js
index 8625831de5..e355b4b965 100644
--- a/deps/v8/test/webkit/class-syntax-super.js
+++ b/deps/v8/test/webkit/class-syntax-super.js
@@ -80,12 +80,12 @@ shouldBeFalse('x instanceof Base');
shouldThrow('new (class extends Base { constructor() { } })', '"ReferenceError: this is not defined"');
shouldThrow('new (class extends Base { constructor() { return 1; } })', '"TypeError: Derived constructors may only return object or undefined"');
shouldThrow('new (class extends null { constructor() { return undefined } })');
-shouldThrow('new (class extends null { constructor() { super(); return undefined } })', '"TypeError: function () {} is not a constructor"');
+shouldThrow('new (class extends null { constructor() { super(); return undefined } })', '"TypeError: super is not a constructor"');
shouldBe('x = { }; new (class extends null { constructor() { return x } });', 'x');
shouldBeTrue('x instanceof Object');
shouldThrow('new (class extends null { constructor() { } })', '"ReferenceError: this is not defined"');
shouldThrow('new (class extends null { constructor() { return 1; } })', '"TypeError: Derived constructors may only return object or undefined"');
-shouldThrow('new (class extends null { constructor() { super() } })', '"TypeError: function () {} is not a constructor"');
+shouldThrow('new (class extends null { constructor() { super() } })', '"TypeError: super is not a constructor"');
shouldThrow('new (class { constructor() { super() } })', '"SyntaxError: \'super\' keyword unexpected here"');
shouldThrow('function x() { super(); }', '"SyntaxError: \'super\' keyword unexpected here"');
shouldThrow('new (class extends Object { constructor() { function x() { super() } } })', '"SyntaxError: \'super\' keyword unexpected here"');
diff --git a/deps/v8/test/webkit/const-without-initializer.js b/deps/v8/test/webkit/const-without-initializer.js
index 9eb036522a..b1a86b9629 100644
--- a/deps/v8/test/webkit/const-without-initializer.js
+++ b/deps/v8/test/webkit/const-without-initializer.js
@@ -21,6 +21,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
description(
'Tests that declaring a const variable without initializing has the correct behavior and does not crash'
);
diff --git a/deps/v8/test/webkit/constant-count.js b/deps/v8/test/webkit/constant-count.js
index 7bc67e799b..2e1ec5b92d 100644
--- a/deps/v8/test/webkit/constant-count.js
+++ b/deps/v8/test/webkit/constant-count.js
@@ -21,6 +21,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --legacy-const
+
description(
"This test checks exceptional cases for constant counting in the parser."
);
diff --git a/deps/v8/test/webkit/exception-for-nonobject-expected.txt b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
index b6cb95b946..7b8883aa2a 100644
--- a/deps/v8/test/webkit/exception-for-nonobject-expected.txt
+++ b/deps/v8/test/webkit/exception-for-nonobject-expected.txt
@@ -26,7 +26,7 @@ Test for correct handling of exceptions from instanceof and 'new' expressions
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS new {}.undefined threw exception TypeError: (intermediate value).undefined is not a function.
+PASS new {}.undefined threw exception TypeError: (intermediate value).undefined is not a constructor.
PASS 1 instanceof {}.undefined threw exception TypeError: Expecting a function in instanceof check, but got undefined.
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/js/JSON-parse-reviver-expected.txt b/deps/v8/test/webkit/fast/js/JSON-parse-reviver-expected.txt
index c68efa417b..bcd9e5a081 100644
--- a/deps/v8/test/webkit/fast/js/JSON-parse-reviver-expected.txt
+++ b/deps/v8/test/webkit/fast/js/JSON-parse-reviver-expected.txt
@@ -46,7 +46,7 @@ Ensure that we always get the same holder
PASS currentHolder is lastHolder
Ensure that returning undefined has removed the property 0 from the holder during filtering.
-FAIL currentHolder.hasOwnProperty(0) should be false. Was true.
+PASS currentHolder.hasOwnProperty(0) is false
Ensure the holder for our array is indeed an array
PASS Array.isArray(currentHolder) is true
@@ -76,18 +76,19 @@ PASS value is undefined.
Ensure the holder for our array is indeed an array
PASS Array.isArray(currentHolder) is true
-FAIL currentHolder.length should be 3. Was 4.
+PASS currentHolder.length is 4
Ensure that we always get the same holder
PASS currentHolder is lastHolder
-FAIL Did not call reviver for deleted property
+PASS Ensured that property was visited despite Array length being reduced.
+PASS value is undefined.
Ensure that we created the root holder as specified in ES5
PASS '' in lastHolder is true
PASS result is lastHolder['']
Ensure that a deleted value is revived if the reviver function returns a value
-FAIL result.hasOwnProperty(3) should be true. Was false.
+PASS result.hasOwnProperty(3) is true
Test behaviour of revivor used in conjunction with an object
PASS currentHolder != globalObject is true
@@ -113,11 +114,21 @@ PASS currentHolder['and another property'] is "a replaced value"
Ensure that the changed value is reflected in the arguments passed to the reviver
PASS value is "a replaced value"
+PASS currentHolder != globalObject is true
+
+Ensure that we get the same holder object for each property
+PASS currentHolder is lastHolder
+
+Ensure that we visited a value that we have deleted, and that deletion is reflected while filtering.
+PASS currentHolder.hasOwnProperty('to delete') is false
+
+Ensure that when visiting a deleted property value is undefined
+PASS value is undefined.
Ensure that we created the root holder as specified in ES5
PASS lastHolder.hasOwnProperty('') is true
PASS result.hasOwnProperty('a property') is false
-FAIL result.hasOwnProperty('to delete') should be true. Was false.
+PASS result.hasOwnProperty('to delete') is true
PASS result is lastHolder['']
Test behaviour of revivor that introduces a cycle
diff --git a/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js b/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js
index 1f04602ee9..7634031232 100644
--- a/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js
+++ b/deps/v8/test/webkit/fast/js/JSON-parse-reviver.js
@@ -75,12 +75,13 @@ function arrayReviver(i,v) {
debug("");
debug("Ensure that when visiting a deleted property value is undefined");
shouldBeUndefined("value");
+ this.length = 3;
v = "undelete the property";
- expectedLength = this.length = 3;
+ expectedLength = 4;
break;
case 4:
- if (this.length != 3) {
+ if (this.length != 4) {
testFailed("Did not call reviver for deleted property");
expectedLength = this.length = 3;
break;
diff --git a/deps/v8/test/webkit/fast/js/arguments-expected.txt b/deps/v8/test/webkit/fast/js/arguments-expected.txt
index 4f0150033d..17fbd88113 100644
--- a/deps/v8/test/webkit/fast/js/arguments-expected.txt
+++ b/deps/v8/test/webkit/fast/js/arguments-expected.txt
@@ -157,7 +157,7 @@ PASS access_after_delete_extra_5(1, 2, 3, 4, 5) is 5
PASS argumentsParam(true) is true
PASS argumentsFunctionConstructorParam(true) is true
PASS argumentsVarUndefined() is '[object Arguments]'
-FAIL argumentsConstUndefined() should be [object Arguments]. Threw exception SyntaxError: Identifier 'arguments' has already been declared
+FAIL argumentsConstUndefined() should be [object Arguments]. Threw exception SyntaxError: Missing initializer in const declaration
PASS argumentCalleeInException() is argumentCalleeInException
PASS shadowedArgumentsApply([true]) is true
PASS shadowedArgumentsLength([]) is 0
diff --git a/deps/v8/test/webkit/fast/js/function-apply-expected.txt b/deps/v8/test/webkit/fast/js/function-apply-expected.txt
index 5762037415..673bac76b7 100644
--- a/deps/v8/test/webkit/fast/js/function-apply-expected.txt
+++ b/deps/v8/test/webkit/fast/js/function-apply-expected.txt
@@ -56,9 +56,9 @@ PASS arrayApplyChangeLength4() is 0
PASS var a = []; a.length = 0xFFFE; [].constructor.apply('', a).length is 0xFFFE
PASS var a = []; a.length = 0xFFFF; [].constructor.apply('', a).length is 0xFFFF
PASS var a = []; a.length = 0x10000; [].constructor.apply('', a).length is 0x10000
-FAIL var a = []; a.length = 0x10001; [].constructor.apply('', a).length should throw an exception. Was 65537.
-PASS var a = []; a.length = 0xFFFFFFFE; [].constructor.apply('', a).length threw exception RangeError: Maximum call stack size exceeded.
-PASS var a = []; a.length = 0xFFFFFFFF; [].constructor.apply('', a).length threw exception RangeError: Maximum call stack size exceeded.
+PASS var a = []; a.length = 0x10001; [].constructor.apply('', a).length is 0x10001
+PASS var a = []; a.length = 0xFFFFFFFE; [].constructor.apply('', a).length threw exception RangeError: Invalid array length.
+PASS var a = []; a.length = 0xFFFFFFFF; [].constructor.apply('', a).length threw exception RangeError: Invalid array length.
PASS (function(a,b,c,d){ return d ? -1 : (a+b+c); }).apply(undefined, {length:3, 0:100, 1:20, 2:3}) is 123
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/js/function-apply.js b/deps/v8/test/webkit/fast/js/function-apply.js
index 537dc2431e..14a65646b3 100644
--- a/deps/v8/test/webkit/fast/js/function-apply.js
+++ b/deps/v8/test/webkit/fast/js/function-apply.js
@@ -310,7 +310,7 @@ shouldBe("arrayApplyChangeLength4()", "0");
shouldBe("var a = []; a.length = 0xFFFE; [].constructor.apply('', a).length", "0xFFFE");
shouldBe("var a = []; a.length = 0xFFFF; [].constructor.apply('', a).length", "0xFFFF");
shouldBe("var a = []; a.length = 0x10000; [].constructor.apply('', a).length", "0x10000");
-shouldThrow("var a = []; a.length = 0x10001; [].constructor.apply('', a).length");
+shouldBe("var a = []; a.length = 0x10001; [].constructor.apply('', a).length", "0x10001");
shouldThrow("var a = []; a.length = 0xFFFFFFFE; [].constructor.apply('', a).length");
shouldThrow("var a = []; a.length = 0xFFFFFFFF; [].constructor.apply('', a).length");
diff --git a/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt b/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
index 5364f846e3..1d0f081b36 100644
--- a/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
+++ b/deps/v8/test/webkit/fast/js/function-toString-parentheses-expected.txt
@@ -512,8 +512,8 @@ PASS compileAndSerializeLeftmostTest('var a, b, c') is 'var a, b, c'
PASS compileAndSerializeLeftmostTest('var a = 1, b = 2, c = 3') is 'var a = 1, b = 2, c = 3'
PASS compileAndSerializeLeftmostTest('const a = 1') is 'const a = 1'
PASS compileAndSerializeLeftmostTest('const a = (1, 2)') is 'const a = (1, 2)'
-PASS compileAndSerializeLeftmostTest('const a, b = 1') is 'const a, b = 1'
-PASS compileAndSerializeLeftmostTest('const a = 1, b') is 'const a = 1, b'
+FAIL compileAndSerializeLeftmostTest('const a, b = 1') should be const a, b = 1. Threw exception SyntaxError: Missing initializer in const declaration
+FAIL compileAndSerializeLeftmostTest('const a = 1, b') should be const a = 1, b. Threw exception SyntaxError: Missing initializer in const declaration
PASS compileAndSerializeLeftmostTest('const a = 1, b = 1') is 'const a = 1, b = 1'
PASS compileAndSerializeLeftmostTest('const a = (1, 2), b = 1') is 'const a = (1, 2), b = 1'
PASS compileAndSerializeLeftmostTest('const a = 1, b = (1, 2)') is 'const a = 1, b = (1, 2)'
diff --git a/deps/v8/test/webkit/fast/js/kde/RegExp-expected.txt b/deps/v8/test/webkit/fast/js/kde/RegExp-expected.txt
index a3e1f1c44b..add9a29ccc 100644
--- a/deps/v8/test/webkit/fast/js/kde/RegExp-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/RegExp-expected.txt
@@ -114,7 +114,7 @@ PASS str.match(/d/gi).toString() is 'D,d'
PASS /\u0061/.source is '\\u0061'
PASS 'abc'.match(/\u0062/).toString() is 'b'
FAIL Object.prototype.toString.apply(RegExp.prototype) should be [object RegExp]. Was [object Object].
-FAIL typeof RegExp.prototype.toString() should be string. Threw exception TypeError: Method RegExp.prototype.toString called on incompatible receiver [object Object]
+PASS typeof RegExp.prototype.toString() is 'string'
PASS new RegExp().toString() is '/(?:)/'
PASS (new RegExp('(?:)')).source is '(?:)'
PASS /(?:)/.toString() is '/(?:)/'
diff --git a/deps/v8/test/webkit/fast/js/kde/func-decl-expected.txt b/deps/v8/test/webkit/fast/js/kde/func-decl-expected.txt
index d2db3810d9..d5b8a6fa10 100644
--- a/deps/v8/test/webkit/fast/js/kde/func-decl-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/func-decl-expected.txt
@@ -21,21 +21,24 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Due to changes in ES2015 function hoisting semantics, this test is
+# no longer really accurate and is expected to fail. test262 and mjsunit
+# tests verify the correct semantics.
KDE JS Test
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS Function declaration takes effect at entry
+FAIL Function declaration takes effect at entry: value has type undefined , not:function
PASS Decl not yet overwritten
PASS After assign (0)
PASS function decls have no execution content
PASS After assign #2 (0)
-PASS Decl already overwritten
+FAIL Decl already overwritten: value has type function , not:number
PASS After assign (1)
PASS function decls have no execution content
PASS After assign #2 (1)
-PASS Decl already overwritten
+FAIL Decl already overwritten: value has type function , not:number
PASS After assign (2)
PASS function decls have no execution content
PASS After assign #2 (2)
diff --git a/deps/v8/test/webkit/fast/js/native-error-prototype-expected.txt b/deps/v8/test/webkit/fast/js/native-error-prototype-expected.txt
index 137620bc6e..c0227b25a6 100644
--- a/deps/v8/test/webkit/fast/js/native-error-prototype-expected.txt
+++ b/deps/v8/test/webkit/fast/js/native-error-prototype-expected.txt
@@ -26,8 +26,8 @@ This is a test case for bugs 55346, 70889, and 75452.
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-PASS ({}).toString.call(Error.prototype) is "[object Error]"
-PASS ({}).toString.call(RangeError.prototype) is "[object Error]"
+PASS ({}).toString.call(Error.prototype) is "[object Object]"
+PASS ({}).toString.call(RangeError.prototype) is "[object Object]"
PASS err.toString() is "message"
PASS err.hasOwnProperty('message') is false
PASS err.hasOwnProperty('message') is false
diff --git a/deps/v8/test/webkit/fast/js/native-error-prototype.js b/deps/v8/test/webkit/fast/js/native-error-prototype.js
index 588deafe8d..2ce548788d 100644
--- a/deps/v8/test/webkit/fast/js/native-error-prototype.js
+++ b/deps/v8/test/webkit/fast/js/native-error-prototype.js
@@ -25,8 +25,8 @@ description(
'This is a test case for bugs <a href="https://bugs.webkit.org/show_bug.cgi?id=55346">55346</a>, <a href="https://bugs.webkit.org/show_bug.cgi?id=70889">70889</a>, and <a href="https://bugs.webkit.org/show_bug.cgi?id=75452">75452</a>.'
);
-shouldBe("({}).toString.call(Error.prototype)", '"[object Error]"');
-shouldBe("({}).toString.call(RangeError.prototype)", '"[object Error]"');
+shouldBe("({}).toString.call(Error.prototype)", '"[object Object]"');
+shouldBe("({}).toString.call(RangeError.prototype)", '"[object Object]"');
var err = new Error("message");
err.name = "";
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
index 0f4c4cf9a0..132fb4bca0 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
@@ -325,16 +325,16 @@ PASS Invalid: "function f() { throw }"
var and const statements
PASS Valid: "var a, b = null"
PASS Valid: "function f() { var a, b = null }"
-PASS Valid: "const a = 5, b, c"
-PASS Valid: "function f() { const a = 5, b, c }"
+FAIL Valid: "const a = 5, b, c" should NOT throw
+FAIL Valid: "function f() { const a = 5, b, c }" should NOT throw
PASS Invalid: "var"
PASS Invalid: "function f() { var }"
PASS Invalid: "var = 7"
PASS Invalid: "function f() { var = 7 }"
PASS Invalid: "var c (6)"
PASS Invalid: "function f() { var c (6) }"
-PASS Valid: "if (a) var a,b; else const b, c"
-PASS Valid: "function f() { if (a) var a,b; else const b, c }"
+FAIL Valid: "if (a) var a,b; else const b, c" should NOT throw
+FAIL Valid: "function f() { if (a) var a,b; else const b, c }" should NOT throw
PASS Invalid: "var 5 = 6"
PASS Invalid: "function f() { var 5 = 6 }"
PASS Valid: "while (0) var a, b, c=6, d, e, f=5*6, g=f*h, h"
@@ -343,8 +343,8 @@ PASS Invalid: "var a = if (b) { c }"
PASS Invalid: "function f() { var a = if (b) { c } }"
PASS Invalid: "var a = var b"
PASS Invalid: "function f() { var a = var b }"
-PASS Valid: "const a = b += c, a, a, a = (b - f())"
-PASS Valid: "function f() { const a = b += c, a, a, a = (b - f()) }"
+FAIL Valid: "const a = b += c, a, a, a = (b - f())" should NOT throw
+FAIL Valid: "function f() { const a = b += c, a, a, a = (b - f()) }" should NOT throw
PASS Invalid: "var a %= b | 5"
PASS Invalid: "function f() { var a %= b | 5 }"
PASS Invalid: "var (a) = 5"
@@ -357,8 +357,8 @@ PASS Invalid: "var var = 3"
PASS Invalid: "function f() { var var = 3 }"
PASS Valid: "var varr = 3 in 1"
PASS Valid: "function f() { var varr = 3 in 1 }"
-PASS Valid: "const a, a, a = void 7 - typeof 8, a = 8"
-PASS Valid: "function f() { const a, a, a = void 7 - typeof 8, a = 8 }"
+FAIL Valid: "const a, a, a = void 7 - typeof 8, a = 8" should NOT throw
+FAIL Valid: "function f() { const a, a, a = void 7 - typeof 8, a = 8 }" should NOT throw
PASS Valid: "const x_x = 6 /= 7 ? e : f"
PASS Valid: "function f() { const x_x = 6 /= 7 ? e : f }"
PASS Invalid: "var a = ?"
@@ -412,8 +412,8 @@ PASS Valid: "for (var a = b, c, d ; ; 1 in a()) break"
PASS Valid: "function f() { for (var a = b, c, d ; ; 1 in a()) break }"
PASS Invalid: "for ( ; var a ; ) break"
PASS Invalid: "function f() { for ( ; var a ; ) break }"
-FAIL Invalid: "for (const a; ; ) break" should throw undefined
-FAIL Invalid: "function f() { for (const a; ; ) break }" should throw undefined
+PASS Invalid: "for (const a; ; ) break"
+PASS Invalid: "function f() { for (const a; ; ) break }"
PASS Invalid: "for ( %a ; ; ) { }"
PASS Invalid: "function f() { for ( %a ; ; ) { } }"
PASS Valid: "for (a in b) break"
diff --git a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
index 9d1c1748de..c6eddab66c 100644
--- a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
+++ b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
@@ -28,10 +28,10 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS [1].toString() is '1'
PASS [1].toLocaleString() is 'toLocaleString'
-FAIL [1].toLocaleString() should be 1. Threw exception TypeError: [1].toLocaleString is not a function
+FAIL [1].toLocaleString() should be 1. Threw exception TypeError: (var).toLocaleString is not a function
PASS [/r/].toString() is 'toString2'
PASS [/r/].toLocaleString() is 'toLocaleString2'
-FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: [/r/].toLocaleString is not a function
+FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: (var).toLocaleString is not a function
PASS caught is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/regex/constructor-expected.txt b/deps/v8/test/webkit/fast/regex/constructor-expected.txt
index e59c322770..55816d969a 100644
--- a/deps/v8/test/webkit/fast/regex/constructor-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/constructor-expected.txt
@@ -28,8 +28,8 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS re === RegExp(re) is true
PASS re !== new RegExp(re) is true
-PASS re === RegExp(re,'i') threw exception TypeError: Cannot supply flags when constructing one RegExp from another.
-PASS re !== new RegExp(re,'i') threw exception TypeError: Cannot supply flags when constructing one RegExp from another.
+PASS re === RegExp(re,'i') is false
+PASS re !== new RegExp(re,'i') is true
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/regex/constructor.js b/deps/v8/test/webkit/fast/regex/constructor.js
index 552d82e3ac..e8ad726830 100644
--- a/deps/v8/test/webkit/fast/regex/constructor.js
+++ b/deps/v8/test/webkit/fast/regex/constructor.js
@@ -27,5 +27,5 @@ var re = /abc/;
shouldBeTrue("re === RegExp(re)");
shouldBeTrue("re !== new RegExp(re)");
-shouldThrow("re === RegExp(re,'i')");
-shouldThrow("re !== new RegExp(re,'i')");
+shouldBeFalse("re === RegExp(re,'i')");
+shouldBeTrue("re !== new RegExp(re,'i')");
diff --git a/deps/v8/test/webkit/fast/regex/toString-expected.txt b/deps/v8/test/webkit/fast/regex/toString-expected.txt
index de68728ba2..ea1fbc3ee6 100644
--- a/deps/v8/test/webkit/fast/regex/toString-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/toString-expected.txt
@@ -31,14 +31,14 @@ PASS RegExp('').source is "(?:)"
FAIL RegExp.prototype.source should be (?:). Threw exception TypeError: RegExp.prototype.source getter called on non-RegExp object
PASS RegExp('/').toString() is "/\\//"
PASS RegExp('').toString() is "/(?:)/"
-FAIL RegExp.prototype.toString() should be /(?:)/. Threw exception TypeError: Method RegExp.prototype.toString called on incompatible receiver [object Object]
+PASS RegExp.prototype.toString() is "/(?:)/"
PASS testForwardSlash("^/$", "/"); is true
PASS testForwardSlash("^/$", "/"); is true
PASS testForwardSlash("^\/$", "/"); is true
PASS testForwardSlash("^\\/$", "\/"); is true
PASS testForwardSlash("^\\\/$", "\/"); is true
-FAIL testForwardSlash("^\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("^\\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Unexpected end of input
+FAIL testForwardSlash("^\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Invalid regular expression flags
+FAIL testForwardSlash("^\\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Invalid regular expression flags
PASS testForwardSlash("x/x/x", "x\/x\/x"); is true
PASS testForwardSlash("x\/x/x", "x\/x\/x"); is true
PASS testForwardSlash("x/x\/x", "x\/x\/x"); is true
diff --git a/deps/v8/test/webkit/regexp-compile-expected.txt b/deps/v8/test/webkit/regexp-compile-expected.txt
index d5c59ce803..319eb51fa0 100644
--- a/deps/v8/test/webkit/regexp-compile-expected.txt
+++ b/deps/v8/test/webkit/regexp-compile-expected.txt
@@ -40,7 +40,7 @@ PASS re.toString() is '/c/i'
PASS re.compile(new RegExp('c'), 'i'); threw exception TypeError: Cannot supply flags when constructing one RegExp from another.
PASS re.toString() is '/c/i'
PASS re.compile(new RegExp('+')); threw exception SyntaxError: Invalid regular expression: /+/: Nothing to repeat.
-PASS re.toString() is '/undefined/'
+PASS re.toString() is '/(?:)/'
PASS re.toString() is '/null/'
PASS re.toString() is '/(?:)/'
PASS re.toString() is '/z/'
diff --git a/deps/v8/test/webkit/regexp-compile.js b/deps/v8/test/webkit/regexp-compile.js
index b06ac22e19..ec23130082 100644
--- a/deps/v8/test/webkit/regexp-compile.js
+++ b/deps/v8/test/webkit/regexp-compile.js
@@ -55,7 +55,7 @@ shouldBe("re.toString()", "'/c/i'");
shouldThrow("re.compile(new RegExp('+'));");
re.compile(undefined);
-shouldBe("re.toString()", "'/undefined/'");
+shouldBe("re.toString()", "'/(?:)/'");
re.compile(null);
shouldBe("re.toString()", "'/null/'");
diff --git a/deps/v8/test/webkit/run-json-stringify-expected.txt b/deps/v8/test/webkit/run-json-stringify-expected.txt
index fef38156a7..2cd78521b5 100644
--- a/deps/v8/test/webkit/run-json-stringify-expected.txt
+++ b/deps/v8/test/webkit/run-json-stringify-expected.txt
@@ -83,7 +83,7 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON});
}
-PASS tests[i](nativeJSON) threw exception TypeError: jsonObject.stringify is not a function.
+PASS tests[i](nativeJSON) threw exception TypeError: (var).toISOString is not a function.
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return "custom toISOString"; }});
}
@@ -101,7 +101,7 @@ function (jsonObject){
d.toISOString = null;
return jsonObject.stringify(d);
}
-PASS tests[i](nativeJSON) threw exception TypeError: jsonObject.stringify is not a function.
+PASS tests[i](nativeJSON) threw exception TypeError: (var).toISOString is not a function.
function (jsonObject){
var d = new Date(0);
d.toJSON = undefined;
diff --git a/deps/v8/test/webkit/toString-recursion-expected.txt b/deps/v8/test/webkit/toString-recursion-expected.txt
index a7ccb8f205..d5e25ba9e8 100644
--- a/deps/v8/test/webkit/toString-recursion-expected.txt
+++ b/deps/v8/test/webkit/toString-recursion-expected.txt
@@ -27,7 +27,6 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS var array = []; array[0] = array; array + '' is ''
-PASS var error = new Error; error.name = error; error.message = error; error + '' is ''
PASS var regexp = /a/; regexp.source = regexp; regexp + '' is '/a/'
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/toString-recursion.js b/deps/v8/test/webkit/toString-recursion.js
index 51e9f98c2d..c0e68f3ecc 100644
--- a/deps/v8/test/webkit/toString-recursion.js
+++ b/deps/v8/test/webkit/toString-recursion.js
@@ -28,8 +28,5 @@ description(
// Array (elements)
shouldBe("var array = []; array[0] = array; array + ''", "''");
-// Error (name, message)
-shouldBe("var error = new Error; error.name = error; error.message = error; error + ''", "''");
-
// RegExp (source)
shouldBe("var regexp = /a/; regexp.source = regexp; regexp + ''", "'/a/'");
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 903698892c..971cf4691f 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -35,6 +35,9 @@
'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
# TODO(turbofan): We run out of stack earlier on 64-bit for now.
'fast/js/deep-recursion-test': [PASS, NO_VARIANTS],
+ # This test leads to a SyntaxError from conflicting let declarations
+ # in ES2015
+ 'function-declarations-in-switch-statement': [FAIL],
}], # ALWAYS
['mode == debug', {
# Too slow in debug mode.
@@ -95,4 +98,11 @@
}], # 'gc_stress == True and mode == debug'
##############################################################################
+['gcov_coverage', {
+ # Tests taking too long or getting too large call stacks.
+ 'fast/js/excessive-comma-usage': [SKIP],
+ 'run-json-stringify': [SKIP],
+}], # 'gcov_coverage'
+
+##############################################################################
]
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index 6e324246d6..5b9f7f5073 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -39,12 +39,16 @@ _v8_flag() {
cur="${COMP_WORDS[COMP_CWORD]}"
defines=$(cat $v8_source/src/flag-definitions.h \
| grep "^DEFINE" \
- | grep -v "DEFINE_implication" \
+ | grep -v "DEFINE_IMPLICATION" \
+ | sed -e 's/_/-/g'; \
+ cat $v8_source/src/flag-definitions.h \
+ | grep "^ V(harmony_" \
+ | sed -e 's/^ V/DEFINE-BOOL/' \
| sed -e 's/_/-/g')
targets=$(echo "$defines" \
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
- | sed -ne 's/^DEFINE-bool(\([^,]*\).*/--no\1/p'; \
+ | sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
cat $v8_source/src/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
diff --git a/deps/v8/tools/check-static-initializers.gyp b/deps/v8/tools/check-static-initializers.gyp
new file mode 100644
index 0000000000..547a6c873b
--- /dev/null
+++ b/deps/v8/tools/check-static-initializers.gyp
@@ -0,0 +1,26 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'check_static_initializers_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'check-static-initializers.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/tools/check-static-initializers.isolate b/deps/v8/tools/check-static-initializers.isolate
new file mode 100644
index 0000000000..d1197d3d6c
--- /dev/null
+++ b/deps/v8/tools/check-static-initializers.isolate
@@ -0,0 +1,16 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'check-static-initializers.sh',
+ ],
+ 'files': [
+ 'check-static-initializers.sh',
+ ],
+ },
+ 'includes': [
+ '../src/d8.isolate',
+ ],
+}
diff --git a/deps/v8/tools/cpu.sh b/deps/v8/tools/cpu.sh
index 0597d09ea9..5634cac939 100755
--- a/deps/v8/tools/cpu.sh
+++ b/deps/v8/tools/cpu.sh
@@ -14,26 +14,38 @@ set_governor() {
done
}
+enable_cores() {
+ # $1: How many cores to enable.
+ for (( i=1; i<=$MAXID; i++ )); do
+ if [ "$i" -lt "$1" ]; then
+ echo 1 > $CPUPATH/cpu$i/online
+ else
+ echo 0 > $CPUPATH/cpu$i/online
+ fi
+ done
+}
+
dual_core() {
echo "Switching to dual-core mode"
- for (( i=2; i<=$MAXID; i++ )); do
- echo 0 > $CPUPATH/cpu$i/online
- done
+ enable_cores 2
}
single_core() {
echo "Switching to single-core mode"
- for (( i=1; i<=$MAXID; i++ )); do
- echo 0 > $CPUPATH/cpu$i/online
- done
+ enable_cores 1
}
all_cores() {
echo "Reactivating all CPU cores"
- for (( i=1; i<=$MAXID; i++ )); do
- echo 1 > $CPUPATH/cpu$i/online
- done
+ enable_cores $((MAXID+1))
+}
+
+
+limit_cores() {
+ # $1: How many cores to enable.
+ echo "Limiting to $1 cores"
+ enable_cores $1
}
case "$1" in
@@ -55,8 +67,15 @@ case "$1" in
allcores | all)
all_cores
;;
+ limit_cores)
+ if [ $# -ne 2 ]; then
+ echo "Usage $0 limit_cores <num>"
+ exit 1
+ fi
+ limit_cores $2
+ ;;
*)
- echo "Usage: $0 fast|slow|default|singlecore|dualcore|all"
+ echo "Usage: $0 fast|slow|default|singlecore|dualcore|all|limit_cores"
exit 1
;;
esac
diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh
new file mode 100755
index 0000000000..21cd93d0ac
--- /dev/null
+++ b/deps/v8/tools/eval_gc_time.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Convenience Script used to rank GC NVP output.
+
+print_usage_and_die() {
+ echo "Usage: $0 new-gen-rank|old-gen-rank max|avg logfile"
+ exit 1
+}
+
+if [ $# -ne 3 ]; then
+ print_usage_and_die
+fi
+
+case $1 in
+ new-gen-rank|old-gen-rank)
+ OP=$1
+ ;;
+ *)
+ print_usage_and_die
+esac
+
+case $2 in
+ max|avg)
+ RANK_MODE=$2
+ ;;
+ *)
+ print_usage_and_die
+esac
+
+LOGFILE=$3
+
+GENERAL_INTERESTING_KEYS="\
+ pause \
+"
+
+INTERESTING_NEW_GEN_KEYS="\
+ ${GENERAL_INTERESTING_KEYS} \
+ scavenge \
+ weak \
+ roots \
+ old_new \
+ code \
+ semispace \
+ object_groups \
+"
+
+INTERESTING_OLD_GEN_KEYS="\
+ ${GENERAL_INTERESTING_KEYS} \
+ external \
+ clear \
+ clear.code_flush \
+ clear.dependent_code \
+ clear.global_handles \
+ clear.maps \
+ clear.slots_buffer \
+ clear.store_buffer \
+ clear.string_table \
+ clear.weak_cells \
+ clear.weak_collections \
+ clear.weak_lists \
+ finish \
+ evacuate \
+ evacuate.candidates \
+ evacuate.clean_up \
+ evacuate.new_space \
+ evacuate.update_pointers \
+ evacuate.update_pointers.between_evacuated \
+ evacuate.update_pointers.to_evacuated \
+ evacuate.update_pointers.to_new \
+ evacuate.update_pointers.weak \
+ mark \
+ mark.finish_incremental \
+ mark.prepare_code_flush \
+ mark.roots \
+ mark.weak_closure \
+ sweep \
+ sweep.code \
+ sweep.map \
+ sweep.old \
+ incremental_finalize \
+"
+
+BASE_DIR=$(dirname $0)
+
+case $OP in
+ new-gen-rank)
+ cat $LOGFILE | grep "gc=s" \
+ | $BASE_DIR/eval_gc_nvp.py \
+ --no-histogram \
+ --rank $RANK_MODE \
+ ${INTERESTING_NEW_GEN_KEYS}
+ ;;
+ old-gen-rank)
+ cat $LOGFILE | grep "gc=ms" | grep "reduce_memory=0" | grep -v "steps=0" \
+ | $BASE_DIR/eval_gc_nvp.py \
+ --no-histogram \
+ --rank $RANK_MODE \
+ ${INTERESTING_OLD_GEN_KEYS}
+ ;;
+ *)
+ ;;
+esac
diff --git a/deps/v8/tools/gc_nvp_common.py b/deps/v8/tools/gc_nvp_common.py
index 5149e6f729..3b51731399 100644
--- a/deps/v8/tools/gc_nvp_common.py
+++ b/deps/v8/tools/gc_nvp_common.py
@@ -12,7 +12,7 @@ import re
def split_nvp(s):
t = {}
- for (name, value) in re.findall(r"(\w+)=([-\w]+(?:\.[0-9]+)?)", s):
+ for (name, value) in re.findall(r"([._\w]+)=([-\w]+(?:\.[0-9]+)?)", s):
try:
t[name] = float(value)
except ValueError:
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 1f2c4e7cbe..516f8e7490 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -189,7 +189,7 @@ consts_misc = [
extras_accessors = [
'JSFunction, context, Context, kContextOffset',
'Context, closure_index, int, CLOSURE_INDEX',
- 'Context, global_object_index, int, GLOBAL_OBJECT_INDEX',
+ 'Context, native_context_index, int, NATIVE_CONTEXT_INDEX',
'Context, previous_index, int, PREVIOUS_INDEX',
'Context, min_context_slots, int, MIN_CONTEXT_SLOTS',
'HeapObject, map, Map, kMapOffset',
@@ -319,7 +319,7 @@ def load_objects():
in_insttype = False;
continue;
- line = re.sub('//.*', '', line.rstrip().lstrip());
+ line = re.sub('//.*', '', line.strip());
if (in_insttype):
typestr += line;
@@ -329,10 +329,10 @@ def load_objects():
line);
if (match):
- klass = match.group(1).rstrip().lstrip();
+ klass = match.group(1).strip();
pklass = match.group(3);
if (pklass):
- pklass = pklass.rstrip().lstrip();
+ pklass = pklass.strip();
klasses[klass] = { 'parent': pklass };
#
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index c8f73e29d0..ca5fb0902b 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -181,7 +181,6 @@
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
@@ -231,7 +230,6 @@
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
@@ -369,6 +367,8 @@
},
'include_dirs+': [
'../..',
+ # To be able to find base/trace_event/common/trace_event_common.h
+ '../../..',
],
'defines': [
# TODO(jochen): Remove again after this is globally turned on.
@@ -376,6 +376,7 @@
],
'sources': [ ### gcmole(all) ###
'../../include/v8-debug.h',
+ '../../include/v8-experimental.h',
'../../include/v8-platform.h',
'../../include/v8-profiler.h',
'../../include/v8-testing.h',
@@ -391,6 +392,8 @@
'../../src/allocation.h',
'../../src/allocation-site-scopes.cc',
'../../src/allocation-site-scopes.h',
+ '../../src/api-experimental.cc',
+ '../../src/api-experimental.h',
'../../src/api.cc',
'../../src/api.h',
'../../src/api-natives.cc',
@@ -401,16 +404,28 @@
'../../src/assembler.h',
'../../src/assert-scope.h',
'../../src/assert-scope.cc',
- '../../src/ast-expression-visitor.cc',
- '../../src/ast-expression-visitor.h',
- '../../src/ast-literal-reindexer.cc',
- '../../src/ast-literal-reindexer.h',
- '../../src/ast-numbering.cc',
- '../../src/ast-numbering.h',
- '../../src/ast-value-factory.cc',
- '../../src/ast-value-factory.h',
- '../../src/ast.cc',
- '../../src/ast.h',
+ '../../src/ast/ast-expression-rewriter.cc',
+ '../../src/ast/ast-expression-rewriter.h',
+ '../../src/ast/ast-expression-visitor.cc',
+ '../../src/ast/ast-expression-visitor.h',
+ '../../src/ast/ast-literal-reindexer.cc',
+ '../../src/ast/ast-literal-reindexer.h',
+ '../../src/ast/ast-numbering.cc',
+ '../../src/ast/ast-numbering.h',
+ '../../src/ast/ast-value-factory.cc',
+ '../../src/ast/ast-value-factory.h',
+ '../../src/ast/ast.cc',
+ '../../src/ast/ast.h',
+ '../../src/ast/modules.cc',
+ '../../src/ast/modules.h',
+ '../../src/ast/prettyprinter.cc',
+ '../../src/ast/prettyprinter.h',
+ '../../src/ast/scopeinfo.cc',
+ '../../src/ast/scopeinfo.h',
+ '../../src/ast/scopes.cc',
+ '../../src/ast/scopes.h',
+ '../../src/ast/variables.cc',
+ '../../src/ast/variables.h',
'../../src/atomic-utils.h',
'../../src/background-parsing-task.cc',
'../../src/background-parsing-task.h',
@@ -461,10 +476,10 @@
'../../src/compiler/ast-loop-assignment-analyzer.h',
'../../src/compiler/basic-block-instrumentor.cc',
'../../src/compiler/basic-block-instrumentor.h',
- '../../src/compiler/binary-operator-reducer.cc',
- '../../src/compiler/binary-operator-reducer.h',
'../../src/compiler/branch-elimination.cc',
'../../src/compiler/branch-elimination.h',
+ '../../src/compiler/bytecode-branch-analysis.cc',
+ '../../src/compiler/bytecode-branch-analysis.h',
'../../src/compiler/bytecode-graph-builder.cc',
'../../src/compiler/bytecode-graph-builder.h',
'../../src/compiler/change-lowering.cc',
@@ -475,6 +490,8 @@
'../../src/compiler/code-generator-impl.h',
'../../src/compiler/code-generator.cc',
'../../src/compiler/code-generator.h',
+ '../../src/compiler/code-stub-assembler.cc',
+ '../../src/compiler/code-stub-assembler.h',
'../../src/compiler/common-node-cache.cc',
'../../src/compiler/common-node-cache.h',
'../../src/compiler/common-operator-reducer.cc',
@@ -490,6 +507,12 @@
'../../src/compiler/dead-code-elimination.cc',
'../../src/compiler/dead-code-elimination.h',
'../../src/compiler/diamond.h',
+ '../../src/compiler/escape-analysis.cc',
+ '../../src/compiler/escape-analysis.h',
+ "../../src/compiler/escape-analysis-reducer.cc",
+ "../../src/compiler/escape-analysis-reducer.h",
+ '../../src/compiler/fast-accessor-assembler.cc',
+ '../../src/compiler/fast-accessor-assembler.h',
'../../src/compiler/frame.cc',
'../../src/compiler/frame.h',
'../../src/compiler/frame-elider.cc',
@@ -514,12 +537,16 @@
'../../src/compiler/instruction-selector-impl.h',
'../../src/compiler/instruction-selector.cc',
'../../src/compiler/instruction-selector.h',
+ '../../src/compiler/instruction-scheduler.cc',
+ '../../src/compiler/instruction-scheduler.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
'../../src/compiler/interpreter-assembler.cc',
'../../src/compiler/interpreter-assembler.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
+ '../../src/compiler/js-call-reducer.cc',
+ '../../src/compiler/js-call-reducer.h',
'../../src/compiler/js-context-relaxation.cc',
'../../src/compiler/js-context-relaxation.h',
'../../src/compiler/js-context-specialization.cc',
@@ -562,8 +589,6 @@
'../../src/compiler/machine-operator-reducer.h',
'../../src/compiler/machine-operator.cc',
'../../src/compiler/machine-operator.h',
- '../../src/compiler/machine-type.cc',
- '../../src/compiler/machine-type.h',
'../../src/compiler/move-optimizer.cc',
'../../src/compiler/move-optimizer.h',
'../../src/compiler/node-aux-data.h',
@@ -595,6 +620,7 @@
'../../src/compiler/register-allocator.h',
'../../src/compiler/register-allocator-verifier.cc',
'../../src/compiler/register-allocator-verifier.h',
+ '../../src/compiler/representation-change.cc',
'../../src/compiler/representation-change.h',
'../../src/compiler/schedule.cc',
'../../src/compiler/schedule.h',
@@ -614,12 +640,19 @@
'../../src/compiler/state-values-utils.h',
'../../src/compiler/tail-call-optimization.cc',
'../../src/compiler/tail-call-optimization.h',
+ '../../src/compiler/type-hint-analyzer.cc',
+ '../../src/compiler/type-hint-analyzer.h',
+ '../../src/compiler/type-hints.cc',
+ '../../src/compiler/type-hints.h',
'../../src/compiler/typer.cc',
'../../src/compiler/typer.h',
'../../src/compiler/value-numbering-reducer.cc',
'../../src/compiler/value-numbering-reducer.h',
'../../src/compiler/verifier.cc',
'../../src/compiler/verifier.h',
+ '../../src/compiler/wasm-compiler.cc',
+ '../../src/compiler/wasm-compiler.h',
+ '../../src/compiler/wasm-linkage.cc',
'../../src/compiler/zone-pool.cc',
'../../src/compiler/zone-pool.h',
'../../src/compiler.cc',
@@ -729,7 +762,6 @@
'../../src/elements.h',
'../../src/execution.cc',
'../../src/execution.h',
- '../../src/expression-classifier.h',
'../../src/extensions/externalize-string-extension.cc',
'../../src/extensions/externalize-string-extension.h',
'../../src/extensions/free-buffer-extension.cc',
@@ -756,8 +788,6 @@
'../../src/frames.h',
'../../src/full-codegen/full-codegen.cc',
'../../src/full-codegen/full-codegen.h',
- '../../src/func-name-inferrer.cc',
- '../../src/func-name-inferrer.h',
'../../src/futex-emulation.cc',
'../../src/futex-emulation.h',
'../../src/gdb-jit.cc',
@@ -833,9 +863,13 @@
'../../src/interpreter/bytecode-array-builder.h',
'../../src/interpreter/bytecode-array-iterator.cc',
'../../src/interpreter/bytecode-array-iterator.h',
+ '../../src/interpreter/bytecode-register-allocator.cc',
+ '../../src/interpreter/bytecode-register-allocator.h',
'../../src/interpreter/bytecode-generator.cc',
'../../src/interpreter/bytecode-generator.h',
'../../src/interpreter/bytecode-traits.h',
+ '../../src/interpreter/constant-array-builder.cc',
+ '../../src/interpreter/constant-array-builder.h',
'../../src/interpreter/control-flow-builders.cc',
'../../src/interpreter/control-flow-builders.h',
'../../src/interpreter/interpreter.cc',
@@ -843,7 +877,6 @@
'../../src/isolate-inl.h',
'../../src/isolate.cc',
'../../src/isolate.h',
- '../../src/json-parser.h',
'../../src/json-stringifier.h',
'../../src/key-accumulator.h',
'../../src/key-accumulator.cc',
@@ -852,6 +885,8 @@
'../../src/layout-descriptor.h',
'../../src/list-inl.h',
'../../src/list.h',
+ '../../src/locked-queue-inl.h',
+ '../../src/locked-queue.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
@@ -860,11 +895,13 @@
'../../src/lookup.cc',
'../../src/lookup.h',
'../../src/macro-assembler.h',
+ '../../src/machine-type.cc',
+ '../../src/machine-type.h',
'../../src/messages.cc',
'../../src/messages.h',
- '../../src/modules.cc',
- '../../src/modules.h',
'../../src/msan.h',
+ '../../src/objects-body-descriptors-inl.h',
+ '../../src/objects-body-descriptors.h',
'../../src/objects-debug.cc',
'../../src/objects-inl.h',
'../../src/objects-printer.cc',
@@ -874,20 +911,31 @@
'../../src/optimizing-compile-dispatcher.h',
'../../src/ostreams.cc',
'../../src/ostreams.h',
- '../../src/parameter-initializer-rewriter.cc',
- '../../src/parameter-initializer-rewriter.h',
- '../../src/parser.cc',
- '../../src/parser.h',
- '../../src/pattern-rewriter.cc',
+ '../../src/parsing/expression-classifier.h',
+ '../../src/parsing/func-name-inferrer.cc',
+ '../../src/parsing/func-name-inferrer.h',
+ '../../src/parsing/json-parser.h',
+ '../../src/parsing/parameter-initializer-rewriter.cc',
+ '../../src/parsing/parameter-initializer-rewriter.h',
+ '../../src/parsing/parser-base.h',
+ '../../src/parsing/parser.cc',
+ '../../src/parsing/parser.h',
+ '../../src/parsing/pattern-rewriter.cc',
+ '../../src/parsing/preparse-data-format.h',
+ '../../src/parsing/preparse-data.cc',
+ '../../src/parsing/preparse-data.h',
+ '../../src/parsing/preparser.cc',
+ '../../src/parsing/preparser.h',
+ '../../src/parsing/rewriter.cc',
+ '../../src/parsing/rewriter.h',
+ '../../src/parsing/scanner-character-streams.cc',
+ '../../src/parsing/scanner-character-streams.h',
+ '../../src/parsing/scanner.cc',
+ '../../src/parsing/scanner.h',
+ '../../src/parsing/token.cc',
+ '../../src/parsing/token.h',
'../../src/pending-compilation-error-handler.cc',
'../../src/pending-compilation-error-handler.h',
- '../../src/preparse-data-format.h',
- '../../src/preparse-data.cc',
- '../../src/preparse-data.h',
- '../../src/preparser.cc',
- '../../src/preparser.h',
- '../../src/prettyprinter.cc',
- '../../src/prettyprinter.h',
'../../src/profiler/allocation-tracker.cc',
'../../src/profiler/allocation-tracker.h',
'../../src/profiler/circular-queue-inl.h',
@@ -921,6 +969,8 @@
'../../src/regexp/jsregexp-inl.h',
'../../src/regexp/jsregexp.cc',
'../../src/regexp/jsregexp.h',
+ '../../src/regexp/regexp-ast.cc',
+ '../../src/regexp/regexp-ast.h',
'../../src/regexp/regexp-macro-assembler-irregexp-inl.h',
'../../src/regexp/regexp-macro-assembler-irregexp.cc',
'../../src/regexp/regexp-macro-assembler-irregexp.h',
@@ -928,12 +978,12 @@
'../../src/regexp/regexp-macro-assembler-tracer.h',
'../../src/regexp/regexp-macro-assembler.cc',
'../../src/regexp/regexp-macro-assembler.h',
+ '../../src/regexp/regexp-parser.cc',
+ '../../src/regexp/regexp-parser.h',
'../../src/regexp/regexp-stack.cc',
'../../src/regexp/regexp-stack.h',
'../../src/register-configuration.cc',
'../../src/register-configuration.h',
- '../../src/rewriter.cc',
- '../../src/rewriter.h',
'../../src/runtime-profiler.cc',
'../../src/runtime-profiler.h',
'../../src/runtime/runtime-array.cc',
@@ -972,14 +1022,6 @@
'../../src/runtime/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
- '../../src/scanner-character-streams.cc',
- '../../src/scanner-character-streams.h',
- '../../src/scanner.cc',
- '../../src/scanner.h',
- '../../src/scopeinfo.cc',
- '../../src/scopeinfo.h',
- '../../src/scopes.cc',
- '../../src/scopes.h',
'../../src/signature.h',
'../../src/simulator.h',
'../../src/small-pointer-list.h',
@@ -1004,8 +1046,8 @@
'../../src/strtod.h',
'../../src/ic/stub-cache.cc',
'../../src/ic/stub-cache.h',
- '../../src/token.cc',
- '../../src/token.h',
+ '../../src/tracing/trace-event.cc',
+ '../../src/tracing/trace-event.h',
'../../src/transitions-inl.h',
'../../src/transitions.cc',
'../../src/transitions.h',
@@ -1037,13 +1079,29 @@
'../../src/v8memory.h',
'../../src/v8threads.cc',
'../../src/v8threads.h',
- '../../src/variables.cc',
- '../../src/variables.h',
'../../src/vector.h',
'../../src/version.cc',
'../../src/version.h',
'../../src/vm-state-inl.h',
'../../src/vm-state.h',
+ '../../src/wasm/asm-wasm-builder.cc',
+ '../../src/wasm/asm-wasm-builder.h',
+ '../../src/wasm/ast-decoder.cc',
+ '../../src/wasm/ast-decoder.h',
+ '../../src/wasm/decoder.h',
+ '../../src/wasm/encoder.cc',
+ '../../src/wasm/encoder.h',
+ '../../src/wasm/module-decoder.cc',
+ '../../src/wasm/module-decoder.h',
+ '../../src/wasm/wasm-js.cc',
+ '../../src/wasm/wasm-js.h',
+ '../../src/wasm/wasm-macro-gen.h',
+ '../../src/wasm/wasm-module.cc',
+ '../../src/wasm/wasm-module.h',
+ '../../src/wasm/wasm-opcodes.cc',
+ '../../src/wasm/wasm-opcodes.h',
+ '../../src/wasm/wasm-result.cc',
+ '../../src/wasm/wasm-result.h',
'../../src/zone.cc',
'../../src/zone.h',
'../../src/zone-allocator.h',
@@ -1082,6 +1140,7 @@
'../../src/arm/simulator-arm.h',
'../../src/compiler/arm/code-generator-arm.cc',
'../../src/compiler/arm/instruction-codes-arm.h',
+ '../../src/compiler/arm/instruction-scheduler-arm.cc',
'../../src/compiler/arm/instruction-selector-arm.cc',
'../../src/crankshaft/arm/lithium-arm.cc',
'../../src/crankshaft/arm/lithium-arm.h',
@@ -1135,6 +1194,7 @@
'../../src/arm64/utils-arm64.h',
'../../src/compiler/arm64/code-generator-arm64.cc',
'../../src/compiler/arm64/instruction-codes-arm64.h',
+ '../../src/compiler/arm64/instruction-scheduler-arm64.cc',
'../../src/compiler/arm64/instruction-selector-arm64.cc',
'../../src/crankshaft/arm64/delayed-masm-arm64.cc',
'../../src/crankshaft/arm64/delayed-masm-arm64.h',
@@ -1176,6 +1236,7 @@
'../../src/ia32/macro-assembler-ia32.h',
'../../src/compiler/ia32/code-generator-ia32.cc',
'../../src/compiler/ia32/instruction-codes-ia32.h',
+ '../../src/compiler/ia32/instruction-scheduler-ia32.cc',
'../../src/compiler/ia32/instruction-selector-ia32.cc',
'../../src/crankshaft/ia32/lithium-codegen-ia32.cc',
'../../src/crankshaft/ia32/lithium-codegen-ia32.h',
@@ -1214,6 +1275,7 @@
'../../src/x87/macro-assembler-x87.h',
'../../src/compiler/x87/code-generator-x87.cc',
'../../src/compiler/x87/instruction-codes-x87.h',
+ '../../src/compiler/x87/instruction-scheduler-x87.cc',
'../../src/compiler/x87/instruction-selector-x87.cc',
'../../src/crankshaft/x87/lithium-codegen-x87.cc',
'../../src/crankshaft/x87/lithium-codegen-x87.h',
@@ -1256,6 +1318,7 @@
'../../src/mips/simulator-mips.h',
'../../src/compiler/mips/code-generator-mips.cc',
'../../src/compiler/mips/instruction-codes-mips.h',
+ '../../src/compiler/mips/instruction-scheduler-mips.cc',
'../../src/compiler/mips/instruction-selector-mips.cc',
'../../src/crankshaft/mips/lithium-codegen-mips.cc',
'../../src/crankshaft/mips/lithium-codegen-mips.h',
@@ -1298,6 +1361,7 @@
'../../src/mips64/simulator-mips64.h',
'../../src/compiler/mips64/code-generator-mips64.cc',
'../../src/compiler/mips64/instruction-codes-mips64.h',
+ '../../src/compiler/mips64/instruction-scheduler-mips64.cc',
'../../src/compiler/mips64/instruction-selector-mips64.cc',
'../../src/crankshaft/mips64/lithium-codegen-mips64.cc',
'../../src/crankshaft/mips64/lithium-codegen-mips64.h',
@@ -1355,6 +1419,7 @@
'sources': [
'../../src/compiler/x64/code-generator-x64.cc',
'../../src/compiler/x64/instruction-codes-x64.h',
+ '../../src/compiler/x64/instruction-scheduler-x64.cc',
'../../src/compiler/x64/instruction-selector-x64.cc',
],
}],
@@ -1362,6 +1427,7 @@
'sources': [ ### gcmole(arch:ppc) ###
'../../src/compiler/ppc/code-generator-ppc.cc',
'../../src/compiler/ppc/instruction-codes-ppc.h',
+ '../../src/compiler/ppc/instruction-scheduler-ppc.cc',
'../../src/compiler/ppc/instruction-selector-ppc.cc',
'../../src/crankshaft/ppc/lithium-ppc.cc',
'../../src/crankshaft/ppc/lithium-ppc.h',
@@ -1449,9 +1515,6 @@
}],
],
}],
- ['v8_wasm!=0', {
- 'dependencies': ['../../third_party/wasm/src/wasm/wasm.gyp:wasm'],
- }],
],
},
{
@@ -1764,7 +1827,6 @@
'inputs': [
'../../tools/concatenate-files.py',
'<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
@@ -1840,7 +1902,6 @@
'../../src/js/uri.js',
'../../src/js/math.js',
'../../src/third_party/fdlibm/fdlibm.js',
- '../../src/js/date.js',
'../../src/js/regexp.js',
'../../src/js/arraybuffer.js',
'../../src/js/typedarray.js',
@@ -1867,20 +1928,16 @@
'../../src/js/proxy.js',
'../../src/js/generator.js',
'../../src/js/harmony-atomics.js',
- '../../src/js/harmony-array-includes.js',
'../../src/js/harmony-regexp.js',
'../../src/js/harmony-reflect.js',
'../../src/js/harmony-object-observe.js',
'../../src/js/harmony-sharedarraybuffer.js',
'../../src/js/harmony-simd.js',
- ],
- 'code_stub_library_files': [
- '../../src/js/macros.py',
- '../../src/messages.h',
- '../../src/js/code-stubs.js',
+ '../../src/js/harmony-species.js',
+ '../../src/js/harmony-unicode-regexps.js',
+ '../../src/js/promise-extra.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- 'libraries_code_stub_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
@@ -1955,38 +2012,6 @@
],
},
{
- 'action_name': 'js2c_code_stubs',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(code_stub_library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
- 'CODE_STUB',
- '<@(code_stub_library_files)'
- ],
- },
- {
- 'action_name': 'js2c_code_stubs_bin',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(code_stub_library_files)',
- ],
- 'outputs': ['<@(libraries_code_stub_bin_file)'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
- 'CODE_STUB',
- '<@(code_stub_library_files)',
- '--startup_blob', '<@(libraries_code_stub_bin_file)',
- '--nojs',
- ],
- },
- {
'action_name': 'js2c_extras',
'inputs': [
'../../tools/js2c.py',
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index f9bea4a61f..7dac2e05eb 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -568,7 +568,7 @@ PERF_EVENT_HEADER_DESC = Descriptor([
])
-# Reference: kernel/events/core.c
+# Reference: kernel/tools/perf/util/event.h
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
@@ -577,6 +577,20 @@ PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pgoff", "u64")
])
+# Reference: kernel/tools/perf/util/event.h
+PERF_MMAP2_EVENT_BODY_DESC = Descriptor([
+ ("pid", "u32"),
+ ("tid", "u32"),
+ ("addr", "u64"),
+ ("len", "u64"),
+ ("pgoff", "u64"),
+ ("maj", "u32"),
+ ("min", "u32"),
+ ("ino", "u64"),
+ ("ino_generation", "u64"),
+ ("prot", "u32"),
+ ("flags","u32")
+])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
@@ -616,6 +630,7 @@ PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
+PERF_RECORD_MMAP2 = 10
PERF_RECORD_SAMPLE = 9
@@ -664,6 +679,15 @@ class TraceReader(object):
mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
return mmap_info
+ def ReadMmap2(self, header, offset):
+ mmap_info = PERF_MMAP2_EVENT_BODY_DESC.Read(self.trace,
+ offset + self.header_size)
+ # Read null-terminated filename.
+ filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
+ offset + header.size]
+ mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
+ return mmap_info
+
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
@@ -973,6 +997,14 @@ if __name__ == "__main__":
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
+ elif header.type == PERF_RECORD_MMAP2:
+ start = time.time()
+ mmap_info = trace_reader.ReadMmap2(header, offset)
+ if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
+ log_reader.ReadUpToGC()
+ else:
+ library_repo.Load(mmap_info, code_map, options)
+ mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index da874595de..5d4b0cc490 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -36,12 +36,12 @@
#include "include/libplatform/libplatform.h"
#include "src/api.h"
#include "src/compiler.h"
-#include "src/scanner-character-streams.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparser.h"
#include "tools/shell-utils.h"
-#include "src/parser.h"
-#include "src/preparse-data-format.h"
-#include "src/preparse-data.h"
-#include "src/preparser.h"
using namespace v8::internal;
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index bd1804712b..998656908d 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -35,6 +35,7 @@ except ImportError, e:
md5er = md5.new
+import json
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
@@ -45,6 +46,10 @@ import subprocess
import multiprocessing
from subprocess import PIPE
+from testrunner.local import statusfile
+from testrunner.local import testsuite
+from testrunner.local import utils
+
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# build/include_what_you_use: Started giving false positives for variables
@@ -57,7 +62,6 @@ LINT_RULES = """
-build/include_what_you_use
-build/namespaces
-readability/check
--readability/nolint
+readability/streams
-runtime/references
""".split()
@@ -403,6 +407,61 @@ def CheckExternalReferenceRegistration(workspace):
[sys.executable, join(workspace, "tools", "external-reference-check.py")])
return code == 0
+
+def _CheckStatusFileForDuplicateKeys(filepath):
+ comma_space_bracket = re.compile(", *]")
+ lines = []
+ with open(filepath) as f:
+ for line in f.readlines():
+ # Skip all-comment lines.
+ if line.lstrip().startswith("#"): continue
+ # Strip away comments at the end of the line.
+ comment_start = line.find("#")
+ if comment_start != -1:
+ line = line[:comment_start]
+ line = line.strip()
+ # Strip away trailing commas within the line.
+ line = comma_space_bracket.sub("]", line)
+ if len(line) > 0:
+ lines.append(line)
+
+ # Strip away trailing commas at line ends. Ugh.
+ for i in range(len(lines) - 1):
+ if (lines[i].endswith(",") and len(lines[i + 1]) > 0 and
+ lines[i + 1][0] in ("}", "]")):
+ lines[i] = lines[i][:-1]
+
+ contents = "\n".join(lines)
+ # JSON wants double-quotes.
+ contents = contents.replace("'", '"')
+ # Fill in keywords (like PASS, SKIP).
+ for key in statusfile.KEYWORDS:
+ contents = re.sub(r"\b%s\b" % key, "\"%s\"" % key, contents)
+
+ status = {"success": True}
+ def check_pairs(pairs):
+ keys = {}
+ for key, value in pairs:
+ if key in keys:
+ print("%s: Error: duplicate key %s" % (filepath, key))
+ status["success"] = False
+ keys[key] = True
+
+ json.loads(contents, object_pairs_hook=check_pairs)
+ return status["success"]
+
+def CheckStatusFiles(workspace):
+ success = True
+ suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ for root in suite_paths:
+ suite_path = join(workspace, "test", root)
+ status_file_path = join(suite_path, root + ".status")
+ suite = testsuite.TestSuite.LoadTestSuite(suite_path)
+ if suite and exists(status_file_path):
+ success &= statusfile.PresubmitCheck(status_file_path)
+ success &= _CheckStatusFileForDuplicateKeys(status_file_path)
+ return success
+
def CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
@@ -445,11 +504,12 @@ def Main():
success = True
print "Running C++ lint check..."
if not options.no_lint:
- success = CppLintProcessor().Run(workspace) and success
+ success &= CppLintProcessor().Run(workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
- success = SourceProcessor().Run(workspace) and success
- success = CheckExternalReferenceRegistration(workspace) and success
+ success &= SourceProcessor().Run(workspace)
+ success &= CheckExternalReferenceRegistration(workspace)
+ success &= CheckStatusFiles(workspace)
if success:
return 0
else:
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index f7692cf6f9..27fd370971 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -4,61 +4,68 @@
# found in the LICENSE file.
import argparse
-import json
import os
import sys
-import urllib
from common_includes import *
-import chromium_roll
+ROLL_SUMMARY = ("Summary of changes available at:\n"
+ "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
-class CheckActiveRoll(Step):
- MESSAGE = "Check active roll."
+ISSUE_MSG = (
+"""Please follow these instructions for assigning/CC'ing issues:
+https://github.com/v8/v8/wiki/Triaging%20issues
- @staticmethod
- def ContainsChromiumRoll(changes):
- for change in changes:
- if change["subject"].startswith("Update V8 to"):
- return True
- return False
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.""")
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
def RunStep(self):
- params = {
- "closed": 3,
- "owner": self._options.author,
- "limit": 30,
- "format": "json",
- }
- params = urllib.urlencode(params)
- search_url = "https://codereview.chromium.org/search"
- result = self.ReadURL(search_url, params, wait_plan=[5, 20])
- if self.ContainsChromiumRoll(json.loads(result)["results"]):
- print "Stop due to existing Chromium roll."
- return True
+ self['json_output']['monitoring_state'] = 'preparation'
+ # Update v8 remote tracking branches.
+ self.GitFetchOrigin()
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
class DetectLastRoll(Step):
MESSAGE = "Detect commit ID of the last Chromium roll."
def RunStep(self):
+ self['json_output']['monitoring_state'] = 'detect_last_roll'
+ self["last_roll"] = self._options.last_roll
+ if not self["last_roll"]:
+ # Interpret the DEPS file to retrieve the v8 revision.
+ # TODO(machenbach): This should be part or the roll-deps api of
+ # depot_tools.
+ Var = lambda var: '%s'
+ exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
+
+ # The revision rolled last.
+ self["last_roll"] = vars['v8_revision']
+ self["last_version"] = self.GetVersionTag(self["last_roll"])
+ assert self["last_version"], "The last rolled v8 revision is not tagged."
+
+
+class DetectRevisionToRoll(Step):
+ MESSAGE = "Detect commit ID of the V8 revision to roll."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'detect_revision'
+ self["roll"] = self._options.revision
+ if self["roll"]:
+ # If the revision was passed on the cmd line, continue script execution
+ # in the next step.
+ return False
+
# The revision that should be rolled. Check for the latest of the most
# recent releases based on commit timestamp.
revisions = self.GetRecentReleases(
max_age=self._options.max_age * DAY_IN_SECONDS)
assert revisions, "Didn't find any recent release."
- # Interpret the DEPS file to retrieve the v8 revision.
- # TODO(machenbach): This should be part or the roll-deps api of
- # depot_tools.
- Var = lambda var: '%s'
- exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
-
- # The revision rolled last.
- self["last_roll"] = vars['v8_revision']
- last_version = self.GetVersionTag(self["last_roll"])
- assert last_version, "The last rolled v8 revision is not tagged."
-
# There must be some progress between the last roll and the new candidate
# revision (i.e. we don't go backwards). The revisions are ordered newest
# to oldest. It is possible that the newest timestamp has no progress
@@ -68,35 +75,107 @@ class DetectLastRoll(Step):
version = self.GetVersionTag(revision)
assert version, "Internal error. All recent releases should have a tag"
- if SortingKey(last_version) < SortingKey(version):
+ if SortingKey(self["last_version"]) < SortingKey(version):
self["roll"] = revision
break
else:
print("There is no newer v8 revision than the one in Chromium (%s)."
% self["last_roll"])
+ self['json_output']['monitoring_state'] = 'up_to_date'
return True
-class RollChromium(Step):
- MESSAGE = "Roll V8 into Chromium."
+class PrepareRollCandidate(Step):
+ MESSAGE = "Robustness checks of the roll candidate."
def RunStep(self):
- if self._options.roll:
- args = [
- "--author", self._options.author,
- "--reviewer", self._options.reviewer,
- "--chromium", self._options.chromium,
- "--last-roll", self["last_roll"],
- "--use-commit-queue",
- self["roll"],
- ]
- if self._options.sheriff:
- args.append("--sheriff")
- if self._options.dry_run:
- args.append("--dry-run")
- if self._options.work_dir:
- args.extend(["--work-dir", self._options.work_dir])
- self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
+ self['json_output']['monitoring_state'] = 'prepare_candidate'
+ self["roll_title"] = self.GitLog(n=1, format="%s",
+ git_hash=self["roll"])
+
+ # Make sure the last roll and the roll candidate are releases.
+ version = self.GetVersionTag(self["roll"])
+ assert version, "The revision to roll is not tagged."
+ version = self.GetVersionTag(self["last_roll"])
+ assert version, "The revision used as last roll is not tagged."
+
+
+class SwitchChromium(Step):
+ MESSAGE = "Switch to Chromium checkout."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'switch_chromium'
+ cwd = self._options.chromium
+ self.InitialEnvironmentChecks(cwd)
+ # Check for a clean workdir.
+ if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
+ self.Die("Workspace is not clean. Please commit or undo your changes.")
+ # Assert that the DEPS file is there.
+ if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
+ self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+ MESSAGE = "Update the checkout and create a new branch."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'update_chromium'
+ cwd = self._options.chromium
+ self.GitCheckout("master", cwd=cwd)
+ self.DeleteBranch("work-branch", cwd=cwd)
+ self.Command("gclient", "sync --nohooks", cwd=cwd)
+ self.GitPull(cwd=cwd)
+
+ # Update v8 remotes.
+ self.GitFetchOrigin()
+
+ self.GitCreateBranch("work-branch", cwd=cwd)
+
+
+class UploadCL(Step):
+ MESSAGE = "Create and upload CL."
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'upload'
+ cwd = self._options.chromium
+ # Patch DEPS file.
+ if self.Command("roll-dep-svn", "v8 %s" %
+ self["roll"], cwd=cwd) is None:
+ self.Die("Failed to create deps for %s" % self["roll"])
+
+ message = []
+ message.append("Update V8 to %s." % self["roll_title"].lower())
+
+ message.append(
+ ROLL_SUMMARY % (self["last_roll"][:8], self["roll"][:8]))
+
+ message.append(ISSUE_MSG)
+
+ message.append("TBR=%s" % self._options.reviewer)
+ self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
+ if not self._options.dry_run:
+ self.GitUpload(author=self._options.author,
+ force=True,
+ cq=self._options.use_commit_queue,
+ cwd=cwd)
+ print "CL uploaded."
+ else:
+ print "Dry run - don't upload."
+
+ self.GitCheckout("master", cwd=cwd)
+ self.GitDeleteBranch("work-branch", cwd=cwd)
+
+class CleanUp(Step):
+ MESSAGE = "Done!"
+
+ def RunStep(self):
+ self['json_output']['monitoring_state'] = 'success'
+ print("Congratulations, you have successfully rolled %s into "
+ "Chromium."
+ % self["roll"])
+
+ # Clean up all temporary files.
+ Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
class AutoRoll(ScriptsBase):
@@ -104,30 +183,45 @@ class AutoRoll(ScriptsBase):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
- parser.add_argument("--max-age", default=3, type=int,
+ parser.add_argument("--last-roll",
+ help="The git commit ID of the last rolled version. "
+ "Auto-detected if not specified.")
+ parser.add_argument("--max-age", default=7, type=int,
help="Maximum age in days of the latest release.")
- parser.add_argument("--roll", help="Call Chromium roll script.",
- default=False, action="store_true")
+ parser.add_argument("--revision",
+ help="Revision to roll. Auto-detected if not "
+ "specified."),
+ parser.add_argument("--roll", help="Deprecated.",
+ default=True, action="store_true")
+ parser.add_argument("--use-commit-queue",
+ help="Check the CQ bit on upload.",
+ default=True, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
- if not options.reviewer:
- print "A reviewer (-r) is required."
- return False
- if not options.author:
- print "An author (-a) is required."
+ if not options.author or not options.reviewer:
+ print "A reviewer (-r) and an author (-a) are required."
return False
+
+ options.requires_editor = False
+ options.force = True
+ options.manual = False
return True
def _Config(self):
return {
- "PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
+ "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
}
def _Steps(self):
return [
- CheckActiveRoll,
+ Preparation,
DetectLastRoll,
- RollChromium,
+ DetectRevisionToRoll,
+ PrepareRollCandidate,
+ SwitchChromium,
+ UpdateChromiumCheckout,
+ UploadCL,
+ CleanUp,
]
diff --git a/deps/v8/tools/release/chromium_roll.py b/deps/v8/tools/release/chromium_roll.py
deleted file mode 100755
index bcc6c79f2d..0000000000
--- a/deps/v8/tools/release/chromium_roll.py
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import argparse
-import os
-import sys
-
-from common_includes import *
-
-ROLL_SUMMARY = ("Summary of changes available at:\n"
- "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
-
-ISSUE_MSG = (
-"""Please follow these instructions for assigning/CC'ing issues:
-https://code.google.com/p/v8-wiki/wiki/TriagingIssues
-
-Please close rolling in case of a roll revert:
-https://v8-roll.appspot.com/
-This only works with a Google account.""")
-
-class Preparation(Step):
- MESSAGE = "Preparation."
-
- def RunStep(self):
- # Update v8 remote tracking branches.
- self.GitFetchOrigin()
- self.Git("fetch origin +refs/tags/*:refs/tags/*")
-
-
-class PrepareRollCandidate(Step):
- MESSAGE = "Robustness checks of the roll candidate."
-
- def RunStep(self):
- self["roll_title"] = self.GitLog(n=1, format="%s",
- git_hash=self._options.roll)
-
- # Make sure the last roll and the roll candidate are releases.
- version = self.GetVersionTag(self._options.roll)
- assert version, "The revision to roll is not tagged."
- version = self.GetVersionTag(self._options.last_roll)
- assert version, "The revision used as last roll is not tagged."
-
-
-class SwitchChromium(Step):
- MESSAGE = "Switch to Chromium checkout."
-
- def RunStep(self):
- cwd = self._options.chromium
- self.InitialEnvironmentChecks(cwd)
- # Check for a clean workdir.
- if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
- self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Assert that the DEPS file is there.
- if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
- self.Die("DEPS file not present.")
-
-
-class UpdateChromiumCheckout(Step):
- MESSAGE = "Update the checkout and create a new branch."
-
- def RunStep(self):
- cwd = self._options.chromium
- self.GitCheckout("master", cwd=cwd)
- self.DeleteBranch("work-branch", cwd=cwd)
- self.Command("gclient", "sync --nohooks", cwd=cwd)
- self.GitPull(cwd=cwd)
-
- # Update v8 remotes.
- self.GitFetchOrigin()
-
- self.GitCreateBranch("work-branch", cwd=cwd)
-
-
-class UploadCL(Step):
- MESSAGE = "Create and upload CL."
-
- def RunStep(self):
- cwd = self._options.chromium
- # Patch DEPS file.
- if self.Command("roll-dep-svn", "v8 %s" %
- self._options.roll, cwd=cwd) is None:
- self.Die("Failed to create deps for %s" % self._options.roll)
-
- message = []
- message.append("Update V8 to %s." % self["roll_title"].lower())
-
- message.append(
- ROLL_SUMMARY % (self._options.last_roll[:8], self._options.roll[:8]))
-
- message.append(ISSUE_MSG)
-
- message.append("TBR=%s" % self._options.reviewer)
- self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
- if not self._options.dry_run:
- self.GitUpload(author=self._options.author,
- force=True,
- cq=self._options.use_commit_queue,
- cwd=cwd)
- print "CL uploaded."
- else:
- print "Dry run - don't upload."
-
- self.GitCheckout("master", cwd=cwd)
- self.GitDeleteBranch("work-branch", cwd=cwd)
-
-class CleanUp(Step):
- MESSAGE = "Done!"
-
- def RunStep(self):
- print("Congratulations, you have successfully rolled %s into "
- "Chromium."
- % self._options.roll)
-
- # Clean up all temporary files.
- Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
-
-
-class ChromiumRoll(ScriptsBase):
- def _PrepareOptions(self, parser):
- parser.add_argument("-c", "--chromium", required=True,
- help=("The path to your Chromium src/ "
- "directory to automate the V8 roll."))
- parser.add_argument("--last-roll", required=True,
- help="The git commit ID of the last rolled version.")
- parser.add_argument("roll", nargs=1, help="Revision to roll."),
- parser.add_argument("--use-commit-queue",
- help="Check the CQ bit on upload.",
- default=False, action="store_true")
-
- def _ProcessOptions(self, options): # pragma: no cover
- if not options.author or not options.reviewer:
- print "A reviewer (-r) and an author (-a) are required."
- return False
-
- options.requires_editor = False
- options.force = True
- options.manual = False
- options.roll = options.roll[0]
- return True
-
- def _Config(self):
- return {
- "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
- }
-
- def _Steps(self):
- return [
- Preparation,
- PrepareRollCandidate,
- DetermineV8Sheriff,
- SwitchChromium,
- UpdateChromiumCheckout,
- UploadCL,
- CleanUp,
- ]
-
-
-if __name__ == "__main__": # pragma: no cover
- sys.exit(ChromiumRoll().Run())
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index 41fe359624..c2b64c38ec 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -738,9 +738,12 @@ class Step(GitRecipesMixin):
class BootstrapStep(Step):
- MESSAGE = "Bootstapping v8 checkout."
+ MESSAGE = "Bootstrapping checkout and state."
def RunStep(self):
+ # Reserve state entry for json output.
+ self['json_output'] = {}
+
if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
self.Die("Can't use v8 checkout with calling script as work checkout.")
# Directory containing the working v8 checkout.
@@ -766,32 +769,6 @@ class UploadStep(Step):
cc=self._options.cc)
-class DetermineV8Sheriff(Step):
- MESSAGE = "Determine the V8 sheriff for code review."
-
- def RunStep(self):
- self["sheriff"] = None
- if not self._options.sheriff: # pragma: no cover
- return
-
- # The sheriff determined by the rotation on the waterfall has a
- # @google.com account.
- url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
- match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url))
-
- # If "channel is sheriff", we can't match an account.
- if match:
- g_name = match.group(1)
- # Optimistically assume that google and chromium account name are the
- # same.
- self["sheriff"] = g_name + "@chromium.org"
- self._options.reviewer = ("%s,%s" %
- (self["sheriff"], self._options.reviewer))
- print "Found active sheriff: %s" % self["sheriff"]
- else:
- print "No active sheriff found."
-
-
def MakeStep(step_class=Step, number=0, state=None, config=None,
options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
# Allow to pass in empty dictionaries.
@@ -838,12 +815,10 @@ class ScriptsBase(object):
help="The author email used for rietveld.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
+ parser.add_argument("--json-output",
+ help="File to write results summary to.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
- parser.add_argument("--sheriff", default=False, action="store_true",
- help=("Determine current sheriff to review CLs. On "
- "success, this will overwrite the reviewer "
- "option."))
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
@@ -896,9 +871,16 @@ class ScriptsBase(object):
for (number, step_class) in enumerate([BootstrapStep] + step_classes):
steps.append(MakeStep(step_class, number, self._state, self._config,
options, self._side_effect_handler))
- for step in steps[options.step:]:
- if step.Run():
- return 0
+
+ try:
+ for step in steps[options.step:]:
+ if step.Run():
+ return 0
+ finally:
+ if options.json_output:
+ with open(options.json_output, "w") as f:
+ json.dump(self._state['json_output'], f)
+
return 0
def Run(self, args=None):
diff --git a/deps/v8/tools/release/releases.py b/deps/v8/tools/release/releases.py
index 5b826fccba..7b659ccb80 100755
--- a/deps/v8/tools/release/releases.py
+++ b/deps/v8/tools/release/releases.py
@@ -463,10 +463,15 @@ class RetrieveInformationOnChromeReleases(Step):
def _GetGitHashForV8Version(self, v8_version):
if v8_version == "N/A":
return ""
+
+ real_v8_version = v8_version
if v8_version.split(".")[3]== "0":
- return self.GitGetHashOfTag(v8_version[:-2])
+ real_v8_version = v8_version[:-2]
- return self.GitGetHashOfTag(v8_version)
+ try:
+ return self.GitGetHashOfTag(real_v8_version)
+ except GitFailedException:
+ return ""
def _CreateCandidate(self, current_version):
params = None
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 825c191d4e..4a3cb5b24a 100644
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -1003,30 +1003,68 @@ git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
- ROLL_COMMIT_MSG = """Update V8 to version 3.22.4 (based on abc).
+ ROLL_COMMIT_MSG = """Update V8 to version 3.22.4.
Summary of changes available at:
https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
Please follow these instructions for assigning/CC'ing issues:
-https://code.google.com/p/v8-wiki/wiki/TriagingIssues
+https://github.com/v8/v8/wiki/Triaging%20issues
Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
-TBR=g_name@chromium.org,reviewer@chromium.org"""
+TBR=reviewer@chromium.org"""
+
+ # Snippet from the original DEPS file.
+ FAKE_DEPS = """
+vars = {
+ "v8_revision": "last_roll_hsh",
+}
+deps = {
+ "src/v8":
+ (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
+ Var("v8_revision"),
+}
+"""
+
+ def testChromiumRollUpToDate(self):
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+ self.Expect([
+ Cmd("git fetch origin", ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git describe --tags last_roll_hsh", "3.22.4"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nroll_hsh\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ ])
+
+ result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
+ AUTO_PUSH_ARGS + [
+ "-c", TEST_CONFIG["CHROMIUM"],
+ "--json-output", json_output_file])
+ self.assertEquals(0, result)
+ json_output = json.loads(FileToText(json_output_file))
+ self.assertEquals("up_to_date", json_output["monitoring_state"])
+
def testChromiumRoll(self):
# Setup fake directory structures.
TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+ TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
chrome_dir = TEST_CONFIG["CHROMIUM"]
os.makedirs(os.path.join(chrome_dir, "v8"))
- # Write fake deps file.
- TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
- os.path.join(chrome_dir, "DEPS"))
def WriteDeps():
TextToFile("Some line\n \"v8_revision\": \"22624\",\n some line",
os.path.join(chrome_dir, "DEPS"))
@@ -1034,12 +1072,17 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
expectations = [
Cmd("git fetch origin", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git log -1 --format=%s roll_hsh",
- "Version 3.22.4 (based on abc)\n"),
+ Cmd("git describe --tags last_roll_hsh", "3.22.3.1"),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nroll_hsh\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
+ Cmd("git log -1 --format=%s roll_hsh", "Version 3.22.4\n"),
Cmd("git describe --tags roll_hsh", "3.22.4"),
Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
- URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
- "document.write('g_name')"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch", "", cwd=chrome_dir),
@@ -1052,23 +1095,23 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
"--author \"author@chromium.org <author@chromium.org>\"" %
self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
- Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
- cwd=chrome_dir),
+ Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+ "--use-commit-queue", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
]
self.Expect(expectations)
args = ["-a", "author@chromium.org", "-c", chrome_dir,
- "--sheriff",
- "-r", "reviewer@chromium.org",
- "--last-roll", "last_roll_hsh",
- "roll_hsh"]
- ChromiumRoll(TEST_CONFIG, self).Run(args)
+ "-r", "reviewer@chromium.org", "--json-output", json_output_file]
+ auto_roll.AutoRoll(TEST_CONFIG, self).Run(args)
deps = FileToText(os.path.join(chrome_dir, "DEPS"))
self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
+ json_output = json.loads(FileToText(json_output_file))
+ self.assertEquals("success", json_output["monitoring_state"])
+
def testCheckLastPushRecently(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
@@ -1103,74 +1146,6 @@ TBR=g_name@chromium.org,reviewer@chromium.org"""
self.assertEquals("abc123", state["candidate"])
- def testAutoRollExistingRoll(self):
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"},"
- "{\"subject\": \"Update V8 to Version...\"}]}")),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
- self.assertEquals(0, result)
-
- # Snippet from the original DEPS file.
- FAKE_DEPS = """
-vars = {
- "v8_revision": "abcd123455",
-}
-deps = {
- "src/v8":
- (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
- Var("v8_revision"),
-}
-"""
-
- def testAutoRollUpToDate(self):
- TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
- TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git rev-list --max-age=740800 --tags",
- "bad_tag\nhash_234\nhash_123"),
- Cmd("git describe --tags bad_tag", ""),
- Cmd("git describe --tags hash_234", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- Cmd("git describe --tags abcd123455", "3.22.4"),
- Cmd("git describe --tags hash_234", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
- self.assertEquals(0, result)
-
- def testAutoRoll(self):
- TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
- TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
-
- self.Expect([
- URL("https://codereview.chromium.org/search",
- "owner=author%40chromium.org&limit=30&closed=3&format=json",
- ("{\"results\": [{\"subject\": \"different\"}]}")),
- Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git rev-list --max-age=740800 --tags",
- "bad_tag\nhash_234\nhash_123"),
- Cmd("git describe --tags bad_tag", ""),
- Cmd("git describe --tags hash_234", "3.22.4"),
- Cmd("git describe --tags hash_123", "3.22.3"),
- Cmd("git describe --tags abcd123455", "3.22.3.1"),
- Cmd("git describe --tags hash_234", "3.22.4"),
- ])
-
- result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
- AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
- self.assertEquals(0, result)
-
def testMergeToBranch(self):
TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index f7ec819ecd..70e106ec1b 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -316,6 +316,7 @@ def Main():
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data:
@@ -380,7 +381,8 @@ def Execute(arch, mode, args, options, suites, workspace):
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
False, # No predictable mode.
- False) # No no_harness mode.
+ False, # No no_harness mode.
+ False) # Don't use perf data.
# Find available test suites and read test cases from them.
variables = {
@@ -388,6 +390,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"asan": options.asan,
"deopt_fuzzer": True,
"gc_stress": False,
+ "gcov_coverage": False,
"ignition": False,
"isolates": options.isolates,
"mode": mode,
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 1b8fdd3e13..fe8091efb3 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -77,6 +77,10 @@ TEST_MAP = {
"intl",
"unittests",
],
+ "ignition": [
+ "mjsunit",
+ "cctest",
+ ],
"optimize_for_size": [
"mjsunit",
"cctest",
@@ -224,6 +228,9 @@ def BuildOptions():
result.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
+ result.add_option("--gcov-coverage",
+ help="Uses executables instrumented for gcov coverage",
+ default=False, action="store_true")
result.add_option("--command-prefix",
help="Prepended to each shell command used to run a test",
default="")
@@ -313,6 +320,9 @@ def BuildOptions():
result.add_option("--stress-only",
help="Only run tests with --always-opt --stress-opt",
default=False, action="store_true")
+ result.add_option("--swarming",
+ help="Indicates running test driver on swarming.",
+ default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
@@ -583,7 +593,7 @@ def Main():
# suites as otherwise filters would break.
def ExpandTestGroups(name):
if name in TEST_MAP:
- return [suite for suite in TEST_MAP[arg]]
+ return [suite for suite in TEST_MAP[name]]
else:
return [name]
args = reduce(lambda x, y: x + y,
@@ -600,6 +610,7 @@ def Main():
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(BASE_DIR, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
if options.download_data or options.download_data_only:
@@ -670,7 +681,8 @@ def Execute(arch, mode, args, options, suites):
options.rerun_failures_count,
options.rerun_failures_max,
options.predictable,
- options.no_harness)
+ options.no_harness,
+ use_perf_data=not options.swarming)
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -683,6 +695,7 @@ def Execute(arch, mode, args, options, suites):
"asan": options.asan,
"deopt_fuzzer": False,
"gc_stress": options.gc_stress,
+ "gcov_coverage": options.gcov_coverage,
"ignition": options.ignition,
"isolates": options.isolates,
"mode": MODES[mode]["status_mode"],
diff --git a/deps/v8/tools/run-valgrind.py b/deps/v8/tools/run-valgrind.py
index f25f7a113c..e3f84f58fe 100755
--- a/deps/v8/tools/run-valgrind.py
+++ b/deps/v8/tools/run-valgrind.py
@@ -29,23 +29,47 @@
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
+# Uses valgrind from third_party/valgrind. Assumes the executable is passed
+# with a path relative to the v8 root.
+
+from os import path
+import platform
+import re
import subprocess
import sys
-import re
+
+V8_ROOT = path.dirname(path.dirname(path.abspath(__file__)))
+MACHINE = 'linux_x64' if platform.machine() == 'x86_64' else 'linux_x86'
+VALGRIND_ROOT = path.join(V8_ROOT, 'third_party', 'valgrind', MACHINE)
+VALGRIND_BIN = path.join(VALGRIND_ROOT, 'bin', 'valgrind')
+VALGRIND_LIB = path.join(VALGRIND_ROOT, 'lib', 'valgrind')
VALGRIND_ARGUMENTS = [
- 'valgrind',
+ VALGRIND_BIN,
'--error-exitcode=1',
'--leak-check=full',
- '--smc-check=all'
+ '--smc-check=all',
]
+if len(sys.argv) < 2:
+ print 'Please provide an executable to analyze.'
+ sys.exit(1)
+
+executable = path.join(V8_ROOT, sys.argv[1])
+if not path.exists(executable):
+ print 'Cannot find the file specified: %s' % executable
+ sys.exit(1)
+
# Compute the command line.
-command = VALGRIND_ARGUMENTS + sys.argv[1:]
+command = VALGRIND_ARGUMENTS + [executable] + sys.argv[2:]
# Run valgrind.
-process = subprocess.Popen(command, stderr=subprocess.PIPE)
+process = subprocess.Popen(
+ command,
+ stderr=subprocess.PIPE,
+ env={'VALGRIND_LIB': VALGRIND_LIB}
+)
code = process.wait();
errors = process.stderr.readlines();
@@ -74,4 +98,5 @@ if len(leaks) < 2 or len(leaks) > 3:
sys.exit(1)
# No leaks found.
+sys.stderr.writelines(errors)
sys.exit(0)
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index ff59f0bc1e..c9fe54175a 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -26,6 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import collections
import os
import shutil
import sys
@@ -35,10 +36,17 @@ from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
+from . import testsuite
from . import utils
-class Job(object):
+# Base dir of the v8 checkout.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__)))))
+TEST_DIR = os.path.join(BASE_DIR, "test")
+
+
+class Instructions(object):
def __init__(self, command, dep_command, test_id, timeout, verbose):
self.command = command
self.dep_command = dep_command
@@ -47,24 +55,119 @@ class Job(object):
self.verbose = verbose
-def RunTest(job):
- start_time = time.time()
- if job.dep_command is not None:
- dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout)
- # TODO(jkummerow): We approximate the test suite specific function
- # IsFailureOutput() by just checking the exit code here. Currently
- # only cctests define dependencies, for which this simplification is
- # correct.
- if dep_output.exit_code != 0:
- return (job.id, dep_output, time.time() - start_time)
- output = commands.Execute(job.command, job.verbose, job.timeout)
- return (job.id, output, time.time() - start_time)
+# Structure that keeps global information per worker process.
+ProcessContext = collections.namedtuple(
+ "process_context", ["suites", "context"])
+
+
+def MakeProcessContext(context):
+ """Generate a process-local context.
+
+ This reloads all suites per process and stores the global context.
+
+ Args:
+ context: The global context from the test runner.
+ """
+ suite_paths = utils.GetSuitePaths(TEST_DIR)
+ suites = {}
+ for root in suite_paths:
+ # Don't reinitialize global state as this is concurrently called from
+ # different processes.
+ suite = testsuite.TestSuite.LoadTestSuite(
+ os.path.join(TEST_DIR, root), global_init=False)
+ if suite:
+ suites[suite.name] = suite
+ return ProcessContext(suites, context)
+
+
+def GetCommand(test, context):
+ d8testflag = []
+ shell = test.suite.shell()
+ if shell == "d8":
+ d8testflag = ["--test"]
+ if utils.IsWindows():
+ shell += ".exe"
+ if context.random_seed:
+ d8testflag += ["--random-seed=%s" % context.random_seed]
+ cmd = (context.command_prefix +
+ [os.path.abspath(os.path.join(context.shell_dir, shell))] +
+ d8testflag +
+ test.suite.GetFlagsForTestCase(test, context) +
+ context.extra_flags)
+ return cmd
+
+
+def _GetInstructions(test, context):
+ command = GetCommand(test, context)
+ timeout = context.timeout
+ if ("--stress-opt" in test.flags or
+ "--stress-opt" in context.mode_flags or
+ "--stress-opt" in context.extra_flags):
+ timeout *= 4
+ if "--noenable-vfp3" in context.extra_flags:
+ timeout *= 2
+ # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
+ # the like.
+ if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
+ timeout *= 2
+ if test.dependency is not None:
+ dep_command = [ c.replace(test.path, test.dependency) for c in command ]
+ else:
+ dep_command = None
+ return Instructions(
+ command, dep_command, test.id, timeout, context.verbose)
+
+
+class Job(object):
+ """Stores data to be sent over the multi-process boundary.
+
+ All contained fields will be pickled/unpickled.
+ """
+
+ def Run(self, process_context):
+ """Executes the job.
+
+ Args:
+ process_context: Process-local information that is initialized by the
+ executing worker.
+ """
+ raise NotImplementedError()
+
+
+class TestJob(Job):
+ def __init__(self, test):
+ self.test = test
+
+ def Run(self, process_context):
+ # Retrieve a new suite object on the worker-process side. The original
+ # suite object isn't pickled.
+ self.test.SetSuiteObject(process_context.suites)
+ instr = _GetInstructions(self.test, process_context.context)
+
+ start_time = time.time()
+ if instr.dep_command is not None:
+ dep_output = commands.Execute(
+ instr.dep_command, instr.verbose, instr.timeout)
+ # TODO(jkummerow): We approximate the test suite specific function
+ # IsFailureOutput() by just checking the exit code here. Currently
+ # only cctests define dependencies, for which this simplification is
+ # correct.
+ if dep_output.exit_code != 0:
+ return (instr.id, dep_output, time.time() - start_time)
+ output = commands.Execute(instr.command, instr.verbose, instr.timeout)
+ return (instr.id, output, time.time() - start_time)
+
+
+def RunTest(job, process_context):
+ return job.Run(process_context)
+
class Runner(object):
def __init__(self, suites, progress_indicator, context):
self.datapath = os.path.join("out", "testrunner_data")
- self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
+ self.perf_data_manager = perfdata.GetPerfDataManager(
+ context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
@@ -99,25 +202,6 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
- def _GetJob(self, test):
- command = self.GetCommand(test)
- timeout = self.context.timeout
- if ("--stress-opt" in test.flags or
- "--stress-opt" in self.context.mode_flags or
- "--stress-opt" in self.context.extra_flags):
- timeout *= 4
- if "--noenable-vfp3" in self.context.extra_flags:
- timeout *= 2
- # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
- # the like.
- if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
- timeout *= 2
- if test.dependency is not None:
- dep_command = [ c.replace(test.path, test.dependency) for c in command ]
- else:
- dep_command = None
- return Job(command, dep_command, test.id, timeout, self.context.verbose)
-
def _MaybeRerun(self, pool, test):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
@@ -138,7 +222,7 @@ class Runner(object):
test.duration = None
test.output = None
test.run += 1
- pool.add([self._GetJob(test)])
+ pool.add([TestJob(test)])
self.remaining += 1
self.total += 1
@@ -208,7 +292,7 @@ class Runner(object):
# remember the output for comparison.
test.run += 1
test.output = result[1]
- pool.add([self._GetJob(test)])
+ pool.add([TestJob(test)])
# Always update the perf database.
return True
@@ -231,14 +315,19 @@ class Runner(object):
assert test.id >= 0
test_map[test.id] = test
try:
- yield [self._GetJob(test)]
+ yield [TestJob(test)]
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
queued_exception[0] = e
continue
try:
- it = pool.imap_unordered(RunTest, gen_tests())
+ it = pool.imap_unordered(
+ fn=RunTest,
+ gen=gen_tests(),
+ process_context_fn=MakeProcessContext,
+ process_context_args=[self.context],
+ )
for result in it:
if result.heartbeat:
self.indicator.Heartbeat()
@@ -276,22 +365,6 @@ class Runner(object):
print text
sys.stdout.flush()
- def GetCommand(self, test):
- d8testflag = []
- shell = test.suite.shell()
- if shell == "d8":
- d8testflag = ["--test"]
- if utils.IsWindows():
- shell += ".exe"
- if self.context.random_seed:
- d8testflag += ["--random-seed=%s" % self.context.random_seed]
- cmd = (self.context.command_prefix +
- [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
- d8testflag +
- test.suite.GetFlagsForTestCase(test, self.context) +
- self.context.extra_flags)
- return cmd
-
class BreakNowException(Exception):
def __init__(self, value):
diff --git a/deps/v8/tools/testrunner/local/perfdata.py b/deps/v8/tools/testrunner/local/perfdata.py
index 2979dc4866..29ebff773a 100644
--- a/deps/v8/tools/testrunner/local/perfdata.py
+++ b/deps/v8/tools/testrunner/local/perfdata.py
@@ -118,3 +118,29 @@ class PerfDataManager(object):
if not mode in modes:
modes[mode] = PerfDataStore(self.datadir, arch, mode)
return modes[mode]
+
+
+class NullPerfDataStore(object):
+ def UpdatePerfData(self, test):
+ pass
+
+ def FetchPerfData(self, test):
+ return None
+
+
+class NullPerfDataManager(object):
+ def __init__(self):
+ pass
+
+ def GetStore(self, *args, **kwargs):
+ return NullPerfDataStore()
+
+ def close(self):
+ pass
+
+
+def GetPerfDataManager(context, datadir):
+ if context.use_perf_data:
+ return PerfDataManager(datadir)
+ else:
+ return NullPerfDataManager()
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index b933f735e5..6d123fd4e5 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -5,6 +5,8 @@
from Queue import Empty
from multiprocessing import Event, Process, Queue
+import traceback
+
class NormalResult():
def __init__(self, result):
@@ -39,17 +41,22 @@ class MaybeResult():
return MaybeResult(False, value)
-def Worker(fn, work_queue, done_queue, done):
+def Worker(fn, work_queue, done_queue, done,
+ process_context_fn=None, process_context_args=None):
"""Worker to be run in a child process.
The worker stops on two conditions. 1. When the poison pill "STOP" is
reached or 2. when the event "done" is set."""
try:
+ kwargs = {}
+ if process_context_fn and process_context_args is not None:
+ kwargs.update(process_context=process_context_fn(*process_context_args))
for args in iter(work_queue.get, "STOP"):
if done.is_set():
break
try:
- done_queue.put(NormalResult(fn(*args)))
+ done_queue.put(NormalResult(fn(*args, **kwargs)))
except Exception, e:
+ traceback.print_exc()
print(">>> EXCEPTION: %s" % e)
done_queue.put(ExceptionResult())
except KeyboardInterrupt:
@@ -84,13 +91,23 @@ class Pool():
self.done = Event()
self.heartbeat_timeout = heartbeat_timeout
- def imap_unordered(self, fn, gen):
+ def imap_unordered(self, fn, gen,
+ process_context_fn=None, process_context_args=None):
"""Maps function "fn" to items in generator "gen" on the worker processes
in an arbitrary order. The items are expected to be lists of arguments to
the function. Returns a results iterator. A result value of type
MaybeResult either indicates a heartbeat of the runner, i.e. indicating
that the runner is still waiting for the result to be computed, or it wraps
- the real result."""
+ the real result.
+
+ Args:
+ process_context_fn: Function executed once by each worker. Expected to
+ return a process-context object. If present, this object is passed
+ as additional argument to each call to fn.
+ process_context_args: List of arguments for the invocation of
+ process_context_fn. All arguments will be pickled and sent beyond the
+ process boundary.
+ """
try:
gen = iter(gen)
self.advance = self._advance_more
@@ -99,7 +116,9 @@ class Pool():
p = Process(target=Worker, args=(fn,
self.work_queue,
self.done_queue,
- self.done))
+ self.done,
+ process_context_fn,
+ process_context_args))
self.processes.append(p)
p.start()
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 85d93285eb..4e1be3e4cf 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -32,24 +32,13 @@ import os
import sys
import time
+from . import execution
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
-def EscapeCommand(command):
- parts = []
- for part in command:
- if ' ' in part:
- # Escape spaces. We may need to escape more characters for this
- # to work properly.
- parts.append('"%s"' % part)
- else:
- parts.append(part)
- return " ".join(parts)
-
-
class ProgressIndicator(object):
def __init__(self):
@@ -83,6 +72,18 @@ class ProgressIndicator(object):
'negative': negative_marker
}
+ def _EscapeCommand(self, test):
+ command = execution.GetCommand(test, self.runner.context)
+ parts = []
+ for part in command:
+ if ' ' in part:
+ # Escape spaces. We may need to escape more characters for this
+ # to work properly.
+ parts.append('"%s"' % part)
+ else:
+ parts.append(part)
+ return " ".join(parts)
+
class IndicatorNotifier(object):
"""Holds a list of progress indicators and notifies them all on events."""
@@ -124,7 +125,7 @@ class SimpleProgressIndicator(ProgressIndicator):
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
- print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
+ print "Command: %s" % self._EscapeCommand(failed)
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
@@ -212,7 +213,7 @@ class CompactProgressIndicator(ProgressIndicator):
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
- print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ print "Command: %s" % self._EscapeCommand(test)
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
@@ -300,7 +301,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ fail_text += "Command: %s" % self._EscapeCommand(test)
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
@@ -335,8 +336,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
{
"name": test.GetLabel(),
"flags": test.flags,
- "command": EscapeCommand(self.runner.GetCommand(test)).replace(
- ABS_PATH_PREFIX, ""),
+ "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration,
} for test in timed_tests[:20]
]
@@ -362,8 +362,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
- "command": EscapeCommand(self.runner.GetCommand(test)).replace(
- ABS_PATH_PREFIX, ""),
+ "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index bfa53c5348..f86106b9d9 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -25,6 +25,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import os
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
@@ -125,10 +126,14 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
target_dict[rule] = result
-def ReadStatusFile(path, variables):
+def ReadContent(path):
with open(path) as f:
global KEYWORDS
- contents = eval(f.read(), KEYWORDS)
+ return eval(f.read(), KEYWORDS)
+
+
+def ReadStatusFile(path, variables):
+ contents = ReadContent(path)
rules = {}
wildcards = {}
@@ -146,3 +151,30 @@ def ReadStatusFile(path, variables):
else:
_ParseOutcomeList(rule, section[rule], rules, variables)
return rules, wildcards
+
+
+def PresubmitCheck(path):
+ contents = ReadContent(path)
+ root_prefix = os.path.basename(os.path.dirname(path)) + "/"
+ status = {"success": True}
+ def _assert(check, message): # Like "assert", but doesn't throw.
+ if not check:
+ print("%s: Error: %s" % (path, message))
+ status["success"] = False
+ try:
+ for section in contents:
+ _assert(type(section) == list, "Section must be a list")
+ _assert(len(section) == 2, "Section list must have exactly 2 entries")
+ section = section[1]
+ _assert(type(section) == dict,
+ "Second entry of section must be a dictionary")
+ for rule in section:
+ _assert(type(rule) == str, "Rule key must be a string")
+ _assert(not rule.startswith(root_prefix),
+ "Suite name prefix must not be used in rule keys")
+ _assert(not rule.endswith('.js'),
+ ".js extension must not be used in rule keys.")
+ return status["success"]
+ except Exception as e:
+ print e
+ return False
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index 449a65aa13..e3d1e232e8 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -41,9 +41,9 @@ ALL_VARIANT_FLAGS = {
"turbofan": [["--turbo"]],
"turbofan_opt": [["--turbo", "--always-opt"]],
"nocrankshaft": [["--nocrankshaft"]],
- "ignition": [["--ignition", "--ignition-filter=*",
- "--ignition-fake-try-catch",
+ "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
"--ignition-fallback-on-eval-and-catch"]],
+ "preparser": [["--min-preparse-length=0"]],
}
# FAST_VARIANTS implies no --always-opt.
@@ -52,13 +52,13 @@ FAST_VARIANT_FLAGS = {
"stress": [["--stress-opt"]],
"turbofan": [["--turbo"]],
"nocrankshaft": [["--nocrankshaft"]],
- "ignition": [["--ignition", "--ignition-filter=*",
- "--ignition-fake-try-catch",
+ "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
"--ignition-fallback-on-eval-and-catch"]],
+ "preparser": [["--min-preparse-length=0"]],
}
ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
- "nocrankshaft", "ignition"])
+ "nocrankshaft", "ignition", "preparser"])
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
@@ -88,14 +88,14 @@ class VariantGenerator(object):
class TestSuite(object):
@staticmethod
- def LoadTestSuite(root):
+ def LoadTestSuite(root, global_init=True):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module("testcfg", f, pathname, description)
return module.GetSuite(name, root)
- except:
+ except ImportError:
# Use default if no testcfg is present.
return GoogleTestSuite(name, root)
finally:
@@ -103,6 +103,8 @@ class TestSuite(object):
f.close()
def __init__(self, name, root):
+ # Note: This might be called concurrently from different processes.
+ # Changing harddisk state should be done in 'SetupWorkingDirectory' below.
self.name = name # string
self.root = root # string containing path
self.tests = None # list of TestCase objects
@@ -110,6 +112,11 @@ class TestSuite(object):
self.wildcards = None # dictionary mapping test paths to list of outcomes
self.total_duration = None # float, assigned on demand
+ def SetupWorkingDirectory(self):
+ # This is called once per test suite object in a multi-process setting.
+ # Multi-process-unsafe work-directory setup can go here.
+ pass
+
def shell(self):
return "d8"
diff --git a/deps/v8/tools/testrunner/network/endpoint.py b/deps/v8/tools/testrunner/network/endpoint.py
index d0950cf5a6..516578ace4 100644
--- a/deps/v8/tools/testrunner/network/endpoint.py
+++ b/deps/v8/tools/testrunner/network/endpoint.py
@@ -93,6 +93,7 @@ def Execute(workspace, ctx, tests, sock, server):
suite = testsuite.TestSuite.LoadTestSuite(
os.path.join(workspace, "test", root))
if suite:
+ suite.SetupWorkingDirectory()
suites.append(suite)
suites_dict = {}
diff --git a/deps/v8/tools/testrunner/objects/context.py b/deps/v8/tools/testrunner/objects/context.py
index b76e562809..c9853d07cc 100644
--- a/deps/v8/tools/testrunner/objects/context.py
+++ b/deps/v8/tools/testrunner/objects/context.py
@@ -30,7 +30,7 @@ class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max,
- predictable, no_harness):
+ predictable, no_harness, use_perf_data):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -47,12 +47,14 @@ class Context():
self.rerun_failures_max = rerun_failures_max
self.predictable = predictable
self.no_harness = no_harness
+ self.use_perf_data = use_perf_data
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
self.command_prefix, self.extra_flags, self.noi18n,
self.random_seed, self.no_sorting, self.rerun_failures_count,
- self.rerun_failures_max, self.predictable, self.no_harness]
+ self.rerun_failures_max, self.predictable, self.no_harness,
+ self.use_perf_data]
@staticmethod
def Unpack(packed):
@@ -60,4 +62,4 @@ class Context():
return Context(packed[0], packed[1], None, packed[2], False,
packed[3], packed[4], packed[5], packed[6], packed[7],
packed[8], packed[9], packed[10], packed[11], packed[12],
- packed[13])
+ packed[13], packed[14])
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 0ab06361b1..fa2265c070 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -86,3 +86,11 @@ class TestCase(object):
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
+
+ def __getstate__(self):
+ """Representation to pickle test cases.
+
+ The original suite won't be sent beyond process boundaries. Instead
+ send the name only and retrieve a process-local suite later.
+ """
+ return dict(self.__dict__, suite=self.suite.name)
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index e07bf8cc27..2403f7d782 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -27,6 +27,25 @@ DEFAULT_BOTS = [
'v8_nexus10_perf_try',
]
+PUBLIC_BENCHMARKS = [
+ 'arewefastyet',
+ 'embenchen',
+ 'emscripten',
+ 'compile',
+ 'jetstream',
+ 'jsbench',
+ 'jstests',
+ 'kraken_orig',
+ 'massive',
+ 'memory',
+ 'octane',
+ 'octane-pr',
+ 'octane-tf',
+ 'octane-tf-pr',
+ 'simdjs',
+ 'sunspider',
+]
+
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
@@ -47,6 +66,16 @@ def main():
print 'Please specify the benchmarks to run as arguments.'
return 1
+ for benchmark in options.benchmarks:
+ if benchmark not in PUBLIC_BENCHMARKS:
+ print ('%s not found in our benchmark list. The respective trybot might '
+ 'fail, unless you run something this script isn\'t aware of. '
+ 'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
+ print 'Proceed anyways? [Y/n] ',
+ answer = sys.stdin.readline().strip()
+ if answer != "" and answer != "Y" and answer != "y":
+ return 1
+
assert '"' not in options.extra_flags and '\'' not in options.extra_flags, (
'Invalid flag specification.')
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index f3d5d15ab5..0461bcbb66 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -51,259 +51,247 @@ INSTANCE_TYPES = {
22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
- 134: "FLOAT32X4_TYPE",
- 129: "MAP_TYPE",
- 130: "CODE_TYPE",
+ 130: "SIMD128_VALUE_TYPE",
+ 132: "MAP_TYPE",
+ 133: "CODE_TYPE",
131: "ODDBALL_TYPE",
- 182: "CELL_TYPE",
- 184: "PROPERTY_CELL_TYPE",
- 132: "HEAP_NUMBER_TYPE",
- 133: "MUTABLE_HEAP_NUMBER_TYPE",
+ 173: "CELL_TYPE",
+ 176: "PROPERTY_CELL_TYPE",
+ 129: "HEAP_NUMBER_TYPE",
+ 134: "MUTABLE_HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "BYTECODE_ARRAY_TYPE",
138: "FREE_SPACE_TYPE",
- 139: "EXTERNAL_INT8_ARRAY_TYPE",
- 140: "EXTERNAL_UINT8_ARRAY_TYPE",
- 141: "EXTERNAL_INT16_ARRAY_TYPE",
- 142: "EXTERNAL_UINT16_ARRAY_TYPE",
- 143: "EXTERNAL_INT32_ARRAY_TYPE",
- 144: "EXTERNAL_UINT32_ARRAY_TYPE",
- 145: "EXTERNAL_FLOAT32_ARRAY_TYPE",
- 146: "EXTERNAL_FLOAT64_ARRAY_TYPE",
- 147: "EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE",
- 148: "FIXED_INT8_ARRAY_TYPE",
- 149: "FIXED_UINT8_ARRAY_TYPE",
- 150: "FIXED_INT16_ARRAY_TYPE",
- 151: "FIXED_UINT16_ARRAY_TYPE",
- 152: "FIXED_INT32_ARRAY_TYPE",
- 153: "FIXED_UINT32_ARRAY_TYPE",
- 154: "FIXED_FLOAT32_ARRAY_TYPE",
- 155: "FIXED_FLOAT64_ARRAY_TYPE",
- 156: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 158: "FILLER_TYPE",
- 159: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 160: "DECLARED_ACCESSOR_INFO_TYPE",
- 161: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 162: "ACCESSOR_PAIR_TYPE",
- 163: "ACCESS_CHECK_INFO_TYPE",
- 164: "INTERCEPTOR_INFO_TYPE",
- 165: "CALL_HANDLER_INFO_TYPE",
- 166: "FUNCTION_TEMPLATE_INFO_TYPE",
- 167: "OBJECT_TEMPLATE_INFO_TYPE",
- 168: "SIGNATURE_INFO_TYPE",
- 169: "TYPE_SWITCH_INFO_TYPE",
- 171: "ALLOCATION_MEMENTO_TYPE",
- 170: "ALLOCATION_SITE_TYPE",
- 172: "SCRIPT_TYPE",
- 173: "CODE_CACHE_TYPE",
- 174: "POLYMORPHIC_CODE_CACHE_TYPE",
- 175: "TYPE_FEEDBACK_INFO_TYPE",
- 176: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 177: "BOX_TYPE",
- 185: "PROTOTYPE_INFO_TYPE",
- 180: "FIXED_ARRAY_TYPE",
- 157: "FIXED_DOUBLE_ARRAY_TYPE",
- 181: "SHARED_FUNCTION_INFO_TYPE",
- 183: "WEAK_CELL_TYPE",
- 189: "JS_MESSAGE_OBJECT_TYPE",
- 188: "JS_VALUE_TYPE",
- 190: "JS_DATE_TYPE",
- 191: "JS_OBJECT_TYPE",
- 192: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 193: "JS_GENERATOR_OBJECT_TYPE",
- 194: "JS_MODULE_TYPE",
- 195: "JS_GLOBAL_OBJECT_TYPE",
- 196: "JS_BUILTINS_OBJECT_TYPE",
- 197: "JS_GLOBAL_PROXY_TYPE",
- 198: "JS_ARRAY_TYPE",
- 199: "JS_ARRAY_BUFFER_TYPE",
- 200: "JS_TYPED_ARRAY_TYPE",
- 201: "JS_DATA_VIEW_TYPE",
- 187: "JS_PROXY_TYPE",
- 202: "JS_SET_TYPE",
- 203: "JS_MAP_TYPE",
- 204: "JS_SET_ITERATOR_TYPE",
- 205: "JS_MAP_ITERATOR_TYPE",
- 206: "JS_WEAK_MAP_TYPE",
- 207: "JS_WEAK_SET_TYPE",
- 208: "JS_REGEXP_TYPE",
- 209: "JS_FUNCTION_TYPE",
- 186: "JS_FUNCTION_PROXY_TYPE",
- 178: "DEBUG_INFO_TYPE",
- 179: "BREAK_POINT_INFO_TYPE",
+ 139: "FIXED_INT8_ARRAY_TYPE",
+ 140: "FIXED_UINT8_ARRAY_TYPE",
+ 141: "FIXED_INT16_ARRAY_TYPE",
+ 142: "FIXED_UINT16_ARRAY_TYPE",
+ 143: "FIXED_INT32_ARRAY_TYPE",
+ 144: "FIXED_UINT32_ARRAY_TYPE",
+ 145: "FIXED_FLOAT32_ARRAY_TYPE",
+ 146: "FIXED_FLOAT64_ARRAY_TYPE",
+ 147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+ 149: "FILLER_TYPE",
+ 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 151: "DECLARED_ACCESSOR_INFO_TYPE",
+ 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 153: "ACCESSOR_PAIR_TYPE",
+ 154: "ACCESS_CHECK_INFO_TYPE",
+ 155: "INTERCEPTOR_INFO_TYPE",
+ 156: "CALL_HANDLER_INFO_TYPE",
+ 157: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 158: "OBJECT_TEMPLATE_INFO_TYPE",
+ 159: "SIGNATURE_INFO_TYPE",
+ 160: "TYPE_SWITCH_INFO_TYPE",
+ 162: "ALLOCATION_MEMENTO_TYPE",
+ 161: "ALLOCATION_SITE_TYPE",
+ 163: "SCRIPT_TYPE",
+ 164: "CODE_CACHE_TYPE",
+ 165: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 166: "TYPE_FEEDBACK_INFO_TYPE",
+ 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 168: "BOX_TYPE",
+ 177: "PROTOTYPE_INFO_TYPE",
+ 178: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
+ 171: "FIXED_ARRAY_TYPE",
+ 148: "FIXED_DOUBLE_ARRAY_TYPE",
+ 172: "SHARED_FUNCTION_INFO_TYPE",
+ 174: "WEAK_CELL_TYPE",
+ 175: "TRANSITION_ARRAY_TYPE",
+ 181: "JS_MESSAGE_OBJECT_TYPE",
+ 180: "JS_VALUE_TYPE",
+ 182: "JS_DATE_TYPE",
+ 183: "JS_OBJECT_TYPE",
+ 184: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 185: "JS_GENERATOR_OBJECT_TYPE",
+ 186: "JS_MODULE_TYPE",
+ 187: "JS_GLOBAL_OBJECT_TYPE",
+ 188: "JS_GLOBAL_PROXY_TYPE",
+ 189: "JS_ARRAY_TYPE",
+ 190: "JS_ARRAY_BUFFER_TYPE",
+ 191: "JS_TYPED_ARRAY_TYPE",
+ 192: "JS_DATA_VIEW_TYPE",
+ 179: "JS_PROXY_TYPE",
+ 193: "JS_SET_TYPE",
+ 194: "JS_MAP_TYPE",
+ 195: "JS_SET_ITERATOR_TYPE",
+ 196: "JS_MAP_ITERATOR_TYPE",
+ 197: "JS_ITERATOR_RESULT_TYPE",
+ 198: "JS_WEAK_MAP_TYPE",
+ 199: "JS_WEAK_SET_TYPE",
+ 200: "JS_PROMISE_TYPE",
+ 201: "JS_REGEXP_TYPE",
+ 202: "JS_BOUND_FUNCTION_TYPE",
+ 203: "JS_FUNCTION_TYPE",
+ 169: "DEBUG_INFO_TYPE",
+ 170: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
- 0x080ad: (129, "MetaMap"),
+ 0x080ad: (132, "MetaMap"),
0x080d9: (131, "NullMap"),
- 0x08105: (180, "FixedArrayMap"),
+ 0x08105: (171, "FixedArrayMap"),
0x08131: (4, "OneByteInternalizedStringMap"),
- 0x0815d: (183, "WeakCellMap"),
- 0x08189: (131, "TheHoleMap"),
- 0x081b5: (138, "FreeSpaceMap"),
- 0x081e1: (158, "OnePointerFillerMap"),
- 0x0820d: (158, "TwoPointerFillerMap"),
- 0x08239: (131, "UndefinedMap"),
- 0x08265: (132, "HeapNumberMap"),
- 0x08291: (131, "BooleanMap"),
- 0x082bd: (131, "UninitializedMap"),
- 0x082e9: (182, "CellMap"),
- 0x08315: (184, "GlobalPropertyCellMap"),
- 0x08341: (181, "SharedFunctionInfoMap"),
- 0x0836d: (133, "MutableHeapNumberMap"),
- 0x08399: (134, "Float32x4Map"),
- 0x083c5: (180, "NativeContextMap"),
- 0x083f1: (130, "CodeMap"),
- 0x0841d: (180, "ScopeInfoMap"),
- 0x08449: (180, "FixedCOWArrayMap"),
- 0x08475: (157, "FixedDoubleArrayMap"),
- 0x084a1: (68, "OneByteStringMap"),
- 0x084cd: (180, "FunctionContextMap"),
- 0x084f9: (131, "NoInterceptorResultSentinelMap"),
- 0x08525: (131, "ArgumentsMarkerMap"),
- 0x08551: (131, "ExceptionMap"),
- 0x0857d: (131, "TerminationExceptionMap"),
- 0x085a9: (180, "HashTableMap"),
- 0x085d5: (180, "OrderedHashTableMap"),
- 0x08601: (128, "SymbolMap"),
- 0x0862d: (64, "StringMap"),
- 0x08659: (69, "ConsOneByteStringMap"),
- 0x08685: (65, "ConsStringMap"),
- 0x086b1: (67, "SlicedStringMap"),
- 0x086dd: (71, "SlicedOneByteStringMap"),
- 0x08709: (66, "ExternalStringMap"),
- 0x08735: (74, "ExternalStringWithOneByteDataMap"),
- 0x08761: (70, "ExternalOneByteStringMap"),
- 0x0878d: (70, "NativeSourceStringMap"),
- 0x087b9: (82, "ShortExternalStringMap"),
- 0x087e5: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x08811: (0, "InternalizedStringMap"),
- 0x0883d: (2, "ExternalInternalizedStringMap"),
- 0x08869: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x08895: (6, "ExternalOneByteInternalizedStringMap"),
- 0x088c1: (18, "ShortExternalInternalizedStringMap"),
- 0x088ed: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08919: (22, "ShortExternalOneByteInternalizedStringMap"),
- 0x08945: (86, "ShortExternalOneByteStringMap"),
- 0x08971: (139, "ExternalInt8ArrayMap"),
- 0x0899d: (140, "ExternalUint8ArrayMap"),
- 0x089c9: (141, "ExternalInt16ArrayMap"),
- 0x089f5: (142, "ExternalUint16ArrayMap"),
- 0x08a21: (143, "ExternalInt32ArrayMap"),
- 0x08a4d: (144, "ExternalUint32ArrayMap"),
- 0x08a79: (145, "ExternalFloat32ArrayMap"),
- 0x08aa5: (146, "ExternalFloat64ArrayMap"),
- 0x08ad1: (147, "ExternalUint8ClampedArrayMap"),
- 0x08afd: (149, "FixedUint8ArrayMap"),
- 0x08b29: (148, "FixedInt8ArrayMap"),
- 0x08b55: (151, "FixedUint16ArrayMap"),
- 0x08b81: (150, "FixedInt16ArrayMap"),
- 0x08bad: (153, "FixedUint32ArrayMap"),
- 0x08bd9: (152, "FixedInt32ArrayMap"),
- 0x08c05: (154, "FixedFloat32ArrayMap"),
- 0x08c31: (155, "FixedFloat64ArrayMap"),
- 0x08c5d: (156, "FixedUint8ClampedArrayMap"),
- 0x08c89: (180, "SloppyArgumentsElementsMap"),
- 0x08cb5: (180, "CatchContextMap"),
- 0x08ce1: (180, "WithContextMap"),
- 0x08d0d: (180, "BlockContextMap"),
- 0x08d39: (180, "ModuleContextMap"),
- 0x08d65: (180, "ScriptContextMap"),
- 0x08d91: (180, "ScriptContextTableMap"),
- 0x08dbd: (189, "JSMessageObjectMap"),
- 0x08de9: (135, "ForeignMap"),
- 0x08e15: (191, "NeanderMap"),
- 0x08e41: (191, "ExternalMap"),
- 0x08e6d: (171, "AllocationMementoMap"),
- 0x08e99: (170, "AllocationSiteMap"),
- 0x08ec5: (174, "PolymorphicCodeCacheMap"),
- 0x08ef1: (172, "ScriptMap"),
- 0x09101: (161, "ExecutableAccessorInfoMap"),
- 0x09159: (162, "AccessorPairMap"),
- 0x09209: (185, "PrototypeInfoMap"),
- 0x09839: (137, "BytecodeArrayMap"),
- 0x09865: (177, "BoxMap"),
- 0x09891: (163, "AccessCheckInfoMap"),
- 0x098bd: (164, "InterceptorInfoMap"),
- 0x098e9: (165, "CallHandlerInfoMap"),
- 0x09915: (166, "FunctionTemplateInfoMap"),
- 0x09941: (167, "ObjectTemplateInfoMap"),
- 0x0996d: (169, "TypeSwitchInfoMap"),
- 0x09999: (173, "CodeCacheMap"),
- 0x099c5: (175, "TypeFeedbackInfoMap"),
- 0x099f1: (176, "AliasedArgumentsEntryMap"),
- 0x09a1d: (178, "DebugInfoMap"),
- 0x09a49: (179, "BreakPointInfoMap"),
+ 0x0815d: (138, "FreeSpaceMap"),
+ 0x08189: (149, "OnePointerFillerMap"),
+ 0x081b5: (149, "TwoPointerFillerMap"),
+ 0x081e1: (131, "UndefinedMap"),
+ 0x0820d: (129, "HeapNumberMap"),
+ 0x08239: (131, "TheHoleMap"),
+ 0x08265: (131, "BooleanMap"),
+ 0x08291: (131, "UninitializedMap"),
+ 0x082bd: (173, "CellMap"),
+ 0x082e9: (176, "GlobalPropertyCellMap"),
+ 0x08315: (172, "SharedFunctionInfoMap"),
+ 0x08341: (134, "MutableHeapNumberMap"),
+ 0x0836d: (130, "Float32x4Map"),
+ 0x08399: (130, "Int32x4Map"),
+ 0x083c5: (130, "Uint32x4Map"),
+ 0x083f1: (130, "Bool32x4Map"),
+ 0x0841d: (130, "Int16x8Map"),
+ 0x08449: (130, "Uint16x8Map"),
+ 0x08475: (130, "Bool16x8Map"),
+ 0x084a1: (130, "Int8x16Map"),
+ 0x084cd: (130, "Uint8x16Map"),
+ 0x084f9: (130, "Bool8x16Map"),
+ 0x08525: (171, "NativeContextMap"),
+ 0x08551: (133, "CodeMap"),
+ 0x0857d: (171, "ScopeInfoMap"),
+ 0x085a9: (171, "FixedCOWArrayMap"),
+ 0x085d5: (148, "FixedDoubleArrayMap"),
+ 0x08601: (174, "WeakCellMap"),
+ 0x0862d: (175, "TransitionArrayMap"),
+ 0x08659: (68, "OneByteStringMap"),
+ 0x08685: (171, "FunctionContextMap"),
+ 0x086b1: (131, "NoInterceptorResultSentinelMap"),
+ 0x086dd: (131, "ArgumentsMarkerMap"),
+ 0x08709: (131, "ExceptionMap"),
+ 0x08735: (131, "TerminationExceptionMap"),
+ 0x08761: (171, "HashTableMap"),
+ 0x0878d: (171, "OrderedHashTableMap"),
+ 0x087b9: (128, "SymbolMap"),
+ 0x087e5: (64, "StringMap"),
+ 0x08811: (69, "ConsOneByteStringMap"),
+ 0x0883d: (65, "ConsStringMap"),
+ 0x08869: (67, "SlicedStringMap"),
+ 0x08895: (71, "SlicedOneByteStringMap"),
+ 0x088c1: (66, "ExternalStringMap"),
+ 0x088ed: (74, "ExternalStringWithOneByteDataMap"),
+ 0x08919: (70, "ExternalOneByteStringMap"),
+ 0x08945: (70, "NativeSourceStringMap"),
+ 0x08971: (82, "ShortExternalStringMap"),
+ 0x0899d: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x089c9: (0, "InternalizedStringMap"),
+ 0x089f5: (2, "ExternalInternalizedStringMap"),
+ 0x08a21: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x08a4d: (6, "ExternalOneByteInternalizedStringMap"),
+ 0x08a79: (18, "ShortExternalInternalizedStringMap"),
+ 0x08aa5: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08ad1: (22, "ShortExternalOneByteInternalizedStringMap"),
+ 0x08afd: (86, "ShortExternalOneByteStringMap"),
+ 0x08b29: (140, "FixedUint8ArrayMap"),
+ 0x08b55: (139, "FixedInt8ArrayMap"),
+ 0x08b81: (142, "FixedUint16ArrayMap"),
+ 0x08bad: (141, "FixedInt16ArrayMap"),
+ 0x08bd9: (144, "FixedUint32ArrayMap"),
+ 0x08c05: (143, "FixedInt32ArrayMap"),
+ 0x08c31: (145, "FixedFloat32ArrayMap"),
+ 0x08c5d: (146, "FixedFloat64ArrayMap"),
+ 0x08c89: (147, "FixedUint8ClampedArrayMap"),
+ 0x08cb5: (171, "SloppyArgumentsElementsMap"),
+ 0x08ce1: (171, "CatchContextMap"),
+ 0x08d0d: (171, "WithContextMap"),
+ 0x08d39: (171, "BlockContextMap"),
+ 0x08d65: (171, "ModuleContextMap"),
+ 0x08d91: (171, "ScriptContextMap"),
+ 0x08dbd: (171, "ScriptContextTableMap"),
+ 0x08de9: (181, "JSMessageObjectMap"),
+ 0x08e15: (135, "ForeignMap"),
+ 0x08e41: (183, "NeanderMap"),
+ 0x08e6d: (183, "ExternalMap"),
+ 0x08e99: (162, "AllocationMementoMap"),
+ 0x08ec5: (161, "AllocationSiteMap"),
+ 0x08ef1: (165, "PolymorphicCodeCacheMap"),
+ 0x08f1d: (163, "ScriptMap"),
+ 0x08f75: (137, "BytecodeArrayMap"),
+ 0x08fa1: (168, "BoxMap"),
+ 0x08fcd: (152, "ExecutableAccessorInfoMap"),
+ 0x08ff9: (153, "AccessorPairMap"),
+ 0x09025: (154, "AccessCheckInfoMap"),
+ 0x09051: (155, "InterceptorInfoMap"),
+ 0x0907d: (156, "CallHandlerInfoMap"),
+ 0x090a9: (157, "FunctionTemplateInfoMap"),
+ 0x090d5: (158, "ObjectTemplateInfoMap"),
+ 0x09101: (164, "CodeCacheMap"),
+ 0x0912d: (166, "TypeFeedbackInfoMap"),
+ 0x09159: (167, "AliasedArgumentsEntryMap"),
+ 0x09185: (169, "DebugInfoMap"),
+ 0x091b1: (170, "BreakPointInfoMap"),
+ 0x091dd: (177, "PrototypeInfoMap"),
+ 0x09209: (178, "SloppyBlockWithEvalContextExtensionMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
("OLD_SPACE", 0x08081): "NullValue",
- ("OLD_SPACE", 0x08091): "EmptyDescriptorArray",
- ("OLD_SPACE", 0x08099): "EmptyFixedArray",
- ("OLD_SPACE", 0x080bd): "TheHoleValue",
- ("OLD_SPACE", 0x080dd): "UndefinedValue",
- ("OLD_SPACE", 0x08105): "NanValue",
- ("OLD_SPACE", 0x08115): "TrueValue",
- ("OLD_SPACE", 0x08135): "FalseValue",
- ("OLD_SPACE", 0x08159): "empty_string",
- ("OLD_SPACE", 0x08165): "UninitializedValue",
- ("OLD_SPACE", 0x08191): "EmptyByteArray",
- ("OLD_SPACE", 0x08199): "NoInterceptorResultSentinel",
- ("OLD_SPACE", 0x081d5): "ArgumentsMarker",
- ("OLD_SPACE", 0x08201): "Exception",
- ("OLD_SPACE", 0x08229): "TerminationException",
- ("OLD_SPACE", 0x0825d): "NumberStringCache",
- ("OLD_SPACE", 0x08a65): "SingleCharacterStringCache",
- ("OLD_SPACE", 0x08efd): "StringSplitCache",
- ("OLD_SPACE", 0x09305): "RegExpMultipleCache",
- ("OLD_SPACE", 0x0970d): "EmptyExternalInt8Array",
- ("OLD_SPACE", 0x09719): "EmptyExternalUint8Array",
- ("OLD_SPACE", 0x09725): "EmptyExternalInt16Array",
- ("OLD_SPACE", 0x09731): "EmptyExternalUint16Array",
- ("OLD_SPACE", 0x0973d): "EmptyExternalInt32Array",
- ("OLD_SPACE", 0x09749): "EmptyExternalUint32Array",
- ("OLD_SPACE", 0x09755): "EmptyExternalFloat32Array",
- ("OLD_SPACE", 0x09761): "EmptyExternalFloat64Array",
- ("OLD_SPACE", 0x0976d): "EmptyExternalUint8ClampedArray",
- ("OLD_SPACE", 0x09779): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x09789): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x09799): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x097a9): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x097b9): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x097c9): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x097d9): "EmptyFixedFloat32Array",
- ("OLD_SPACE", 0x097e9): "EmptyFixedFloat64Array",
- ("OLD_SPACE", 0x097f9): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x0980d): "InfinityValue",
- ("OLD_SPACE", 0x0981d): "MinusZeroValue",
- ("OLD_SPACE", 0x0982d): "MinusInfinityValue",
- ("OLD_SPACE", 0x09839): "MessageListeners",
- ("OLD_SPACE", 0x09855): "CodeStubs",
- ("OLD_SPACE", 0x0e52d): "ArrayProtector",
- ("OLD_SPACE", 0x0e9a1): "KeyedLoadDummyVector",
- ("OLD_SPACE", 0x13ded): "NonMonomorphicCache",
- ("OLD_SPACE", 0x14131): "PolymorphicCodeCache",
- ("OLD_SPACE", 0x14139): "NativesSourceCache",
- ("OLD_SPACE", 0x14429): "ExperimentalNativesSourceCache",
- ("OLD_SPACE", 0x14461): "ExtraNativesSourceCache",
- ("OLD_SPACE", 0x1446d): "CodeStubNativesSourceCache",
- ("OLD_SPACE", 0x1448d): "EmptyScript",
- ("OLD_SPACE", 0x144cd): "IntrinsicFunctionNames",
- ("OLD_SPACE", 0x240e1): "UndefinedCell",
- ("OLD_SPACE", 0x240e9): "ObservationState",
- ("OLD_SPACE", 0x240f5): "SymbolRegistry",
- ("OLD_SPACE", 0x24f9d): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x24fc5): "AllocationSitesScratchpad",
- ("OLD_SPACE", 0x253cd): "WeakObjectToCodeTable",
- ("OLD_SPACE", 0x25461): "EmptyPropertyCell",
- ("OLD_SPACE", 0x25471): "CodeStubContext",
- ("OLD_SPACE", 0x2ba11): "CodeStubExportsObject",
- ("OLD_SPACE", 0x2be89): "EmptyBytecodeArray",
- ("OLD_SPACE", 0x594dd): "StringTable",
- ("CODE_SPACE", 0x16341): "JsEntryCode",
- ("CODE_SPACE", 0x26a61): "JsConstructEntryCode",
+ ("OLD_SPACE", 0x08095): "EmptyDescriptorArray",
+ ("OLD_SPACE", 0x0809d): "EmptyFixedArray",
+ ("OLD_SPACE", 0x080c9): "UndefinedValue",
+ ("OLD_SPACE", 0x080f5): "NanValue",
+ ("OLD_SPACE", 0x08105): "TheHoleValue",
+ ("OLD_SPACE", 0x08129): "TrueValue",
+ ("OLD_SPACE", 0x08161): "FalseValue",
+ ("OLD_SPACE", 0x08189): "empty_string",
+ ("OLD_SPACE", 0x08195): "hidden_string",
+ ("OLD_SPACE", 0x081a1): "UninitializedValue",
+ ("OLD_SPACE", 0x081d1): "EmptyByteArray",
+ ("OLD_SPACE", 0x081d9): "NoInterceptorResultSentinel",
+ ("OLD_SPACE", 0x08219): "ArgumentsMarker",
+ ("OLD_SPACE", 0x08249): "Exception",
+ ("OLD_SPACE", 0x08275): "TerminationException",
+ ("OLD_SPACE", 0x082ad): "NumberStringCache",
+ ("OLD_SPACE", 0x08ab5): "SingleCharacterStringCache",
+ ("OLD_SPACE", 0x08f4d): "StringSplitCache",
+ ("OLD_SPACE", 0x09355): "RegExpMultipleCache",
+ ("OLD_SPACE", 0x0975d): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x0976d): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x0977d): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x0978d): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x0979d): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x097ad): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x097bd): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x097cd): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x097dd): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x097ed): "InfinityValue",
+ ("OLD_SPACE", 0x097fd): "MinusZeroValue",
+ ("OLD_SPACE", 0x0980d): "MinusInfinityValue",
+ ("OLD_SPACE", 0x0981d): "MessageListeners",
+ ("OLD_SPACE", 0x09839): "CodeStubs",
+ ("OLD_SPACE", 0x10201): "DummyVector",
+ ("OLD_SPACE", 0x1403d): "NonMonomorphicCache",
+ ("OLD_SPACE", 0x14651): "PolymorphicCodeCache",
+ ("OLD_SPACE", 0x14659): "NativesSourceCache",
+ ("OLD_SPACE", 0x148f5): "ExperimentalNativesSourceCache",
+ ("OLD_SPACE", 0x14929): "ExtraNativesSourceCache",
+ ("OLD_SPACE", 0x14949): "ExperimentalExtraNativesSourceCache",
+ ("OLD_SPACE", 0x14955): "EmptyScript",
+ ("OLD_SPACE", 0x14995): "IntrinsicFunctionNames",
+ ("OLD_SPACE", 0x2e73d): "UndefinedCell",
+ ("OLD_SPACE", 0x2e745): "ObservationState",
+ ("OLD_SPACE", 0x2e751): "ScriptList",
+ ("OLD_SPACE", 0x2e8d9): "ClearedOptimizedCodeMap",
+ ("OLD_SPACE", 0x2e8e5): "EmptyWeakCell",
+ ("OLD_SPACE", 0x54715): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x54761): "WeakObjectToCodeTable",
+ ("OLD_SPACE", 0x54875): "ArrayProtector",
+ ("OLD_SPACE", 0x54885): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x54895): "NoScriptSharedFunctionInfos",
+ ("OLD_SPACE", 0x5711d): "InterpreterTable",
+ ("OLD_SPACE", 0x57325): "EmptyBytecodeArray",
+ ("OLD_SPACE", 0x5a2d1): "StringTable",
+ ("CODE_SPACE", 0x1a2a1): "JsEntryCode",
+ ("CODE_SPACE", 0x1f081): "JsConstructEntryCode",
}
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 183e4e5f61..687be113dd 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,4 +5,4 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up......
+The Smi looked at them when a crazy v8-autoroll account showed up.....